From 321cd1053a464ff07313f6cb0ff0eb56efdec1dd Mon Sep 17 00:00:00 2001
From: cg-cnu
Date: Fri, 1 Sep 2017 10:33:34 +0530
Subject: [PATCH 1/3] Corrected documentation

Fixes #18
---
 README.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 08c26c4..53f79ac 100644
--- a/README.md
+++ b/README.md
@@ -99,8 +99,8 @@ Raised when there is an error backing up a table

__Example__

```
backup.on('error', function(data) {
-  console.log('Error backing up ' + data.tableName);
-  console.log(data.error);
+  console.log('Error backing up ' + data.table);
+  console.log(data.err);
});
```

@@ -110,7 +110,7 @@ Raised when the backup of a table is beginning

__Example__

```
-backup.on('start-backup', function(tableName) {
+backup.on('start-backup', function(tableName, startTime) {
  console.log('Starting to copy table ' + tableName);
});
```

@@ -121,7 +121,7 @@ Raised when the backup of a table is finished

__Example__

```
-backup.on('end-backup', function(tableName) {
+backup.on('end-backup', function(tableName, endTime) {
  console.log('Done copying table ' + tableName);
});
```

From 8d274f8b373a24607f57db94b80824ab2d9ccdad Mon Sep 17 00:00:00 2001
From: sreenivas alapati
Date: Mon, 4 Sep 2017 09:32:06 +0530
Subject: [PATCH 2/3] fixed end-backup example
---
 README.md | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index 53f79ac..f4fd2c8 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ Raised when the backup of a table is finished

__Example__

```
-backup.on('end-backup', function(tableName, endTime) {
+backup.on('end-backup', function(tableName, duration) {
  console.log('Done copying table ' + tableName);
});
```

@@ -146,8 +146,8 @@ __Arguments__

* `tableName` - name of the table to backup
* `backupPath` - (optional) the path to use for the backup.
-  The iterator is passed a `callback(err)` which must be called once it has
-  completed. If no error has occurred, the `callback` should be run without
+  The iterator is passed a `callback(err)` which must be called once it has
+  completed. If no error has occurred, the `callback` should be run without
   arguments or with an explicit `null` argument.
* `callback(err)` - A callback which is called when the table has finished
  backing up, or an error occurs

@@ -157,13 +157,13 @@

## Restore S3 backups back to Dynamo.

-`dynamo-restore-from-s3` is a utility that restores backups in S3 back to dynamo. It streams data down from S3 and throttles the download speed to match the rate of batch writes to Dynamo.
+`dynamo-restore-from-s3` is a utility that restores backups in S3 back to dynamo. It streams data down from S3 and throttles the download speed to match the rate of batch writes to Dynamo.

It is suitable for restoring large tables without needing to write to disk or use a large amount of memory. Use it on an AWS EC2 instance for best results and to minimise network latency; this should yield restore speeds of around 15 min per GB.

Use `--overwrite` if the table already exists. Otherwise it will attempt to generate the table on the fly.

-Can be run as a command line script or as an npm module.
+Can be run as a command line script or as an npm module.

# Command line usage

```
# Restore over existing table (cmd.exe).
- > node ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite
+ > node ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite

# Restore over existing table (shell).
- $ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite
+ $ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite

# Restore over existing table, 1000 concurrent requests. Stop if any batch fails 1000 times.
$ ./bin/dynamo-restore-from-s3 -t acme-customers -c 1000 -s s3://my-backups/acme-customers.json --overwrite -sf

# Restore over existing table, 1000 concurrent requests. When finished, set read capacity to 50 and write capacity to 10 (both needed).
$ ./bin/dynamo-restore-from-s3 -t acme-customers -c 1000 -s s3://my-backups/acme-customers.json --overwrite --readcapacity 50 --writecapacity 10

- # Auto-generate table (determine PK from backup).
+ # Auto-generate table (determine PK from backup).
$ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json

# Auto-generate table with partition and sort key.
- $ ./bin/dynamo-restore-from-s3 -t acme-orders -s s3://my-backups/acme-orders.json -pk customerId -sk createDate
+ $ ./bin/dynamo-restore-from-s3 -t acme-orders -s s3://my-backups/acme-orders.json -pk customerId -sk createDate

# Auto-generate table, defined PK. Concurrency 2000 (~ 2GB backup).
- $ ./bin/dynamo-restore-from-s3 -t acme-orders -pk orderId -c 2000 -s s3://my-backups/acme-orders.json
+ $ ./bin/dynamo-restore-from-s3 -t acme-orders -pk orderId -c 2000 -s s3://my-backups/acme-orders.json

# Auto-generate table. 2000 write units during restore. When finished, set 100 read units and 50 write units (both needed).
$ ./bin/dynamo-restore-from-s3 -t acme-orders -c 2000 -s s3://my-backups/acme-orders.json --readcapacity 100 --writecapacity 50

# Auto-generate table. Concurrency 50 (10 MB backup or less).
- $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -s s3://my-backups/acme-orders.json
+ $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -s s3://my-backups/acme-orders.json

# Auto-generate table. Concurrency 50. Stop process if any batch fails 50 times.
- $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -sf -s s3://my-backups/acme-orders.json
+ $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -sf -s s3://my-backups/acme-orders.json
```

@@ -313,7 +313,7 @@ __Example__

```
restore.on('send-batch', function(batches, requests, streamMeta) {
  console.log('Batch Sent');
-  console.log('Num cached batches: ', batches);
+  console.log('Num cached batches: ', batches);
  console.log('Num requests in flight: ', requests);
  console.log('Stream metadata: ', JSON.stringify(streamMeta));
});

From 2e4088847ca20fe11e8e21a53658324927a4c9da Mon Sep 17 00:00:00 2001
From: sreenivas alapati
Date: Mon, 4 Sep 2017 09:35:58 +0530
Subject: [PATCH 3/3] fixed end-backup doc