diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..8758d54 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,5 @@ +FROM node:7.9.0-alpine + +# Add project +COPY . /home/root +RUN cd /home/root && npm install diff --git a/README.md b/README.md index 08c26c4..c7ab1af 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ Can be run as a command line script or as an npm module. -p, --backup-path backup path to store table dumps in. default is DynamoDB-backup-YYYY-MM-DD-HH-mm-ss -e, --base64-encode-binary if passed, encode binary fields in base64 before exporting -d, --save-datapipeline-format save in format compatible with the AWS datapipeline import. Default to false (save as exported by DynamoDb) + -f, --save-schema save table schema. Default to true --aws-key AWS access key. Will use AWS_ACCESS_KEY_ID env var if --aws-key not set --aws-secret AWS secret key. Will use AWS_SECRET_ACCESS_KEY env var if --aws-secret not set --aws-region AWS region. Will use AWS_DEFAULT_REGION env var if --aws-region not set @@ -161,7 +162,7 @@ __Arguments__ It is suitable for restoring large tables without needing to write to disk or use a large amount of memory. Use it on an AWS EC2 instance for best results and to minimise network latency, this should yield restore speeds of around 15min per GB. -Use `--overwrite` if the table already exists. Otherwise it will attempt to generate table on the fly. +Use `--overwrite` if the table already exists. Otherwise it will attempt to generate table on the fly using table.schema.json created on the backup. Can be run as a command line script or as an npm module. 
diff --git a/backup.sh b/backup.sh new file mode 100755 index 0000000..2b670ae --- /dev/null +++ b/backup.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./home/root/bin/dynamo-backup-to-s3 --bucket uniplaces.com.backups -p $(date +%Y-%m-%d) -r 0.1 --aws-key $AWS_KEY_BACKUP --aws-secret $AWS_SECRET_BACKUP --aws-region eu-west-1 --excluded-tables prod-search-offers,prod-admin-session,prod-ap-session,prod-core-session,prod-ops-session,prod-photography-session,prod-spa-session diff --git a/bin/dynamo-backup-to-s3 b/bin/dynamo-backup-to-s3 index d06b72b..b969909 100755 --- a/bin/dynamo-backup-to-s3 +++ b/bin/dynamo-backup-to-s3 @@ -26,6 +26,7 @@ program .option('-p, --backup-path ', 'backup path to store table dumps in. default is DynamoDB-backup-YYYY-MM-DD-HH-mm-ss') .option('-e, --base64-encode-binary', 'encode binary fields in base64 before exporting') .option('-d, --save-data-pipeline-format', 'save in format compatible with the AWS Data Pipeline import. Default to false (save as exported by DynamoDb)') + .option('-f, --save-schema', 'save table schema. Default to false') .option('--aws-key ', 'AWS access key. Will use AWS_ACCESS_KEY_ID env var if --aws-key not set') .option('--aws-secret ', 'AWS secret key. Will use AWS_SECRET_ACCESS_KEY env var if --aws-secret not set') .option('--aws-region ', 'AWS region. 
Will use AWS_DEFAULT_REGION env var if --aws-region not set') @@ -46,7 +47,8 @@ var dynamoBackup = new DynamoBackup({ readPercentage: program.readPercentage, stopOnFailure: program.stopOnFailure, base64Binary: program.base64EncodeBinary, - saveDataPipelineFormat: program.saveDataPipelineFormat + saveDataPipelineFormat: program.saveDataPipelineFormat, + saveSchema: program.saveSchema }); dynamoBackup.on('error', function(data) { diff --git a/bin/dynamo-restore-from-s3 b/bin/dynamo-restore-from-s3 index f2b8dd0..86d0f9c 100755 --- a/bin/dynamo-restore-from-s3 +++ b/bin/dynamo-restore-from-s3 @@ -1,5 +1,6 @@ #!/usr/bin/env node - +var MY_SLACK_WEBHOOK_URL = 'https://hooks.slack.com/services/T1JC01J20/B56R0998F/' + process.env.SLACK_ID; +var slack = require('slack-notify')(MY_SLACK_WEBHOOK_URL); var program = require('commander'), fs = require('fs'), DynamoRestore = require('../').Restore; @@ -11,6 +12,7 @@ program .option('-t, --table [name]', 'Name of the Dynamo Table to restore to (Required)') .option('-o, --overwrite', 'Table already exists, skip auto-create. Default is false.') .option('-c, --concurrency ', 'Number of concurrent requests & dynamo capacity units. Defaults to 200.') + .option('-ic, --index-concurrency ', 'Number of concurrent requests & dynamo capacity units for global secondary indexes. Defaults to 200.') .option('-pk, --partitionkey [columnname]', 'Name of Primary Partition Key. If not provided will try determine from backup.') .option('-sk, --sortkey [columnname]', 'Name of Secondary Sort Key. Ignored unless --partitionkey is provided.') .option('-rc, --readcapacity ', 'Read Units for new table (when finished). 
Default is 5.') @@ -39,6 +41,7 @@ var dynamoRestore = new DynamoRestore({ table: program.table, overwrite: !!program.overwrite, concurrency: program.concurrency, + indexConcurrency: program.indexConcurrency, stopOnFailure: !!program.stopOnFailure, // New table properties partitionkey: program.partitionkey, @@ -62,8 +65,16 @@ function translate(contentLength) { // Define events dynamoRestore.on('error', function(message) { + slack.note({ + channel: '#ped_backup_restore', + text: 'Something went wrong restoring: ' + program.table, + fields: { + 'Error': message, + }, + username: 'Pato' + }); console.log(message); - process.exit(-1); + setTimeout(function() { process.exit(-1); }, 3000); }); dynamoRestore.on('warning', function(message) { @@ -71,11 +82,17 @@ dynamoRestore.on('warning', function(message) { }); dynamoRestore.on('start-download', function(streamMeta) { + slack.note({ + channel: '#ped_backup_restore', + text: 'Restoring ' + program.table + '. Write capacity set to: ' + program.concurrency, + username: 'Pato' + }); var time = runTimes.startDownload = new Date().getTime(); console.log('Starting download. %s remaining...', translate(streamMeta.ContentLength)); }); dynamoRestore.on('send-batch', function(batches, requests, streamMeta) { + return; console.log('Batch sent. %d in flight. %s remaining to download...', requests, translate(streamMeta.RemainingLength)); }); @@ -85,6 +102,12 @@ dynamoRestore.run(function() { diff = time - runTimes.start, minutes = Math.floor(diff / (1000 * 60)), seconds = Math.floor((diff % 1000 * 60) / 1000); + + slack.note({ + channel: '#ped_backup_restore', + text: 'Restore completed for ' + program.table + ' in ' + minutes + ' minutes ' + seconds + ' seconds. Write capacity: ' + program.writecapacity + ', Read capacity: ' + program.writecapacity, + username: 'Pato' + }); console.log('Done! 
Process completed in %s minutes %s seconds.', minutes, seconds); - process.exit(0); -}); \ No newline at end of file + setTimeout(function() { process.exit(0); }, 3000); +}); diff --git a/deploy.sh b/deploy.sh new file mode 100755 index 0000000..5191e9a --- /dev/null +++ b/deploy.sh @@ -0,0 +1,67 @@ +# SET YOUR AWS CONFIG BEFORE RUNNING +$(aws ecr get-login --region eu-west-1) +docker build -t dynamo-backup-to-s3 . +docker tag dynamo-backup-to-s3:latest 584629324139.dkr.ecr.eu-west-1.amazonaws.com/dynamo-backup-to-s3:latest +docker push 584629324139.dkr.ecr.eu-west-1.amazonaws.com/dynamo-backup-to-s3:latest + +# more bash-friendly output for jq +JQ="jq --raw-output --exit-status" + +# create task def +make_task_def() { + task_def="[ + { + \"name\": \"dynamo-backup-to-s3\", + \"image\": \"584629324139.dkr.ecr.eu-west-1.amazonaws.com/dynamo-backup-to-s3:latest\", + \"essential\": true, + \"memory\": 2500, + \"cpu\": 800, + \"logConfiguration\": { + \"logDriver\": \"awslogs\", + \"options\": { + \"awslogs-group\": \"prod-jobs\", + \"awslogs-region\": \"eu-west-1\", + \"awslogs-stream-prefix\": \"dynamo-backup-job\" + } + }, + \"environment\": [ + { + \"name\": \"AWS_KEY_BACKUP\", + \"value\":\"$AWS_KEY_BACKUP\" + }, + { + \"name\": \"AWS_SECRET_BACKUP\", + \"value\": \"$AWS_SECRET_BACKUP\" + }, + { + \"name\": \"AWS_KEY_RESTORE\", + \"value\": \"$AWS_KEY_RESTORE\" + }, + { + \"name\": \"AWS_SECRET_RESTORE\", + \"value\": \"$AWS_SECRET_RESTORE\" + }, + { + \"name\": \"SLACK_ID\", + \"value\": \"$SLACK_ID\" + } + ] + } + ]" +} + +# register definition +register_definition() { + family="dynamo-backup-to-s3" + if revision=$(aws ecs register-task-definition --container-definitions "$task_def" --family $family | $JQ '.taskDefinition.taskDefinitionArn'); then + echo "Revision: $revision" + echo "$revision" > arn_revision.txt + echo $revision | sed -n -e 's/^.*task-definition\///p' > task_revision.txt + else + echo "Failed to register task definition" + return 1 + fi +} + 
+make_task_def +register_definition \ No newline at end of file diff --git a/ecs-overrides-backup.json b/ecs-overrides-backup.json new file mode 100644 index 0000000..c9f018f --- /dev/null +++ b/ecs-overrides-backup.json @@ -0,0 +1,8 @@ +{ + "containerOverrides": [ + { + "name": "dynamo-backup-to-s3", + "command": ["sh","/home/root/backup.sh"] + } + ] +} \ No newline at end of file diff --git a/ecs-overrides-restore.json b/ecs-overrides-restore.json new file mode 100644 index 0000000..47a44d2 --- /dev/null +++ b/ecs-overrides-restore.json @@ -0,0 +1,8 @@ +{ + "containerOverrides": [ + { + "name": "dynamo-backup-to-s3", + "command": ["sh","/home/root/restore.sh"] + } + ] +} \ No newline at end of file diff --git a/lib/dynamo-backup.js b/lib/dynamo-backup.js index a81e469..715c5a5 100644 --- a/lib/dynamo-backup.js +++ b/lib/dynamo-backup.js @@ -22,6 +22,7 @@ function DynamoBackup(options) { this.stopOnFailure = options.stopOnFailure || false; this.base64Binary = options.base64Binary || false; this.saveDataPipelineFormat = options.saveDataPipelineFormat || false; + this.saveSchema = options.saveSchema || true; this.awsAccessKey = options.awsAccessKey; this.awsSecretKey = options.awsSecretKey; this.awsRegion = options.awsRegion; @@ -47,28 +48,28 @@ DynamoBackup.prototype.listTables = function (callback) { DynamoBackup.prototype.backupTable = function (tableName, backupPath, callback) { var self = this; - var stream = new ReadableStream(); + var stream = new ReadableStream({ highWaterMark: 5000000 }); if (callback === undefined) { callback = backupPath; backupPath = self._getBackupPath(); } - var params = {}; - if (self.awsRegion) { - params.region = self.awsRegion; - } - if (self.awsAccessKey && self.awsSecretKey) { + var params = {}; + if (self.awsRegion) { + params.region = self.awsRegion; + } + if (self.awsAccessKey && self.awsSecretKey) { params.accessKey = self.awsAccessKey; params.secretKey = self.awsSecretKey; } - params.bucket = self.bucket; - params.objectName 
= path.join(backupPath, tableName + '.json'); - params.stream = stream; - params.debug = self.debug; + params.bucket = self.bucket; + params.objectName = path.join(backupPath, tableName + '.json'); + params.stream = stream; + params.debug = self.debug; - var upload = new Uploader(params); + var upload = new Uploader(params); var startTime = moment.utc(); self.emit('start-backup', tableName, startTime); @@ -79,9 +80,26 @@ DynamoBackup.prototype.backupTable = function (tableName, backupPath, callback) err: err }); } + + if (self.saveSchema) { + var schemaParams = { + Bucket: params.bucket, + Key: path.join(backupPath, tableName + '.schema.json') + } + self._copyTableSchema(tableName, schemaParams, function (err, data) { + var endTime = moment.utc(); + var backupDuration = new dateRange(startTime, endTime); + self.emit('end-backup', tableName, backupDuration); + + return callback(err); + }); + return; + } + var endTime = moment.utc(); var backupDuration = new dateRange(startTime, endTime); self.emit('end-backup', tableName, backupDuration); + return callback(err); }); @@ -89,20 +107,7 @@ DynamoBackup.prototype.backupTable = function (tableName, backupPath, callback) tableName, function (items) { items.forEach(function (item) { - if (self.base64Binary) { - _.each(item, function (value, key) { - if (value && value.B) { - value.B = new Buffer(value.B).toString('base64'); - } - }); - } - - if (self.saveDataPipelineFormat) { - stream.append(self._formatForDataPipeline(item)); - } else { - stream.append(JSON.stringify(item)); - } - stream.append('\n'); + stream.append(JSON.stringify(item) + "\n"); }); }, function (err) { @@ -114,7 +119,7 @@ DynamoBackup.prototype.backupTable = function (tableName, backupPath, callback) }); } } - ); + ); }; DynamoBackup.prototype.backupAllTables = function (callback) { @@ -141,7 +146,7 @@ DynamoBackup.prototype.backupAllTables = function (callback) { }) }, callback - ); + ); }); }; @@ -166,6 +171,52 @@ DynamoBackup.prototype._copyTable = 
function (tableName, itemsReceived, callback }); }; +DynamoBackup.prototype._copyTableSchema = function (tableName, params, callback) { + var self = this; + var ddb = new AWS.DynamoDB(); + var s3 = new AWS.S3(); + ddb.describeTable({ TableName: tableName }, function (err, data) { + if (err) { + return callback(err); + } + + params.Body = JSON.stringify(self._cleanupSchema(data.Table)); + s3.upload(params, function(err, data) { + if (err) { + return callback(err); + } + + return callback(); + }); + }); +}; + +DynamoBackup.prototype._cleanupSchema = function (schema) { + delete schema.TableStatus; + delete schema.CreationDateTime; + delete schema.TableSizeBytes; + delete schema.ItemCount; + delete schema.TableArn; + if (schema.ProvisionedThroughput) { + delete schema.ProvisionedThroughput.NumberOfDecreasesToday + } + if (schema.GlobalSecondaryIndexes) { + for (var p in schema.GlobalSecondaryIndexes) { + delete schema.GlobalSecondaryIndexes[p].ProvisionedThroughput.NumberOfDecreasesToday; + delete schema.GlobalSecondaryIndexes[p].ProvisionedThroughput.LastIncreaseDateTime; + delete schema.GlobalSecondaryIndexes[p].ProvisionedThroughput.LastDecreaseDateTime; + delete schema.GlobalSecondaryIndexes[p].CreationDateTime; + delete schema.GlobalSecondaryIndexes[p].ItemCount; + delete schema.GlobalSecondaryIndexes[p].TableSizeBytes; + delete schema.GlobalSecondaryIndexes[p].IndexArn; + delete schema.GlobalSecondaryIndexes[p].IndexStatus; + delete schema.GlobalSecondaryIndexes[p].IndexSizeBytes; + } + } + + return schema; +} + DynamoBackup.prototype._streamItems = function fetchItems(tableName, startKey, limit, itemsReceived, callback) { var self = this; var ddb = new AWS.DynamoDB(); @@ -213,50 +264,4 @@ DynamoBackup.prototype._fetchTables = function (lastTable, tables, callback) { }); }; -/** - * AWS Data Pipeline import requires that each key in the Attribute list - * be lower-cased and for sets start with a lower-case character followed - * by an 'S'. 
- * - * Go through each attribute and create a new entry with the correct case - */ -DynamoBackup.prototype._formatForDataPipeline = function (item) { - var self = this; - _.each(item, function (value, key) { - //value will be of the form: {S: 'xxx'}. Convert the key - _.each(value, function (v, k) { - var dataPipelineValueKey = self._getDataPipelineAttributeValueKey(k); - value[dataPipelineValueKey] = v; - value[k] = undefined; - // for MAps and Lists, recurse until the elements are created with the correct case - if(k === 'M' || k === 'L') { - self._formatForDataPipeline(v); - } - }); - }); - return JSON.stringify(item); -}; - -DynamoBackup.prototype._getDataPipelineAttributeValueKey = function (type) { - switch (type) { - case 'S': - case 'N': - case 'B': - case 'M': - case 'L': - case 'NULL': - return type.toLowerCase(); - case 'BOOL': - return 'bOOL'; - case 'SS': - return 'sS'; - case 'NS': - return 'nS'; - case 'BS': - return 'bS'; - default: - throw new Error('Unknown AttributeValue key: ' + type); - } -}; - module.exports = DynamoBackup; \ No newline at end of file diff --git a/lib/dynamo-restore.js b/lib/dynamo-restore.js index b341fb8..7db057d 100755 --- a/lib/dynamo-restore.js +++ b/lib/dynamo-restore.js @@ -18,13 +18,14 @@ function DynamoRestore(options) { options.concurrency = options.concurrency || 200; options.minConcurrency = 1; options.maxConcurrency = options.concurrency; + options.indexConcurrency = options.indexConcurrency || options.concurrency; options.readcapacity = options.readcapacity || 5; - options.writecapacity = options.writecapacity || 0; + options.writecapacity = options.writecapacity || 5; options.stopOnFailure = options.stopOnFailure || false; options.awsKey = options.awsKey || process.env.AWS_ACCESS_KEY_ID; options.awsSecret = options.awsSecret || process.env.AWS_SECRET_ACCESS_KEY; options.awsRegion = options.awsRegion || process.env.AWS_DEFAULT_REGION || 'ap-southeast-2'; - + AWS.config.update({ accessKeyId: options.awsKey, 
secretAccessKey: options.awsSecret, @@ -58,13 +59,44 @@ DynamoRestore.prototype.run = function(finishCallback) { options = this.options; // Do we need to update write capacity? if (options.writecapacity) { - dynamodb.updateTable({ - TableName: options.table, - ProvisionedThroughput: { - ReadCapacityUnits: options.readcapacity, - WriteCapacityUnits: options.writecapacity + dynamodb.describeTable({TableName: this.options.table}, (function(err, data) { + if (err || !data) { + return this.emit('error', 'Fatal Error. Failed describing table. ' + err); + } + gsiParams = []; + for (p in data['Table']['GlobalSecondaryIndexes']) { + indexName = data['Table']['GlobalSecondaryIndexes'][p]['IndexName']; + gsiParams.push({ + Update: { + IndexName: indexName, + ProvisionedThroughput: { + ReadCapacityUnits: options.readcapacity, + WriteCapacityUnits: options.writecapacity + } + } + }); } - }, finishCallback); + params = { + TableName: options.table, + ProvisionedThroughput: { + ReadCapacityUnits: options.readcapacity, + WriteCapacityUnits: options.writecapacity + } + }; + if (gsiParams.length > 0) { + params['GlobalSecondaryIndexUpdates'] = gsiParams; + } + + retry = false; + dynamodb.updateTable(params, (function(error, data) { + if (error || !data) { + retry = true; + return this.emit('warning', 'WARN: FAILED TO UPDATE TABLE. 
' + error); + } + finishCallback(); + }).bind(this)); + setTimeout((function() { if (retry) this.emit('finish') }).bind(this), 600); + }).bind(this)); } else { finishCallback(); } @@ -86,6 +118,8 @@ DynamoRestore.prototype._validateS3Backup = function(options) { // Break up into individual components options.s3bucket = url.hostname; options.s3path = url.pathname.substr(1); + options.s3schemapath = url.pathname.split('.')[0] + '.schema.json'; + options.s3schemapath = options.s3schemapath.substr(1); options.s3filename = url.pathname.split('.').pop(); }; @@ -99,6 +133,7 @@ DynamoRestore.prototype._validateTable = function(options) { DynamoRestore.prototype._checkTableExists = function(error, data) { var dynamodb = this.dynamodb; + var options = this.options; if (error || !data || !data.TableNames) { return this.emit('error', 'Fatal Error. Could not connect to AWS DynamoDB engine. Please check your credentials.'); } @@ -106,6 +141,41 @@ DynamoRestore.prototype._checkTableExists = function(error, data) { // Table exists, should we overwrite it?? if (this.options.overwrite) { this.emit('warning', util.format('WARN: table [%s] will be overwritten.', this.options.table)); + + dynamodb.describeTable({TableName: this.options.table}, (function(err, data) { + if (error || !data) { + return this.emit('error', 'Fatal Error. Failed describing table. 
' + error); + } + gsiParams = []; + for (p in data['Table']['GlobalSecondaryIndexes']) { + indexName = data['Table']['GlobalSecondaryIndexes'][p]['IndexName']; + gsiParams.push({ + Update: { + IndexName: indexName, + ProvisionedThroughput: { + ReadCapacityUnits: options.readcapacity, + WriteCapacityUnits: options.indexConcurrency + } + } + }); + } + params = { + TableName: options.table, + ProvisionedThroughput: { + ReadCapacityUnits: options.readcapacity, + WriteCapacityUnits: options.concurrency + } + }; + if (gsiParams.length > 0) { + params['GlobalSecondaryIndexUpdates'] = gsiParams; + } + + dynamodb.updateTable(params, (function(error, data) { + if (error || !data) { + return this.emit('warning', 'WARN: FAILED TO UPDATE TABLE. ' + error); + } + }).bind(this)); + }).bind(this)); setTimeout(dynamodb.describeTable.bind(dynamodb, { TableName: this.options.table }, this._checkTableReady.bind(this)), 1000); } else { this.emit('error', 'Fatal Error. The destination table already exists! Exiting process..'); @@ -123,6 +193,26 @@ DynamoRestore.prototype._checkTableExists = function(error, data) { DynamoRestore.prototype._startDownload = function() { var s3 = new AWS.S3(); + + // fetch schema to recreate table + if (!this.options.overwrite) { + var params = { + Bucket: this.options.s3bucket, + Key: this.options.s3schemapath + }; + s3.getObject(params, (function(error, data) { + if (error) { + if (error.code === 'NotFound') { + this.emit('error', util.format('Could not find file schema for %s', this.options.source)); + } else { + this.emit('error', util.format('Error downloading file from s3: %s', error)); + } + return; + } + this.options.schema = JSON.parse(data.Body.toString('utf-8')); + }).bind(this)); + } + var params = { Bucket: this.options.s3bucket, Key: this.options.s3path @@ -235,6 +325,13 @@ DynamoRestore.prototype._createTable = function(callback) { if (!options.table || !options.partitionkey) { return this.emit('error', 'Fatal Error. 
Could not create dynamo table. Not enough information provided.'); } + + // we want to create a table but we still don't have the schema + if (!options.overwrite && !options.schema) { + this.emit('warning', 'Schema not read yet. Waiting a bit...') + setTimeout(this._createTable.bind(this), 1000); + } + var params = { TableName: options.table, AttributeDefinitions: [{ @@ -260,13 +357,34 @@ DynamoRestore.prototype._createTable = function(callback) { KeyType: 'RANGE' }); } + + // we have a schema read from a file, override params + if (options.schema) { + // set GSI troughtput + for (var p in options.schema.GlobalSecondaryIndexes) { + options.schema.GlobalSecondaryIndexes[p].ProvisionedThroughput.ReadCapacityUnits = options.readcapacity; + options.schema.GlobalSecondaryIndexes[p].ProvisionedThroughput.WriteCapacityUnits = options.concurrency; + } + + params = { + TableName: options.table, + AttributeDefinitions: options.schema.AttributeDefinitions, + KeySchema: options.schema.KeySchema, + ProvisionedThroughput: { + ReadCapacityUnits: options.readcapacity, + WriteCapacityUnits: options.concurrency // Need this high for pumping data, but will reduce it later. + }, + GlobalSecondaryIndexes: options.schema.GlobalSecondaryIndexes + } + } + dynamodb.createTable(params, (function(error, data) { if (error || !data) { return this.emit('error', 'Fatal Error. Failed to create new table. ' + error); } data = data.TableDescription; // Wait before hammering table.. 
- setTimeout(dynamodb.describeTable.bind(dynamodb, { TableName: data.TableName }, this._checkTableReady.bind(this)), 5000); + setTimeout(dynamodb.describeTable.bind(dynamodb, { TableName: data.TableName }, this._checkTableReady.bind(this)), 10000); }).bind(this)); }; @@ -275,9 +393,7 @@ DynamoRestore.prototype._checkTableReady = function(error, data) { if (error || !data || !data.Table) { return this.emit('error', 'Error creating table ' + this.options.table); } - if (data && - data.Table && - data.Table.TableStatus === 'ACTIVE') { + if (data && data.Table && data.Table.TableStatus === 'ACTIVE') { // All ready, now we can start inserting records this.tableready = true; this.readline.resume(); @@ -307,7 +423,7 @@ DynamoRestore.prototype._sendBatch = function() { this.emit('warning', 'Failed to upload same batch too many times, removing from queue.. \n' + JSON.stringify(batch)); } } else { - this.emit('warning', 'Error processing batch, putting back in the queue.'); + this.emit('warning', 'Error processing batch, putting back in the queue: ' + error); batch.attempts++; this.batches.push(batch); } @@ -315,7 +431,6 @@ DynamoRestore.prototype._sendBatch = function() { var unprocessedItems = data && data.UnprocessedItems && data.UnprocessedItems[options.table] || []; if (unprocessedItems.length) { // Retry unprocessed items - this.emit('warning', unprocessedItems.length + ' unprocessed items. 
Add to queue and back off a bit.'); this.batches.push({ items: unprocessedItems, attempts: batch.attempts + 1 @@ -353,7 +468,9 @@ DynamoRestore.prototype._finishBatches = function() { } else { // Send remaining batches if (this.requests.length < this.options.concurrency) { - this._sendBatch(); + while (this.batches.length && this.requests.length < 25) { + this._sendBatch(); + } } } // Repeat until finished diff --git a/package.json b/package.json index bc46c90..e950c7a 100644 --- a/package.json +++ b/package.json @@ -28,7 +28,8 @@ "lodash": "^3.10.1", "moment": "^2.10.6", "moment-range": "^2.0.3", - "s3-streaming-upload": "^0.2.1" + "s3-streaming-upload": "^0.2.1", + "slack-notify": "0.1.6" }, "engines": { "node": ">=0.10.0" diff --git a/restore.sh b/restore.sh new file mode 100755 index 0000000..ae883d9 --- /dev/null +++ b/restore.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +./home/root/bin/dynamo-restore-from-s3 -t staging-accommodation-offers -c 1000 --index-concurrency 250 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accommodation-offers.json --overwrite --readcapacity 15 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-properties -c 800 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-properties.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-property-photos -c 800 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-property-photos.json --overwrite --readcapacity 50 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-accounts -c 800 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accounts.json --overwrite --readcapacity 10 --writecapacity 5 
--aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-bookings -c 800 --index-concurrency 250 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-bookings.json --overwrite --readcapacity 15 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-enquire-availabilities -c 800 --index-concurrency 500 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-enquire-availabilities.json --overwrite --readcapacity 30 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-prospective-bookings -c 800 --index-concurrency 250 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-prospective-bookings.json --overwrite --readcapacity 5 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-guests -c 800 --index-concurrency 500 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-guests.json --overwrite --readcapacity 10 --writecapacity 5 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 + +./home/root/bin/dynamo-restore-from-s3 -t staging-prospective-properties -c 500 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-prospective-properties.json --overwrite --readcapacity 10 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-prospective-booking-events -c 500 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-prospective-booking-events.json --overwrite --readcapacity 15 --writecapacity 2 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-booking-events -c 
500 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-events.json --overwrite --readcapacity 5 --writecapacity 3 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-booking-summaries -c 500 --index-concurrency 75 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-summaries.json --overwrite --readcapacity 10 --writecapacity 5 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-accommodation-providers -c 500 --index-concurrency 300 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accommodation-providers.json --overwrite --readcapacity 10 --writecapacity 20 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 + +./home/root/bin/dynamo-restore-from-s3 -t staging-agency-staffs -c 51 --index-concurrency 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-agency-staffs.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 + +./home/root/bin/dynamo-restore-from-s3 -t staging-accommodation-provider-contacts -c 5 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accommodation-provider-contacts.json --overwrite --readcapacity 4 --writecapacity 2 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 + +./home/root/bin/dynamo-restore-from-s3 -t staging-accommodation-provider-push-notifications -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accommodation-provider-push-notifications.json --overwrite --readcapacity 5 --writecapacity 5 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +./home/root/bin/dynamo-restore-from-s3 -t staging-accommodation-types -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-accommodation-types.json --partitionkey code --overwrite --readcapacity 
10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-admin-staffs -c 5 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-admin-staffs.json --overwrite --readcapacity 5 --writecapacity 5 --index-concurrency 5 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-agencies -c 5 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-agencies.json --overwrite --readcapacity 1 --writecapacity 1 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-bank-details -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-bank-details.json --overwrite --readcapacity 5 --writecapacity 5 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-booking-complaints -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-complaints.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-booking-fees -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-fees.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-booking-impact-radius -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-impact-radius.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-booking-notes -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-notes.json --overwrite --readcapacity 10
--writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-booking-reviews -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-booking-reviews.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-chatbot-chat -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-chatbot-chat.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-chatbot-intent-feedback -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-chatbot-intent-feedback.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-cities -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-cities.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-college-pages -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-college-pages.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-colleges -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-colleges.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-contact-messages -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-contact-messages.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key
$AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-counters -c 15 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-counters.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey name --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-countries -c 15 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-countries.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-credits -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-credits.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-currencies -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-currencies.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-dynamic-pages -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-dynamic-pages.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-features -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-features.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-feeds -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-feeds.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey slug --aws-key $AWS_KEY_RESTORE --aws-secret
$AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-fixed-contract-availabilities -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-fixed-contract-availabilities.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-invoice-details -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-invoice-details.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-languages -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-languages.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-locales -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-locales.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-neighborhood-pages -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-neighborhood-pages.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-neighborhoods -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-neighborhoods.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-oauth-access -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-oauth-access.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret
$AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-oauth-client -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-oauth-client.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-oauth-refresh -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-oauth-refresh.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-offer-integration-enriched-jsons -c 200 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-offer-integration-enriched-jsons.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-partners -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-partners.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-payment-operations -c 400 --index-concurrency 400 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-payment-operations.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-promocodes -c 50 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-promocodes.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-property-rules -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-property-rules.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey
code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-property-types -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-property-types.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 + +/home/root/bin/dynamo-restore-from-s3 -t staging-roles -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-roles.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-search-aggregates -c 100 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-search-aggregates.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey slug --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-standard-contract-availabilities -c 200 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-standard-contract-availabilities.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-standard-unitary-contract-availabilities -c 200 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-standard-unitary-contract-availabilities.json --overwrite --readcapacity 10 --writecapacity 10 --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-tag-types -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-tag-types.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-tags -c 11 -s s3://uniplaces.com.backups/$(date
+%Y-%m-%d)/prod-tags.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1 +/home/root/bin/dynamo-restore-from-s3 -t staging-unit-types -c 11 -s s3://uniplaces.com.backups/$(date +%Y-%m-%d)/prod-unit-types.json --overwrite --readcapacity 10 --writecapacity 10 --partitionkey code --aws-key $AWS_KEY_RESTORE --aws-secret $AWS_SECRET_RESTORE --aws-region eu-west-1