diff --git a/docker/logical-backup/Dockerfile b/docker/logical-backup/Dockerfile index 5c1ee6e39..4dbe1762f 100644 --- a/docker/logical-backup/Dockerfile +++ b/docker/logical-backup/Dockerfile @@ -15,6 +15,8 @@ RUN apt-get update \ gnupg \ gcc \ libffi-dev \ + nodejs \ + npm \ && pip3 install --upgrade pip \ && pip3 install --no-cache-dir awscli --upgrade \ && pip3 install --no-cache-dir gsutil --upgrade \ @@ -33,6 +35,8 @@ RUN apt-get update \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* +RUN npm install --global azbak + COPY dump.sh ./ ENV PG_DIR=/usr/lib/postgresql diff --git a/docker/logical-backup/dump.sh b/docker/logical-backup/dump.sh index 2627ac8c9..a0cae79cf 100755 --- a/docker/logical-backup/dump.sh +++ b/docker/logical-backup/dump.sh @@ -109,8 +109,18 @@ function aws_upload { aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}" } +function azure_upload { + + # mimic bucket setup from Spilo + # to keep logical backups at the same path as WAL + # NB: $LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX already contains the leading "/" when set by the Postgres Operator + PATH_TO_BACKUP=/$LOGICAL_BACKUP_AZ_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + + azbak - "$PATH_TO_BACKUP" --storage-account "$LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT" --no-suffix +} + function gcs_upload { - PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz + PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_GS_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP" } @@ -120,6 +130,9 @@ function upload { "gcs") gcs_upload ;; + "azure") + azure_upload + ;; *) aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF)) aws_delete_outdated diff --git a/docs/administrator.md b/docs/administrator.md index cd4504c31..6e94c5885 100644 --- 
a/docs/administrator.md +++ b/docs/administrator.md @@ -1146,6 +1146,15 @@ of the backup cron job. `cronjobs` resource from the `batch` API group for the operator service account. See [example RBAC](https://github.com/zalando/postgres-operator/blob/master/manifests/operator-service-account-rbac.yaml) +Logical backup environment variables: +1. AWS: + "LOGICAL_BACKUP_S3_BUCKET", "LOGICAL_BACKUP_S3_REGION", "LOGICAL_BACKUP_S3_ENDPOINT", "LOGICAL_BACKUP_S3_SSE", + "LOGICAL_BACKUP_S3_RETENTION_TIME", "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX" +2. GCP: + "LOGICAL_BACKUP_GS_BUCKET", "LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX", "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS" +3. Azure: + "LOGICAL_BACKUP_AZ_BUCKET", "LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX", "LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT" + ## Sidecars for Postgres clusters A list of sidecars is added to each cluster created by the operator. The default diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 382abd1e2..aa7e328a8 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -2161,39 +2161,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, }, }, - // Bucket env vars { Name: "LOGICAL_BACKUP_PROVIDER", Value: c.OpConfig.LogicalBackup.LogicalBackupProvider, }, - { - Name: "LOGICAL_BACKUP_S3_BUCKET", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket, - }, - { - Name: "LOGICAL_BACKUP_S3_REGION", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region, - }, - { - Name: "LOGICAL_BACKUP_S3_ENDPOINT", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint, - }, - { - Name: "LOGICAL_BACKUP_S3_SSE", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE, - }, - { - Name: "LOGICAL_BACKUP_S3_RETENTION_TIME", - Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime, - }, - { - Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", - Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())), - }, - { - Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", - Value: 
c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials, - }, // Postgres env vars { Name: "PG_VERSION", @@ -2228,6 +2199,25 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar { }, } + // logical backup storage env vars + if c.OpConfig.LogicalBackup.LogicalBackupProvider == "aws" || c.OpConfig.LogicalBackup.LogicalBackupProvider == "s3" { + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_REGION", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_ENDPOINT", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_SSE", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_RETENTION_TIME", Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))}) + } else if c.OpConfig.LogicalBackup.LogicalBackupProvider == "google" || c.OpConfig.LogicalBackup.LogicalBackupProvider == "gcs" { + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GS_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupGSBucket}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials}) + } else if c.OpConfig.LogicalBackup.LogicalBackupProvider == "azure" { + // assumes logical backups are going to the same place as wal archives. 
+ envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupAZBucket}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))}) + envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount}) + } + if c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID != "" { envVars = append(envVars, v1.EnvVar{Name: "AWS_ACCESS_KEY_ID", Value: c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID}) }