4 changes: 4 additions & 0 deletions docker/logical-backup/Dockerfile
@@ -15,6 +15,8 @@ RUN apt-get update \
gnupg \
gcc \
libffi-dev \
nodejs \
npm \
> **Review comment (Member):** hm, isn't there another CLI we can use to avoid these dependencies? What about az?

&& pip3 install --upgrade pip \
&& pip3 install --no-cache-dir awscli --upgrade \
&& pip3 install --no-cache-dir gsutil --upgrade \
@@ -33,6 +35,8 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

RUN npm install --global azbak

COPY dump.sh ./

ENV PG_DIR=/usr/lib/postgresql
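On the review question above about using az instead of azbak: dropping the nodejs/npm layers would be possible if the image ships azure-cli instead, at the cost of staging the dump in a temp file, since `az storage blob upload` reads from a file rather than stdin. A rough sketch under those assumptions (the function name and temp-file handling are illustrative; credentials are assumed to come from the standard `AZURE_STORAGE_KEY` environment variable):

```bash
# Hypothetical az-based replacement for azbak -- illustrative only.
# Assumes azure-cli is installed in the image and AZURE_STORAGE_KEY is set.
function azure_upload_via_az {
    local tmp
    tmp=$(mktemp)
    cat > "$tmp"   # buffer the compressed dump from stdin; az uploads from a file
    az storage blob upload \
        --account-name "$LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT" \
        --container-name "$LOGICAL_BACKUP_AZ_BUCKET" \
        --name "spilo/$SCOPE$LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX/logical_backups/$(date +%s).sql.gz" \
        --file "$tmp"
    rm -f "$tmp"
}
```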
15 changes: 14 additions & 1 deletion docker/logical-backup/dump.sh
@@ -109,8 +109,18 @@ function aws_upload {
aws s3 cp - "$PATH_TO_BACKUP" "${args[@]//\'/}"
}

function azure_upload {

# mimic bucket setup from Spilo
# to keep logical backups at the same path as WAL
# NB: $LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX already contains the leading "/" when set by the Postgres Operator
PATH_TO_BACKUP=$LOGICAL_BACKUP_AZ_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz

azbak - "$PATH_TO_BACKUP" --storage-account "$LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT" --no-suffix
}

function gcs_upload {
-PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
+PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_GS_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz

gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP"
}
@@ -120,6 +130,9 @@ function upload {
"gcs")
gcs_upload
;;
"azure")
azure_upload
;;
*)
aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
aws_delete_outdated
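For orientation, every upload function reads the compressed dump from stdin and builds the object key from the cluster name, so the new `azure` case slots into the script's main pipeline unchanged. A simplified trace with illustrative values (the `dump | compress | upload` chain mirrors the structure of dump.sh; the concrete values are examples, not defaults):

```bash
# Illustrative values, not taken from the PR:
#   SCOPE=acid-minimal-cluster
#   LOGICAL_BACKUP_AZ_BUCKET=pg-backups
#   LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX=/36bd8900-example-uid
# azure_upload would then stream the dump to a blob key like:
#   pg-backups/spilo/acid-minimal-cluster/36bd8900-example-uid/logical_backups/1652345678.sql.gz
dump | compress | upload
```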
9 changes: 9 additions & 0 deletions docs/administrator.md
@@ -1146,6 +1146,15 @@ of the backup cron job.
`cronjobs` resource from the `batch` API group for the operator service account.
See [example RBAC](https://github.com/zalando/postgres-operator/blob/master/manifests/operator-service-account-rbac.yaml)

Logical backup environment variables per provider (a configuration sketch follows this list):

1. AWS: `LOGICAL_BACKUP_S3_BUCKET`, `LOGICAL_BACKUP_S3_REGION`, `LOGICAL_BACKUP_S3_ENDPOINT`,
   `LOGICAL_BACKUP_S3_SSE`, `LOGICAL_BACKUP_S3_RETENTION_TIME`, `LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX`
2. GCP: `LOGICAL_BACKUP_GS_BUCKET`, `LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX`,
   `LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS`
3. Azure: `LOGICAL_BACKUP_AZ_BUCKET`, `LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX`, `LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT`

## Sidecars for Postgres clusters

A list of sidecars is added to each cluster created by the operator. The default
48 changes: 19 additions & 29 deletions pkg/cluster/k8sres.go
@@ -2161,39 +2161,10 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
},
},
},
// Bucket env vars
{
Name: "LOGICAL_BACKUP_PROVIDER",
Value: c.OpConfig.LogicalBackup.LogicalBackupProvider,
},
{
Name: "LOGICAL_BACKUP_S3_BUCKET",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket,
},
{
Name: "LOGICAL_BACKUP_S3_REGION",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region,
},
{
Name: "LOGICAL_BACKUP_S3_ENDPOINT",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint,
},
{
Name: "LOGICAL_BACKUP_S3_SSE",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE,
},
{
Name: "LOGICAL_BACKUP_S3_RETENTION_TIME",
Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime,
},
{
Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX",
Value: getBucketScopeSuffix(string(c.Postgresql.GetUID())),
},
{
Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS",
Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials,
},
// Postgres env vars
{
Name: "PG_VERSION",
@@ -2228,6 +2199,25 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
},
}

// logical backup storage env vars
if c.OpConfig.LogicalBackup.LogicalBackupProvider == "aws" || c.OpConfig.LogicalBackup.LogicalBackupProvider == "s3" {
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Bucket})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_REGION", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Region})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_ENDPOINT", Value: c.OpConfig.LogicalBackup.LogicalBackupS3Endpoint})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_SSE", Value: c.OpConfig.LogicalBackup.LogicalBackupS3SSE})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_RETENTION_TIME", Value: c.OpConfig.LogicalBackup.LogicalBackupS3RetentionTime})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))})
} else if c.OpConfig.LogicalBackup.LogicalBackupProvider == "google" || c.OpConfig.LogicalBackup.LogicalBackupProvider == "gcs" {
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GS_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupGSBucket})
> **Review comment (Member):** This option does not exist. You have to implement support for it.

envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GS_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials})
} else if c.OpConfig.LogicalBackup.LogicalBackupProvider == "azure" {
// assumes logical backups are going to the same place as wal archives.
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_BUCKET", Value: c.OpConfig.LogicalBackup.LogicalBackupAZBucket})
> **Review comment (Member):** See my comment on LogicalBackupGSBucket.

envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(c.Postgresql.GetUID()))})
envVars = append(envVars, v1.EnvVar{Name: "LOGICAL_BACKUP_AZ_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
> **Review comment (Member):** You should not reuse the storage account you use for WAL backups. There should be a LogicalBackupAZStorageAccount option.

}

if c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID != "" {
envVars = append(envVars, v1.EnvVar{Name: "AWS_ACCESS_KEY_ID", Value: c.OpConfig.LogicalBackup.LogicalBackupS3AccessKeyID})
}
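Following up on the review comments above: `LogicalBackupGSBucket`, `LogicalBackupAZBucket`, and a dedicated `LogicalBackupAZStorageAccount` would need matching fields in the operator's logical-backup config struct before this code can compile. A minimal sketch, assuming the struct-tag pattern of the existing S3 options (field and tag names are illustrative, not the merged ones):

```go
package config

// Hypothetical sketch of the logical-backup options referenced above.
// Tags mirror the existing logical_backup_s3_* pattern and are
// assumptions, not the names that eventually got merged.
type LogicalBackup struct {
	LogicalBackupProvider         string `name:"logical_backup_provider" default:"s3"`
	LogicalBackupS3Bucket         string `name:"logical_backup_s3_bucket" default:""`
	// ... existing S3 options elided ...
	LogicalBackupGSBucket         string `name:"logical_backup_gs_bucket" default:""`
	LogicalBackupAZBucket         string `name:"logical_backup_az_bucket" default:""`
	LogicalBackupAZStorageAccount string `name:"logical_backup_az_storage_account" default:""`
}
```

With a field like `LogicalBackupAZStorageAccount` in place, the env-var block above could read `c.OpConfig.LogicalBackup.LogicalBackupAZStorageAccount` instead of reusing `c.OpConfig.WALAZStorageAccount`.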