diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index c6da405a5..f94aff110 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -178,6 +178,10 @@ spec: type: array items: type: string + dropped_pod_capabilities: + type: array + items: + type: string cluster_domain: type: string default: "cluster.local" diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index d66aa5608..4c390e68a 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -98,6 +98,10 @@ configKubernetes: # additional_pod_capabilities: # - "SYS_NICE" + # list of dropped capabilities for postgres container + # dropped_pod_capabilities: + # - "ALL" + # default DNS domain of K8s cluster where operator is running cluster_domain: cluster.local # additional labels assigned to the cluster objects diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 7e7cbeaf0..43c06440d 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -525,6 +525,10 @@ configuration they are grouped under the `kubernetes` key. PodSecruityPolicy allows the capabilities listed here. Otherwise, the container will not start. The default is empty. +* **dropped_pod_capabilities** + list of capabilities to be dropped from the postgres container's + SecurityContext (e.g. `ALL`). The default is empty. + * **master_pod_move_timeout** The period of time to wait for the success of migration of master pods from an unschedulable node.
The migration includes Patroni switchovers to diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 2c0ba9151..9d549e167 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -5,6 +5,7 @@ metadata: data: # additional_owner_roles: "cron_admin" # additional_pod_capabilities: "SYS_NICE" + # dropped_pod_capabilities: "ALL" # additional_secret_mount: "some-secret-name" # additional_secret_mount_path: "/some/dir" api_port: "8080" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 6556b333c..dc62721c6 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -176,6 +176,10 @@ spec: type: array items: type: string + dropped_pod_capabilities: + type: array + items: + type: string cluster_domain: type: string default: "cluster.local" diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 389d9325a..a0a3cd00a 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -44,6 +44,8 @@ configuration: kubernetes: # additional_pod_capabilities: # - "SYS_NICE" + # dropped_pod_capabilities: + # - "ALL" cluster_domain: cluster.local cluster_labels: application: spilo diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index cd11b9173..04cf5c99a 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -67,6 +67,7 @@ type KubernetesMetaConfiguration struct { SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"` SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"` AdditionalPodCapabilities []string `json:"additional_pod_capabilities,omitempty"` + DroppedPodCapabilities []string `json:"dropped_pod_capabilities,omitempty"` WatchedNamespace string 
`json:"watched_namespace,omitempty"` PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"` PDBMasterLabelSelector *bool `json:"pdb_master_label_selector,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 5d0a5b341..db3e5d4dd 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -188,6 +188,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura *out = make([]string, len(*in)) copy(*out, *in) } + if in.DroppedPodCapabilities != nil { + in, out := &in.DroppedPodCapabilities, &out.DroppedPodCapabilities + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.PDBMasterLabelSelector != nil { in, out := &in.PDBMasterLabelSelector, &out.PDBMasterLabelSelector *out = new(bool) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index e05a54553..7cf041386 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -485,17 +485,30 @@ func getLocalAndBoostrapPostgreSQLParameters(parameters map[string]string) (loca return } -func generateCapabilities(capabilities []string) *v1.Capabilities { - additionalCapabilities := make([]v1.Capability, 0, len(capabilities)) - for _, capability := range capabilities { - additionalCapabilities = append(additionalCapabilities, v1.Capability(strings.ToUpper(capability))) +func generateCapabilities(added, dropped []string) *v1.Capabilities { + if len(added) == 0 && len(dropped) == 0 { + return nil } - if len(additionalCapabilities) > 0 { - return &v1.Capabilities{ - Add: additionalCapabilities, + + capabilities := &v1.Capabilities{} + + if len(added) > 0 { + additionalCapabilities := make([]v1.Capability, 0, len(added)) + for _, capability := range added { + additionalCapabilities = append(additionalCapabilities, v1.Capability(strings.ToUpper(capability))) } + capabilities.Add = additionalCapabilities } - return nil + + if 
len(dropped) > 0 { + droppedCapabilities := make([]v1.Capability, 0, len(dropped)) + for _, capability := range dropped { + droppedCapabilities = append(droppedCapabilities, v1.Capability(strings.ToUpper(capability))) + } + capabilities.Drop = droppedCapabilities + } + + return capabilities } func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity *v1.NodeAffinity) *v1.Affinity { @@ -1391,7 +1404,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef volumeMounts, c.OpConfig.Resources.SpiloPrivileged, c.OpConfig.Resources.SpiloAllowPrivilegeEscalation, - generateCapabilities(c.OpConfig.AdditionalPodCapabilities), + generateCapabilities(c.OpConfig.AdditionalPodCapabilities, c.OpConfig.DroppedPodCapabilities), ) // Patroni responds 200 to probe only if it either owns the leader lock or postgres is running and DCS is accessible diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 137c24081..1e51b8ae6 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -3943,41 +3943,53 @@ func TestGenerateLogicalBackupPodEnvVars(t *testing.T) { func TestGenerateCapabilities(t *testing.T) { tests := []struct { subTest string - configured []string + added []string + dropped []string capabilities *v1.Capabilities - err error }{ { subTest: "no capabilities", - configured: nil, capabilities: nil, - err: fmt.Errorf("could not parse capabilities configuration of nil"), }, { subTest: "empty capabilities", - configured: []string{}, + added: []string{}, + dropped: []string{}, capabilities: nil, - err: fmt.Errorf("could not parse empty capabilities configuration"), }, { - subTest: "configured capability", - configured: []string{"SYS_NICE"}, + subTest: "added one capability", + added: []string{"SYS_NICE"}, capabilities: &v1.Capabilities{ Add: []v1.Capability{"SYS_NICE"}, }, - err: fmt.Errorf("could not generate one configured capability"), }, { - subTest: "configured capabilities", - configured: 
[]string{"SYS_NICE", "CHOWN"}, + subTest: "added two capabilities", + added: []string{"SYS_NICE", "CHOWN"}, capabilities: &v1.Capabilities{ Add: []v1.Capability{"SYS_NICE", "CHOWN"}, }, - err: fmt.Errorf("could not generate multiple configured capabilities"), + }, + { + subTest: "dropped capabilities", + dropped: []string{"ALL"}, + capabilities: &v1.Capabilities{ + Drop: []v1.Capability{"ALL"}, + }, + }, + { + subTest: "added and dropped capabilities", + added: []string{"CHOWN"}, + dropped: []string{"SYS_NICE"}, + capabilities: &v1.Capabilities{ + Add: []v1.Capability{"CHOWN"}, + Drop: []v1.Capability{"SYS_NICE"}, + }, }, } for _, tt := range tests { - caps := generateCapabilities(tt.configured) + caps := generateCapabilities(tt.added, tt.dropped) if !reflect.DeepEqual(caps, tt.capabilities) { t.Errorf("%s %s: expected `%v` but got `%v`", t.Name(), tt.subTest, tt.capabilities, caps) diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 9fadd6a5b..b5e128f77 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -39,6 +39,7 @@ type Resources struct { SpiloPrivileged bool `name:"spilo_privileged" default:"false"` SpiloAllowPrivilegeEscalation *bool `name:"spilo_allow_privilege_escalation" default:"true"` AdditionalPodCapabilities []string `name:"additional_pod_capabilities" default:""` + DroppedPodCapabilities []string `name:"dropped_pod_capabilities" default:""` ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` InheritedLabels []string `name:"inherited_labels" default:""` InheritedAnnotations []string `name:"inherited_annotations" default:""`