From 1cbb0a680b15d80291d33371719ceb915e804db1 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Mon, 29 Dec 2025 16:59:09 -0800 Subject: [PATCH 1/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- .../apiserver/apiserver_controller.go | 4 +- .../applicationlayer_controller.go | 2 +- .../authentication_controller.go | 2 +- .../clusterconnection_controller.go | 2 +- .../compliance/compliance_controller.go | 4 +- pkg/controller/csr/csr_controller.go | 2 +- .../egressgateway/egressgateway_controller.go | 4 +- .../gatewayapi/gatewayapi_controller.go | 6 +- .../gatewayapi/gatewayapi_controller_test.go | 2 +- pkg/controller/goldmane/controller.go | 2 +- .../installation/core_controller.go | 6 +- .../installation/core_controller_test.go | 2 +- .../installation/windows_controller.go | 2 +- .../intrusiondetection_controller.go | 4 +- pkg/controller/ippool/pool_controller.go | 2 +- pkg/controller/istio/istio_controller.go | 4 +- .../logcollector/logcollector_controller.go | 4 +- .../dashboards/dashboards_controller.go | 4 +- .../logstorage/elastic/elastic_controller.go | 2 +- .../elastic/external_elastic_controller.go | 4 +- .../esmetrics/esmetrics_controller.go | 2 +- .../initializer/initializing_controller.go | 4 +- .../kubecontrollers/es_kube_controllers.go | 2 +- .../logstorage/linseed/linseed_controller.go | 4 +- .../managed_cluster_controller.go | 4 +- .../logstorage/secrets/secret_controller.go | 4 +- .../logstorage/users/users_controller.go | 16 ++- pkg/controller/manager/manager_controller.go | 2 +- pkg/controller/monitor/monitor_controller.go | 2 +- .../nonclusterhost_controller.go | 13 +- .../packetcapture/packetcapture_controller.go | 2 +- .../policyrecommendation_controller.go | 2 +- .../secrets/cluster_ca_controller.go | 6 +- pkg/controller/secrets/tenant_controller.go | 6 +- pkg/controller/tiers/tiers_controller.go | 12 +- pkg/controller/utils/component.go | 84 
++++++++++-- pkg/controller/utils/component_test.go | 128 ++++++++++++++---- pkg/controller/whisker/controller.go | 2 +- 38 files changed, 262 insertions(+), 97 deletions(-) diff --git a/pkg/controller/apiserver/apiserver_controller.go b/pkg/controller/apiserver/apiserver_controller.go index 059832d2a5..69f403c031 100644 --- a/pkg/controller/apiserver/apiserver_controller.go +++ b/pkg/controller/apiserver/apiserver_controller.go @@ -264,7 +264,7 @@ func (r *ReconcileAPIServer) Reconcile(ctx context.Context, request reconcile.Re } // Query for the installation object. - _, installationSpec, err := utils.GetInstallation(context.Background(), r.client) + variant, installationSpec, err := utils.GetInstallation(context.Background(), r.client) if err != nil { if errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) @@ -443,7 +443,7 @@ func (r *ReconcileAPIServer) Reconcile(ctx context.Context, request reconcile.Re } // Create a component handler to manage the rendered component. - handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + handler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) // Render the desired objects from the CRD and create or update them. 
reqLogger.V(3).Info("rendering components") diff --git a/pkg/controller/applicationlayer/applicationlayer_controller.go b/pkg/controller/applicationlayer/applicationlayer_controller.go index 81956d9fa3..94257ac198 100644 --- a/pkg/controller/applicationlayer/applicationlayer_controller.go +++ b/pkg/controller/applicationlayer/applicationlayer_controller.go @@ -283,7 +283,7 @@ func (r *ReconcileApplicationLayer) Reconcile(ctx context.Context, request recon } component := applicationlayer.ApplicationLayer(config) - ch := utils.NewComponentHandler(log, r.client, r.scheme, instance) + ch := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) diff --git a/pkg/controller/authentication/authentication_controller.go b/pkg/controller/authentication/authentication_controller.go index 60019812f2..d22e456f86 100644 --- a/pkg/controller/authentication/authentication_controller.go +++ b/pkg/controller/authentication/authentication_controller.go @@ -383,7 +383,7 @@ func (r *ReconcileAuthentication) Reconcile(ctx context.Context, request reconci dexCfg := render.NewDexConfig(install.CertificateManagement, authentication, idpSecret, secretProviderClass, r.clusterDomain) // Create a component handler to manage the rendered component. 
- hlr := utils.NewComponentHandler(log, r.client, r.scheme, authentication) + hlr := utils.NewComponentHandler(log, r.client, r.scheme, authentication, &variant) dexComponentCfg := &render.DexComponentConfiguration{ PullSecrets: pullSecrets, diff --git a/pkg/controller/clusterconnection/clusterconnection_controller.go b/pkg/controller/clusterconnection/clusterconnection_controller.go index 612eca3236..10c437d4de 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller.go @@ -450,7 +450,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R } - ch := utils.NewComponentHandler(log, r.cli, r.scheme, managementClusterConnection) + ch := utils.NewComponentHandler(log, r.cli, r.scheme, managementClusterConnection, &variant) guardianCfg := &render.GuardianConfiguration{ URL: managementClusterConnection.Spec.ManagementClusterAddr, PodProxies: r.resolvedPodProxies, diff --git a/pkg/controller/compliance/compliance_controller.go b/pkg/controller/compliance/compliance_controller.go index d3e8bb92d5..98c8edb807 100644 --- a/pkg/controller/compliance/compliance_controller.go +++ b/pkg/controller/compliance/compliance_controller.go @@ -427,7 +427,7 @@ func (r *ReconcileCompliance) Reconcile(ctx context.Context, request reconcile.R } // Create a component handler to manage the rendered component. - handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + handler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) keyValidatorConfig, err := utils.GetKeyValidatorConfig(ctx, r.client, authenticationCR, r.clusterDomain) if err != nil { @@ -498,7 +498,7 @@ func (r *ReconcileCompliance) Reconcile(ctx context.Context, request reconcile.R setupHandler := handler if tenant.MultiTenant() { // In standard installs, the Compliance CR owns all the objects. For multi-tenant, pull secrets are owned by the Tenant instance. 
- setupHandler = utils.NewComponentHandler(log, r.client, r.scheme, tenant) + setupHandler = utils.NewComponentHandler(log, r.client, r.scheme, tenant, &variant) } if err := setupHandler.CreateOrUpdateOrDelete(ctx, setUp, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) diff --git a/pkg/controller/csr/csr_controller.go b/pkg/controller/csr/csr_controller.go index 6fe6ef079a..42d926729f 100644 --- a/pkg/controller/csr/csr_controller.go +++ b/pkg/controller/csr/csr_controller.go @@ -215,7 +215,7 @@ func (r *reconcileCSR) Reconcile(ctx context.Context, request reconcile.Request) } } - componentHandler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + componentHandler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &instance.Spec.Variant) var passthrough render.Component if needsCSRRole { // This controller creates the cluster role for any pod in the cluster that requires certificate management. diff --git a/pkg/controller/egressgateway/egressgateway_controller.go b/pkg/controller/egressgateway/egressgateway_controller.go index 5606f2cf86..9d53241c25 100644 --- a/pkg/controller/egressgateway/egressgateway_controller.go +++ b/pkg/controller/egressgateway/egressgateway_controller.go @@ -150,7 +150,7 @@ func (r *ReconcileEgressGateway) Reconcile(ctx context.Context, request reconcil } // If there are no Egress Gateway resources, return. 
- ch := utils.NewComponentHandler(log, r.client, r.scheme, nil) + ch := utils.NewComponentHandler(log, r.client, r.scheme, nil, nil) if len(egws) == 0 { var objects []client.Object if r.provider.IsOpenShift() { @@ -395,7 +395,7 @@ func (r *ReconcileEgressGateway) reconcileEgressGateway(ctx context.Context, egw } component := egressgateway.EgressGateway(config) - ch := utils.NewComponentHandler(log, r.client, r.scheme, egw) + ch := utils.NewComponentHandler(log, r.client, r.scheme, egw, &variant) if err = imageset.ApplyImageSet(ctx, r.client, variant, component); err != nil { reqLogger.Error(err, "Error with images from ImageSet") diff --git a/pkg/controller/gatewayapi/gatewayapi_controller.go b/pkg/controller/gatewayapi/gatewayapi_controller.go index 49f0e20d16..0377590b10 100644 --- a/pkg/controller/gatewayapi/gatewayapi_controller.go +++ b/pkg/controller/gatewayapi/gatewayapi_controller.go @@ -145,7 +145,7 @@ type ReconcileGatewayAPI struct { status status.StatusManager clusterDomain string multiTenant bool - newComponentHandler func(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object) utils.ComponentHandler + newComponentHandler func(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object, variant *operatorv1.ProductVariant) utils.ComponentHandler watchEnvoyProxy func(namespacedName operatorv1.NamespacedName) error watchEnvoyGateway func(namespacedName operatorv1.NamespacedName) error } @@ -214,7 +214,7 @@ func (r *ReconcileGatewayAPI) Reconcile(ctx context.Context, request reconcile.R // would ideally install, to provide more options to our users; but this controller will // only warn if any of those cannot be installed (and do not already exist). 
essentialCRDs, optionalCRDs := gatewayapi.GatewayAPICRDs(installation.KubernetesProvider) - handler := r.newComponentHandler(log, r.client, r.scheme, nil) + handler := r.newComponentHandler(log, r.client, r.scheme, nil, &variant) if gatewayAPI.Spec.CRDManagement == nil || *gatewayAPI.Spec.CRDManagement == operatorv1.CRDManagementPreferExisting { handler.SetCreateOnly() } @@ -416,7 +416,7 @@ func (r *ReconcileGatewayAPI) Reconcile(ctx context.Context, request reconcile.R return reconcile.Result{}, err } - err = r.newComponentHandler(log, r.client, r.scheme, gatewayAPI).CreateOrUpdateOrDelete(ctx, nonCRDComponent, r.status) + err = r.newComponentHandler(log, r.client, r.scheme, gatewayAPI, &variant).CreateOrUpdateOrDelete(ctx, nonCRDComponent, r.status) if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Error rendering GatewayAPI resources", err, log) return reconcile.Result{}, err diff --git a/pkg/controller/gatewayapi/gatewayapi_controller_test.go b/pkg/controller/gatewayapi/gatewayapi_controller_test.go index 7c30203f2c..64f1214501 100644 --- a/pkg/controller/gatewayapi/gatewayapi_controller_test.go +++ b/pkg/controller/gatewayapi/gatewayapi_controller_test.go @@ -694,7 +694,7 @@ var _ = Describe("Gateway API controller tests", func() { var fakeComponentHandlers []*fakeComponentHandler -func FakeComponentHandler(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object) utils.ComponentHandler { +func FakeComponentHandler(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object, _ *operatorv1.ProductVariant) utils.ComponentHandler { h := &fakeComponentHandler{ client: client, scheme: scheme, diff --git a/pkg/controller/goldmane/controller.go b/pkg/controller/goldmane/controller.go index 4564ef6e03..bf05ff9ecb 100644 --- a/pkg/controller/goldmane/controller.go +++ b/pkg/controller/goldmane/controller.go @@ -229,7 +229,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( 
return reconcile.Result{}, err } - ch := utils.NewComponentHandler(log, r.cli, r.scheme, goldmaneCR) + ch := utils.NewComponentHandler(log, r.cli, r.scheme, goldmaneCR, &variant) cfg := &goldmane.Configuration{ PullSecrets: pullSecrets, OpenShift: r.provider.IsOpenShift(), diff --git a/pkg/controller/installation/core_controller.go b/pkg/controller/installation/core_controller.go index 0dca6f3575..f11d814bea 100644 --- a/pkg/controller/installation/core_controller.go +++ b/pkg/controller/installation/core_controller.go @@ -378,7 +378,7 @@ type ReconcileInstallation struct { manageCRDs bool tierWatchReady *utils.ReadyFlag // newComponentHandler returns a new component handler. Useful stub for unit testing. - newComponentHandler func(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object) utils.ComponentHandler + newComponentHandler func(log logr.Logger, client client.Client, scheme *runtime.Scheme, cr metav1.Object, variant *operatorv1.ProductVariant) utils.ComponentHandler } // getActivePools returns the full set of enabled IP pools in the cluster. @@ -1235,7 +1235,7 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile } // Create a component handler to create or update the rendered components. - handler := r.newComponentHandler(log, r.client, r.scheme, instance) + handler := r.newComponentHandler(log, r.client, r.scheme, instance, &instance.Spec.Variant) // Render namespaces first - this ensures that any other controllers blocked on namespace existence can proceed. namespaceCfg := &render.NamespaceConfiguration{ @@ -2109,7 +2109,7 @@ func (r *ReconcileInstallation) updateCRDs(ctx context.Context, variant operator crdComponent := render.NewPassthrough(crds.ToRuntimeObjects(crds.GetCRDs(variant)...)...) // Specify nil for the CR so no ownership is put on the CRDs. We do this so removing the // Installation CR will not remove the CRDs. 
- handler := r.newComponentHandler(log, r.client, r.scheme, nil) + handler := r.newComponentHandler(log, r.client, r.scheme, nil, &variant) if err := handler.CreateOrUpdateOrDelete(ctx, crdComponent, nil); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating CRD resource", err, log) return err diff --git a/pkg/controller/installation/core_controller_test.go b/pkg/controller/installation/core_controller_test.go index b143cf1ad0..8faf4dafb9 100644 --- a/pkg/controller/installation/core_controller_test.go +++ b/pkg/controller/installation/core_controller_test.go @@ -2299,7 +2299,7 @@ var _ = Describe("Testing core-controller installation", func() { enterpriseCRDsExist: true, migrationChecked: true, tierWatchReady: ready, - newComponentHandler: func(logr.Logger, client.Client, *runtime.Scheme, metav1.Object) utils.ComponentHandler { + newComponentHandler: func(logr.Logger, client.Client, *runtime.Scheme, metav1.Object, *operator.ProductVariant) utils.ComponentHandler { return componentHandler }, } diff --git a/pkg/controller/installation/windows_controller.go b/pkg/controller/installation/windows_controller.go index 5f1772d150..129425b444 100644 --- a/pkg/controller/installation/windows_controller.go +++ b/pkg/controller/installation/windows_controller.go @@ -407,7 +407,7 @@ func (r *ReconcileWindows) Reconcile(ctx context.Context, request reconcile.Requ } // Create a component handler to create or update the rendered components. 
- handler := utils.NewComponentHandler(logw, r.client, r.scheme, instance) + handler := utils.NewComponentHandler(logw, r.client, r.scheme, instance, &instance.Spec.Variant) if err := handler.CreateOrUpdateOrDelete(ctx, component, nil); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) return reconcile.Result{}, err diff --git a/pkg/controller/intrusiondetection/intrusiondetection_controller.go b/pkg/controller/intrusiondetection/intrusiondetection_controller.go index 70b1447b89..a292173967 100644 --- a/pkg/controller/intrusiondetection/intrusiondetection_controller.go +++ b/pkg/controller/intrusiondetection/intrusiondetection_controller.go @@ -436,7 +436,7 @@ func (r *ReconcileIntrusionDetection) Reconcile(ctx context.Context, request rec } // Create a component handler to manage the rendered component. - handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + handler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) // Determine the namespaces to which we must bind the cluster role. namespaces, err := helper.TenantNamespaces(r.client) @@ -567,7 +567,7 @@ func (r *ReconcileIntrusionDetection) Reconcile(ctx context.Context, request rec setupHandler := handler if tenant.MultiTenant() { // In standard installs, the IntrusionDetection CR owns all the objects. For multi-tenant, pull secrets are owned by the Tenant instance. 
- setupHandler = utils.NewComponentHandler(log, r.client, r.scheme, tenant) + setupHandler = utils.NewComponentHandler(log, r.client, r.scheme, tenant, &variant) } if err := setupHandler.CreateOrUpdateOrDelete(ctx, setUp, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) diff --git a/pkg/controller/ippool/pool_controller.go b/pkg/controller/ippool/pool_controller.go index 819355500d..34ebef3cca 100644 --- a/pkg/controller/ippool/pool_controller.go +++ b/pkg/controller/ippool/pool_controller.go @@ -364,7 +364,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( // will remain even though all other Calico resources will be deleted. This is intentional - deleting IP pools requires the Calico API server to be // running, and we don't want to block the deletion of the Installation on the API server being available, as it introduces too many ways for // things to go wrong upon deleting the Installation API. Users can manually delete the IP pools if they are no longer needed. - handler := utils.NewComponentHandler(log, r.client, r.scheme, nil) + handler := utils.NewComponentHandler(log, r.client, r.scheme, nil, &installation.Spec.Variant) passThru := render.NewPassthroughWithLog(log, toCreateOrUpdate...) 
if err := handler.CreateOrUpdateOrDelete(ctx, passThru, nil); err != nil { diff --git a/pkg/controller/istio/istio_controller.go b/pkg/controller/istio/istio_controller.go index 67042800cf..1d825f972a 100644 --- a/pkg/controller/istio/istio_controller.go +++ b/pkg/controller/istio/istio_controller.go @@ -192,7 +192,7 @@ func (r *ReconcileIstio) Reconcile(ctx context.Context, request reconcile.Reques essentialCRDs, optionalCRDs := gatewayapi.K8SGatewayAPICRDs(installation.KubernetesProvider) // Check CRDs are present and only create it if not - handler := utils.NewComponentHandler(log, r, r.scheme, nil) + handler := utils.NewComponentHandler(log, r, r.scheme, nil, nil) handler.SetCreateOnly() err = handler.CreateOrUpdateOrDelete(ctx, render.NewPassthrough(essentialCRDs...), nil) if err != nil && !errors.IsAlreadyExists(err) { @@ -232,7 +232,7 @@ func (r *ReconcileIstio) Reconcile(ctx context.Context, request reconcile.Reques } // Deploy Istio components, passing the Istio CR for the owner this time. - err = utils.NewComponentHandler(log, r, r.scheme, instance).CreateOrUpdateOrDelete(ctx, istioComponent, r.status) + err = utils.NewComponentHandler(log, r, r.scheme, instance, &variant).CreateOrUpdateOrDelete(ctx, istioComponent, r.status) if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Error rendering Calico Istio resources", err, log) return reconcile.Result{}, err diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go index 40d0c8fbc9..44d88ef952 100644 --- a/pkg/controller/logcollector/logcollector_controller.go +++ b/pkg/controller/logcollector/logcollector_controller.go @@ -581,7 +581,7 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } // Create a component handler to manage the rendered component. 
- handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + handler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) fluentdCfg := &render.FluentdConfiguration{ LogCollector: instance, @@ -683,7 +683,7 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } // Create a component handler to manage the rendered component. - handler = utils.NewComponentHandler(log, r.client, r.scheme, instance) + handler = utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) if err := handler.CreateOrUpdateOrDelete(ctx, comp, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) diff --git a/pkg/controller/logstorage/dashboards/dashboards_controller.go b/pkg/controller/logstorage/dashboards/dashboards_controller.go index 76a5efc5fe..32b7a8f9ad 100644 --- a/pkg/controller/logstorage/dashboards/dashboards_controller.go +++ b/pkg/controller/logstorage/dashboards/dashboards_controller.go @@ -360,9 +360,9 @@ func (d DashboardsSubController) Reconcile(ctx context.Context, request reconcil // In standard installs, the LogStorage owns the dashboards. For multi-tenant, it's owned by the Tenant instance. 
var hdler utils.ComponentHandler if d.multiTenant { - hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, tenant) + hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, tenant, &variant) } else { - hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, logStorage) + hdler = utils.NewComponentHandler(reqLogger, d.client, d.scheme, logStorage, &variant) } if err := hdler.CreateOrUpdateOrDelete(ctx, dashboardsComponent, d.status); err != nil { d.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating / deleting resource", err, reqLogger) diff --git a/pkg/controller/logstorage/elastic/elastic_controller.go b/pkg/controller/logstorage/elastic/elastic_controller.go index 7d1fdf1e86..3c4805631e 100644 --- a/pkg/controller/logstorage/elastic/elastic_controller.go +++ b/pkg/controller/logstorage/elastic/elastic_controller.go @@ -489,7 +489,7 @@ func (r *ElasticSubController) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, err } - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls, &variant) components := []render.Component{ eck.ECK(&eck.Configuration{ diff --git a/pkg/controller/logstorage/elastic/external_elastic_controller.go b/pkg/controller/logstorage/elastic/external_elastic_controller.go index 42f25352f1..066be78fab 100644 --- a/pkg/controller/logstorage/elastic/external_elastic_controller.go +++ b/pkg/controller/logstorage/elastic/external_elastic_controller.go @@ -123,7 +123,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. 
} r.status.OnCRFound() - _, install, err := utils.GetInstallation(context.Background(), r.client) + variant, install, err := utils.GetInstallation(context.Background(), r.client) if err != nil { if errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) @@ -156,7 +156,7 @@ func (r *ExternalESController) Reconcile(ctx context.Context, request reconcile. flowShards := logstoragecommon.CalculateFlowShards(ls.Spec.Nodes, logstoragecommon.DefaultElasticsearchShards) clusterConfig := relasticsearch.NewClusterConfig(render.DefaultElasticsearchClusterName, ls.Replicas(), logstoragecommon.DefaultElasticsearchShards, flowShards) - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls, &variant) externalElasticsearch := externalelasticsearch.ExternalElasticsearch(install, clusterConfig, pullSecrets, r.multiTenant) for _, component := range []render.Component{externalElasticsearch} { if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { diff --git a/pkg/controller/logstorage/esmetrics/esmetrics_controller.go b/pkg/controller/logstorage/esmetrics/esmetrics_controller.go index 0badb1a835..fd88697f80 100644 --- a/pkg/controller/logstorage/esmetrics/esmetrics_controller.go +++ b/pkg/controller/logstorage/esmetrics/esmetrics_controller.go @@ -244,7 +244,7 @@ func (r *ESMetricsSubController) Reconcile(ctx context.Context, request reconcil return reconcile.Result{}, err } - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage, &variant) if err = hdler.CreateOrUpdateOrDelete(ctx, esMetricsComponent, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) diff --git a/pkg/controller/logstorage/initializer/initializing_controller.go 
b/pkg/controller/logstorage/initializer/initializing_controller.go index 70e968da2f..584fc13da2 100644 --- a/pkg/controller/logstorage/initializer/initializing_controller.go +++ b/pkg/controller/logstorage/initializer/initializing_controller.go @@ -204,7 +204,7 @@ func (r *LogStorageInitializer) Reconcile(ctx context.Context, request reconcile r.status.OnCRFound() // Get Installation resource. - _, install, err := utils.GetInstallation(context.Background(), r.client) + variant, install, err := utils.GetInstallation(context.Background(), r.client) if err != nil { if errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) @@ -247,7 +247,7 @@ func (r *LogStorageInitializer) Reconcile(ctx context.Context, request reconcile } // Before we can create secrets, we need to ensure the tigera-elasticsearch namespace exists. - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls, &variant) components := []render.Component{render.NewSetup(&render.SetUpConfiguration{ OpenShift: r.provider.IsOpenShift(), Installation: install, diff --git a/pkg/controller/logstorage/kubecontrollers/es_kube_controllers.go b/pkg/controller/logstorage/kubecontrollers/es_kube_controllers.go index be48ef851f..ff3844739c 100644 --- a/pkg/controller/logstorage/kubecontrollers/es_kube_controllers.go +++ b/pkg/controller/logstorage/kubecontrollers/es_kube_controllers.go @@ -278,7 +278,7 @@ func (r *ESKubeControllersController) Reconcile(ctx context.Context, request rec return reconcile.Result{}, err } - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage, &variant) // Get the Authentication resource. 
authentication, err := utils.GetAuthentication(ctx, r.client) diff --git a/pkg/controller/logstorage/linseed/linseed_controller.go b/pkg/controller/logstorage/linseed/linseed_controller.go index b2b1814b9d..c8f8a44643 100644 --- a/pkg/controller/logstorage/linseed/linseed_controller.go +++ b/pkg/controller/logstorage/linseed/linseed_controller.go @@ -458,9 +458,9 @@ func (r *LinseedSubController) Reconcile(ctx context.Context, request reconcile. // In standard installs, the LogStorage owns Linseed. For multi-tenant, it's owned by the Tenant instance. var hdler utils.ComponentHandler if r.multiTenant { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant, &variant) } else { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage, &variant) } for _, component := range []render.Component{setup, linseedComponent} { if err := hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { diff --git a/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go b/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go index fe801f47c9..881b0a2b27 100644 --- a/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go +++ b/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go @@ -1,4 +1,4 @@ -// Copyright (c) 2022-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2022-2025 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -139,7 +139,7 @@ func (r *LogStorageManagedClusterController) Reconcile(ctx context.Context, requ Installation: install, } component := render.NewManagedClusterLogStorage(cfg) - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, managementClusterConnection) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, managementClusterConnection, &variant) if err := hdler.CreateOrUpdateOrDelete(ctx, component, nil); err != nil { return reconcile.Result{}, err } diff --git a/pkg/controller/logstorage/secrets/secret_controller.go b/pkg/controller/logstorage/secrets/secret_controller.go index 259dda2d0c..b008c2589f 100644 --- a/pkg/controller/logstorage/secrets/secret_controller.go +++ b/pkg/controller/logstorage/secrets/secret_controller.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2023-2025 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -259,7 +259,7 @@ func (r *SecretSubController) Reconcile(ctx context.Context, request reconcile.R operatorSigner.AddToStatusManager(r.status, render.ElasticsearchNamespace) // Provision secrets and the trusted bundle into the cluster. - hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls) + hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls, &install.Variant) // Internal ES modes: // - Zero-tenant: everything installed in tigera-elasticsearch/tigera-kibana Namespaces. We need a single trusted bundle in each. diff --git a/pkg/controller/logstorage/users/users_controller.go b/pkg/controller/logstorage/users/users_controller.go index 2f6865d2fc..5f72ec919a 100644 --- a/pkg/controller/logstorage/users/users_controller.go +++ b/pkg/controller/logstorage/users/users_controller.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2023-2025 Tigera, Inc. 
All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -283,12 +283,22 @@ func (r *UserController) Reconcile(ctx context.Context, request reconcile.Reques } credentialComponent := render.NewPassthrough(credentialSecrets...) + variant, _, err := utils.GetInstallation(context.Background(), r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) + return reconcile.Result{}, err + } + r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Installation", err, reqLogger) + return reconcile.Result{}, err + } + // In standard installs, the LogStorage owns the secret. For multi-tenant, it's owned by the tenant. var hdler utils.ComponentHandler if r.multiTenant { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant) + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, tenant, &variant) } else { - hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage) + hdler = utils.NewComponentHandler(reqLogger, r.client, r.scheme, logStorage, &variant) } if err = hdler.CreateOrUpdateOrDelete(ctx, credentialComponent, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating Linseed user secret", err, reqLogger) diff --git a/pkg/controller/manager/manager_controller.go b/pkg/controller/manager/manager_controller.go index a4cdfcda03..4a468c8240 100644 --- a/pkg/controller/manager/manager_controller.go +++ b/pkg/controller/manager/manager_controller.go @@ -609,7 +609,7 @@ func (r *ReconcileManager) Reconcile(ctx context.Context, request reconcile.Requ } // Create a component handler to manage the rendered component. 
- defaultHandler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + defaultHandler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) // Set replicas to 1 for management or managed clusters. // TODO Remove after MCM tigera-manager HA deployment is supported. diff --git a/pkg/controller/monitor/monitor_controller.go b/pkg/controller/monitor/monitor_controller.go index 0b9fa4d664..10bdb4a639 100644 --- a/pkg/controller/monitor/monitor_controller.go +++ b/pkg/controller/monitor/monitor_controller.go @@ -357,7 +357,7 @@ func (r *ReconcileMonitor) Reconcile(ctx context.Context, request reconcile.Requ } // Create a component handler to manage the rendered component. - hdler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + hdler := utils.NewComponentHandler(log, r.client, r.scheme, instance, &variant) alertmanagerConfigSecret, createInOperatorNamespace, err := r.readAlertmanagerConfigSecret(ctx) if err != nil { diff --git a/pkg/controller/nonclusterhost/nonclusterhost_controller.go b/pkg/controller/nonclusterhost/nonclusterhost_controller.go index 7b04702351..a2b4c8dd2b 100644 --- a/pkg/controller/nonclusterhost/nonclusterhost_controller.go +++ b/pkg/controller/nonclusterhost/nonclusterhost_controller.go @@ -19,6 +19,7 @@ import ( "fmt" "net" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -118,7 +119,17 @@ func (r *ReconcileNonClusterHost) Reconcile(ctx context.Context, request reconci } component := nonclusterhost.NonClusterHost(config) - ch := utils.NewComponentHandler(logc, r.client, r.scheme, instance) + variant, _, err := utils.GetInstallation(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, logc) + return reconcile.Result{}, err + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Error 
querying installation", err, logc) + return reconcile.Result{}, err + } + + ch := utils.NewComponentHandler(logc, r.client, r.scheme, instance, &variant) if err = ch.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, logc) return reconcile.Result{}, err diff --git a/pkg/controller/packetcapture/packetcapture_controller.go b/pkg/controller/packetcapture/packetcapture_controller.go index deda02e766..c00df78263 100644 --- a/pkg/controller/packetcapture/packetcapture_controller.go +++ b/pkg/controller/packetcapture/packetcapture_controller.go @@ -215,7 +215,7 @@ func (r *ReconcilePacketCapture) Reconcile(ctx context.Context, request reconcil } // Create a component handler to manage the rendered component. - handler := utils.NewComponentHandler(log, r.client, r.scheme, packetcaptureapi) + handler := utils.NewComponentHandler(log, r.client, r.scheme, packetcaptureapi, &variant) certificateManager, err := certificatemanager.Create(r.client, installationSpec, r.clusterDomain, common.OperatorNamespace()) if err != nil { diff --git a/pkg/controller/policyrecommendation/policyrecommendation_controller.go b/pkg/controller/policyrecommendation/policyrecommendation_controller.go index ea86d4041c..5b09895df0 100644 --- a/pkg/controller/policyrecommendation/policyrecommendation_controller.go +++ b/pkg/controller/policyrecommendation/policyrecommendation_controller.go @@ -340,7 +340,7 @@ func (r *ReconcilePolicyRecommendation) Reconcile(ctx context.Context, request r } // Create a component handler to manage the rendered component. - defaultHandler := utils.NewComponentHandler(log, r.client, r.scheme, policyRecommendation) + defaultHandler := utils.NewComponentHandler(log, r.client, r.scheme, policyRecommendation, &variant) // Determine the namespaces to which we must bind the cluster role. 
// For multi-tenant, the cluster role will be bind to the service account in the tenant namespace diff --git a/pkg/controller/secrets/cluster_ca_controller.go b/pkg/controller/secrets/cluster_ca_controller.go index cc88153259..9b70f29d99 100644 --- a/pkg/controller/secrets/cluster_ca_controller.go +++ b/pkg/controller/secrets/cluster_ca_controller.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2023-2025 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -82,7 +82,7 @@ func (r *ClusterCAController) Reconcile(ctx context.Context, request reconcile.R logc := r.log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) // Get Installation resource. - _, instance, err := utils.GetInstallation(ctx, r.client) + variant, instance, err := utils.GetInstallation(ctx, r.client) if err != nil { if errors.IsNotFound(err) { logc.Info("Installation not found") @@ -116,7 +116,7 @@ func (r *ClusterCAController) Reconcile(ctx context.Context, request reconcile.R return reconcile.Result{}, err } - hdler := utils.NewComponentHandler(logc, r.client, r.scheme, ownerResource) + hdler := utils.NewComponentHandler(logc, r.client, r.scheme, ownerResource, &variant) if err = hdler.CreateOrUpdateOrDelete(ctx, component, nil); err != nil { return reconcile.Result{}, err } diff --git a/pkg/controller/secrets/tenant_controller.go b/pkg/controller/secrets/tenant_controller.go index 95dd1da8d0..16b5754858 100644 --- a/pkg/controller/secrets/tenant_controller.go +++ b/pkg/controller/secrets/tenant_controller.go @@ -1,4 +1,4 @@ -// Copyright (c) 2023-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2023-2025 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -137,7 +137,7 @@ func (r *TenantController) Reconcile(ctx context.Context, request reconcile.Requ } } // Get Installation resource. - _, installation, err := utils.GetInstallation(context.Background(), r.client) + variant, installation, err := utils.GetInstallation(context.Background(), r.client) if err != nil { if errors.IsNotFound(err) { r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, logc) @@ -207,7 +207,7 @@ func (r *TenantController) Reconcile(ctx context.Context, request reconcile.Requ TrustedBundle: trustedBundleWithSystemCAs, }) - hdler := utils.NewComponentHandler(logc, r.client, r.scheme, tenant) + hdler := utils.NewComponentHandler(logc, r.client, r.scheme, tenant, &variant) if err = hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, logc) return reconcile.Result{}, err diff --git a/pkg/controller/tiers/tiers_controller.go b/pkg/controller/tiers/tiers_controller.go index f2480724cf..1785951026 100644 --- a/pkg/controller/tiers/tiers_controller.go +++ b/pkg/controller/tiers/tiers_controller.go @@ -160,7 +160,17 @@ func (r *ReconcileTiers) Reconcile(ctx context.Context, request reconcile.Reques component := tiers.Tiers(tiersConfig) - componentHandler := utils.NewComponentHandler(log, r.client, r.scheme, nil) + variant, _, err := utils.GetInstallation(ctx, r.client) + if err != nil { + if apierrors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) + return reconcile.Result{}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying installation", err, reqLogger) + return reconcile.Result{}, err + } + + componentHandler := utils.NewComponentHandler(log, r.client, r.scheme, nil, &variant) err = componentHandler.CreateOrUpdateOrDelete(ctx, component, nil) if err != nil { r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error 
creating / updating resource", err, reqLogger) diff --git a/pkg/controller/utils/component.go b/pkg/controller/utils/component.go index c3bdc3fea1..4364d8ca5e 100644 --- a/pkg/controller/utils/component.go +++ b/pkg/controller/utils/component.go @@ -40,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" + operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/render" @@ -71,12 +72,13 @@ type ComponentHandler interface { // cr is allowed to be nil in the case we don't want to put ownership on a resource, // this is useful for CRD management so that they are not removed automatically. -func NewComponentHandler(log logr.Logger, cli client.Client, scheme *runtime.Scheme, cr metav1.Object) ComponentHandler { +func NewComponentHandler(log logr.Logger, cli client.Client, scheme *runtime.Scheme, cr metav1.Object, variant *operatorv1.ProductVariant) ComponentHandler { return &componentHandler{ - client: cli, - scheme: scheme, - cr: cr, - log: log, + client: cli, + scheme: scheme, + cr: cr, + log: log, + variant: variant, } } @@ -86,6 +88,7 @@ type componentHandler struct { cr metav1.Object log logr.Logger createOnly bool + variant *operatorv1.ProductVariant } func (c *componentHandler) SetCreateOnly() { @@ -235,7 +238,7 @@ func (c *componentHandler) createOrUpdateObject(ctx context.Context, obj client. 
setProbeTimeouts(obj) // Make sure we have our standard selector and pod labels - setStandardSelectorAndLabels(obj) + setStandardSelectorAndLabels(obj, c.cr, c.variant) if err := ensureTLSCiphers(ctx, obj, c.client); err != nil { return fmt.Errorf("failed to set TLS Ciphers: %w", err) @@ -951,10 +954,20 @@ func setProbeTimeouts(obj client.Object) { } } -// setStandardSelectorAndLabels will set the k8s-app and app.kubernetes.io/name Labels on the podTemplates +// setStandardSelectorAndLabels will set the recommended labels found at +// https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +// It will also set the k8s-app and app.kubernetes.io/name Labels on the podTemplates // for Deployments and Daemonsets. If there is no Selector specified a selector will also be added // that selects the k8s-app label. -func setStandardSelectorAndLabels(obj client.Object) { +func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Object, variant *operatorv1.ProductVariant) { + if obj.GetLabels() == nil { + obj.SetLabels(make(map[string]string)) + } + addNameLabel(obj, obj.GetName()) + addInstanceLabel(obj, customResource) + addComponentLabel(obj, customResource) + addPartOfLabel(obj, variant) + addManagedByLabel(obj, obj.GetName()) var podTemplate *v1.PodTemplateSpec var name string switch obj := obj.(type) { @@ -995,8 +1008,59 @@ func setStandardSelectorAndLabels(obj client.Object) { if podTemplate.Labels["k8s-app"] == "" { podTemplate.Labels["k8s-app"] = name } - if podTemplate.Labels["app.kubernetes.io/name"] == "" { - podTemplate.Labels["app.kubernetes.io/name"] = name + addNameLabel(podTemplate, obj.GetName()) + addInstanceLabel(podTemplate, customResource) + addComponentLabel(podTemplate, customResource) + addPartOfLabel(podTemplate, variant) + addManagedByLabel(podTemplate, obj.GetName()) +} + +// addNameLabel sets the name of the application. 
+// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
+func addNameLabel(obj metav1.Object, name string) {
+	if obj.GetLabels()["app.kubernetes.io/name"] == "" {
+		obj.GetLabels()["app.kubernetes.io/name"] = name
+	}
+	if obj.GetLabels()["k8s-app"] == "" {
+		obj.GetLabels()["k8s-app"] = name
+	}
+}
+
+// addInstanceLabel sets a unique name identifying the instance of an application. We use the name of the custom resource
+// that owns this object.
+// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
+func addInstanceLabel(obj metav1.Object, cr metav1.Object) {
+	if obj.GetLabels()["app.kubernetes.io/instance"] == "" && cr != nil {
+		obj.GetLabels()["app.kubernetes.io/instance"] = cr.GetName()
+	}
+}
+
+// addComponentLabel sets the component within the architecture. We use the kind of the custom resource that owns this
+// object.
+// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
+func addComponentLabel(obj metav1.Object, cr metav1.Object) {
+	if obj.GetLabels()["app.kubernetes.io/component"] == "" && cr != nil {
+		owner, ok := cr.(runtime.Object)
+		if ok && owner.GetObjectKind() != nil {
+			obj.GetLabels()["app.kubernetes.io/component"] = owner.GetObjectKind().GroupVersionKind().GroupKind().String()
+
+		}
+	}
+}
+
+// addPartOfLabel sets the name of a higher level application this one is part of. We use the product variant.
+// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +func addPartOfLabel(obj metav1.Object, variant *operatorv1.ProductVariant) { + if obj.GetLabels()["app.kubernetes.io/part-of"] == "" && variant != nil { + obj.GetLabels()["app.kubernetes.io/part-of"] = string(*variant) + } +} + +// addManagedByLabel sets the tool being used to manage the operation of an application. +// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ +func addManagedByLabel(obj metav1.Object, version string) { + if obj.GetLabels()["app.kubernetes.io/managed-by"] == "" { + obj.GetLabels()["app.kubernetes.io/managed-by"] = common.OperatorName() } } diff --git a/pkg/controller/utils/component_test.go b/pkg/controller/utils/component_test.go index 01a277c151..40168b9255 100644 --- a/pkg/controller/utils/component_test.go +++ b/pkg/controller/utils/component_test.go @@ -87,7 +87,7 @@ var _ = Describe("Component handler tests", func() { TypeMeta: metav1.TypeMeta{Kind: "Manager", APIVersion: "operator.tigera.io/v1"}, ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, } - handler = NewComponentHandler(logf.Log, c, scheme, instance) + handler = NewComponentHandler(logf.Log, c, scheme, instance, &operatorv1.Calico) }) It("adds Owner references when Custom Resource is provided", func() { @@ -414,7 +414,13 @@ var _ = Describe("Component handler tests", func() { By("checking that the namespace is created and desired label is present") expectedLabels := map[string]string{ - fakeComponentLabelKey: fakeComponentLabelValue, + fakeComponentLabelKey: fakeComponentLabelValue, + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/part-of": "Calico", + "app.kubernetes.io/name": "test-namespace", + "k8s-app": "test-namespace", + "app.kubernetes.io/component": "Manager.operator.tigera.io", } nsKey := 
client.ObjectKey{ Name: "test-namespace", @@ -463,8 +469,14 @@ var _ = Describe("Component handler tests", func() { By("retrieving the namespace and checking that both current and desired labels are still present") expectedLabels = map[string]string{ - "extra": "extra-value", - fakeComponentLabelKey: fakeComponentLabelValue, + "extra": "extra-value", + fakeComponentLabelKey: fakeComponentLabelValue, + "k8s-app": "test-namespace", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "test-namespace", + "app.kubernetes.io/part-of": "Calico", } ns = &corev1.Namespace{} err = c.Get(ctx, nsKey, ns) @@ -473,9 +485,15 @@ var _ = Describe("Component handler tests", func() { By("changing a desired label") labels = map[string]string{ - "extra": "extra-value", - "cattle-not-pets": "indeed", - fakeComponentLabelKey: "not-present", + "extra": "extra-value", + "cattle-not-pets": "indeed", + fakeComponentLabelKey: "not-present", + "app.kubernetes.io/part-of": "Calico", + "k8s-app": "test-namespace", + "app.kubernetes.io/name": "test-namespace", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/instance": "tigera-secure", } ns.Labels = labels err = c.Update(ctx, ns) @@ -483,9 +501,15 @@ var _ = Describe("Component handler tests", func() { By("checking that the namespace is updated with new modified label") expectedLabels = map[string]string{ - "cattle-not-pets": "indeed", - "extra": "extra-value", - fakeComponentLabelKey: "not-present", + "cattle-not-pets": "indeed", + "extra": "extra-value", + fakeComponentLabelKey: "not-present", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "test-namespace", + 
"app.kubernetes.io/part-of": "Calico", + "k8s-app": "test-namespace", } nsKey = client.ObjectKey{ Name: "test-namespace", @@ -515,9 +539,15 @@ var _ = Describe("Component handler tests", func() { By("retrieving the namespace and checking that desired label is reconciled, everything else is left as-is") expectedLabels = map[string]string{ - "cattle-not-pets": "indeed", - "extra": "extra-value", - fakeComponentLabelKey: fakeComponentLabelValue, + "cattle-not-pets": "indeed", + "extra": "extra-value", + fakeComponentLabelKey: fakeComponentLabelValue, + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "test-namespace", + "k8s-app": "test-namespace", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/part-of": "Calico", } ns = &corev1.Namespace{} err = c.Get(ctx, nsKey, ns) @@ -1198,7 +1228,13 @@ var _ = Describe("Component handler tests", func() { ObjectMeta: metav1.ObjectMeta{ Name: "my-service", Labels: map[string]string{ - "old": "should-be-preserved", + "old": "should-be-preserved", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "my-service", + "app.kubernetes.io/part-of": "Calico", + "k8s-app": "my-service", + "app.kubernetes.io/component": "Manager.operator.tigera.io", }, }, Spec: corev1.ServiceSpec{ @@ -1216,7 +1252,13 @@ var _ = Describe("Component handler tests", func() { Expect(c.Get(ctx, client.ObjectKey{Name: "my-service"}, svcWithIP)).NotTo(HaveOccurred()) Expect(svcWithIP.Spec.ClusterIP).To(Equal("10.96.0.1")) Expect(svcWithIP.Labels).To(Equal(map[string]string{ - "old": "should-be-preserved", + "old": "should-be-preserved", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "my-service", + "app.kubernetes.io/part-of": "Calico", + "k8s-app": "my-service", + 
"app.kubernetes.io/component": "Manager.operator.tigera.io", })) // Now pretend we're the new operator version, wanting to remove the cluster IP. @@ -1242,8 +1284,14 @@ var _ = Describe("Component handler tests", func() { Expect(c.Get(ctx, client.ObjectKey{Name: "my-service"}, svcNoIP)).NotTo(HaveOccurred()) Expect(svcNoIP.Spec.ClusterIP).To(Equal("None")) Expect(svcNoIP.Labels).To(Equal(map[string]string{ - "old": "should-be-preserved", - "new": "should-be-added", + "old": "should-be-preserved", + "new": "should-be-added", + "k8s-app": "my-service", + "app.kubernetes.io/name": "my-service", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/part-of": "Calico", + "app.kubernetes.io/component": "Manager.operator.tigera.io", })) // The fake client resets the resource version to 1 on create. @@ -1256,9 +1304,15 @@ var _ = Describe("Component handler tests", func() { Expect(err).NotTo(HaveOccurred()) Expect(c.Get(ctx, client.ObjectKey{Name: "my-service"}, svcNoIP)).NotTo(HaveOccurred()) Expect(svcNoIP.Labels).To(Equal(map[string]string{ - "old": "should-be-preserved", - "new": "should-be-added", - "newer": "should-be-added", + "old": "should-be-preserved", + "new": "should-be-added", + "newer": "should-be-added", + "k8s-app": "my-service", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/name": "my-service", + "app.kubernetes.io/part-of": "Calico", })) Expect(svcNoIP.ObjectMeta.ResourceVersion).To(Equal("2"), "Expected update to rev ResourceVersion") @@ -1726,8 +1780,12 @@ var _ = Describe("Component handler tests", func() { By("checking that the daemonset is created and labels are added") expectedLabels := map[string]string{ - "k8s-app": "test-daemonset", - "app.kubernetes.io/name": "test-daemonset", + "k8s-app": "test-daemonset", + "app.kubernetes.io/name": 
"test-daemonset", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/part-of": "Calico", } expectedSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"k8s-app": "test-daemonset"}, @@ -1764,8 +1822,12 @@ var _ = Describe("Component handler tests", func() { Expect(err).To(BeNil()) expectedLabels := map[string]string{ - "k8s-app": "test-daemonset", - "app.kubernetes.io/name": "test-daemonset", + "k8s-app": "test-daemonset", + "app.kubernetes.io/name": "test-daemonset", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/part-of": "Calico", } expectedSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"preset-key": "preset-value"}, @@ -1797,8 +1859,12 @@ var _ = Describe("Component handler tests", func() { Expect(err).To(BeNil()) expectedLabels := map[string]string{ - "k8s-app": "test-deployment", - "app.kubernetes.io/name": "test-deployment", + "k8s-app": "test-deployment", + "app.kubernetes.io/name": "test-deployment", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", + "app.kubernetes.io/part-of": "Calico", + "app.kubernetes.io/component": "Manager.operator.tigera.io", } expectedSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"k8s-app": "test-deployment"}, @@ -1836,8 +1902,12 @@ var _ = Describe("Component handler tests", func() { Expect(err).To(BeNil()) expectedLabels := map[string]string{ - "k8s-app": "test-deployment", - "app.kubernetes.io/name": "test-deployment", + "k8s-app": "test-deployment", + "app.kubernetes.io/name": "test-deployment", + "app.kubernetes.io/component": "Manager.operator.tigera.io", + "app.kubernetes.io/instance": "tigera-secure", + "app.kubernetes.io/managed-by": "tigera-operator", 
+ "app.kubernetes.io/part-of": "Calico", } expectedSelector := metav1.LabelSelector{ MatchLabels: map[string]string{"preset-key": "preset-value"}, @@ -1992,7 +2062,7 @@ var _ = Describe("Mocked client Component handler tests", func() { c = &mc ctx = context.Background() - handler = NewComponentHandler(logf.Log, c, runtime.NewScheme(), nil) + handler = NewComponentHandler(logf.Log, c, runtime.NewScheme(), nil, &operatorv1.Calico) // Use a new cache for each test. dCache = newCache() diff --git a/pkg/controller/whisker/controller.go b/pkg/controller/whisker/controller.go index 5099d22519..58d15eb4fb 100644 --- a/pkg/controller/whisker/controller.go +++ b/pkg/controller/whisker/controller.go @@ -234,7 +234,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, request reconcile.Request) ( return reconcile.Result{}, err } - ch := utils.NewComponentHandler(log, r.cli, r.scheme, whiskerCR) + ch := utils.NewComponentHandler(log, r.cli, r.scheme, whiskerCR, &variant) cfg := &whisker.Configuration{ PullSecrets: pullSecrets, OpenShift: r.provider.IsOpenShift(), From 93dd32ce7e52684e19080e4e3bbe03e6508e0e55 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Tue, 30 Dec 2025 11:14:51 -0800 Subject: [PATCH 2/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- pkg/controller/ippool/pool_controller_test.go | 2 +- test/pool_test.go | 13 ++++++------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/pkg/controller/ippool/pool_controller_test.go b/pkg/controller/ippool/pool_controller_test.go index 67e25e529d..0e0d735fde 100644 --- a/pkg/controller/ippool/pool_controller_test.go +++ b/pkg/controller/ippool/pool_controller_test.go @@ -258,7 +258,7 @@ var _ = Describe("IP Pool controller tests", func() { } for _, pool := range instance.Spec.CalicoNetwork.IPPools { Expect(poolsByCIDR).To(HaveKey(pool.CIDR)) - 
Expect(poolsByCIDR[pool.CIDR].Labels).To(Equal(map[string]string{"app.kubernetes.io/managed-by": "tigera-operator"})) + Expect(poolsByCIDR[pool.CIDR].Labels).To(HaveKeyWithValue("app.kubernetes.io/managed-by", "tigera-operator")) } }) diff --git a/test/pool_test.go b/test/pool_test.go index ddb38f7a18..c24e8de951 100644 --- a/test/pool_test.go +++ b/test/pool_test.go @@ -20,6 +20,7 @@ import ( "strings" "time" + "github.com/elastic/cloud-on-k8s/v2/pkg/utils/maps" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -189,7 +190,7 @@ var _ = Describe("IPPool FV tests", func() { Expect(ipPools.Items[0].Spec.Disabled).To(Equal(false)) Expect(ipPools.Items[0].Spec.BlockSize).To(Equal(26)) Expect(ipPools.Items[0].Spec.NodeSelector).To(Equal("all()")) - Expect(ipPools.Items[0].Labels).To(HaveLen(1)) + Expect(ipPools.Items[0].Labels).To(HaveKeyWithValue("app.kubernetes.io/managed-by", "tigera-operator")) Expect(ipPools.Items[0].Spec.AssignmentMode).To(Equal(operator.AssignmentModeAutomatic)) }) @@ -251,7 +252,7 @@ var _ = Describe("IPPool FV tests", func() { Expect(len(ipPools.Items)).To(Equal(1), fmt.Sprintf("Expected 1 IP pool, but got: %+v", ipPools.Items)) // This proves the operator has not assumed control. - Expect(ipPools.Items[0].Labels).To(HaveLen(0)) + Expect(ipPools.Items[0].Labels).NotTo(HaveKey("app.kubernetes.io/managed-by")) // Now, install the API server. 
createAPIServer(c, mgr, shutdownContext, nil) @@ -268,14 +269,12 @@ var _ = Describe("IPPool FV tests", func() { if len(v3Pools.Items) != 1 { return fmt.Errorf("Expected 1 IP pool, but got: %+v", v3Pools.Items) } - if len(v3Pools.Items[0].Labels) != 1 { - return fmt.Errorf("Expected 1 label on IP pool, but got: %+v", v3Pools.Items[0].Labels) + if !maps.ContainsKeys(v3Pools.Items[0].Labels, "app.kubernetes.io/managed-by") { + return fmt.Errorf("Expected app.kubernetes.io/managed-by label, but got: %+v", v3Pools.Items[0].Labels) } return nil }, 5*time.Second, 1*time.Second).ShouldNot(HaveOccurred()) - Expect(v3Pools.Items[0].Labels).To(HaveKey("app.kubernetes.io/managed-by")) - // Verify that the default IPv4 pool has been subsumed by the operator. Expect(v3Pools.Items[0].Name).To(Equal("default-ipv4-ippool")) Expect(v3Pools.Items[0].Spec.CIDR).To(Equal("192.168.0.0/24")) @@ -351,7 +350,7 @@ var _ = Describe("IPPool FV tests", func() { Expect(len(ipPools.Items)).To(Equal(1), fmt.Sprintf("Expected 1 IP pool, but got: %+v", ipPools.Items)) // This proves the operator has not assumed control. - Expect(ipPools.Items[0].Labels).To(HaveLen(0)) + Expect(ipPools.Items[0].Labels).NotTo(HaveKey("app.kubernetes.io/managed-by")) // Now, install the API server. 
createAPIServer(c, mgr, shutdownContext, nil) From 54f04ce55f82778ec1031288bfcbabb20887d011 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Tue, 30 Dec 2025 13:20:38 -0800 Subject: [PATCH 3/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- .../monitor/monitor_controller_test.go | 11 ++++++++-- pkg/controller/utils/component.go | 21 +++++++++++++------ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/pkg/controller/monitor/monitor_controller_test.go b/pkg/controller/monitor/monitor_controller_test.go index 6960bcea18..b2a549c2da 100644 --- a/pkg/controller/monitor/monitor_controller_test.go +++ b/pkg/controller/monitor/monitor_controller_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021-2024 Tigera, Inc. All rights reserved. +// Copyright (c) 2021-2025 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -259,7 +259,14 @@ var _ = Describe("Monitor controller tests", func() { Expect(serviceMonitor.Spec.Endpoints).To(HaveLen(1)) // Verify that the default settings are propagated. 
-		Expect(serviceMonitor.Labels).To(Equal(map[string]string{render.AppLabelName: monitor.TigeraExternalPrometheus}))
+		Expect(serviceMonitor.Labels).To(Equal(map[string]string{
+			render.AppLabelName:            monitor.TigeraExternalPrometheus,
+			"app.kubernetes.io/instance":   "tigera-secure",
+			"app.kubernetes.io/managed-by": "tigera-operator",
+			"app.kubernetes.io/name":       "tigera-external-prometheus",
+			"app.kubernetes.io/part-of":    "TigeraSecureEnterprise",
+			"app.kubernetes.io/component":  "Monitor.operator.tigera.io",
+		}))
 		Expect(serviceMonitor.Spec.Endpoints[0]).To(Equal(monitoringv1.Endpoint{
 			Params: map[string][]string{"match[]": {"{__name__=~\".+\"}"}},
 			Port:   "web",
diff --git a/pkg/controller/utils/component.go b/pkg/controller/utils/component.go
index 4364d8ca5e..9e233961b5 100644
--- a/pkg/controller/utils/component.go
+++ b/pkg/controller/utils/component.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"reflect"
+	"regexp"
 	"slices"
 	"strings"
 	"sync"
@@ -1015,14 +1016,26 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec
 	addManagedByLabel(podTemplate, obj.GetName())
 }
 
+// labelValueSanitizePattern matches any character that is not valid in a Kubernetes label value.
+// Compiled once at package scope so sanitizeLabel does not recompile the regexp on every call.
+var labelValueSanitizePattern = regexp.MustCompile(`[^a-zA-Z0-9_.-]`)
+
+// sanitizeLabel cleans an input string to conform to the validation for labels. A valid label must be an empty string
+// or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character and it
+// is validated with regex '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?'.
+func sanitizeLabel(input string) string {
+	sanitized := labelValueSanitizePattern.ReplaceAllString(input, "_")
+	return strings.Trim(sanitized, "-_.")
+}
+
 // addNameLabel sets the name of the application.
// For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ func addNameLabel(obj metav1.Object, name string) { if obj.GetLabels()["app.kubernetes.io/name"] == "" { - obj.GetLabels()["app.kubernetes.io/name"] = name + obj.GetLabels()["app.kubernetes.io/name"] = sanitizeLabel(name) } if obj.GetLabels()["k8s-app"] == "" { - obj.GetLabels()["k8s-app"] = name + obj.GetLabels()["k8s-app"] = sanitizeLabel(name) } } @@ -1031,7 +1040,7 @@ func addNameLabel(obj metav1.Object, name string) { // For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ func addInstanceLabel(obj metav1.Object, cr metav1.Object) { if obj.GetLabels()["app.kubernetes.io/instance"] == "" && cr != nil { - obj.GetLabels()["app.kubernetes.io/instance"] = cr.GetName() + obj.GetLabels()["app.kubernetes.io/instance"] = sanitizeLabel(cr.GetName()) } } @@ -1042,7 +1051,7 @@ func addComponentLabel(obj metav1.Object, cr metav1.Object) { if obj.GetLabels()["app.kubernetes.io/component"] == "" && cr != nil { owner, ok := cr.(runtime.Object) if ok && owner.GetObjectKind() != nil && owner.GetObjectKind() != nil { - obj.GetLabels()["app.kubernetes.io/component"] = owner.GetObjectKind().GroupVersionKind().GroupKind().String() + obj.GetLabels()["app.kubernetes.io/component"] = sanitizeLabel(owner.GetObjectKind().GroupVersionKind().GroupKind().String()) } } @@ -1052,7 +1061,7 @@ func addComponentLabel(obj metav1.Object, cr metav1.Object) { // For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ func addPartOfLabel(obj metav1.Object, variant *operatorv1.ProductVariant) { if obj.GetLabels()["app.kubernetes.io/part-of"] == "" && variant != nil { - obj.GetLabels()["app.kubernetes.io/part-of"] = string(*variant) + obj.GetLabels()["app.kubernetes.io/part-of"] = sanitizeLabel(string(*variant)) } } @@ -1060,7 +1069,7 @@ func addPartOfLabel(obj 
metav1.Object, variant *operatorv1.ProductVariant) { // For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ func addManagedByLabel(obj metav1.Object, version string) { if obj.GetLabels()["app.kubernetes.io/managed-by"] == "" { - obj.GetLabels()["app.kubernetes.io/managed-by"] = common.OperatorName() + obj.GetLabels()["app.kubernetes.io/managed-by"] = sanitizeLabel(common.OperatorName()) } } From 2eccea96551b2d68cc6533cb71be889c93bfe725 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Tue, 30 Dec 2025 13:40:13 -0800 Subject: [PATCH 4/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- .../nonclusterhost/nonclusterhost_controller_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/pkg/controller/nonclusterhost/nonclusterhost_controller_test.go b/pkg/controller/nonclusterhost/nonclusterhost_controller_test.go index af756e2178..d6cddcee12 100644 --- a/pkg/controller/nonclusterhost/nonclusterhost_controller_test.go +++ b/pkg/controller/nonclusterhost/nonclusterhost_controller_test.go @@ -44,6 +44,7 @@ var _ = Describe("NonClusterHost controller tests", func() { r ReconcileNonClusterHost scheme *runtime.Scheme nonclusterhost *operatorv1.NonClusterHost + installation *operatorv1.Installation ) BeforeEach(func() { @@ -77,6 +78,13 @@ var _ = Describe("NonClusterHost controller tests", func() { TyphaEndpoint: "1.2.3.4:5473", }, } + installation = &operatorv1.Installation{ + TypeMeta: metav1.TypeMeta{Kind: "Installation", APIVersion: "operator.tigera.io/v1"}, + ObjectMeta: metav1.ObjectMeta{Name: "default"}, + Spec: operatorv1.InstallationSpec{ + Variant: operatorv1.TigeraSecureEnterprise, + }, + } }) AfterEach(func() { @@ -93,6 +101,7 @@ var _ = Describe("NonClusterHost controller tests", func() { It("should render NonClusterHost resources", func() { Expect(cli.Create(ctx, 
nonclusterhost)).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, installation)).NotTo(HaveOccurred()) _, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).NotTo(HaveOccurred()) From 2cbc87306d2008a943199fe2467a401ccd588067 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Tue, 30 Dec 2025 13:55:40 -0800 Subject: [PATCH 5/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- pkg/controller/utils/component.go | 10 +++++----- pkg/controller/utils/component_test.go | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pkg/controller/utils/component.go b/pkg/controller/utils/component.go index 9e233961b5..7324777201 100644 --- a/pkg/controller/utils/component.go +++ b/pkg/controller/utils/component.go @@ -968,13 +968,13 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec addInstanceLabel(obj, customResource) addComponentLabel(obj, customResource) addPartOfLabel(obj, variant) - addManagedByLabel(obj, obj.GetName()) + addManagedByLabel(obj) var podTemplate *v1.PodTemplateSpec var name string switch obj := obj.(type) { case *apps.Deployment: d := obj - name = d.Name + name = sanitizeLabel(d.Name) if d.Labels == nil { d.Labels = make(map[string]string) } @@ -990,7 +990,7 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec podTemplate = &d.Spec.Template case *apps.DaemonSet: d := obj - name = d.Name + name = sanitizeLabel(d.Name) if d.Spec.Selector == nil { d.Spec.Selector = &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -1013,7 +1013,7 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec addInstanceLabel(podTemplate, customResource) addComponentLabel(podTemplate, customResource) addPartOfLabel(podTemplate, variant) - addManagedByLabel(podTemplate, obj.GetName()) + addManagedByLabel(podTemplate) } // sanitizeLabel cleans an input string to conform 
to the validation for labels. A valid label must be an empty string @@ -1067,7 +1067,7 @@ func addPartOfLabel(obj metav1.Object, variant *operatorv1.ProductVariant) { // addManagedByLabel sets the tool being used to manage the operation of an application. // For more on recommended labels see: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ -func addManagedByLabel(obj metav1.Object, version string) { +func addManagedByLabel(obj metav1.Object) { if obj.GetLabels()["app.kubernetes.io/managed-by"] == "" { obj.GetLabels()["app.kubernetes.io/managed-by"] = sanitizeLabel(common.OperatorName()) } diff --git a/pkg/controller/utils/component_test.go b/pkg/controller/utils/component_test.go index 40168b9255..e83284aa59 100644 --- a/pkg/controller/utils/component_test.go +++ b/pkg/controller/utils/component_test.go @@ -1922,6 +1922,20 @@ var _ = Describe("Component handler tests", func() { Expect(d.Spec.Template.GetLabels()).To(Equal(expectedLabels)) Expect(*d.Spec.Selector).To(Equal(expectedSelector)) }) + DescribeTable("should sanitize common labels so that they pass regexp validation", func(in string) { + Expect(sanitizeLabel(in)).To(MatchRegexp(`(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?`)) + }, + Entry("Valid, should remain unchanged", "My-Test_String.123"), + Entry("Invalid start/end, should be trimmed", "__My-Test_String.123.."), + Entry("Invalid characters (spaces)", "String with spaces"), + Entry("Invalid characters", "special-chars!@#$%^&*"), + Entry("Invalid start/end", "-leading-and-trailing-"), + Entry("Invalid start/end (multiple)", "____-leading-and-trailing-____"), + Entry("Empty string, should remain empty", ""), + Entry("Invalid, should become empty", "."), + Entry("Valid single character", "a"), + Entry("Valid", "a-b_c.d"), + Entry("Valid", "1.2.3.4")) }) Context("services account updates should not result in removal of data", func() { It("preserves secrets and image pull secrets that were present before object updates", 
func() { From 54bfbbcd050d1e33382b731fa098039a5cc8daf5 Mon Sep 17 00:00:00 2001 From: Rene Dekker Date: Tue, 30 Dec 2025 14:34:26 -0800 Subject: [PATCH 6/6] feat(recommended labels): Set recommended labels as per https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/ --- pkg/controller/utils/component.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/pkg/controller/utils/component.go b/pkg/controller/utils/component.go index 7324777201..44c5af55b3 100644 --- a/pkg/controller/utils/component.go +++ b/pkg/controller/utils/component.go @@ -964,11 +964,16 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec if obj.GetLabels() == nil { obj.SetLabels(make(map[string]string)) } - addNameLabel(obj, obj.GetName()) - addInstanceLabel(obj, customResource) - addComponentLabel(obj, customResource) - addPartOfLabel(obj, variant) - addManagedByLabel(obj) + if customResource != nil { + // We do not want to set these labels on objects without a CR. They are usually deliberately not getting an + // owner ref and are not controlled by our operator. + addNameLabel(obj, obj.GetName()) + addInstanceLabel(obj, customResource) + addComponentLabel(obj, customResource) + addPartOfLabel(obj, variant) + addManagedByLabel(obj) + } + var podTemplate *v1.PodTemplateSpec var name string switch obj := obj.(type) { @@ -1009,11 +1014,16 @@ func setStandardSelectorAndLabels(obj client.Object, customResource metav1.Objec if podTemplate.Labels["k8s-app"] == "" { podTemplate.Labels["k8s-app"] = name } - addNameLabel(podTemplate, obj.GetName()) - addInstanceLabel(podTemplate, customResource) - addComponentLabel(podTemplate, customResource) - addPartOfLabel(podTemplate, variant) - addManagedByLabel(podTemplate) + if customResource != nil { + // We do not want to set these labels on objects without a CR. 
They are usually deliberately not getting an + // owner ref and are not controlled by our operator. + addNameLabel(podTemplate, obj.GetName()) + addInstanceLabel(podTemplate, customResource) + addComponentLabel(podTemplate, customResource) + addPartOfLabel(podTemplate, variant) + addManagedByLabel(podTemplate) + } + } // sanitizeLabel cleans an input string to conform to the validation for labels. A valid label must be an empty string