diff --git a/controllers/clusterpermission_controller.go b/controllers/clusterpermission_controller.go
index f9f3128..fea33bf 100644
--- a/controllers/clusterpermission_controller.go
+++ b/controllers/clusterpermission_controller.go
@@ -28,14 +28,11 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/util/retry"
 	ctrl "sigs.k8s.io/controller-runtime"
-	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/event"
-	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
-	"sigs.k8s.io/controller-runtime/pkg/predicate"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	corev1 "k8s.io/api/core/v1"
@@ -54,43 +51,133 @@ const VALIDATION_MW_RETRY_INTERVAL = 10 * time.Second
 
 type ClusterPermissionReconciler struct {
 	client.Client
 	Scheme *runtime.Scheme
+	// customInformer is the custom informer for ManagedClusterAddOn resources
+	customInformer *ManagedClusterAddOnInformer
 }
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *ClusterPermissionReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	// Create the custom informer for ManagedClusterAddOn resources
+	config := mgr.GetConfig()
+	eventHandler := r.createCustomInformerEventHandler()
+
+	customInformer, err := NewManagedClusterAddOnInformer(config, eventHandler)
+	if err != nil {
+		return err
+	}
+
+	// Store the custom informer in the reconciler
+	r.customInformer = customInformer
+
+	// Start the custom informer in a goroutine with retry logic
+	go func() {
+		backoff := wait.Backoff{
+			Duration: 1 * time.Second,
+			Factor:   2.0,
+			Jitter:   0.1,
+			Steps:    5, // Retry up to 5 times (1s, 2s, 4s, 8s, 16s)
+		}
+
+		err := wait.ExponentialBackoff(backoff, func() (bool, error) {
+			if err := customInformer.Start(); err != nil {
+				log.Log.Error(err, "Failed to start custom ManagedClusterAddOn informer, will retry")
+				return false, nil // Retry
+			}
+			log.Log.Info("Successfully started custom ManagedClusterAddOn informer")
+			return true, nil // Success
+		})
+
+		if err != nil {
+			log.Log.Error(err, "Failed to start custom ManagedClusterAddOn informer after retries, controller cannot function properly")
+			panic("custom ManagedClusterAddOn informer failed to start after retries")
+		}
+	}()
+
+	// Set up the controller without the built-in ManagedClusterAddOn watch,
+	// since the custom informer is used instead
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&cpv1alpha1.ClusterPermission{}).
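+		// NOTE: the ManagedClusterAddOn watch that used to be registered here
+		// is now handled by the custom informer started above.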
-		Watches(&addonv1alpha1.ManagedClusterAddOn{},
-			r.managedClusterAddOnEventHandler(),
-			builder.WithPredicates(predicate.Funcs{
-				CreateFunc: func(e event.CreateEvent) bool {
-					return false // Don't process Create events
-				},
-				UpdateFunc: func(e event.UpdateEvent) bool {
-					// Only process Update events for managed-serviceaccount addon when status.namespace changes
-					oldAddon, oldOk := e.ObjectOld.(*addonv1alpha1.ManagedClusterAddOn)
-					newAddon, newOk := e.ObjectNew.(*addonv1alpha1.ManagedClusterAddOn)
+		Complete(r)
+}
 
-					if !oldOk || !newOk {
-						return false
-					}
+// Stop stops the custom informer if it exists
+func (r *ClusterPermissionReconciler) Stop() {
+	if r.customInformer != nil {
+		r.customInformer.Stop()
+	}
+}
 
-					// Only process if this is the managed-serviceaccount addon
-					if newAddon.Name != msacommon.AddonName {
-						return false
-					}
+// createCustomInformerEventHandler creates an event handler for the custom informer
+func (r *ClusterPermissionReconciler) createCustomInformerEventHandler() func(obj *addonv1alpha1.ManagedClusterAddOn) {
+	return func(addon *addonv1alpha1.ManagedClusterAddOn) {
+		log := log.Log.WithName("CustomInformerEventHandler")
 
-					// Only process if status.namespace has changed
-					return oldAddon.Status.Namespace != newAddon.Status.Namespace
-				},
-				DeleteFunc: func(e event.DeleteEvent) bool {
-					return false // Don't process Delete events
-				},
-				GenericFunc: func(e event.GenericEvent) bool {
-					return false // Don't process Generic events
-				},
-			})).
-		Complete(r)
+		// Find all ClusterPermissions in this addon's namespace that have ManagedServiceAccount subjects
+		ctx := context.Background()
+		var clusterPermissions cpv1alpha1.ClusterPermissionList
+		err := r.List(ctx, &clusterPermissions, &client.ListOptions{
+			Namespace: addon.Namespace,
+		})
+		if err != nil {
+			log.Error(err, "failed to list ClusterPermissions", "namespace", addon.Namespace)
+			return
+		}
+
+		// Process each ClusterPermission that uses ManagedServiceAccount
+		for _, cp := range clusterPermissions.Items {
+			if r.clusterPermissionUsesManagedServiceAccount(&cp) {
+				log.Info("Triggering reconciliation for ClusterPermission due to ManagedClusterAddOn change",
+					"clusterPermission", cp.Name,
+					"namespace", cp.Namespace,
+					"addonNamespace", addon.Status.Namespace,
+				)
+
+				// Trigger reconciliation of this ClusterPermission directly
+				r.reconcileClusterPermission(ctx, &cp)
+			}
+		}
+	}
+}
+
+// reconcileClusterPermission triggers reconciliation of a specific ClusterPermission
+func (r *ClusterPermissionReconciler) reconcileClusterPermission(ctx context.Context, cp *cpv1alpha1.ClusterPermission) {
+	log := log.FromContext(ctx)
+
+	// Create a reconcile request
+	req := reconcile.Request{
+		NamespacedName: types.NamespacedName{
+			Name:      cp.Name,
+			Namespace: cp.Namespace,
+		},
+	}
+
+	// Retry with exponential backoff
+	backoff := wait.Backoff{
+		Duration: 1 * time.Second,
+		Factor:   2.0,
+		Jitter:   0.1,
+		Steps:    3, // Retry up to 3 times (1s, 2s, 4s)
+	}
+
+	err := wait.ExponentialBackoffWithContext(ctx, backoff, func(context.Context) (bool, error) {
+		result, err := r.Reconcile(ctx, req)
+		if err != nil {
+			log.Error(err, "Failed to reconcile ClusterPermission, will retry", "name", cp.Name, "namespace", cp.Namespace)
+			return false, nil // Retry on error
+		}
+		if result.Requeue || result.RequeueAfter > 0 {
+			log.Info("Reconcile requested requeue, will retry", "name", cp.Name, "namespace", cp.Namespace, "requeueAfter", result.RequeueAfter)
+			if result.RequeueAfter > 0 {
+				time.Sleep(result.RequeueAfter)
+			}
+			return false, nil // Retry if requeue requested
+		}
+		return true, nil // Success
+	})
+
+	if err != nil {
+		log.Error(err, "Failed to reconcile ClusterPermission after retries", "name", cp.Name, "namespace", cp.Namespace)
+	}
 }
 
 //+kubebuilder:rbac:groups=rbac.open-cluster-management.io,resources=clusterpermissions,verbs=get;list;watch;create;update;patch;delete
@@ -694,47 +781,6 @@ func joinStrings(strings []string, separator string) string {
 	return result
 }
 
-// managedClusterAddOnEventHandler returns an event handler that reconciles ClusterPermissions
-// when a ManagedClusterAddOn's status.namespace changes
-func (r *ClusterPermissionReconciler) managedClusterAddOnEventHandler() handler.EventHandler {
-	return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
-		log := log.FromContext(ctx)
-
-		addon, ok := obj.(*addonv1alpha1.ManagedClusterAddOn)
-		if !ok {
-			log.Error(nil, "object is not a ManagedClusterAddOn", "object", obj)
-			return []reconcile.Request{}
-		}
-
-		// Find all ClusterPermissions in this addon's namespace that have ManagedServiceAccount subjects
-		var clusterPermissions cpv1alpha1.ClusterPermissionList
-		err := r.List(ctx, &clusterPermissions, &client.ListOptions{
-			Namespace: addon.Namespace,
-		})
-		if err != nil {
-			log.Error(err, "failed to list ClusterPermissions", "namespace", addon.Namespace)
-			return []reconcile.Request{}
-		}
-
-		var requests []reconcile.Request
-		for _, cp := range clusterPermissions.Items {
-			if r.clusterPermissionUsesManagedServiceAccount(&cp) {
-				requests = append(requests, reconcile.Request{
-					NamespacedName: types.NamespacedName{
-						Name:      cp.Name,
-						Namespace: cp.Namespace,
-					},
-				})
-			}
-		}
-
-		log.Info("ManagedClusterAddOn status.namespace changed, reconciling ClusterPermissions",
-			"addon", addon.Name, "namespace", addon.Namespace, "requests", len(requests))
-
-		return requests
-	})
-}
-
 // clusterPermissionUsesManagedServiceAccount checks if a ClusterPermission uses ManagedServiceAccount subjects
 func (r *ClusterPermissionReconciler) clusterPermissionUsesManagedServiceAccount(cp *cpv1alpha1.ClusterPermission) bool {
 	// Check ClusterRoleBinding
diff --git a/controllers/managed_cluster_addon_informer.go b/controllers/managed_cluster_addon_informer.go
new file mode 100644
index 0000000..b0ef0a9
--- /dev/null
+++ b/controllers/managed_cluster_addon_informer.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	msacommon "open-cluster-management.io/managed-serviceaccount/pkg/common"
+)
+
+// ManagedClusterAddOnInformer is a custom informer that watches ManagedClusterAddOn resources
+// with metadata.name = "managed-serviceaccount"
+type ManagedClusterAddOnInformer struct {
+	// client is the Kubernetes client
+	client *rest.RESTClient
+	// stopCh is used to signal the informer to stop
+	stopCh chan struct{}
+	// workqueue is used to queue events for processing
+	workqueue workqueue.RateLimitingInterface
+	// informer is the Kubernetes informer
+	informer cache.SharedIndexInformer
+	// eventHandler is the function to call when events occur
+	eventHandler func(obj *addonv1alpha1.ManagedClusterAddOn)
+	// logger for this informer
+	logger logr.Logger
+}
+
+// NewManagedClusterAddOnInformer creates a new custom informer for ManagedClusterAddOn resources
+// that only selects resources with metadata.name = "managed-serviceaccount"
+func NewManagedClusterAddOnInformer(config *rest.Config, eventHandler func(obj *addonv1alpha1.ManagedClusterAddOn)) (*ManagedClusterAddOnInformer, error) {
+	// Create a scheme and register the addon types
+	addonScheme := runtime.NewScheme()
+	if err := addonv1alpha1.AddToScheme(addonScheme); err != nil {
+		return nil, fmt.Errorf("failed to add addon types to scheme: %w", err)
+	}
+
+	// Create a REST client for the addon API
+	config = rest.CopyConfig(config)
+	config.GroupVersion = &addonv1alpha1.GroupVersion
+	config.APIPath = "/apis"
+	config.NegotiatedSerializer = serializer.NewCodecFactory(addonScheme).WithoutConversion()
+
+	client, err := rest.RESTClientFor(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create REST client: %w", err)
+	}
+
+	// Create a field selector to only watch resources with name "managed-serviceaccount"
+	fieldSelector := fields.OneTermEqualSelector("metadata.name", msacommon.AddonName)
+
+	// Create a list watcher with the field selector
+	listWatcher := cache.NewListWatchFromClient(
+		client,
+		"managedclusteraddons",
+		metav1.NamespaceAll,
+		fieldSelector,
+	)
+
+	// Create the informer
+	informer := cache.NewSharedIndexInformer(
+		listWatcher,
+		&addonv1alpha1.ManagedClusterAddOn{},
+		time.Minute*10, // resync period
+		cache.Indexers{},
+	)
+
+	// Create workqueue with rate limiting
+	workqueue := workqueue.NewNamedRateLimitingQueue(
+		workqueue.DefaultControllerRateLimiter(),
+		"ManagedClusterAddOnInformer",
+	)
+
+	return &ManagedClusterAddOnInformer{
+		client:       client,
+		stopCh:       make(chan struct{}),
+		workqueue:    workqueue,
+		informer:     informer,
+		eventHandler: eventHandler,
+		logger:       log.Log.WithName("ManagedClusterAddOnInformer"),
+	}, nil
+}
+
+// Start starts the informer
+func (i *ManagedClusterAddOnInformer) Start() error {
+	i.logger.Info("Starting ManagedClusterAddOnInformer")
+
+	// Add event handlers
+	_, err := i.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj any) {
+			// no-op
+		},
+		UpdateFunc: func(oldObj, newObj any) {
+			if addon, ok := newObj.(*addonv1alpha1.ManagedClusterAddOn); ok {
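+				// Enqueue a namespace/name key for the worker instead of calling
+				// the event handler inline, keeping this callback non-blocking.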
+				i.enqueueAddon(addon)
+			} else {
+				i.logger.Error(nil, "newObj is not a ManagedClusterAddOn", "object", newObj)
+			}
+		},
+		DeleteFunc: func(obj any) {
+			// no-op
+		},
+	})
+	if err != nil {
+		return fmt.Errorf("failed to add event handler: %w", err)
+	}
+
+	// Start the informer
+	go i.informer.Run(i.stopCh)
+
+	// Wait for cache to sync
+	if !cache.WaitForCacheSync(i.stopCh, i.informer.HasSynced) {
+		return fmt.Errorf("failed to sync cache")
+	}
+
+	// Start workers to process the queue
+	go i.runWorker()
+
+	return nil
+}
+
+// Stop stops the informer
+func (i *ManagedClusterAddOnInformer) Stop() {
+	i.logger.Info("Stopping ManagedClusterAddOnInformer")
+	close(i.stopCh)
+	i.workqueue.ShutDown()
+}
+
+// enqueueAddon adds a ManagedClusterAddOn to the work queue
+func (i *ManagedClusterAddOnInformer) enqueueAddon(addon *addonv1alpha1.ManagedClusterAddOn) {
+	key := fmt.Sprintf("%s/%s", addon.Namespace, addon.Name)
+	i.workqueue.Add(key)
+}
+
+// runWorker processes items from the work queue
+func (i *ManagedClusterAddOnInformer) runWorker() {
+	for i.processNextWorkItem() {
+	}
+}
+
+// processNextWorkItem processes the next item in the work queue
+func (i *ManagedClusterAddOnInformer) processNextWorkItem() bool {
+	obj, shutdown := i.workqueue.Get()
+	if shutdown {
+		return false
+	}
+
+	// We wrap this block in a func so we can defer i.workqueue.Done.
+	err := func(obj any) error {
+		// We call Done here so the workqueue knows we have finished
+		// processing this item. We also must remember to call Forget if we
+		// do not want this work item being re-queued. For example, we do
+		// not call Forget if a transient error occurs; instead the item is
+		// put back on the workqueue and attempted again after a back-off
+		// period.
+		defer i.workqueue.Done(obj)
+		var key string
+		var ok bool
+		// We expect strings to come off the workqueue. These are of the
+		// form namespace/name. We do this as the delayed nature of the
+		// workqueue means the items in the informer cache may actually be
+		// more up to date than when the item was initially put onto the
+		// workqueue.
+		if key, ok = obj.(string); !ok {
+			// As the item in the workqueue is actually invalid, we call
+			// Forget here, else we'd go into a loop of attempting to
+			// process a work item that is invalid.
+			i.workqueue.Forget(obj)
+			i.logger.Error(nil, "expected string in workqueue", "object", obj)
+			return nil
+		}
+		// Run the syncHandler, passing it the namespace/name string of the
+		// resource to be synced.
+		if err := i.syncHandler(key); err != nil {
+			// Put the item back on the workqueue to handle any transient errors.
+			i.workqueue.AddRateLimited(key)
+			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
+		}
+		// Finally, if no error occurs we Forget this item so it does not
+		// get queued again until another change happens.
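+		// (Forget only clears the rate limiter's backoff state for this key;
+		// the deferred Done above is what removes it from the active set.)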
+		i.workqueue.Forget(obj)
+		return nil
+	}(obj)
+
+	if err != nil {
+		i.logger.Error(err, "error processing work item")
+		return true
+	}
+
+	return true
+}
+
+// syncHandler processes a single item from the work queue
+func (i *ManagedClusterAddOnInformer) syncHandler(key string) error {
+	// Convert the namespace/name string into a distinct namespace and name
+	_, _, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		i.logger.Error(err, "invalid resource key", "key", key)
+		return nil
+	}
+
+	// Get the ManagedClusterAddOn from the informer cache
+	obj, exists, err := i.informer.GetIndexer().GetByKey(key)
+	if err != nil {
+		i.logger.Error(err, "failed to get ManagedClusterAddOn from cache", "key", key)
+		return err
+	}
+
+	if !exists {
+		// The object no longer exists, so we can stop processing
+		i.logger.Info("ManagedClusterAddOn no longer exists", "key", key)
+		return nil
+	}
+
+	addon, ok := obj.(*addonv1alpha1.ManagedClusterAddOn)
+	if !ok {
+		i.logger.Error(nil, "object is not a ManagedClusterAddOn", "key", key, "object", obj)
+		return nil
+	}
+
+	// Call the event handler
+	if i.eventHandler != nil {
+		i.eventHandler(addon)
+	}
+
+	return nil
+}
+
+// GetManagedClusterAddOn retrieves a ManagedClusterAddOn by namespace and name
+func (i *ManagedClusterAddOnInformer) GetManagedClusterAddOn(namespace, name string) (*addonv1alpha1.ManagedClusterAddOn, error) {
+	key := fmt.Sprintf("%s/%s", namespace, name)
+	obj, exists, err := i.informer.GetIndexer().GetByKey(key)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		return nil, errors.NewNotFound(addonv1alpha1.Resource("managedclusteraddons"), name)
+	}
+
+	addon, ok := obj.(*addonv1alpha1.ManagedClusterAddOn)
+	if !ok {
+		return nil, fmt.Errorf("object is not a ManagedClusterAddOn")
+	}
+
+	return addon, nil
+}
+
+// ListManagedClusterAddOns lists all ManagedClusterAddOns in the cache
+func (i *ManagedClusterAddOnInformer) ListManagedClusterAddOns() ([]*addonv1alpha1.ManagedClusterAddOn, error) {
+	items := i.informer.GetIndexer().List()
+	addons := make([]*addonv1alpha1.ManagedClusterAddOn, 0, len(items))
+
+	for _, item := range items {
+		if addon, ok := item.(*addonv1alpha1.ManagedClusterAddOn); ok {
+			addons = append(addons, addon)
+		} else {
+			i.logger.Error(nil, "object is not a ManagedClusterAddOn", "object", item)
+		}
+	}
+
+	return addons, nil
+}
+
+// HasSynced returns true if the informer has synced
+func (i *ManagedClusterAddOnInformer) HasSynced() bool {
+	return i.informer.HasSynced()
+}
diff --git a/controllers/managed_cluster_addon_informer_test.go b/controllers/managed_cluster_addon_informer_test.go
new file mode 100644
index 0000000..bc8c4e9
--- /dev/null
+++ b/controllers/managed_cluster_addon_informer_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+
+	addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
+	msacommon "open-cluster-management.io/managed-serviceaccount/pkg/common"
+)
+
+func TestManagedClusterAddOnInformer(t *testing.T) {
+	// Create the test environment
+	testEnv := &envtest.Environment{
+		CRDDirectoryPaths: []string{
+			"../hack/crds",
+		},
+		ErrorIfCRDPathMissing: true,
+	}
+
+	cfg, err := testEnv.Start()
+	require.NoError(t, err)
+	defer testEnv.Stop()
+
+	// The informer creates its own scheme and registers the addon types
+	// internally, so we only need to pass the config.
+
+	// Create an event handler that captures events
+	events := make(chan string, 10)
+	eventHandler := func(addon *addonv1alpha1.ManagedClusterAddOn) {
+		events <- addon.Name
+	}
+
+	// Create the informer
+	informer, err := NewManagedClusterAddOnInformer(cfg, eventHandler)
+	require.NoError(t, err)
+
+	// Start the informer
+	_, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+
+	err = informer.Start()
+	require.NoError(t, err)
+
+	// Wait for sync
+	timeout := time.After(10 * time.Second)
+	for !informer.HasSynced() {
+		select {
+		case <-timeout:
+			t.Fatal("Informer failed to sync within timeout")
+		case <-time.After(100 * time.Millisecond):
+			// Continue waiting
+		}
+	}
+
+	// Test that the informer is working
+	assert.True(t, informer.HasSynced())
+
+	// Stop the informer
+	informer.Stop()
+}
+
+func TestManagedClusterAddOnInformerEventHandling(t *testing.T) {
+	// This test validates that the informer can be created successfully
+	// even with a simple config
+	config := &rest.Config{
+		Host: "http://localhost:8080",
+	}
+
+	events := make(chan string, 10)
+	eventHandler := func(addon *addonv1alpha1.ManagedClusterAddOn) {
+		events <- addon.Name
+	}
+
+	// Create the informer - this should succeed as it only sets up the structure
+	informer, err := NewManagedClusterAddOnInformer(config, eventHandler)
+	// The informer creation should succeed
+	assert.NoError(t, err)
+	assert.NotNil(t, informer)
+
+	// Starting the informer would fail due to connection issues, but we don't test that here
+}
+
+func TestManagedClusterAddOnInformerFieldSelector(t *testing.T) {
+	// Test that the field selector is correctly configured.
+	// This is more of a unit test for the field selector logic.
+
+	// Create a mock addon with the correct name
+	addon := &addonv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      msacommon.AddonName,
+			Namespace: "test-cluster",
+		},
+		Spec: addonv1alpha1.ManagedClusterAddOnSpec{
+			InstallNamespace: "open-cluster-management-agent-addon",
+		},
+		Status: addonv1alpha1.ManagedClusterAddOnStatus{
+			Namespace: "open-cluster-management-agent-addon",
+		},
+	}
+
+	// Verify the addon has the expected name
+	assert.Equal(t, msacommon.AddonName, addon.Name)
+	assert.Equal(t, "test-cluster", addon.Namespace)
+}
+
+func TestManagedClusterAddOnInformerWorkQueue(t *testing.T) {
+	// Test work queue functionality
+	config := &rest.Config{
+		Host: "http://localhost:8080",
+	}
+
+	events := make(chan string, 10)
+	eventHandler := func(addon *addonv1alpha1.ManagedClusterAddOn) {
+		events <- addon.Name
+	}
+
+	// Create the informer
+	informer, err := NewManagedClusterAddOnInformer(config, eventHandler)
+	require.NoError(t, err)
+
+	// Test that the workqueue was created
+	assert.NotNil(t, informer.workqueue)
+
+	// Test enqueue functionality
+	addon := &addonv1alpha1.ManagedClusterAddOn{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      msacommon.AddonName,
+			Namespace: "test-cluster",
+		},
+	}
+
+	// This should not panic
+	informer.enqueueAddon(addon)
+}
diff --git a/controllers/suite_test.go b/controllers/suite_test.go
index ef00f92..57f8f01 100644
--- a/controllers/suite_test.go
+++ b/controllers/suite_test.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"path/filepath"
 	"testing"
+	"time"
 
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -43,11 +44,12 @@ import (
 // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
 
 var (
-	cfg       *rest.Config
-	k8sClient client.Client
-	testEnv   *envtest.Environment
-	ctx       context.Context
-	cancel    context.CancelFunc
+	cfg        *rest.Config
+	k8sClient  client.Client
+	testEnv    *envtest.Environment
+	ctx        context.Context
+	cancel     context.CancelFunc
+	reconciler *ClusterPermissionReconciler
 )
 
 func TestAPIs(t *testing.T) {
@@ -67,6 +69,10 @@ var _ = BeforeSuite(func() {
 			filepath.Join("..", "config", "crds"),
 		},
 		ErrorIfCRDPathMissing: true,
+		// Stream control plane output to help debug kube-apiserver timeout issues
+		AttachControlPlaneOutput: true,
+		// Increase timeout for stopping the control plane
+		ControlPlaneStopTimeout: 30 * time.Second,
 	}
 
 	var err error
@@ -96,10 +102,11 @@ var _ = BeforeSuite(func() {
 	})
 	Expect(err).ToNot(HaveOccurred())
 
-	err = (&ClusterPermissionReconciler{
+	reconciler = &ClusterPermissionReconciler{
 		Client: k8sManager.GetClient(),
 		Scheme: k8sManager.GetScheme(),
-	}).SetupWithManager(k8sManager)
+	}
+	err = reconciler.SetupWithManager(k8sManager)
 	Expect(err).ToNot(HaveOccurred())
 
 	go func() {
@@ -110,8 +117,20 @@
 })
 
 var _ = AfterSuite(func() {
-	cancel()
 	By("tearing down the test environment")
+
+	// Stop the custom informer if it exists
+	if reconciler != nil {
+		reconciler.Stop()
+	}
+
+	// Cancel the context to stop the manager
+	cancel()
+
+	// Give goroutines more time to finish;
+	// this helps prevent the kube-apiserver timeout issue
+	time.Sleep(500 * time.Millisecond)
+
 	err := testEnv.Stop()
 	Expect(err).NotTo(HaveOccurred())
 })
diff --git a/e2e/run_e2e.sh b/e2e/run_e2e.sh
index e173a08..2a25220 100755
--- a/e2e/run_e2e.sh
+++ b/e2e/run_e2e.sh
@@ -200,3 +200,103 @@ else
   echo "All referenced cluster roles were not found"
   exit 1
 fi
+
+echo "TEST ManagedClusterAddOn Custom Informer"
+kubectl config use-context kind-hub
+
+# Create a ManagedClusterAddOn with the name "managed-serviceaccount" that the informer watches
+echo "Creating ManagedClusterAddOn 'managed-serviceaccount' in cluster1..."
+cat <