Skip to content

Commit b3aeefc

Browse files
committed
test e2e
Signed-off-by: zhujian <[email protected]>
1 parent 0844710 commit b3aeefc

File tree

1 file changed

+130
-5
lines changed

1 file changed

+130
-5
lines changed

test/e2e/addonmanagement_test.go

Lines changed: 130 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -74,7 +74,7 @@ var (
7474
}
7575
)
7676

77-
var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.Label("addon-manager"), func() {
77+
var _ = ginkgo.FDescribe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.Label("addon-manager"), func() {
7878
addOnName := "hello-template"
7979
addonInstallNamespace := "test-addon-template"
8080

@@ -832,7 +832,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
832832
}).ShouldNot(gomega.HaveOccurred())
833833
})
834834

835-
ginkgo.It("ClusterManagementAddon deletion should wait for ManagedClusterAddons cleanup", func() {
835+
ginkgo.FIt("ClusterManagementAddon deletion should wait for ManagedClusterAddons cleanup", func() {
836836
ginkgo.By("Make sure addon is functioning before deletion")
837837
configmap := &corev1.ConfigMap{
838838
ObjectMeta: metav1.ObjectMeta{
@@ -867,18 +867,143 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered,
867867
context.TODO(), addOnName, metav1.DeleteOptions{})
868868
gomega.Expect(err).ToNot(gomega.HaveOccurred())
869869

870+
ginkgo.By("Check that the ManagedClusterAddon is in deletion status")
871+
gomega.Eventually(func() error {
872+
addon, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(
873+
context.TODO(), addOnName, metav1.GetOptions{})
874+
if err != nil {
875+
if errors.IsNotFound(err) {
876+
klog.Infof("ManagedClusterAddon %s is already deleted", addOnName)
877+
return nil
878+
}
879+
klog.Errorf("Error getting ManagedClusterAddon: %v", err)
880+
return err
881+
}
882+
if addon.DeletionTimestamp == nil {
883+
klog.Infof("ManagedClusterAddon %s deletion timestamp not set yet", addOnName)
884+
return fmt.Errorf("addon should have deletion timestamp set")
885+
}
886+
klog.Infof("ManagedClusterAddon %s has deletion timestamp set: %v", addOnName, addon.DeletionTimestamp)
887+
klog.Infof("ManagedClusterAddon %s finalizers: %v", addOnName, addon.Finalizers)
888+
klog.Infof("ManagedClusterAddon %s status conditions: %v", addOnName, addon.Status.Conditions)
889+
return nil
890+
}).ShouldNot(gomega.HaveOccurred())
891+
870892
ginkgo.By("The pre-delete job should clean up the configmap")
871893
gomega.Eventually(func() error {
872-
_, err := spoke.KubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(
894+
// Debug: Check all jobs in addon namespace first
895+
allJobs, allJobsErr := spoke.KubeClient.BatchV1().Jobs(addonInstallNamespace).List(
896+
context.Background(), metav1.ListOptions{})
897+
if allJobsErr != nil {
898+
klog.Errorf("Error listing all jobs in namespace %s: %v", addonInstallNamespace, allJobsErr)
899+
} else {
900+
klog.Infof("Found %d total jobs in namespace %s", len(allJobs.Items), addonInstallNamespace)
901+
for _, j := range allJobs.Items {
902+
klog.Infof("Job %s: Active=%d, Succeeded=%d, Failed=%d", j.Name, j.Status.Active, j.Status.Succeeded, j.Status.Failed)
903+
}
904+
}
905+
906+
// Debug: Check pre-delete job status
907+
job, jobErr := spoke.KubeClient.BatchV1().Jobs(addonInstallNamespace).Get(
908+
context.Background(), "hello-template-cleanup-configmap", metav1.GetOptions{})
909+
if jobErr != nil {
910+
if errors.IsNotFound(jobErr) {
911+
klog.Infof("Pre-delete job not found yet")
912+
} else {
913+
klog.Errorf("Error getting pre-delete job: %v", jobErr)
914+
}
915+
} else {
916+
klog.Infof("Pre-delete job status - Active: %d, Succeeded: %d, Failed: %d, Conditions: %v",
917+
job.Status.Active, job.Status.Succeeded, job.Status.Failed, job.Status.Conditions)
918+
}
919+
920+
// Debug: Check addon-manager logs to see if there are any issues
921+
addonManagerPods, addonManagerErr := hub.KubeClient.CoreV1().Pods("open-cluster-management-hub").List(
922+
context.Background(), metav1.ListOptions{
923+
LabelSelector: "app=clustermanager-addon-manager-controller",
924+
})
925+
if addonManagerErr != nil {
926+
klog.Errorf("Error listing addon-manager pods: %v", addonManagerErr)
927+
} else {
928+
klog.Infof("Found %d addon-manager pods", len(addonManagerPods.Items))
929+
for _, pod := range addonManagerPods.Items {
930+
klog.Infof("Addon-manager pod %s status: Phase=%s", pod.Name, pod.Status.Phase)
931+
932+
// Get recent addon-manager logs (last 50 lines)
933+
logs, logErr := hub.KubeClient.CoreV1().Pods("open-cluster-management-hub").GetLogs(
934+
pod.Name, &corev1.PodLogOptions{
935+
TailLines: func(i int64) *int64 { return &i }(50),
936+
}).DoRaw(context.Background())
937+
if logErr != nil {
938+
klog.Errorf("Error getting addon-manager logs for pod %s: %v", pod.Name, logErr)
939+
} else {
940+
klog.Infof("Recent addon-manager logs from pod %s: %s", pod.Name, string(logs))
941+
}
942+
}
943+
}
944+
945+
// Debug: Check ManifestWorks for this addon
946+
manifestWorks, mwErr := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).List(
947+
context.Background(), metav1.ListOptions{
948+
LabelSelector: fmt.Sprintf("addon.open-cluster-management.io/addon-name=%s", addOnName),
949+
})
950+
if mwErr != nil {
951+
klog.Errorf("Error listing ManifestWorks: %v", mwErr)
952+
} else {
953+
klog.Infof("Found %d ManifestWorks for addon %s", len(manifestWorks.Items), addOnName)
954+
for _, mw := range manifestWorks.Items {
955+
klog.Infof("ManifestWork %s: DeletionTimestamp=%v, Status=%+v",
956+
mw.Name, mw.DeletionTimestamp, mw.Status)
957+
if len(mw.Status.ResourceStatus.Manifests) > 0 {
958+
for _, manifest := range mw.Status.ResourceStatus.Manifests {
959+
klog.Infof(" Resource %s/%s: Conditions=%v",
960+
manifest.ResourceMeta.Kind, manifest.ResourceMeta.Name, manifest.Conditions)
961+
}
962+
}
963+
}
964+
}
965+
966+
if jobErr == nil {
967+
968+
// Debug: Get pods associated with this job to check their status
969+
pods, podErr := spoke.KubeClient.CoreV1().Pods(addonInstallNamespace).List(
970+
context.Background(), metav1.ListOptions{
971+
LabelSelector: "job=hello-template-cleanup-configmap",
972+
})
973+
if podErr != nil {
974+
klog.Errorf("Error listing job pods: %v", podErr)
975+
} else {
976+
for _, pod := range pods.Items {
977+
klog.Infof("Job pod %s status: Phase=%s, Ready=%v, Conditions=%v",
978+
pod.Name, pod.Status.Phase, pod.Status.ContainerStatuses, pod.Status.Conditions)
979+
980+
// If pod has failed, try to get logs
981+
if pod.Status.Phase == "Failed" || pod.Status.Phase == "Succeeded" {
982+
logs, logErr := spoke.KubeClient.CoreV1().Pods(addonInstallNamespace).GetLogs(
983+
pod.Name, &corev1.PodLogOptions{}).DoRaw(context.Background())
984+
if logErr != nil {
985+
klog.Errorf("Error getting logs for pod %s: %v", pod.Name, logErr)
986+
} else {
987+
klog.Infof("Logs for job pod %s: %s", pod.Name, string(logs))
988+
}
989+
}
990+
}
991+
}
992+
}
993+
994+
cms, err := spoke.KubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get(
873995
context.Background(), configmap.Name, metav1.GetOptions{})
874996
if err != nil {
875997
if errors.IsNotFound(err) {
998+
klog.Infof("Configmap %s successfully deleted", configmap.Name)
876999
return nil
8771000
}
1001+
klog.Errorf("Error getting configmap: %v", err)
8781002
return err
8791003
}
880-
return fmt.Errorf("the configmap should be deleted")
881-
}, time.Second*180, time.Second*5).ShouldNot(gomega.HaveOccurred())
1004+
klog.Infof("Configmap %s still exists, waiting for deletion", configmap.Name)
1005+
return fmt.Errorf("the configmap should be deleted, %+v", cms)
1006+
}, time.Second*300, time.Second*3).ShouldNot(gomega.HaveOccurred())
8821007

8831008
ginkgo.By("ManagedClusterAddon should eventually be deleted after pre-delete job completes")
8841009
gomega.Eventually(func() error {

0 commit comments

Comments
 (0)