From 2e356775564ebfd7da6e7bbfeb20b39a28715432 Mon Sep 17 00:00:00 2001
From: Thomas-David Griedel <griedel911@gmail.com>
Date: Mon, 24 Feb 2025 20:14:33 +0100
Subject: [PATCH 1/7] introduce destructive label

Signed-off-by: Thomas-David Griedel <griedel911@gmail.com>

Removed the cleartext bracket label and used Ginkgo labels for destructive tests
---
 tests/cdiconfig_test.go    |   40 +-
 tests/import_proxy_test.go |    2 +-
 tests/monitoring_test.go   |    2 +-
 tests/operator_test.go     | 1979 ++++++++++++++++++------------------
 4 files changed, 1010 insertions(+), 1013 deletions(-)

diff --git a/tests/cdiconfig_test.go b/tests/cdiconfig_test.go
index eee6af9229..7d7d88e26d 100644
--- a/tests/cdiconfig_test.go
+++ b/tests/cdiconfig_test.go
@@ -528,30 +528,28 @@ var _ = Describe("CDI route config tests", Serial, func() {
 })
 })
-var _ = Describe("CDIConfig instance management", Serial, func() {
+var _ = Describe("CDIConfig instance management", Label("Destructive"), Serial, func() {
 f := framework.NewFramework("cdiconfig-test")
- Context("[Destructive]", Serial, func() {
- It("[test_id:4952]Should re-create the object if deleted", func() {
- By("Verifying the object exists")
- config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- // Save the UID, so we can check it against a new one.
- orgUID := config.GetUID()
- _, _ = fmt.Fprintf(GinkgoWriter, "Original CDIConfig UID: %s\n", orgUID)
- By("Deleting the object")
- err = f.CdiClient.CdiV1beta1().CDIConfigs().Delete(context.TODO(), config.Name, metav1.DeleteOptions{})
- Expect(err).ToNot(HaveOccurred())
+ It("[test_id:4952]Should re-create the object if deleted", func() {
+ By("Verifying the object exists")
+ config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{})
+ Expect(err).ToNot(HaveOccurred())
+ // Save the UID, so we can check it against a new one.
+ orgUID := config.GetUID()
+ _, _ = fmt.Fprintf(GinkgoWriter, "Original CDIConfig UID: %s\n", orgUID)
+ By("Deleting the object")
+ err = f.CdiClient.CdiV1beta1().CDIConfigs().Delete(context.TODO(), config.Name, metav1.DeleteOptions{})
+ Expect(err).ToNot(HaveOccurred())
- Eventually(func() bool {
- newConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{})
- if err != nil {
- return false
- }
- _, _ = fmt.Fprintf(GinkgoWriter, "New CDIConfig UID: %s\n", newConfig.GetUID())
- return orgUID != newConfig.GetUID() && !apiequality.Semantic.DeepEqual(newConfig.Status, cdiv1.CDIConfigStatus{})
- }, time.Second*30, time.Second).Should(BeTrue())
- })
+ Eventually(func() bool {
+ newConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{})
+ if err != nil {
+ return false
+ }
+ _, _ = fmt.Fprintf(GinkgoWriter, "New CDIConfig UID: %s\n", newConfig.GetUID())
+ return orgUID != newConfig.GetUID() && !apiequality.Semantic.DeepEqual(newConfig.Status, cdiv1.CDIConfigStatus{})
+ }, time.Second*30, time.Second).Should(BeTrue())
 })
 })
diff --git a/tests/import_proxy_test.go b/tests/import_proxy_test.go
index 805d7894e3..29cc5f2c10 100644
--- a/tests/import_proxy_test.go
+++ b/tests/import_proxy_test.go
@@ -160,7 +160,7 @@ var _ = Describe("Import Proxy tests", func() {
 }, time.Second*60, time.Second).Should(BeTrue())
 }
- Context("[Destructive]", Serial, func() {
+ Context("", Label("Destructive"), Serial, func() {
 DescribeTable("should", func(args importProxyTestArguments) {
 now := time.Now()
diff --git a/tests/monitoring_test.go b/tests/monitoring_test.go
index 0ed2773bd1..ac59949d64 100644
--- a/tests/monitoring_test.go
+++ b/tests/monitoring_test.go
@@ -39,7 +39,7 @@ const (
 metricConsistentPollingTimeout = 2 * time.Minute
 )
-var _ = Describe("[Destructive] Monitoring Tests", Serial, func() {
+var _ = Describe("Monitoring Tests", Label("Destructive"), Serial, func() {
 f := framework.NewFramework("monitoring-test")
 var (
diff --git a/tests/operator_test.go b/tests/operator_test.go
index 9a1b1158c1..50d7436cee 100644
--- a/tests/operator_test.go
+++ b/tests/operator_test.go
@@ -41,1204 +41,1203 @@ import (
 "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk"
 )
-var _ = Describe("ALL Operator tests", func() {
- Context("[Destructive]", Serial, func() {
- var _ = Describe("Operator tests", func() {
- f := framework.NewFramework("operator-test")
-
- Context("Adding versions to datavolume CRD", func() {
- deploymentName := "cdi-operator"
- var originalReplicaVal int32
-
- AfterEach(func() {
- By(fmt.Sprintf("Setting %s replica number back to the original value %d", deploymentName, originalReplicaVal))
- scaleDeployment(f, deploymentName, originalReplicaVal)
- Eventually(func() int32 {
- depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{})
- Expect(err).ToNot(HaveOccurred())
- return depl.Status.ReadyReplicas
- }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(originalReplicaVal))
- })
- It("[test_id:9696]Alpha version of CDI CRD is removed even if it was briefly a storage version", func() {
- By("Scaling down CDI operator")
- originalReplicaVal = scaleDeployment(f, deploymentName, 0)
- Eventually(func(g Gomega) {
- _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector)
- _, _ = fmt.Fprintf(GinkgoWriter, "couldn't scale down CDI operator deployment; %v\n", err)
- g.Expect(errors.IsNotFound(err)).Should(BeTrue()) - }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(Succeed()) - - By("Appending v1alpha1 version as stored version") - cdiCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - oldVer := cdiCrd.Spec.Versions[0].DeepCopy() - oldVer.Name = "v1alpha1" - cdiCrd.Spec.Versions[0].Storage = false - oldVer.Storage = true - cdiCrd.Spec.Versions = append(cdiCrd.Spec.Versions, *oldVer) - - _, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - By("Restoring CRD with newer version as storage") - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - // This is done because due to the way CRDs are applied, - // the scenario where alpha is the "storage: true" isn't - // possible - so the code doesn't handle it. - for i, ver := range cdiCrd.Spec.Versions { - if ver.Name == "v1alpha1" { - cdiCrd.Spec.Versions[i].Storage = false - } else { - cdiCrd.Spec.Versions[i].Storage = true - } - } - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - By("Scaling up CDI operator") - scaleDeployment(f, deploymentName, originalReplicaVal) - By("Eventually, CDI will restore v1beta1 to be the only stored version") - Eventually(func(g Gomega) { - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - for _, ver := range cdiCrd.Spec.Versions { - g.Expect(ver.Name).Should(Equal("v1beta1")) - g.Expect(ver.Storage).Should(BeTrue()) - } - }, 1*time.Minute, 2*time.Second).Should(Succeed()) - }) - - It("[test_id:9704]Alpha versions of datavolume CRD are removed, previously existing objects remain and are unmodified", func() { - fillData := "123456789012345678901234567890123456789012345678901234567890" - fillDataFSMD5sum := "fabc176de7eb1b6ca90b3aa4c7e035f3" - testFile := utils.DefaultPvcMountPath + "/source.txt" - fillCommand := "echo \"" + fillData + "\" >> " + testFile - - By("Creating datavolume without GC and custom changes") - dv := utils.NewDataVolumeWithHTTPImport("alpha-tests-dv", "500Mi", fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs)) - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.Succeeded, dv.Name) - Expect(err).ToNot(HaveOccurred()) - - pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), dv.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - f.PopulatePVC(pvc, "modify-dv-contents", fillCommand) - - By("Scaling down CDI operator") - originalReplicaVal = scaleDeployment(f, deploymentName, 0) - Eventually(func() bool { - _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) - return errors.IsNotFound(err) - }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(BeTrue()) +var _ = Describe("ALL Operator tests", Label("Destructive"), Serial, func() { + var _ = Describe("Operator 
tests", func() { + f := framework.NewFramework("operator-test") - By("Appending v1alpha1 version as stored version") - dvCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - oldVer := dvCrd.Spec.Versions[0].DeepCopy() - oldVer.Name = "v1alpha1" - dvCrd.Spec.Versions[0].Storage = false - oldVer.Storage = true - dvCrd.Spec.Versions = append(dvCrd.Spec.Versions, *oldVer) - - dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), dvCrd, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(dvCrd.Status.StoredVersions).Should(ContainElement("v1alpha1")) - - By("Making sure we can get datavolume in v1alpha1 version") - Eventually(func() error { - u := &unstructured.Unstructured{} - gvk := schema.GroupVersionKind{ - Group: "cdi.kubevirt.io", - Version: "v1alpha1", - Kind: "DataVolume", - } - u.SetGroupVersionKind(gvk) - nn := crclient.ObjectKey{Namespace: dv.Namespace, Name: dv.Name} - err = f.CrClient.Get(context.TODO(), nn, u) - return err - }, 1*time.Minute, 2*time.Second).Should(BeNil()) - - By("Scaling up CDI operator") - scaleDeployment(f, deploymentName, originalReplicaVal) - By("Eventually, CDI will restore v1beta1 to be the only stored version") - Eventually(func() bool { - dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - for _, ver := range dvCrd.Spec.Versions { - if !(ver.Name == "v1beta1" && ver.Storage == true) { - return false - } - } - return true - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + Context("Adding versions to datavolume CRD", func() { + deploymentName := "cdi-operator" + var originalReplicaVal int32 - By("Datavolume is still there") - _, err = f.CdiClient.CdiV1beta1().DataVolumes(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verify no import - the PVC still includes our custom changes") - md5Match, err := f.VerifyTargetPVCContentMD5(f.Namespace, pvc, testFile, fillDataFSMD5sum) + AfterEach(func() { + By(fmt.Sprintf("Setting %s replica number back to the original value %d", deploymentName, originalReplicaVal)) + scaleDeployment(f, deploymentName, originalReplicaVal) + Eventually(func() int32 { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - Expect(md5Match).To(BeTrue()) - }) + return depl.Status.ReadyReplicas + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(originalReplicaVal)) }) + It("[test_id:9696]Alpha version of CDI CRD is removed even if it was briefly a storage version", func() { + By("Scaling down CDI operator") + originalReplicaVal = scaleDeployment(f, deploymentName, 0) + Eventually(func(g Gomega) { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + _, _ = fmt.Fprintf(GinkgoWriter, "couldn't scale down CDI operator deployment; %v\n", err) + g.Expect(errors.IsNotFound(err)).Should(BeTrue()) + }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(Succeed()) + + By("Appending v1alpha1 version as stored version") + cdiCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + 
oldVer := cdiCrd.Spec.Versions[0].DeepCopy() + oldVer.Name = "v1alpha1" + cdiCrd.Spec.Versions[0].Storage = false + oldVer.Storage = true + cdiCrd.Spec.Versions = append(cdiCrd.Spec.Versions, *oldVer) - It("[test_id:3951]should create a route in OpenShift", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } - - routeClient, err := routeclient.NewForConfig(f.RestConfig) + _, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) - r, err := routeClient.RouteV1().Routes(f.CdiInstallNs).Get(context.TODO(), "cdi-uploadproxy", metav1.GetOptions{}) + By("Restoring CRD with newer version as storage") + cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + // This is done because due to the way CRDs are applied, + // the scenario where alpha is the "storage: true" isn't + // possible - so the code doesn't handle it. + for i, ver := range cdiCrd.Spec.Versions { + if ver.Name == "v1alpha1" { + cdiCrd.Spec.Versions[i].Storage = false + } else { + cdiCrd.Spec.Versions[i].Storage = true + } + } + cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) - Expect(r.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationReencrypt)) + By("Scaling up CDI operator") + scaleDeployment(f, deploymentName, originalReplicaVal) + By("Eventually, CDI will restore v1beta1 to be the only stored version") + Eventually(func(g Gomega) { + cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + for _, ver := range cdiCrd.Spec.Versions { + g.Expect(ver.Name).Should(Equal("v1beta1")) + g.Expect(ver.Storage).Should(BeTrue()) + } + }, 1*time.Minute, 2*time.Second).Should(Succeed()) }) - It("[test_id:4351]should create a prometheus service in cdi namespace", func() { - promService, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) + It("[test_id:9704]Alpha versions of datavolume CRD are removed, previously existing objects remain and are unmodified", func() { + fillData := "123456789012345678901234567890123456789012345678901234567890" + fillDataFSMD5sum := "fabc176de7eb1b6ca90b3aa4c7e035f3" + testFile := utils.DefaultPvcMountPath + "/source.txt" + fillCommand := "echo \"" + fillData + "\" >> " + testFile + + By("Creating datavolume without GC and custom changes") + dv := utils.NewDataVolumeWithHTTPImport("alpha-tests-dv", "500Mi", fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs)) + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.Succeeded, dv.Name) Expect(err).ToNot(HaveOccurred()) - Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) - Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) - originalTimeStamp := promService.ObjectMeta.CreationTimestamp - By("Deleting the service") - err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Delete(context.TODO(), common.PrometheusServiceName, metav1.DeleteOptions{}) + pvc, err := 
f.K8sClient.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), dv.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - By("Verifying the operator has re-created the service") - Eventually(func() bool { - promService, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) - if err == nil { - return originalTimeStamp.Before(&promService.ObjectMeta.CreationTimestamp) - } - return false - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) - Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) - }) + f.PopulatePVC(pvc, "modify-dv-contents", fillCommand) - It("[test_id:3952]add cdi-sa to containerized-data-importer scc", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } + By("Scaling down CDI operator") + originalReplicaVal = scaleDeployment(f, deploymentName, 0) + Eventually(func() bool { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + return errors.IsNotFound(err) + }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(BeTrue()) - secClient, err := secclient.NewForConfig(f.RestConfig) + By("Appending v1alpha1 version as stored version") + dvCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + oldVer := dvCrd.Spec.Versions[0].DeepCopy() + oldVer.Name = "v1alpha1" + dvCrd.Spec.Versions[0].Storage = false + oldVer.Storage = true + dvCrd.Spec.Versions = append(dvCrd.Spec.Versions, *oldVer) - scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) + dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), dvCrd, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) + Expect(dvCrd.Status.StoredVersions).Should(ContainElement("v1alpha1")) - cdiSA := fmt.Sprintf("system:serviceaccount:%s:cdi-sa", f.CdiInstallNs) - Expect(scc.Users).Should(ContainElement(cdiSA)) - }) - - // Condition flags can be found here with their meaning https://github.com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions.md - It("[test_id:3953]Condition flags on CR should be healthy and operating", func() { - cdiObject := getCDI(f) - conditionMap := sdk.GetConditionValues(cdiObject.Status.Conditions) - // Application should be fully operational and healthy. 
- Expect(conditionMap[conditions.ConditionAvailable]).To(Equal(corev1.ConditionTrue)) - Expect(conditionMap[conditions.ConditionProgressing]).To(Equal(corev1.ConditionFalse)) - Expect(conditionMap[conditions.ConditionDegraded]).To(Equal(corev1.ConditionFalse)) - }) + By("Making sure we can get datavolume in v1alpha1 version") + Eventually(func() error { + u := &unstructured.Unstructured{} + gvk := schema.GroupVersionKind{ + Group: "cdi.kubevirt.io", + Version: "v1alpha1", + Kind: "DataVolume", + } + u.SetGroupVersionKind(gvk) + nn := crclient.ObjectKey{Namespace: dv.Namespace, Name: dv.Name} + err = f.CrClient.Get(context.TODO(), nn, u) + return err + }, 1*time.Minute, 2*time.Second).Should(BeNil()) - It("should make CDI config authority", func() { + By("Scaling up CDI operator") + scaleDeployment(f, deploymentName, originalReplicaVal) + By("Eventually, CDI will restore v1beta1 to be the only stored version") Eventually(func() bool { - cdiObject := getCDI(f) - _, ok := cdiObject.Annotations["cdi.kubevirt.io/configAuthority"] - return ok + dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + for _, ver := range dvCrd.Spec.Versions { + if !(ver.Name == "v1beta1" && ver.Storage == true) { + return false + } + } + return true }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + + By("Datavolume is still there") + _, err = f.CdiClient.CdiV1beta1().DataVolumes(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verify no import - the PVC still includes our custom changes") + md5Match, err := f.VerifyTargetPVCContentMD5(f.Namespace, pvc, testFile, fillDataFSMD5sum) + Expect(err).ToNot(HaveOccurred()) + Expect(md5Match).To(BeTrue()) }) }) - var _ = Describe("Tests needing the restore of nodes", func() { - var nodes *corev1.NodeList - var cdiPods *corev1.PodList - var err error + It("[test_id:3951]should create a route in OpenShift", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } - f := framework.NewFramework("operator-delete-cdi-test") + routeClient, err := routeclient.NewForConfig(f.RestConfig) + Expect(err).ToNot(HaveOccurred()) - BeforeEach(func() { - nodes, err = f.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node") - Expect(err).ToNot(HaveOccurred()) + r, err := routeClient.RouteV1().Routes(f.CdiInstallNs).Get(context.TODO(), "cdi-uploadproxy", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - cdiPods = getCDIPods(f) - }) + Expect(r.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationReencrypt)) + }) - AfterEach(func() { - var errors []error - var newCdiPods *corev1.PodList - By("Restoring nodes") - for _, node := range nodes.Items { - newNode, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + It("[test_id:4351]should create a prometheus service in cdi namespace", func() { + promService, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) + Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) + originalTimeStamp := promService.ObjectMeta.CreationTimestamp - newNode.Spec = 
node.Spec - _, err = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}) - if err != nil { - errors = append(errors, err) - } + By("Deleting the service") + err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Delete(context.TODO(), common.PrometheusServiceName, metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verifying the operator has re-created the service") + Eventually(func() bool { + promService, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) + if err == nil { + return originalTimeStamp.Before(&promService.ObjectMeta.CreationTimestamp) } - Expect(errors).Should(BeEmpty(), "failed restoring one or more nodes") + return false + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) + Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) + }) - By("Waiting for there to be as many CDI pods as before") - Eventually(func() bool { - newCdiPods = getCDIPods(f) - By(fmt.Sprintf("number of cdi pods: %d\n new number of cdi pods: %d\n", len(cdiPods.Items), len(newCdiPods.Items))) - return len(cdiPods.Items) == len(newCdiPods.Items) - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + It("[test_id:3952]add cdi-sa to containerized-data-importer scc", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } - for _, newCdiPod := range newCdiPods.Items { - By(fmt.Sprintf("Waiting for CDI pod %s to be ready", newCdiPod.Name)) - err := utils.WaitTimeoutForPodReady(f.K8sClient, newCdiPod.Name, newCdiPod.Namespace, 20*time.Minute) - Expect(err).ToNot(HaveOccurred()) - } + secClient, err := secclient.NewForConfig(f.RestConfig) + Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - services, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed getting CDI services") - for _, service := range services.Items { - if service.Name != "cdi-prometheus-metrics" { - endpoint, err := f.K8sClient.CoreV1().Endpoints(f.CdiInstallNs).Get(context.TODO(), service.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed getting service endpoint") - for _, subset := range endpoint.Subsets { - if len(subset.NotReadyAddresses) > 0 { - By(fmt.Sprintf("Not all endpoints of service %s are ready", service.Name)) - return false - } - } - } - } - return true - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - }) + scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - It("should deploy components that tolerate CriticalAddonsOnly taint", func() { - cr := getCDI(f) - criticalAddonsToleration := corev1.Toleration{ - Key: "CriticalAddonsOnly", - Operator: corev1.TolerationOpExists, - } + cdiSA := fmt.Sprintf("system:serviceaccount:%s:cdi-sa", f.CdiInstallNs) + Expect(scc.Users).Should(ContainElement(cdiSA)) + }) - if !tolerationExists(cr.Spec.Infra.NodePlacement.Tolerations, criticalAddonsToleration) { - Skip("Unexpected CDI CR (not from cdi-cr.yaml), doesn't tolerate CriticalAddonsOnly") - } + // Condition flags can be found here with their meaning https://github.com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions.md + It("[test_id:3953]Condition flags on CR should be healthy and operating", func() { + cdiObject := getCDI(f) + 
conditionMap := sdk.GetConditionValues(cdiObject.Status.Conditions) + // Application should be fully operational and healthy. + Expect(conditionMap[conditions.ConditionAvailable]).To(Equal(corev1.ConditionTrue)) + Expect(conditionMap[conditions.ConditionProgressing]).To(Equal(corev1.ConditionFalse)) + Expect(conditionMap[conditions.ConditionDegraded]).To(Equal(corev1.ConditionFalse)) + }) - labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"cdi.kubevirt.io/testing": ""}} - cdiTestPods, err := f.K8sClient.CoreV1().Pods(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.Set(labelSelector.MatchLabels).String(), - }) - Expect(err).ToNot(HaveOccurred(), "failed listing cdi testing pods") - Expect(cdiTestPods.Items).ToNot(BeEmpty(), "no cdi testing pods found") - - By("adding taints to all nodes") - criticalPodTaint := corev1.Taint{ - Key: "CriticalAddonsOnly", - Value: "", - Effect: corev1.TaintEffectNoExecute, - } + It("should make CDI config authority", func() { + Eventually(func() bool { + cdiObject := getCDI(f) + _, ok := cdiObject.Annotations["cdi.kubevirt.io/configAuthority"] + return ok + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + }) + }) - for _, node := range nodes.Items { - Eventually(func() bool { - nodeCopy, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + var _ = Describe("Tests needing the restore of nodes", func() { + var nodes *corev1.NodeList + var cdiPods *corev1.PodList + var err error - if nodeHasTaint(*nodeCopy, criticalPodTaint) { - return true - } + f := framework.NewFramework("operator-delete-cdi-test") - nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, criticalPodTaint) - _, _ = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), nodeCopy, metav1.UpdateOptions{}) - return false - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + BeforeEach(func() { + nodes, err = f.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node") + Expect(err).ToNot(HaveOccurred()) + + cdiPods = getCDIPods(f) + }) + + AfterEach(func() { + var errors []error + var newCdiPods *corev1.PodList + By("Restoring nodes") + for _, node := range nodes.Items { + newNode, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + newNode.Spec = node.Spec + _, err = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}) + if err != nil { + errors = append(errors, err) } + } + Expect(errors).Should(BeEmpty(), "failed restoring one or more nodes") + + By("Waiting for there to be as many CDI pods as before") + Eventually(func() bool { + newCdiPods = getCDIPods(f) + By(fmt.Sprintf("number of cdi pods: %d\n new number of cdi pods: %d\n", len(cdiPods.Items), len(newCdiPods.Items))) + return len(cdiPods.Items) == len(newCdiPods.Items) + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + + for _, newCdiPod := range newCdiPods.Items { + By(fmt.Sprintf("Waiting for CDI pod %s to be ready", newCdiPod.Name)) + err := utils.WaitTimeoutForPodReady(f.K8sClient, newCdiPod.Name, newCdiPod.Namespace, 20*time.Minute) + Expect(err).ToNot(HaveOccurred()) + } - By("Waiting for all CDI testing pods to terminate") - Eventually(func() bool { - for _, cdiTestPod := range cdiTestPods.Items { - By(fmt.Sprintf("CDI test pod: %s", cdiTestPod.Name)) - _, err := 
f.K8sClient.CoreV1().Pods(cdiTestPod.Namespace).Get(context.TODO(), cdiTestPod.Name, metav1.GetOptions{}) - if !errors.IsNotFound(err) { - return false + Eventually(func() bool { + services, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed getting CDI services") + for _, service := range services.Items { + if service.Name != "cdi-prometheus-metrics" { + endpoint, err := f.K8sClient.CoreV1().Endpoints(f.CdiInstallNs).Get(context.TODO(), service.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed getting service endpoint") + for _, subset := range endpoint.Subsets { + if len(subset.NotReadyAddresses) > 0 { + By(fmt.Sprintf("Not all endpoints of service %s are ready", service.Name)) + return false + } } } - return true - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - - By("Checking that all the non-testing pods are running") - for _, cdiPod := range cdiPods.Items { - if _, isTestingComponent := cdiPod.Labels["cdi.kubevirt.io/testing"]; isTestingComponent { - continue - } - By(fmt.Sprintf("Non-test CDI pod: %s", cdiPod.Name)) - podUpdated, err := f.K8sClient.CoreV1().Pods(cdiPod.Namespace).Get(context.TODO(), cdiPod.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed setting taint on node") - Expect(podUpdated.Status.Phase).To(Equal(corev1.PodRunning)) } - }) - + return true + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) }) - var _ = Describe("Operator delete CDI CR tests", func() { - var cr *cdiv1.CDI - f := framework.NewFramework("operator-delete-cdi-test") - var cdiPods *corev1.PodList - - BeforeEach(func() { - cr = getCDI(f) - cdiPods = getCDIPods(f) - }) - - removeCDI := func() { - removeCDI(f, cr) + It("should deploy components that tolerate CriticalAddonsOnly taint", func() { + cr := getCDI(f) + criticalAddonsToleration := corev1.Toleration{ + Key: "CriticalAddonsOnly", + Operator: corev1.TolerationOpExists, } - ensureCDI := func() { - ensureCDI(f, cr, cdiPods) + if !tolerationExists(cr.Spec.Infra.NodePlacement.Tolerations, criticalAddonsToleration) { + Skip("Unexpected CDI CR (not from cdi-cr.yaml), doesn't tolerate CriticalAddonsOnly") } - AfterEach(func() { - removeCDI() - ensureCDI() + labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"cdi.kubevirt.io/testing": ""}} + cdiTestPods, err := f.K8sClient.CoreV1().Pods(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels.Set(labelSelector.MatchLabels).String(), }) + Expect(err).ToNot(HaveOccurred(), "failed listing cdi testing pods") + Expect(cdiTestPods.Items).ToNot(BeEmpty(), "no cdi testing pods found") + + By("adding taints to all nodes") + criticalPodTaint := corev1.Taint{ + Key: "CriticalAddonsOnly", + Value: "", + Effect: corev1.TaintEffectNoExecute, + } - It("[test_id:4986]should remove/install CDI a number of times successfully", func() { - for i := 0; i < 5; i++ { - err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) + for _, node := range nodes.Items { + Eventually(func() bool { + nodeCopy, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - ensureCDI() - } - }) - It("[test_id:3954]should delete an upload pod", func() { - dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") - - By("Creating datavolume") - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - 
f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + if nodeHasTaint(*nodeCopy, criticalPodTaint) { + return true + } - pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - uploadPodName := utils.UploadPodName(pvc) + nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, criticalPodTaint) + _, _ = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), nodeCopy, metav1.UpdateOptions{}) + return false + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + } - By("Waiting for pod to be running") - Eventually(func() bool { - pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), uploadPodName, metav1.GetOptions{}) - if errors.IsNotFound(err) { + By("Waiting for all CDI testing pods to terminate") + Eventually(func() bool { + for _, cdiTestPod := range cdiTestPods.Items { + By(fmt.Sprintf("CDI test pod: %s", cdiTestPod.Name)) + _, err := f.K8sClient.CoreV1().Pods(cdiTestPod.Namespace).Get(context.TODO(), cdiTestPod.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { return false } - Expect(err).ToNot(HaveOccurred()) - return pod.Status.Phase == corev1.PodRunning - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + } + return true + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - if us := cr.Spec.UninstallStrategy; us != nil && *us == cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist { - err = utils.DeleteDataVolume(f.CdiClient, dv.Namespace, dv.Name) - Expect(err).ToNot(HaveOccurred()) + By("Checking that all the non-testing pods are running") + for _, cdiPod := range cdiPods.Items { + if _, isTestingComponent := cdiPod.Labels["cdi.kubevirt.io/testing"]; isTestingComponent { + continue } + By(fmt.Sprintf("Non-test CDI pod: %s", cdiPod.Name)) + podUpdated, err := f.K8sClient.CoreV1().Pods(cdiPod.Namespace).Get(context.TODO(), cdiPod.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed setting taint on node") + Expect(podUpdated.Status.Phase).To(Equal(corev1.PodRunning)) + } + }) - By("Deleting CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + }) - By("Waiting for pod to be deleted") - Eventually(func() bool { - _, err = f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), "cdi-upload-"+dv.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true - } - Expect(err).ToNot(HaveOccurred()) - return false - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + var _ = Describe("Operator delete CDI CR tests", func() { + var cr *cdiv1.CDI + f := framework.NewFramework("operator-delete-cdi-test") + var cdiPods *corev1.PodList - It("[test_id:3955]should block CDI delete", func() { - uninstallStrategy := cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist - updateUninstallStrategy(f, &uninstallStrategy) + BeforeEach(func() { + cr = getCDI(f) + cdiPods = getCDIPods(f) + }) - By("Creating datavolume") - dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + removeCDI := func() { + removeCDI(f, cr) + } - By("Creating datavolume with DataImportCron label") - dv = utils.NewDataVolumeForUpload("retain-me", "1Gi") - dv.Labels = map[string]string{common.DataImportCronLabel: "dic"} - dv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - 
Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + ensureCDI := func() { + ensureCDI(f, cr, cdiPods) + } - By("Cannot delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("there are still DataVolumes present")) + AfterEach(func() { + removeCDI() + ensureCDI() + }) - By("Delete the unlabeled datavolume") - err = f.CdiClient.CdiV1beta1().DataVolumes(f.Namespace.Name).Delete(context.TODO(), "delete-me", metav1.DeleteOptions{}) + It("[test_id:4986]should remove/install CDI a number of times successfully", func() { + for i := 0; i < 5; i++ { + err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) + ensureCDI() + } + }) - By("Can delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).ToNot(HaveOccurred()) - }) + It("[test_id:3954]should delete an upload pod", func() { + dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") - It("[test_id:8087]CDI CR deletion should delete DataImportCron CRD and all DataImportCrons", func() { - reg, err := getDataVolumeSourceRegistry(f) - Expect(err).ToNot(HaveOccurred()) + By("Creating datavolume") + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + + pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + uploadPodName := utils.UploadPodName(pvc) - By("Create new DataImportCron") - cron := utils.NewDataImportCron("cron-test", "5Gi", scheduleEveryMinute, "ds", 1, *reg) - cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) + By("Waiting for pod to be running") + Eventually(func() bool { + pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), uploadPodName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false + } Expect(err).ToNot(HaveOccurred()) + return pod.Status.Phase == corev1.PodRunning + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - By("Verify cron first import completed") - Eventually(func() bool { - cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Get(context.TODO(), cron.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - upToDateCond := controller.FindDataImportCronConditionByType(cron, cdiv1.DataImportCronUpToDate) - return upToDateCond != nil && upToDateCond.Status == corev1.ConditionTrue - }, dataImportCronTimeout, pollingInterval).Should(BeTrue()) + if us := cr.Spec.UninstallStrategy; us != nil && *us == cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist { + err = utils.DeleteDataVolume(f.CdiClient, dv.Namespace, dv.Name) + Expect(err).ToNot(HaveOccurred()) + } - pvc := cron.Status.LastImportedPVC - Expect(pvc).ToNot(BeNil()) + By("Deleting CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - By("Verify dv succeeded") - err = utils.WaitForDataVolumePhase(f, pvc.Namespace, cdiv1.Succeeded, pvc.Name) + By("Waiting for pod to be deleted") + Eventually(func() bool { + _, err = f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), "cdi-upload-"+dv.Name, 
metav1.GetOptions{}) + if errors.IsNotFound(err) { + return true + } Expect(err).ToNot(HaveOccurred()) + return false + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - By("Start goroutine creating DataImportCrons") - go func() { - defer GinkgoRecover() - var err error - for i := 0; i < 100 && err == nil; i++ { - cronName := fmt.Sprintf("cron-test-%d", i) - cron := utils.NewDataImportCron(cronName, "5Gi", scheduleEveryMinute, "ds", 1, *reg) - _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) - } - }() + It("[test_id:3955]should block CDI delete", func() { + uninstallStrategy := cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist + updateUninstallStrategy(f, &uninstallStrategy) - removeCDI() + By("Creating datavolume") + dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - By("Verify no DataImportCrons are found") - Eventually(func() bool { - _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) - return err != nil && errors.IsNotFound(err) - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + By("Creating datavolume with DataImportCron label") + dv = utils.NewDataVolumeForUpload("retain-me", "1Gi") + dv.Labels = map[string]string{common.DataImportCronLabel: "dic"} + dv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - By("Verify no cronjobs left") - Eventually(func() bool { - cronjobs, err := f.K8sClient.BatchV1().CronJobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ToNot(HaveOccurred()) - return len(cronjobs.Items) == 0 - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + By("Cannot delete CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("there are still DataVolumes present")) - By("Verify no jobs left") - Eventually(func() bool { - jobs, err := f.K8sClient.BatchV1().Jobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ToNot(HaveOccurred()) - return len(jobs.Items) == 0 - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - }) + By("Delete the unlabeled datavolume") + err = f.CdiClient.CdiV1beta1().DataVolumes(f.Namespace.Name).Delete(context.TODO(), "delete-me", metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By("Can delete CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) + Expect(err).ToNot(HaveOccurred()) }) - var _ = Describe("[rfe_id:4784][crit:high] CDI Operator deployment + CDI CR delete tests", func() { - var restoreCdiCr *cdiv1.CDI - var restoreCdiOperatorDeployment *appsv1.Deployment - f := framework.NewFramework("operator-delete-cdi-test") + It("[test_id:8087]CDI CR deletion should delete DataImportCron CRD and all DataImportCrons", func() { + reg, err := getDataVolumeSourceRegistry(f) + Expect(err).ToNot(HaveOccurred()) + + By("Create new DataImportCron") + cron := utils.NewDataImportCron("cron-test", "5Gi", scheduleEveryMinute, "ds", 1, *reg) + cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) 
+ Expect(err).ToNot(HaveOccurred()) - removeCDI := func() { - By("Deleting CDI CR") - err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), restoreCdiCr.Name, metav1.DeleteOptions{}) + By("Verify cron first import completed") + Eventually(func() bool { + cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Get(context.TODO(), cron.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + upToDateCond := controller.FindDataImportCronConditionByType(cron, cdiv1.DataImportCronUpToDate) + return upToDateCond != nil && upToDateCond.Status == corev1.ConditionTrue + }, dataImportCronTimeout, pollingInterval).Should(BeTrue()) - By("Waiting for CDI CR and infra deployments to be deleted after CDI CR was removed") - Eventually(func() bool { return infraDeploymentGone(f) && crGone(f, restoreCdiCr) }, 15*time.Minute, 2*time.Second).Should(BeTrue()) + pvc := cron.Status.LastImportedPVC + Expect(pvc).ToNot(BeNil()) - By("Deleting CDI operator") - err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Delete(context.TODO(), "cdi-operator", metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Verify dv succeeded") + err = utils.WaitForDataVolumePhase(f, pvc.Namespace, cdiv1.Succeeded, pvc.Name) + Expect(err).ToNot(HaveOccurred()) - By("Waiting for CDI operator deployment to be deleted") - Eventually(func() bool { return cdiOperatorDeploymentGone(f) }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - } + By("Start goroutine creating DataImportCrons") + go func() { + defer GinkgoRecover() + var err error + for i := 0; i < 100 && err == nil; i++ { + cronName := fmt.Sprintf("cron-test-%d", i) + cron := utils.NewDataImportCron(cronName, "5Gi", scheduleEveryMinute, "ds", 1, *reg) + _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) + } + }() - ensureCDI := func(cr *cdiv1.CDI) { - By("Re-creating CDI (CR and deployment)") - _, err := f.CdiClient.CdiV1beta1().CDIs().Create(context.TODO(), cr, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) + removeCDI() - By("Recreating CDI operator") - _, err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Create(context.TODO(), restoreCdiOperatorDeployment, metav1.CreateOptions{}) + By("Verify no DataImportCrons are found") + Eventually(func() bool { + _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + return err != nil && errors.IsNotFound(err) + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + + By("Verify no cronjobs left") + Eventually(func() bool { + cronjobs, err := f.K8sClient.BatchV1().CronJobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) Expect(err).ToNot(HaveOccurred()) + return len(cronjobs.Items) == 0 + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - By("Verifying CDI apiserver, deployment, uploadproxy exist, before continuing") - Eventually(func() bool { return infraDeploymentAvailable(f, restoreCdiCr) }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI deployments") + By("Verify no jobs left") + Eventually(func() bool { + jobs, err := f.K8sClient.BatchV1().Jobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + return len(jobs.Items) == 0 + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + }) + }) - By("Verifying CDI config object exists, before continuing") - Eventually(func() bool { - _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, 
metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false - } - Expect(err).ToNot(HaveOccurred(), "Unable to read CDI Config, %v, expect more failures", err) - return true - }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI Config, expect more failures") - } + var _ = Describe("[rfe_id:4784][crit:high] CDI Operator deployment + CDI CR delete tests", func() { + var restoreCdiCr *cdiv1.CDI + var restoreCdiOperatorDeployment *appsv1.Deployment + f := framework.NewFramework("operator-delete-cdi-test") - BeforeEach(func() { - currentCR := getCDI(f) - restoreCdiCr = &cdiv1.CDI{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentCR.Name, - }, - Spec: currentCR.Spec, - } + removeCDI := func() { + By("Deleting CDI CR") + err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), restoreCdiCr.Name, metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - currentCdiOperatorDeployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), "cdi-operator", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Waiting for CDI CR and infra deployments to be deleted after CDI CR was removed") + Eventually(func() bool { return infraDeploymentGone(f) && crGone(f, restoreCdiCr) }, 15*time.Minute, 2*time.Second).Should(BeTrue()) - restoreCdiOperatorDeployment = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cdi-operator", - Namespace: f.CdiInstallNs, - Labels: currentCdiOperatorDeployment.Labels, - }, - Spec: currentCdiOperatorDeployment.Spec, - } + By("Deleting CDI operator") + err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Delete(context.TODO(), "cdi-operator", metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - removeCDI() - }) + By("Waiting for CDI operator deployment to be deleted") + Eventually(func() bool { return cdiOperatorDeploymentGone(f) }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + } - AfterEach(func() { - removeCDI() - ensureCDI(restoreCdiCr) - }) + ensureCDI := func(cr *cdiv1.CDI) { + By("Re-creating CDI (CR and deployment)") + _, err := f.CdiClient.CdiV1beta1().CDIs().Create(context.TODO(), cr, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) - It("[test_id:4782] Should install CDI infrastructure pods with node placement", func() { - By("Creating modified CDI CR, with infra nodePlacement") - localSpec := restoreCdiCr.Spec.DeepCopy() - nodePlacement := f.TestNodePlacementValues() + By("Recreating CDI operator") + _, err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Create(context.TODO(), restoreCdiOperatorDeployment, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) - localSpec.Infra.NodePlacement = nodePlacement + By("Verifying CDI apiserver, deployment, uploadproxy exist, before continuing") + Eventually(func() bool { return infraDeploymentAvailable(f, restoreCdiCr) }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI deployments") - tempCdiCr := &cdiv1.CDI{ - ObjectMeta: metav1.ObjectMeta{ - Name: restoreCdiCr.Name, - }, - Spec: *localSpec, + By("Verifying CDI config object exists, before continuing") + Eventually(func() bool { + _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false } + Expect(err).ToNot(HaveOccurred(), "Unable to read CDI Config, %v, expect more failures", err) + return true + }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI Config, expect more failures") + } - 
ensureCDI(tempCdiCr) + BeforeEach(func() { + currentCR := getCDI(f) + restoreCdiCr = &cdiv1.CDI{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentCR.Name, + }, + Spec: currentCR.Spec, + } - By("Testing all infra deployments have the chosen node placement") - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - deployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verify the deployment has nodeSelector") - Expect(deployment.Spec.Template.Spec.NodeSelector).To(Equal(framework.NodeSelectorTestValue)) + currentCdiOperatorDeployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), "cdi-operator", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - By("Verify the deployment has affinity") - checkAntiAffinity(deploymentName, deployment.Spec.Template.Spec.Affinity) + restoreCdiOperatorDeployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cdi-operator", + Namespace: f.CdiInstallNs, + Labels: currentCdiOperatorDeployment.Labels, + }, + Spec: currentCdiOperatorDeployment.Spec, + } - By("Verify the deployment has tolerations") - Expect(deployment.Spec.Template.Spec.Tolerations).To(ContainElement(framework.TolerationsTestValue[0])) - } - }) + removeCDI() }) - var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]Strict Reconciliation tests", func() { - f := framework.NewFramework("strict-reconciliation-test") - - It("[test_id:5573]cdi-deployment replicas back to original value on attempt to scale", func() { - By("Overwrite number of replicas with 10") - deploymentName := "cdi-deployment" - originalReplicaVal := scaleDeployment(f, deploymentName, 10) + AfterEach(func() { + removeCDI() + ensureCDI(restoreCdiCr) + }) - By("Ensuring original value of replicas restored & extra deployment pod was cleaned up") - Eventually(func() bool { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) - return *depl.Spec.Replicas == originalReplicaVal && err == nil - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - }) + It("[test_id:4782] Should install CDI infrastructure pods with node placement", func() { + By("Creating modified CDI CR, with infra nodePlacement") + localSpec := restoreCdiCr.Spec.DeepCopy() + nodePlacement := f.TestNodePlacementValues() - It("[test_id:5574]Service spec.selector restored on overwrite attempt", func() { - service, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - originalSelectorVal := service.Spec.Selector[common.CDIComponentLabel] + localSpec.Infra.NodePlacement = nodePlacement - By("Overwrite spec.selector with empty string") - service.Spec.Selector[common.CDIComponentLabel] = "" - _, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Update(context.TODO(), service, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + tempCdiCr := &cdiv1.CDI{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoreCdiCr.Name, + }, + Spec: *localSpec, + } - Eventually(func() bool { - svc, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Waiting until original spec.selector value: 
%s\n Matches current: %s\n", originalSelectorVal, svc.Spec.Selector[common.CDIComponentLabel])) - return svc.Spec.Selector[common.CDIComponentLabel] == originalSelectorVal - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + ensureCDI(tempCdiCr) - It("[test_id:5575]ClusterRole verb restored on deletion attempt", func() { - clusterRole, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) + By("Testing all infra deployments have the chosen node placement") + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + deployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Verify the deployment has nodeSelector") + Expect(deployment.Spec.Template.Spec.NodeSelector).To(Equal(framework.NodeSelectorTestValue)) - By("Remove list verb") - clusterRole.Rules = []rbacv1.PolicyRule{ - { - APIGroups: []string{ - "cdi.kubevirt.io", - }, - Resources: []string{ - "cdiconfigs", - }, - Verbs: []string{ - "get", - // "list", - "watch", - }, - }, - } + By("Verify the deployment has affinity") + checkAntiAffinity(deploymentName, deployment.Spec.Template.Spec.Affinity) - _, err = f.K8sClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Verify the deployment has tolerations") + Expect(deployment.Spec.Template.Spec.Tolerations).To(ContainElement(framework.TolerationsTestValue[0])) + } + }) + }) - Eventually(func() bool { - role, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Waiting until list verb exists") - for _, verb := range role.Rules[0].Verbs { - if verb == "list" { - return true - } - } - return false - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]Strict Reconciliation tests", func() { + f := framework.NewFramework("strict-reconciliation-test") + + It("[test_id:5573]cdi-deployment replicas back to original value on attempt to scale", func() { + By("Overwrite number of replicas with 10") + deploymentName := "cdi-deployment" + originalReplicaVal := scaleDeployment(f, deploymentName, 10) - It("[test_id:5576]ServiceAccount values restored on update attempt", func() { - serviceAccount, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) + By("Ensuring original value of replicas restored & extra deployment pod was cleaned up") + Eventually(func() bool { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + return *depl.Spec.Replicas == originalReplicaVal && err == nil + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + }) - By("Change one of ServiceAccount labels") - serviceAccount.Labels[common.CDIComponentLabel] = "somebadvalue" + It("[test_id:5574]Service spec.selector restored on overwrite attempt", func() { + service, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + originalSelectorVal := service.Spec.Selector[common.CDIComponentLabel] - _, err = 
f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) + By("Overwrite spec.selector with empty string") + service.Spec.Selector[common.CDIComponentLabel] = "" + _, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Update(context.TODO(), service, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + svc, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By(fmt.Sprintf("Waiting until original spec.selector value: %s\n Matches current: %s\n", originalSelectorVal, svc.Spec.Selector[common.CDIComponentLabel])) + return svc.Spec.Selector[common.CDIComponentLabel] == originalSelectorVal + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - sa, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Waiting until label value restored") - return sa.Labels[common.CDIComponentLabel] == "" - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + It("[test_id:5575]ClusterRole verb restored on deletion attempt", func() { + clusterRole, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - It("[test_id:5577]Certificate restored to ConfigMap on deletion attempt", func() { - configMap, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Remove list verb") + clusterRole.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "cdi.kubevirt.io", + }, + Resources: []string{ + "cdiconfigs", + }, + Verbs: []string{ + "get", + // "list", + "watch", + }, + }, + } - By("Empty ConfigMap's data") - configMap.Data = map[string]string{} + _, err = f.K8sClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + Eventually(func() bool { + role, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Waiting until list verb exists") + for _, verb := range role.Rules[0].Verbs { + if verb == "list" { + return true + } + } + return false + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - cm, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Waiting until ConfigMap's data is not empty") - return len(cm.Data) != 0 - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + It("[test_id:5576]ServiceAccount values restored on update attempt", func() { + serviceAccount, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - It("[test_id:5578]Cant enable featureGate by editing CDIConfig resource", func() { - feature := "nonExistantFeature" - cdiConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Change one of 
ServiceAccount labels") + serviceAccount.Labels[common.CDIComponentLabel] = "somebadvalue" - By("Enable non existent featureGate") - cdiConfig.Spec = cdiv1.CDIConfigSpec{ - FeatureGates: []string{feature}, - } + _, err = f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Update(context.TODO(), cdiConfig, metav1.UpdateOptions{}) + Eventually(func() bool { + sa, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Waiting until label value restored") + return sa.Labels[common.CDIComponentLabel] == "" + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Waiting until %s featureGate doesn't exist", feature)) - for _, fgate := range config.Spec.FeatureGates { - if fgate == feature { - return false - } - } - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + It("[test_id:5577]Certificate restored to ConfigMap on deletion attempt", func() { + configMap, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - It("SCC priority always reset to default", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } + By("Empty ConfigMap's data") + configMap.Data = map[string]string{} - secClient, err := secclient.NewForConfig(f.RestConfig) - Expect(err).ToNot(HaveOccurred()) + _, err = f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) + Eventually(func() bool { + cm, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By("Waiting until ConfigMap's data is not empty") + return len(cm.Data) != 0 + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - By("Overwrite priority of SCC") - scc.Priority = ptr.To[int32](10) - _, err = secClient.SecurityV1().SecurityContextConstraints().Update(context.TODO(), scc, metav1.UpdateOptions{}) + It("[test_id:5578]Cant enable featureGate by editing CDIConfig resource", func() { + feature := "nonExistantFeature" + cdiConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By("Enable non existent featureGate") + cdiConfig.Spec = cdiv1.CDIConfigSpec{ + FeatureGates: []string{feature}, + } + + _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Update(context.TODO(), cdiConfig, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + By(fmt.Sprintf("Waiting until %s featureGate doesn't exist", feature)) + for _, fgate := range config.Spec.FeatureGates { + if fgate == feature { + return false + } + } + return true + }, 2*time.Minute, 
1*time.Second).Should(BeTrue()) + }) - Eventually(func() *int32 { - scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - return scc.Priority - }, 2*time.Minute, 1*time.Second).Should(BeNil()) - }) - It("[test_id:4785] Should update infra pod number when modify the replica in CDI CR", func() { - By("Modify the replica separately") - cdi := getCDI(f) - apiserverTmpReplica := int32(2) - deploymentTmpReplica := int32(3) - uploadproxyTmpReplica := int32(4) - - cdi.Spec.Infra.APIServerReplicas = &apiserverTmpReplica - cdi.Spec.Infra.DeploymentReplicas = &deploymentTmpReplica - cdi.Spec.Infra.UploadProxyReplicas = &uploadproxyTmpReplica - - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + It("SCC priority always reset to default", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } + + secClient, err := secclient.NewForConfig(f.RestConfig) + Expect(err).ToNot(HaveOccurred()) + + scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + By("Overwrite priority of SCC") + scc.Priority = ptr.To[int32](10) + _, err = secClient.SecurityV1().SecurityContextConstraints().Update(context.TODO(), scc, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() *int32 { + scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) + return scc.Priority + }, 2*time.Minute, 1*time.Second).Should(BeNil()) + }) + It("[test_id:4785] Should update infra pod number when modify the replica in CDI CR", func() { + By("Modify the replica separately") + cdi := getCDI(f) + apiserverTmpReplica := int32(2) + deploymentTmpReplica := int32(3) + uploadproxyTmpReplica := int32(4) + + cdi.Spec.Infra.APIServerReplicas = &apiserverTmpReplica + cdi.Spec.Infra.DeploymentReplicas = &deploymentTmpReplica + cdi.Spec.Infra.UploadProxyReplicas = &uploadproxyTmpReplica + + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - if err != nil || *depl.Spec.Replicas == 1 { - return false - } + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + if err != nil || *depl.Spec.Replicas == 1 { + return false } - By("Replicas in deployments update complete") - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - - By("Verify the replica of cdi-apiserver") + } + By("Replicas in deployments update complete") + return true + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - Eventually(func() bool { - return getPodNumByPrefix(f, "cdi-apiserver") == 2 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + By("Verify the replica of cdi-apiserver") - By("Verify the replica of cdi-deployment") - Eventually(func() 
bool { - return getPodNumByPrefix(f, "cdi-deployment") == 3 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-apiserver") == 2 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Verify the replica of cdi-uploadproxy") - Eventually(func() bool { - return getPodNumByPrefix(f, "cdi-uploadproxy") == 4 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + By("Verify the replica of cdi-deployment") + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-deployment") == 3 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Reset replica for CDI CR") - cdi = getCDI(f) - cdi.Spec.Infra.APIServerReplicas = nil - cdi.Spec.Infra.DeploymentReplicas = nil - cdi.Spec.Infra.UploadProxyReplicas = nil + By("Verify the replica of cdi-uploadproxy") + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-uploadproxy") == 4 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Reset replica for CDI CR") + cdi = getCDI(f) + cdi.Spec.Infra.APIServerReplicas = nil + cdi.Spec.Infra.DeploymentReplicas = nil + cdi.Spec.Infra.UploadProxyReplicas = nil - By("Replica should be 1 when replica dosen't set in CDI CR") + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) - if err != nil || *depl.Spec.Replicas != 1 { - return false - } + By("Replica should be 1 when replica dosen't set in CDI CR") + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) + if err != nil || *depl.Spec.Replicas != 1 { + return false } - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + } + return true - }) - It("Should update infra deployments when modify customizeComponents in CDI Cr", func() { - By("Modify the customizeComponents separately") - cdi := getCDI(f) - testJSONPatch := "test-json-patch" - testStrategicPatch := "test-strategic-patch" - testMergePatch := "test-merge-patch" - cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{ - Patches: []cdiv1.CustomizeComponentsPatch{ - { - ResourceName: "cdi-apiserver", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`[{"op":"add","path":"/metadata/annotations/%s","value":"%s"}]`, testJSONPatch, testJSONPatch), - Type: cdiv1.JSONPatchType, - }, - { - ResourceName: "cdi-deployment", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testStrategicPatch, testStrategicPatch), - Type: cdiv1.StrategicMergePatchType, - }, - { - ResourceName: "cdi-uploadproxy", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testMergePatch, testMergePatch), - Type: 
cdiv1.MergePatchType, - }, + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + + }) + It("Should update infra deployments when modify customizeComponents in CDI Cr", func() { + By("Modify the customizeComponents separately") + cdi := getCDI(f) + testJSONPatch := "test-json-patch" + testStrategicPatch := "test-strategic-patch" + testMergePatch := "test-merge-patch" + cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{ + Patches: []cdiv1.CustomizeComponentsPatch{ + { + ResourceName: "cdi-apiserver", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`[{"op":"add","path":"/metadata/annotations/%s","value":"%s"}]`, testJSONPatch, testJSONPatch), + Type: cdiv1.JSONPatchType, }, - Flags: &cdiv1.Flags{ - API: map[string]string{"v": "5", "skip_headers": ""}, - Controller: map[string]string{"v": "6", "skip_headers": ""}, - UploadProxy: map[string]string{"v": "7", "skip_headers": ""}, + { + ResourceName: "cdi-deployment", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testStrategicPatch, testStrategicPatch), + Type: cdiv1.StrategicMergePatchType, + }, + { + ResourceName: "cdi-uploadproxy", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testMergePatch, testMergePatch), + Type: cdiv1.MergePatchType, }, + }, + Flags: &cdiv1.Flags{ + API: map[string]string{"v": "5", "skip_headers": ""}, + Controller: map[string]string{"v": "6", "skip_headers": ""}, + UploadProxy: map[string]string{"v": "7", "skip_headers": ""}, + }, + } + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] == "" { + return false + } } - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Patches applied") + return true + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + verifyPatches := func(deployment, annoKey, annoValue string, desiredArgs ...string) { + By(fmt.Sprintf("Verify patches of %s", deployment)) Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - - if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] == "" { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deployment, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + args := strings.Join(depl.Spec.Template.Spec.Containers[0].Args, " ") + for _, a := range desiredArgs { + if !strings.Contains(args, a) { return false } } - By("Patches applied") - return true + return depl.GetAnnotations()[annoKey] == annoValue }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + } + verifyPatches("cdi-apiserver", testJSONPatch, testJSONPatch, "-v 5", "-skip_headers") + verifyPatches("cdi-deployment", testStrategicPatch, testStrategicPatch, "-v 6", "-skip_headers") + verifyPatches("cdi-uploadproxy", testMergePatch, testMergePatch, "-v 7", "-skip_headers") - verifyPatches := 
func(deployment, annoKey, annoValue string, desiredArgs ...string) { - By(fmt.Sprintf("Verify patches of %s", deployment)) - Eventually(func() bool { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deployment, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - args := strings.Join(depl.Spec.Template.Spec.Containers[0].Args, " ") - for _, a := range desiredArgs { - if !strings.Contains(args, a) { - return false - } - } - return depl.GetAnnotations()[annoKey] == annoValue - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - } - verifyPatches("cdi-apiserver", testJSONPatch, testJSONPatch, "-v 5", "-skip_headers") - verifyPatches("cdi-deployment", testStrategicPatch, testStrategicPatch, "-v 6", "-skip_headers") - verifyPatches("cdi-uploadproxy", testMergePatch, testMergePatch, "-v 7", "-skip_headers") - - By("Reset CustomizeComponents for CDI CR") - cdi = getCDI(f) + By("Reset CustomizeComponents for CDI CR") + cdi = getCDI(f) - cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{} - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{} + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) - if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] != "" { - return false - } + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) + if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] != "" { + return false } - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + } + return true + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - }) }) + }) - var _ = Describe("Operator cert config tests", func() { - var cdi *cdiv1.CDI - f := framework.NewFramework("operator-cert-config-test") + var _ = Describe("Operator cert config tests", func() { + var cdi *cdiv1.CDI + f := framework.NewFramework("operator-cert-config-test") - BeforeEach(func() { - cdi = getCDI(f) - }) + BeforeEach(func() { + cdi = getCDI(f) + }) - AfterEach(func() { - if cdi == nil { - return - } + AfterEach(func() { + if cdi == nil { + return + } - cr, err := f.CdiClient.CdiV1beta1().CDIs().Get(context.TODO(), cdi.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + cr, err := f.CdiClient.CdiV1beta1().CDIs().Get(context.TODO(), cdi.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - cr.Spec.CertConfig = cdi.Spec.CertConfig + cr.Spec.CertConfig = cdi.Spec.CertConfig - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - }) + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, 
metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + }) - getSecrets := func(secrets []string) []corev1.Secret { - var result []corev1.Secret - for _, s := range secrets { - s, err := f.K8sClient.CoreV1().Secrets(f.CdiInstallNs).Get(context.TODO(), s, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - result = append(result, *s) - } - return result + getSecrets := func(secrets []string) []corev1.Secret { + var result []corev1.Secret + for _, s := range secrets { + s, err := f.K8sClient.CoreV1().Secrets(f.CdiInstallNs).Get(context.TODO(), s, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + result = append(result, *s) } + return result + } + + validateCertConfig := func(obj metav1.Object, lifetime, refresh string) { + fmt.Fprintf(GinkgoWriter, "validateCertConfig") + cca, ok := obj.GetAnnotations()["operator.cdi.kubevirt.io/certConfig"] + Expect(ok).To(BeTrue()) + certConfig := make(map[string]interface{}) + err := json.Unmarshal([]byte(cca), &certConfig) + Expect(err).ToNot(HaveOccurred()) + l, ok := certConfig["lifetime"] + Expect(ok).To(BeTrue()) + Expect(l.(string)).To(Equal(lifetime)) + r, ok := certConfig["refresh"] + Expect(ok).To(BeTrue()) + Expect(r.(string)).To(Equal(refresh)) + } - validateCertConfig := func(obj metav1.Object, lifetime, refresh string) { - fmt.Fprintf(GinkgoWriter, "validateCertConfig") - cca, ok := obj.GetAnnotations()["operator.cdi.kubevirt.io/certConfig"] - Expect(ok).To(BeTrue()) - certConfig := make(map[string]interface{}) - err := json.Unmarshal([]byte(cca), &certConfig) + It("should allow update", func() { + caSecretNames := []string{"cdi-apiserver-signer", "cdi-uploadproxy-signer", "cdi-uploadserver-client-signer"} + serverSecretNames := []string{"cdi-apiserver-server-cert", "cdi-uploadproxy-server-cert"} + clientSecretNames := []string{"cdi-uploadserver-client-cert"} + + ts := time.Now() + // Time comparison here is in seconds, so make sure there is an interval + time.Sleep(2 * time.Second) + + Eventually(func() bool { + cr := getCDI(f) + cr.Spec.CertConfig = &cdiv1.CDICertConfig{ + CA: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 20}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 5}, + }, + Server: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 5}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 2}, + }, + Client: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 2}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 1}, + }, + } + newCR, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + if errors.IsConflict(err) { + return false + } Expect(err).ToNot(HaveOccurred()) - l, ok := certConfig["lifetime"] - Expect(ok).To(BeTrue()) - Expect(l.(string)).To(Equal(lifetime)) - r, ok := certConfig["refresh"] - Expect(ok).To(BeTrue()) - Expect(r.(string)).To(Equal(refresh)) - } + Expect(newCR.Spec.CertConfig).To(Equal(cr.Spec.CertConfig)) + By("Cert config update complete") + return true + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - It("should allow update", func() { - caSecretNames := []string{"cdi-apiserver-signer", "cdi-uploadproxy-signer", "cdi-uploadserver-client-signer"} - serverSecretNames := []string{"cdi-apiserver-server-cert", "cdi-uploadproxy-server-cert"} - clientSecretNames := []string{"cdi-uploadserver-client-cert"} + Eventually(func() bool { + caSecrets := getSecrets(caSecretNames) + serverSecrets := getSecrets(serverSecretNames) + clientSecrets := getSecrets(clientSecretNames) - 
ts := time.Now() - // Time comparison here is in seconds, so make sure there is an interval - time.Sleep(2 * time.Second) + for _, s := range append(caSecrets, append(serverSecrets, clientSecrets...)...) { + fmt.Fprintf(GinkgoWriter, "Comparing not-before to time.Now() for all\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + if ts.After(t) { + fmt.Fprintf(GinkgoWriter, "%s is after\n", s.Name) + return false + } + } - Eventually(func() bool { - cr := getCDI(f) - cr.Spec.CertConfig = &cdiv1.CDICertConfig{ - CA: &cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 20}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 5}, - }, - Server: &cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 5}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 2}, - }, - Client: &cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 2}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 1}, - }, + for _, s := range caSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for caSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*20 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s)\n", nba, naa) + return false } - newCR, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - if errors.IsConflict(err) { + if t2.Sub(t)-(time.Minute*20) > time.Second { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) return false } - Expect(err).ToNot(HaveOccurred()) - Expect(newCR.Spec.CertConfig).To(Equal(cr.Spec.CertConfig)) - By("Cert config update complete") - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + // 20m - 5m = 15m + validateCertConfig(&s, "20m0s", "15m0s") + } - Eventually(func() bool { - caSecrets := getSecrets(caSecretNames) - serverSecrets := getSecrets(serverSecretNames) - clientSecrets := getSecrets(clientSecretNames) - - for _, s := range append(caSecrets, append(serverSecrets, clientSecrets...)...) 
{ - fmt.Fprintf(GinkgoWriter, "Comparing not-before to time.Now() for all\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - if ts.After(t) { - fmt.Fprintf(GinkgoWriter, "%s is after\n", s.Name) - return false - } + for _, s := range serverSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for serverSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*5 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s)\n", nba, naa) + return false } - - for _, s := range caSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for caSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) - Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*20 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s)\n", nba, naa) - return false - } - if t2.Sub(t)-(time.Minute*20) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) - return false - } - // 20m - 5m = 15m - validateCertConfig(&s, "20m0s", "15m0s") + if t2.Sub(t)-(time.Minute*5) > time.Second { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + return false } + // 5m - 2m = 3m + validateCertConfig(&s, "5m0s", "3m0s") + } - for _, s := range serverSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for serverSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) - Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*5 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s)\n", nba, naa) - return false - } - if t2.Sub(t)-(time.Minute*5) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) - return false - } - // 5m - 2m = 3m - validateCertConfig(&s, "5m0s", "3m0s") + for _, s := range clientSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for clientSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*2 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s)\n", nba, naa) + return false } - - for _, s := range clientSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for clientSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - 
Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) - Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*2 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s)\n", nba, naa) - return false - } - if t2.Sub(t)-(time.Minute*2) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) - return false - } - // 2m - 1m = 1m - validateCertConfig(&s, "2m0s", "1m0s") + if t2.Sub(t)-(time.Minute*2) > time.Second { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + return false } + // 2m - 1m = 1m + validateCertConfig(&s, "2m0s", "1m0s") + } - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + return true + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) }) + }) - var _ = Describe("Priority class tests", func() { - var ( - cdi *cdiv1.CDI - cdiPods *corev1.PodList - systemClusterCritical = cdiv1.CDIPriorityClass("system-cluster-critical") - osUserCrit = &schedulev1.PriorityClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcesutils.CDIPriorityClass, - }, - Value: 10000, - } - ) - f := framework.NewFramework("operator-priority-class-test") - verifyPodPriorityClass := func(prefix, priorityClassName, labelSelector string) { - Eventually(func() string { - controllerPod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, prefix, labelSelector) - if err != nil { - return "" - } - return controllerPod.Spec.PriorityClassName - }, 2*time.Minute, 1*time.Second).Should(BeEquivalentTo(priorityClassName)) + var _ = Describe("Priority class tests", func() { + var ( + cdi *cdiv1.CDI + cdiPods *corev1.PodList + systemClusterCritical = cdiv1.CDIPriorityClass("system-cluster-critical") + osUserCrit = &schedulev1.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcesutils.CDIPriorityClass, + }, + Value: 10000, } - - BeforeEach(func() { - cdiPods = getCDIPods(f) - cdi = getCDI(f) - if cdi.Spec.PriorityClass != nil { - By(fmt.Sprintf("Current priority class is: [%s]", *cdi.Spec.PriorityClass)) + ) + f := framework.NewFramework("operator-priority-class-test") + verifyPodPriorityClass := func(prefix, priorityClassName, labelSelector string) { + Eventually(func() string { + controllerPod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, prefix, labelSelector) + if err != nil { + return "" } - }) + return controllerPod.Spec.PriorityClassName + }, 2*time.Minute, 1*time.Second).Should(BeEquivalentTo(priorityClassName)) + } - AfterEach(func() { - if cdi == nil { - return - } + BeforeEach(func() { + cdiPods = getCDIPods(f) + cdi = getCDI(f) + if cdi.Spec.PriorityClass != nil { + By(fmt.Sprintf("Current priority class is: [%s]", *cdi.Spec.PriorityClass)) + } + }) - cr := getCDI(f) - cr.Spec.PriorityClass = cdi.Spec.PriorityClass - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + AfterEach(func() { + if cdi == nil { + return + } - if !utils.IsOpenshift(f.K8sClient) { - Eventually(func() bool { - return errors.IsNotFound(f.K8sClient.SchedulingV1().PriorityClasses().Delete(context.TODO(), osUserCrit.Name, metav1.DeleteOptions{})) - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - } - By("Ensuring the CDI priority class is restored") - prioClass := "" - if cr.Spec.PriorityClass != nil { - prioClass = 
string(*cr.Spec.PriorityClass) - } else if utils.IsOpenshift(f.K8sClient) { - prioClass = osUserCrit.Name - } - // Deployment - verifyPodPriorityClass(cdiDeploymentPodPrefix, prioClass, common.CDILabelSelector) - // API server - verifyPodPriorityClass(cdiAPIServerPodPrefix, prioClass, common.CDILabelSelector) - // Upload server - verifyPodPriorityClass(cdiUploadProxyPodPrefix, prioClass, common.CDILabelSelector) - By("Verifying there is just a single cdi controller pod") - Eventually(func() error { - _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) - return err - }, 2*time.Minute, 1*time.Second).Should(BeNil()) + cr := getCDI(f) + cr.Spec.PriorityClass = cdi.Spec.PriorityClass + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - pod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) - Expect(err).ToNot(HaveOccurred()) + if !utils.IsOpenshift(f.K8sClient) { + Eventually(func() bool { + return errors.IsNotFound(f.K8sClient.SchedulingV1().PriorityClasses().Delete(context.TODO(), osUserCrit.Name, metav1.DeleteOptions{})) + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + } + By("Ensuring the CDI priority class is restored") + prioClass := "" + if cr.Spec.PriorityClass != nil { + prioClass = string(*cr.Spec.PriorityClass) + } else if utils.IsOpenshift(f.K8sClient) { + prioClass = osUserCrit.Name + } + // Deployment + verifyPodPriorityClass(cdiDeploymentPodPrefix, prioClass, common.CDILabelSelector) + // API server + verifyPodPriorityClass(cdiAPIServerPodPrefix, prioClass, common.CDILabelSelector) + // Upload server + verifyPodPriorityClass(cdiUploadProxyPodPrefix, prioClass, common.CDILabelSelector) + By("Verifying there is just a single cdi controller pod") + Eventually(func() error { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) + return err + }, 2*time.Minute, 1*time.Second).Should(BeNil()) + + pod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) + Expect(err).ToNot(HaveOccurred()) - By("Ensuring this pod is the leader") - Eventually(func() (string, error) { - out, err := f.K8sClient.CoreV1(). - Pods(f.CdiInstallNs). - GetLogs(pod.Name, &corev1.PodLogOptions{SinceTime: &metav1.Time{Time: CurrentSpecReport().StartTime}}). - DoRaw(context.Background()) - return string(out), err - }, 2*time.Minute, time.Second).Should(ContainSubstring("successfully acquired lease")) + By("Ensuring this pod is the leader") + Eventually(func() (string, error) { + out, err := f.K8sClient.CoreV1(). + Pods(f.CdiInstallNs). + GetLogs(pod.Name, &corev1.PodLogOptions{SinceTime: &metav1.Time{Time: CurrentSpecReport().StartTime}}). 
+ DoRaw(context.Background()) + return string(out), err + }, 2*time.Minute, time.Second).Should(ContainSubstring("successfully acquired lease")) - waitCDI(f, cr, cdiPods) - }) + waitCDI(f, cr, cdiPods) + }) - It("should use kubernetes priority class if set", func() { - cr := getCDI(f) - By("Setting the priority class to system cluster critical, which is known to exist") - cr.Spec.PriorityClass = &systemClusterCritical - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verifying the CDI deployment is updated") - verifyPodPriorityClass(cdiDeploymentPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - By("Verifying the CDI api server is updated") - verifyPodPriorityClass(cdiAPIServerPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - By("Verifying the CDI upload proxy server is updated") - verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - }) + It("should use kubernetes priority class if set", func() { + cr := getCDI(f) + By("Setting the priority class to system cluster critical, which is known to exist") + cr.Spec.PriorityClass = &systemClusterCritical + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verifying the CDI deployment is updated") + verifyPodPriorityClass(cdiDeploymentPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + By("Verifying the CDI api server is updated") + verifyPodPriorityClass(cdiAPIServerPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + By("Verifying the CDI upload proxy server is updated") + verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + }) - It("should use openshift priority class if not set and available", func() { - if utils.IsOpenshift(f.K8sClient) { - Skip("This test is not needed in OpenShift") - } - getCDI(f) - _, err := f.K8sClient.SchedulingV1().PriorityClasses().Create(context.TODO(), osUserCrit, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verifying the CDI control plane is updated") - // Deployment - verifyPodPriorityClass(cdiDeploymentPodPrefix, osUserCrit.Name, common.CDILabelSelector) - // API server - verifyPodPriorityClass(cdiAPIServerPodPrefix, osUserCrit.Name, common.CDILabelSelector) - // Upload server - verifyPodPriorityClass(cdiUploadProxyPodPrefix, osUserCrit.Name, common.CDILabelSelector) - }) + It("should use openshift priority class if not set and available", func() { + if utils.IsOpenshift(f.K8sClient) { + Skip("This test is not needed in OpenShift") + } + getCDI(f) + _, err := f.K8sClient.SchedulingV1().PriorityClasses().Create(context.TODO(), osUserCrit, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verifying the CDI control plane is updated") + // Deployment + verifyPodPriorityClass(cdiDeploymentPodPrefix, osUserCrit.Name, common.CDILabelSelector) + // API server + verifyPodPriorityClass(cdiAPIServerPodPrefix, osUserCrit.Name, common.CDILabelSelector) + // Upload server + verifyPodPriorityClass(cdiUploadProxyPodPrefix, osUserCrit.Name, common.CDILabelSelector) }) }) + }) func getCDIPods(f *framework.Framework) *corev1.PodList { From 8906f01b989807f61679b60216715aabbf4f00be Mon Sep 17 00:00:00 2001 From: Thomas-David Griedel Date: Wed, 5 Mar 2025 17:45:15 +0100 Subject: [PATCH 2/7] replace skip and focus in scripts For this to also work in the test 
jobs, the proper variable needs to be utilized Signed-off-by: Thomas-David Griedel griedel911@gmail.com --- automation/ceph-wffc.sh | 1 - automation/ceph.sh | 1 - automation/destructive.sh | 2 +- automation/istio.sh | 1 - automation/latest-hpp.sh | 1 - automation/nfs.sh | 1 - automation/non-csi-hpp.sh | 1 - automation/previous-hpp.sh | 1 - automation/test.sh | 5 ++++- automation/upgrade.sh | 1 - 10 files changed, 5 insertions(+), 10 deletions(-) diff --git a/automation/ceph-wffc.sh b/automation/ceph-wffc.sh index f08cb0259a..4bb647a47b 100755 --- a/automation/ceph-wffc.sh +++ b/automation/ceph-wffc.sh @@ -23,7 +23,6 @@ export TARGET=k8s-1.30 export RANDOM_CR=true export KUBEVIRT_STORAGE=rook-ceph-default export CEPH_WFFC=true -export CDI_E2E_SKIP=Destructive export SNAPSHOT_SC=rook-ceph-block-wffc export BLOCK_SC=rook-ceph-block-wffc export CSICLONE_SC=rook-ceph-block-wffc diff --git a/automation/ceph.sh b/automation/ceph.sh index 996911c400..8d8dabe2c2 100755 --- a/automation/ceph.sh +++ b/automation/ceph.sh @@ -22,5 +22,4 @@ export TARGET=k8s-1.30 #ensure no hard coded cdi cr in tests. export RANDOM_CR=true export KUBEVIRT_STORAGE=rook-ceph-default -export CDI_E2E_SKIP=Destructive automation/test.sh diff --git a/automation/destructive.sh b/automation/destructive.sh index 4ff7e47a43..b5d653fc8e 100755 --- a/automation/destructive.sh +++ b/automation/destructive.sh @@ -21,5 +21,5 @@ set -ex export TARGET=k8s-1.31 export KUBEVIRT_STORAGE=hpp export KUBEVIRT_DEPLOY_PROMETHEUS=true -export CDI_E2E_FOCUS=Destructive +export CDI_LABEL_FILTER=Destructive automation/test.sh diff --git a/automation/istio.sh b/automation/istio.sh index 8ead2d9577..d1dad9a0e8 100755 --- a/automation/istio.sh +++ b/automation/istio.sh @@ -20,6 +20,5 @@ set -ex export TARGET=k8s-1.31 export KUBEVIRT_STORAGE=hpp -export CDI_E2E_SKIP=Destructive export KUBEVIRT_DEPLOY_ISTIO=true automation/test.sh diff --git a/automation/latest-hpp.sh b/automation/latest-hpp.sh index 2a9801dcb8..6f77cdf824 100755 --- a/automation/latest-hpp.sh +++ b/automation/latest-hpp.sh @@ -20,5 +20,4 @@ set -ex export TARGET=k8s-1.31 export KUBEVIRT_STORAGE=hpp -export CDI_E2E_SKIP=Destructive automation/test.sh diff --git a/automation/nfs.sh b/automation/nfs.sh index 8bce0db62f..607bdd17eb 100755 --- a/automation/nfs.sh +++ b/automation/nfs.sh @@ -21,5 +21,4 @@ set -ex export TARGET=k8s-1.31 export KUBEVIRT_DEPLOY_NFS_CSI=true export KUBEVIRT_STORAGE=nfs -export CDI_E2E_SKIP=Destructive automation/test.sh diff --git a/automation/non-csi-hpp.sh b/automation/non-csi-hpp.sh index d8ee0326c7..97228c8f99 100755 --- a/automation/non-csi-hpp.sh +++ b/automation/non-csi-hpp.sh @@ -21,5 +21,4 @@ set -ex export TARGET=k8s-1.31 export KUBEVIRT_STORAGE=hpp export HPP_CLASSIC=true -export CDI_E2E_SKIP=Destructive automation/test.sh diff --git a/automation/previous-hpp.sh b/automation/previous-hpp.sh index 5bbe1612b1..efa38aa5e2 100755 --- a/automation/previous-hpp.sh +++ b/automation/previous-hpp.sh @@ -21,5 +21,4 @@ set -ex export TARGET=k8s-1.30 export KUBEVIRT_STORAGE=hpp export KUBEVIRT_DEPLOY_PROMETHEUS=true -export CDI_E2E_SKIP=Destructive automation/test.sh diff --git a/automation/test.sh b/automation/test.sh index dff9c32d85..e4b30fe3af 100755 --- a/automation/test.sh +++ b/automation/test.sh @@ -76,6 +76,9 @@ kubectl() { cluster-up/kubectl.sh "$@"; } export CDI_NAMESPACE="${CDI_NAMESPACE:-cdi}" +# Skip destructive tests by default +export CDI_LABEL_FILTER="${CDI_LABEL_FILTER:-'!Destructive'}" + make cluster-down # Create .bazelrc to use remote cache 
cat >ci.bazelrc < Date: Fri, 5 Sep 2025 18:25:38 +0200 Subject: [PATCH 3/7] Reintroduce Destructive Description Label Signed-off-by: Thomas-David Griedel griedel911@gmail.com --- tests/cdiconfig_test.go | 2 +- tests/import_proxy_test.go | 2 +- tests/monitoring_test.go | 2 +- tests/operator_test.go | 10 +++++----- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/cdiconfig_test.go b/tests/cdiconfig_test.go index 7d7d88e26d..b031bcfc62 100644 --- a/tests/cdiconfig_test.go +++ b/tests/cdiconfig_test.go @@ -528,7 +528,7 @@ var _ = Describe("CDI route config tests", Serial, func() { }) }) -var _ = Describe("CDIConfig instance management", Label("Destructive"), Serial, func() { +var _ = Describe("[Destructive] CDIConfig instance management", Label("Destructive"), Serial, func() { f := framework.NewFramework("cdiconfig-test") It("[test_id:4952]Should re-create the object if deleted", func() { diff --git a/tests/import_proxy_test.go b/tests/import_proxy_test.go index 29cc5f2c10..d42eecd7e0 100644 --- a/tests/import_proxy_test.go +++ b/tests/import_proxy_test.go @@ -160,7 +160,7 @@ var _ = Describe("Import Proxy tests", func() { }, time.Second*60, time.Second).Should(BeTrue()) } - Context("", Label("Destructive"), Serial, func() { + Context("[Destructive]", Label("Destructive"), Serial, func() { DescribeTable("should", func(args importProxyTestArguments) { now := time.Now() diff --git a/tests/monitoring_test.go b/tests/monitoring_test.go index ac59949d64..ed93a910f2 100644 --- a/tests/monitoring_test.go +++ b/tests/monitoring_test.go @@ -39,7 +39,7 @@ const ( metricConsistentPollingTimeout = 2 * time.Minute ) -var _ = Describe("Monitoring Tests", Label("Destructive"), Serial, func() { +var _ = Describe("[Destructive] Monitoring Tests", Label("Destructive"), Serial, func() { f := framework.NewFramework("monitoring-test") var ( diff --git a/tests/operator_test.go b/tests/operator_test.go index 002d879ae3..d478579ed0 100644 --- a/tests/operator_test.go +++ b/tests/operator_test.go @@ -41,7 +41,7 @@ import ( "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk" ) -var _ = Describe("ALL Operator tests", Label("Destructive"), Serial, func() { +var _ = Describe("[Destructive] ALL Operator tests", Label("Destructive"), Serial, func() { var _ = Describe("Operator tests", func() { f := framework.NewFramework("operator-test") @@ -401,10 +401,10 @@ var _ = Describe("ALL Operator tests", Label("Destructive"), Serial, func() { ensureCDI(f, cr, cdiPods) } - By("Cannot delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("there are still 1 DataVolumes present")) + By("Cannot delete CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("there are still 1 DataVolumes present")) It("[test_id:4986]should remove/install CDI a number of times successfully", func() { for i := 0; i < 5; i++ { From 8e357795bcfdfa5ca6930092fcd6945c4b508162 Mon Sep 17 00:00:00 2001 From: Thomas-David Griedel Date: Fri, 5 Sep 2025 18:30:36 +0200 Subject: [PATCH 4/7] Remove nfs flag again Signed-off-by: Thomas-David Griedel griedel911@gmail.com --- automation/nfs.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/automation/nfs.sh b/automation/nfs.sh index 7ab9365e74..2eda0b3d0f 100755 --- a/automation/nfs.sh +++ 
b/automation/nfs.sh @@ -22,5 +22,4 @@ export TARGET=k8s-1.33 export KUBEVIRT_DEPLOY_NFS_CSI=true export KUBEVIRT_STORAGE=nfs export KUBEVIRT_NFS_DIR=${KUBEVIRT_NFS_DIR:-/var/lib/containers/nfs-data} -export CDI_E2E_SKIP=Destructive automation/test.sh From f3387bd571baa7a68c28d47bcd25b6ee8ab3deb0 Mon Sep 17 00:00:00 2001 From: Thomas-David Griedel Date: Mon, 8 Sep 2025 00:46:03 +0200 Subject: [PATCH 5/7] Refactor operator tests Signed-off-by: Thomas-David Griedel griedel911@gmail.com --- tests/operator_test.go | 1977 ++++++++++++++++++++-------------------- 1 file changed, 989 insertions(+), 988 deletions(-) diff --git a/tests/operator_test.go b/tests/operator_test.go index d478579ed0..4901d281f8 100644 --- a/tests/operator_test.go +++ b/tests/operator_test.go @@ -41,1203 +41,1204 @@ import ( "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk" ) -var _ = Describe("[Destructive] ALL Operator tests", Label("Destructive"), Serial, func() { - var _ = Describe("Operator tests", func() { - f := framework.NewFramework("operator-test") +var _ = Describe("ALL Operator tests", func() { + Context("[Destructive]", Serial, func() { + var _ = Describe("Operator tests", func() { + f := framework.NewFramework("operator-test") + + Context("Adding versions to datavolume CRD", func() { + deploymentName := "cdi-operator" + var originalReplicaVal int32 + + AfterEach(func() { + By(fmt.Sprintf("Setting %s replica number back to the original value %d", deploymentName, originalReplicaVal)) + scaleDeployment(f, deploymentName, originalReplicaVal) + Eventually(func() int32 { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + return depl.Status.ReadyReplicas + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(originalReplicaVal)) + }) + It("[test_id:9696]Alpha version of CDI CRD is removed even if it was briefly a storage version", func() { + By("Scaling down CDI operator") + originalReplicaVal = scaleDeployment(f, deploymentName, 0) + Eventually(func(g Gomega) { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + _, _ = fmt.Fprintf(GinkgoWriter, "couldn't scale down CDI operator deployment; %v\n", err) + g.Expect(errors.IsNotFound(err)).Should(BeTrue()) + }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(Succeed()) + + By("Appending v1alpha1 version as stored version") + cdiCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + oldVer := cdiCrd.Spec.Versions[0].DeepCopy() + oldVer.Name = "v1alpha1" + cdiCrd.Spec.Versions[0].Storage = false + oldVer.Storage = true + cdiCrd.Spec.Versions = append(cdiCrd.Spec.Versions, *oldVer) - Context("Adding versions to datavolume CRD", func() { - deploymentName := "cdi-operator" - var originalReplicaVal int32 + _, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - AfterEach(func() { - By(fmt.Sprintf("Setting %s replica number back to the original value %d", deploymentName, originalReplicaVal)) - scaleDeployment(f, deploymentName, originalReplicaVal) - Eventually(func() int32 { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + By("Restoring CRD with newer version as storage") + cdiCrd, 
err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + // This is done because due to the way CRDs are applied, + // the scenario where alpha is the "storage: true" isn't + // possible - so the code doesn't handle it. + for i, ver := range cdiCrd.Spec.Versions { + if ver.Name == "v1alpha1" { + cdiCrd.Spec.Versions[i].Storage = false + } else { + cdiCrd.Spec.Versions[i].Storage = true + } + } + cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) - return depl.Status.ReadyReplicas - }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Equal(originalReplicaVal)) - }) - It("[test_id:9696]Alpha version of CDI CRD is removed even if it was briefly a storage version", func() { - By("Scaling down CDI operator") - originalReplicaVal = scaleDeployment(f, deploymentName, 0) - Eventually(func(g Gomega) { - _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) - _, _ = fmt.Fprintf(GinkgoWriter, "couldn't scale down CDI operator deployment; %v\n", err) - g.Expect(errors.IsNotFound(err)).Should(BeTrue()) - }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(Succeed()) - - By("Appending v1alpha1 version as stored version") - cdiCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - oldVer := cdiCrd.Spec.Versions[0].DeepCopy() - oldVer.Name = "v1alpha1" - cdiCrd.Spec.Versions[0].Storage = false - oldVer.Storage = true - cdiCrd.Spec.Versions = append(cdiCrd.Spec.Versions, *oldVer) - _, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Scaling up CDI operator") + scaleDeployment(f, deploymentName, originalReplicaVal) + By("Eventually, CDI will restore v1beta1 to be the only stored version") + Eventually(func(g Gomega) { + cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) + g.Expect(err).ToNot(HaveOccurred()) + for _, ver := range cdiCrd.Spec.Versions { + g.Expect(ver.Name).Should(Equal("v1beta1")) + g.Expect(ver.Storage).Should(BeTrue()) + } + }, 1*time.Minute, 2*time.Second).Should(Succeed()) + }) + + It("[test_id:9704]Alpha versions of datavolume CRD are removed, previously existing objects remain and are unmodified", func() { + fillData := "123456789012345678901234567890123456789012345678901234567890" + fillDataFSMD5sum := "fabc176de7eb1b6ca90b3aa4c7e035f3" + testFile := utils.DefaultPvcMountPath + "/source.txt" + fillCommand := "echo \"" + fillData + "\" >> " + testFile + + By("Creating datavolume without GC and custom changes") + dv := utils.NewDataVolumeWithHTTPImport("alpha-tests-dv", "500Mi", fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs)) + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.Succeeded, dv.Name) + Expect(err).ToNot(HaveOccurred()) - By("Restoring CRD with newer version as storage") - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", 
metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - // This is done because due to the way CRDs are applied, - // the scenario where alpha is the "storage: true" isn't - // possible - so the code doesn't handle it. - for i, ver := range cdiCrd.Spec.Versions { - if ver.Name == "v1alpha1" { - cdiCrd.Spec.Versions[i].Storage = false - } else { - cdiCrd.Spec.Versions[i].Storage = true - } - } - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), cdiCrd, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + f.PopulatePVC(pvc, "modify-dv-contents", fillCommand) - By("Scaling up CDI operator") - scaleDeployment(f, deploymentName, originalReplicaVal) - By("Eventually, CDI will restore v1beta1 to be the only stored version") - Eventually(func(g Gomega) { - cdiCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "cdis.cdi.kubevirt.io", metav1.GetOptions{}) - g.Expect(err).ToNot(HaveOccurred()) - for _, ver := range cdiCrd.Spec.Versions { - g.Expect(ver.Name).Should(Equal("v1beta1")) - g.Expect(ver.Storage).Should(BeTrue()) - } - }, 1*time.Minute, 2*time.Second).Should(Succeed()) + By("Scaling down CDI operator") + originalReplicaVal = scaleDeployment(f, deploymentName, 0) + Eventually(func() bool { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + return errors.IsNotFound(err) + }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(BeTrue()) + + By("Appending v1alpha1 version as stored version") + dvCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + oldVer := dvCrd.Spec.Versions[0].DeepCopy() + oldVer.Name = "v1alpha1" + dvCrd.Spec.Versions[0].Storage = false + oldVer.Storage = true + dvCrd.Spec.Versions = append(dvCrd.Spec.Versions, *oldVer) + + dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), dvCrd, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(dvCrd.Status.StoredVersions).Should(ContainElement("v1alpha1")) + + By("Making sure we can get datavolume in v1alpha1 version") + Eventually(func() error { + u := &unstructured.Unstructured{} + gvk := schema.GroupVersionKind{ + Group: "cdi.kubevirt.io", + Version: "v1alpha1", + Kind: "DataVolume", + } + u.SetGroupVersionKind(gvk) + nn := crclient.ObjectKey{Namespace: dv.Namespace, Name: dv.Name} + err = f.CrClient.Get(context.TODO(), nn, u) + return err + }, 1*time.Minute, 2*time.Second).Should(BeNil()) + + By("Scaling up CDI operator") + scaleDeployment(f, deploymentName, originalReplicaVal) + By("Eventually, CDI will restore v1beta1 to be the only stored version") + Eventually(func() bool { + dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + for _, ver := range dvCrd.Spec.Versions { + if !(ver.Name == "v1beta1" && ver.Storage == true) { + return false + } + } + return true + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + + By("Datavolume is still there") + _, err = f.CdiClient.CdiV1beta1().DataVolumes(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + 
By("Verify no import - the PVC still includes our custom changes") + md5Match, err := f.VerifyTargetPVCContentMD5(f.Namespace, pvc, testFile, fillDataFSMD5sum) + Expect(err).ToNot(HaveOccurred()) + Expect(md5Match).To(BeTrue()) + }) }) - It("[test_id:9704]Alpha versions of datavolume CRD are removed, previously existing objects remain and are unmodified", func() { - fillData := "123456789012345678901234567890123456789012345678901234567890" - fillDataFSMD5sum := "fabc176de7eb1b6ca90b3aa4c7e035f3" - testFile := utils.DefaultPvcMountPath + "/source.txt" - fillCommand := "echo \"" + fillData + "\" >> " + testFile + It("[test_id:3951]should create a route in OpenShift", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } - By("Creating datavolume without GC and custom changes") - dv := utils.NewDataVolumeWithHTTPImport("alpha-tests-dv", "500Mi", fmt.Sprintf(utils.TinyCoreIsoURL, f.CdiInstallNs)) - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - err = utils.WaitForDataVolumePhase(f, dv.Namespace, cdiv1.Succeeded, dv.Name) + routeClient, err := routeclient.NewForConfig(f.RestConfig) Expect(err).ToNot(HaveOccurred()) - pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + r, err := routeClient.RouteV1().Routes(f.CdiInstallNs).Get(context.TODO(), "cdi-uploadproxy", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - f.PopulatePVC(pvc, "modify-dv-contents", fillCommand) - By("Scaling down CDI operator") - originalReplicaVal = scaleDeployment(f, deploymentName, 0) - Eventually(func() bool { - _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) - return errors.IsNotFound(err) - }).WithTimeout(time.Second * 60).WithPolling(time.Second * 5).Should(BeTrue()) + Expect(r.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationReencrypt)) + }) - By("Appending v1alpha1 version as stored version") - dvCrd, err := f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) + It("[test_id:4351]should create a prometheus service in cdi namespace", func() { + promService, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - oldVer := dvCrd.Spec.Versions[0].DeepCopy() - oldVer.Name = "v1alpha1" - dvCrd.Spec.Versions[0].Storage = false - oldVer.Storage = true - dvCrd.Spec.Versions = append(dvCrd.Spec.Versions, *oldVer) + Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) + Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) + originalTimeStamp := promService.ObjectMeta.CreationTimestamp - dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), dvCrd, metav1.UpdateOptions{}) + By("Deleting the service") + err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Delete(context.TODO(), common.PrometheusServiceName, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) - Expect(dvCrd.Status.StoredVersions).Should(ContainElement("v1alpha1")) - - By("Making sure we can get datavolume in v1alpha1 version") - Eventually(func() error { - u := &unstructured.Unstructured{} - gvk := schema.GroupVersionKind{ - Group: "cdi.kubevirt.io", - Version: "v1alpha1", - Kind: "DataVolume", - 
} - u.SetGroupVersionKind(gvk) - nn := crclient.ObjectKey{Namespace: dv.Namespace, Name: dv.Name} - err = f.CrClient.Get(context.TODO(), nn, u) - return err - }, 1*time.Minute, 2*time.Second).Should(BeNil()) - - By("Scaling up CDI operator") - scaleDeployment(f, deploymentName, originalReplicaVal) - By("Eventually, CDI will restore v1beta1 to be the only stored version") + By("Verifying the operator has re-created the service") Eventually(func() bool { - dvCrd, err = f.ExtClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), "datavolumes.cdi.kubevirt.io", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - for _, ver := range dvCrd.Spec.Versions { - if !(ver.Name == "v1beta1" && ver.Storage == true) { - return false - } + promService, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) + if err == nil { + return originalTimeStamp.Before(&promService.ObjectMeta.CreationTimestamp) } - return true + return false }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) + Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) + }) - By("Datavolume is still there") - _, err = f.CdiClient.CdiV1beta1().DataVolumes(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verify no import - the PVC still includes our custom changes") - md5Match, err := f.VerifyTargetPVCContentMD5(f.Namespace, pvc, testFile, fillDataFSMD5sum) + It("[test_id:3952]add cdi-sa to containerized-data-importer scc", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } + + secClient, err := secclient.NewForConfig(f.RestConfig) Expect(err).ToNot(HaveOccurred()) - Expect(md5Match).To(BeTrue()) - }) - }) - It("[test_id:3951]should create a route in OpenShift", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } + scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - routeClient, err := routeclient.NewForConfig(f.RestConfig) - Expect(err).ToNot(HaveOccurred()) + cdiSA := fmt.Sprintf("system:serviceaccount:%s:cdi-sa", f.CdiInstallNs) + Expect(scc.Users).Should(ContainElement(cdiSA)) + }) - r, err := routeClient.RouteV1().Routes(f.CdiInstallNs).Get(context.TODO(), "cdi-uploadproxy", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + // Condition flags can be found here with their meaning https://github.com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions.md + It("[test_id:3953]Condition flags on CR should be healthy and operating", func() { + cdiObject := getCDI(f) + conditionMap := sdk.GetConditionValues(cdiObject.Status.Conditions) + // Application should be fully operational and healthy. 
+ Expect(conditionMap[conditions.ConditionAvailable]).To(Equal(corev1.ConditionTrue)) + Expect(conditionMap[conditions.ConditionProgressing]).To(Equal(corev1.ConditionFalse)) + Expect(conditionMap[conditions.ConditionDegraded]).To(Equal(corev1.ConditionFalse)) + }) - Expect(r.Spec.TLS.Termination).To(Equal(routev1.TLSTerminationReencrypt)) + It("should make CDI config authority", func() { + Eventually(func() bool { + cdiObject := getCDI(f) + _, ok := cdiObject.Annotations["cdi.kubevirt.io/configAuthority"] + return ok + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + }) }) - It("[test_id:4351]should create a prometheus service in cdi namespace", func() { - promService, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) - Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) - originalTimeStamp := promService.ObjectMeta.CreationTimestamp + var _ = Describe("Tests needing the restore of nodes", func() { + var nodes *corev1.NodeList + var cdiPods *corev1.PodList + var err error - By("Deleting the service") - err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Delete(context.TODO(), common.PrometheusServiceName, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verifying the operator has re-created the service") - Eventually(func() bool { - promService, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), common.PrometheusServiceName, metav1.GetOptions{}) - if err == nil { - return originalTimeStamp.Before(&promService.ObjectMeta.CreationTimestamp) - } - return false - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - Expect(promService.Spec.Ports[0].Name).To(Equal("metrics")) - Expect(promService.Spec.Selector[common.PrometheusLabelKey]).To(Equal(common.PrometheusLabelValue)) - }) + f := framework.NewFramework("operator-delete-cdi-test") - It("[test_id:3952]add cdi-sa to containerized-data-importer scc", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } + BeforeEach(func() { + nodes, err = f.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node") + Expect(err).ToNot(HaveOccurred()) - secClient, err := secclient.NewForConfig(f.RestConfig) - Expect(err).ToNot(HaveOccurred()) + cdiPods = getCDIPods(f) + }) - scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + AfterEach(func() { + var errors []error + var newCdiPods *corev1.PodList + By("Restoring nodes") + for _, node := range nodes.Items { + newNode, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - cdiSA := fmt.Sprintf("system:serviceaccount:%s:cdi-sa", f.CdiInstallNs) - Expect(scc.Users).Should(ContainElement(cdiSA)) - }) + newNode.Spec = node.Spec + _, err = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}) + if err != nil { + errors = append(errors, err) + } + } + Expect(errors).Should(BeEmpty(), "failed restoring one or more nodes") - // Condition flags can be found here with their meaning https://github.com/kubevirt/hyperconverged-cluster-operator/blob/main/docs/conditions.md - It("[test_id:3953]Condition flags on CR should 
be healthy and operating", func() { - cdiObject := getCDI(f) - conditionMap := sdk.GetConditionValues(cdiObject.Status.Conditions) - // Application should be fully operational and healthy. - Expect(conditionMap[conditions.ConditionAvailable]).To(Equal(corev1.ConditionTrue)) - Expect(conditionMap[conditions.ConditionProgressing]).To(Equal(corev1.ConditionFalse)) - Expect(conditionMap[conditions.ConditionDegraded]).To(Equal(corev1.ConditionFalse)) - }) + By("Waiting for there to be as many CDI pods as before") + Eventually(func() bool { + newCdiPods = getCDIPods(f) + By(fmt.Sprintf("number of cdi pods: %d\n new number of cdi pods: %d\n", len(cdiPods.Items), len(newCdiPods.Items))) + return len(cdiPods.Items) == len(newCdiPods.Items) + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - It("should make CDI config authority", func() { - Eventually(func() bool { - cdiObject := getCDI(f) - _, ok := cdiObject.Annotations["cdi.kubevirt.io/configAuthority"] - return ok - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - }) - }) + for _, newCdiPod := range newCdiPods.Items { + By(fmt.Sprintf("Waiting for CDI pod %s to be ready", newCdiPod.Name)) + err := utils.WaitTimeoutForPodReady(f.K8sClient, newCdiPod.Name, newCdiPod.Namespace, 20*time.Minute) + Expect(err).ToNot(HaveOccurred()) + } + + Eventually(func() bool { + services, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed getting CDI services") + for _, service := range services.Items { + if service.Name != "cdi-prometheus-metrics" { + endpoint, err := f.K8sClient.CoreV1().Endpoints(f.CdiInstallNs).Get(context.TODO(), service.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed getting service endpoint") + for _, subset := range endpoint.Subsets { + if len(subset.NotReadyAddresses) > 0 { + By(fmt.Sprintf("Not all endpoints of service %s are ready", service.Name)) + return false + } + } + } + } + return true + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + }) - var _ = Describe("Tests needing the restore of nodes", func() { - var nodes *corev1.NodeList - var cdiPods *corev1.PodList - var err error + It("should deploy components that tolerate CriticalAddonsOnly taint", func() { + cr := getCDI(f) + criticalAddonsToleration := corev1.Toleration{ + Key: "CriticalAddonsOnly", + Operator: corev1.TolerationOpExists, + } - f := framework.NewFramework("operator-delete-cdi-test") + if !tolerationExists(cr.Spec.Infra.NodePlacement.Tolerations, criticalAddonsToleration) { + Skip("Unexpected CDI CR (not from cdi-cr.yaml), doesn't tolerate CriticalAddonsOnly") + } - BeforeEach(func() { - nodes, err = f.K8sClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - Expect(nodes.Items).ToNot(BeEmpty(), "There should be some compute node") - Expect(err).ToNot(HaveOccurred()) + labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"cdi.kubevirt.io/testing": ""}} + cdiTestPods, err := f.K8sClient.CoreV1().Pods(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels.Set(labelSelector.MatchLabels).String(), + }) + Expect(err).ToNot(HaveOccurred(), "failed listing cdi testing pods") + Expect(cdiTestPods.Items).ToNot(BeEmpty(), "no cdi testing pods found") + + By("adding taints to all nodes") + criticalPodTaint := corev1.Taint{ + Key: "CriticalAddonsOnly", + Value: "", + Effect: corev1.TaintEffectNoExecute, + } - cdiPods = getCDIPods(f) - }) + for _, node := range nodes.Items { + Eventually(func() bool { + 
nodeCopy, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - AfterEach(func() { - var errors []error - var newCdiPods *corev1.PodList - By("Restoring nodes") - for _, node := range nodes.Items { - newNode, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + if nodeHasTaint(*nodeCopy, criticalPodTaint) { + return true + } - newNode.Spec = node.Spec - _, err = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), newNode, metav1.UpdateOptions{}) - if err != nil { - errors = append(errors, err) + nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, criticalPodTaint) + _, _ = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), nodeCopy, metav1.UpdateOptions{}) + return false + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) } - } - Expect(errors).Should(BeEmpty(), "failed restoring one or more nodes") - - By("Waiting for there to be as many CDI pods as before") - Eventually(func() bool { - newCdiPods = getCDIPods(f) - By(fmt.Sprintf("number of cdi pods: %d\n new number of cdi pods: %d\n", len(cdiPods.Items), len(newCdiPods.Items))) - return len(cdiPods.Items) == len(newCdiPods.Items) - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - - for _, newCdiPod := range newCdiPods.Items { - By(fmt.Sprintf("Waiting for CDI pod %s to be ready", newCdiPod.Name)) - err := utils.WaitTimeoutForPodReady(f.K8sClient, newCdiPod.Name, newCdiPod.Namespace, 20*time.Minute) - Expect(err).ToNot(HaveOccurred()) - } - Eventually(func() bool { - services, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed getting CDI services") - for _, service := range services.Items { - if service.Name != "cdi-prometheus-metrics" { - endpoint, err := f.K8sClient.CoreV1().Endpoints(f.CdiInstallNs).Get(context.TODO(), service.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed getting service endpoint") - for _, subset := range endpoint.Subsets { - if len(subset.NotReadyAddresses) > 0 { - By(fmt.Sprintf("Not all endpoints of service %s are ready", service.Name)) - return false - } + By("Waiting for all CDI testing pods to terminate") + Eventually(func() bool { + for _, cdiTestPod := range cdiTestPods.Items { + By(fmt.Sprintf("CDI test pod: %s", cdiTestPod.Name)) + _, err := f.K8sClient.CoreV1().Pods(cdiTestPod.Namespace).Get(context.TODO(), cdiTestPod.Name, metav1.GetOptions{}) + if !errors.IsNotFound(err) { + return false } } + return true + }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + + By("Checking that all the non-testing pods are running") + for _, cdiPod := range cdiPods.Items { + if _, isTestingComponent := cdiPod.Labels["cdi.kubevirt.io/testing"]; isTestingComponent { + continue + } + By(fmt.Sprintf("Non-test CDI pod: %s", cdiPod.Name)) + podUpdated, err := f.K8sClient.CoreV1().Pods(cdiPod.Namespace).Get(context.TODO(), cdiPod.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "failed setting taint on node") + Expect(podUpdated.Status.Phase).To(Equal(corev1.PodRunning)) } - return true - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + }) + }) - It("should deploy components that tolerate CriticalAddonsOnly taint", func() { - cr := getCDI(f) - criticalAddonsToleration := corev1.Toleration{ - Key: "CriticalAddonsOnly", - Operator: corev1.TolerationOpExists, + var _ = Describe("Operator delete CDI CR tests", func() { + var cr *cdiv1.CDI + f := 
framework.NewFramework("operator-delete-cdi-test") + var cdiPods *corev1.PodList + + BeforeEach(func() { + cr = getCDI(f) + cdiPods = getCDIPods(f) + }) + + removeCDI := func() { + removeCDI(f, cr) } - if !tolerationExists(cr.Spec.Infra.NodePlacement.Tolerations, criticalAddonsToleration) { - Skip("Unexpected CDI CR (not from cdi-cr.yaml), doesn't tolerate CriticalAddonsOnly") + ensureCDI := func() { + ensureCDI(f, cr, cdiPods) } - labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"cdi.kubevirt.io/testing": ""}} - cdiTestPods, err := f.K8sClient.CoreV1().Pods(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{ - LabelSelector: labels.Set(labelSelector.MatchLabels).String(), + AfterEach(func() { + removeCDI() + ensureCDI() }) - Expect(err).ToNot(HaveOccurred(), "failed listing cdi testing pods") - Expect(cdiTestPods.Items).ToNot(BeEmpty(), "no cdi testing pods found") - - By("adding taints to all nodes") - criticalPodTaint := corev1.Taint{ - Key: "CriticalAddonsOnly", - Value: "", - Effect: corev1.TaintEffectNoExecute, - } - for _, node := range nodes.Items { - Eventually(func() bool { - nodeCopy, err := f.K8sClient.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) + It("[test_id:4986]should remove/install CDI a number of times successfully", func() { + for i := 0; i < 5; i++ { + err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) + ensureCDI() + } + }) - if nodeHasTaint(*nodeCopy, criticalPodTaint) { - return true - } + It("[test_id:3954]should delete an upload pod", func() { + dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") - nodeCopy.Spec.Taints = append(nodeCopy.Spec.Taints, criticalPodTaint) - _, _ = f.K8sClient.CoreV1().Nodes().Update(context.TODO(), nodeCopy, metav1.UpdateOptions{}) - return false - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - } + By("Creating datavolume") + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - By("Waiting for all CDI testing pods to terminate") - Eventually(func() bool { - for _, cdiTestPod := range cdiTestPods.Items { - By(fmt.Sprintf("CDI test pod: %s", cdiTestPod.Name)) - _, err := f.K8sClient.CoreV1().Pods(cdiTestPod.Namespace).Get(context.TODO(), cdiTestPod.Name, metav1.GetOptions{}) - if !errors.IsNotFound(err) { + pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + uploadPodName := utils.UploadPodName(pvc) + + By("Waiting for pod to be running") + Eventually(func() bool { + pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), uploadPodName, metav1.GetOptions{}) + if errors.IsNotFound(err) { return false } - } - return true - }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + Expect(err).ToNot(HaveOccurred()) + return pod.Status.Phase == corev1.PodRunning + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - By("Checking that all the non-testing pods are running") - for _, cdiPod := range cdiPods.Items { - if _, isTestingComponent := cdiPod.Labels["cdi.kubevirt.io/testing"]; isTestingComponent { - continue + if us := cr.Spec.UninstallStrategy; us != nil && *us == cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist { + err = utils.DeleteDataVolume(f.CdiClient, dv.Namespace, dv.Name) + Expect(err).ToNot(HaveOccurred()) } - By(fmt.Sprintf("Non-test CDI pod: %s", 
cdiPod.Name)) - podUpdated, err := f.K8sClient.CoreV1().Pods(cdiPod.Namespace).Get(context.TODO(), cdiPod.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "failed setting taint on node") - Expect(podUpdated.Status.Phase).To(Equal(corev1.PodRunning)) - } - }) - - }) - var _ = Describe("Operator delete CDI CR tests", func() { - var cr *cdiv1.CDI - f := framework.NewFramework("operator-delete-cdi-test") - var cdiPods *corev1.PodList - - BeforeEach(func() { - cr = getCDI(f) - cdiPods = getCDIPods(f) - }) + By("Deleting CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - removeCDI := func() { - removeCDI(f, cr) - } + By("Waiting for pod to be deleted") + Eventually(func() bool { + _, err = f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), "cdi-upload-"+dv.Name, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return true + } + Expect(err).ToNot(HaveOccurred()) + return false + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - ensureCDI := func() { - ensureCDI(f, cr, cdiPods) - } + It("[test_id:3955]should block CDI delete", func() { + uninstallStrategy := cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist + updateUninstallStrategy(f, &uninstallStrategy) - By("Cannot delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("there are still 1 DataVolumes present")) + By("Creating datavolume") + dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") + dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) + Expect(err).ToNot(HaveOccurred()) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - It("[test_id:4986]should remove/install CDI a number of times successfully", func() { - for i := 0; i < 5; i++ { - err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) + By("Creating datavolume with DataImportCron label") + dv = utils.NewDataVolumeForUpload("retain-me", "1Gi") + dv.Labels = map[string]string{common.DataImportCronLabel: "dic"} + dv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) Expect(err).ToNot(HaveOccurred()) - ensureCDI() - } - }) + f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) - It("[test_id:3954]should delete an upload pod", func() { - dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") + By("Cannot delete CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("there are still 1 DataVolumes present")) - By("Creating datavolume") - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + By("Delete the unlabeled datavolume") + err = f.CdiClient.CdiV1beta1().DataVolumes(f.Namespace.Name).Delete(context.TODO(), "delete-me", metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - pvc, err := f.K8sClient.CoreV1().PersistentVolumeClaims(dv.Namespace).Get(context.TODO(), dv.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - uploadPodName := utils.UploadPodName(pvc) + By("Can delete CDI") + err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) + Expect(err).ToNot(HaveOccurred()) + }) - 
By("Waiting for pod to be running") - Eventually(func() bool { - pod, err := f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), uploadPodName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false - } + It("[test_id:8087]CDI CR deletion should delete DataImportCron CRD and all DataImportCrons", func() { + reg, err := getDataVolumeSourceRegistry(f) Expect(err).ToNot(HaveOccurred()) - return pod.Status.Phase == corev1.PodRunning - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - if us := cr.Spec.UninstallStrategy; us != nil && *us == cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist { - err = utils.DeleteDataVolume(f.CdiClient, dv.Namespace, dv.Name) + By("Create new DataImportCron") + cron := utils.NewDataImportCron("cron-test", "5Gi", scheduleEveryMinute, "ds", 1, *reg) + cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) - } - By("Deleting CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Verify cron first import completed") + Eventually(func() bool { + cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Get(context.TODO(), cron.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + upToDateCond := controller.FindDataImportCronConditionByType(cron, cdiv1.DataImportCronUpToDate) + return upToDateCond != nil && upToDateCond.Status == corev1.ConditionTrue + }, dataImportCronTimeout, pollingInterval).Should(BeTrue()) - By("Waiting for pod to be deleted") - Eventually(func() bool { - _, err = f.K8sClient.CoreV1().Pods(dv.Namespace).Get(context.TODO(), "cdi-upload-"+dv.Name, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true - } - Expect(err).ToNot(HaveOccurred()) - return false - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) + pvc := cron.Status.LastImportedPVC + Expect(pvc).ToNot(BeNil()) - It("[test_id:3955]should block CDI delete", func() { - uninstallStrategy := cdiv1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist - updateUninstallStrategy(f, &uninstallStrategy) + By("Verify dv succeeded") + err = utils.WaitForDataVolumePhase(f, pvc.Namespace, cdiv1.Succeeded, pvc.Name) + Expect(err).ToNot(HaveOccurred()) - By("Creating datavolume") - dv := utils.NewDataVolumeForUpload("delete-me", "1Gi") - dv, err := utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + By("Start goroutine creating DataImportCrons") + go func() { + defer GinkgoRecover() + var err error + for i := 0; i < 100 && err == nil; i++ { + cronName := fmt.Sprintf("cron-test-%d", i) + cron := utils.NewDataImportCron(cronName, "5Gi", scheduleEveryMinute, "ds", 1, *reg) + _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) + } + }() - By("Creating datavolume with DataImportCron label") - dv = utils.NewDataVolumeForUpload("retain-me", "1Gi") - dv.Labels = map[string]string{common.DataImportCronLabel: "dic"} - dv, err = utils.CreateDataVolumeFromDefinition(f.CdiClient, f.Namespace.Name, dv) - Expect(err).ToNot(HaveOccurred()) - f.ForceBindPvcIfDvIsWaitForFirstConsumer(dv) + removeCDI() - By("Cannot delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).To(HaveOccurred()) - 
Expect(err.Error()).To(ContainSubstring("there are still DataVolumes present")) + By("Verify no DataImportCrons are found") + Eventually(func() bool { + _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) + return err != nil && errors.IsNotFound(err) + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - By("Delete the unlabeled datavolume") - err = f.CdiClient.CdiV1beta1().DataVolumes(f.Namespace.Name).Delete(context.TODO(), "delete-me", metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Verify no cronjobs left") + Eventually(func() bool { + cronjobs, err := f.K8sClient.BatchV1().CronJobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + return len(cronjobs.Items) == 0 + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - By("Can delete CDI") - err = f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), cr.Name, metav1.DeleteOptions{DryRun: []string{"All"}}) - Expect(err).ToNot(HaveOccurred()) + By("Verify no jobs left") + Eventually(func() bool { + jobs, err := f.K8sClient.BatchV1().Jobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + return len(jobs.Items) == 0 + }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + }) }) - It("[test_id:8087]CDI CR deletion should delete DataImportCron CRD and all DataImportCrons", func() { - reg, err := getDataVolumeSourceRegistry(f) - Expect(err).ToNot(HaveOccurred()) - - By("Create new DataImportCron") - cron := utils.NewDataImportCron("cron-test", "5Gi", scheduleEveryMinute, "ds", 1, *reg) - cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) + var _ = Describe("[rfe_id:4784][crit:high] CDI Operator deployment + CDI CR delete tests", func() { + var restoreCdiCr *cdiv1.CDI + var restoreCdiOperatorDeployment *appsv1.Deployment + f := framework.NewFramework("operator-delete-cdi-test") - By("Verify cron first import completed") - Eventually(func() bool { - cron, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Get(context.TODO(), cron.Name, metav1.GetOptions{}) + removeCDI := func() { + By("Deleting CDI CR") + err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), restoreCdiCr.Name, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) - upToDateCond := controller.FindDataImportCronConditionByType(cron, cdiv1.DataImportCronUpToDate) - return upToDateCond != nil && upToDateCond.Status == corev1.ConditionTrue - }, dataImportCronTimeout, pollingInterval).Should(BeTrue()) - pvc := cron.Status.LastImportedPVC - Expect(pvc).ToNot(BeNil()) + By("Waiting for CDI CR and infra deployments to be deleted after CDI CR was removed") + Eventually(func() bool { return infraDeploymentGone(f) && crGone(f, restoreCdiCr) }, 15*time.Minute, 2*time.Second).Should(BeTrue()) - By("Verify dv succeeded") - err = utils.WaitForDataVolumePhase(f, pvc.Namespace, cdiv1.Succeeded, pvc.Name) - Expect(err).ToNot(HaveOccurred()) - - By("Start goroutine creating DataImportCrons") - go func() { - defer GinkgoRecover() - var err error - for i := 0; i < 100 && err == nil; i++ { - cronName := fmt.Sprintf("cron-test-%d", i) - cron := utils.NewDataImportCron(cronName, "5Gi", scheduleEveryMinute, "ds", 1, *reg) - _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).Create(context.TODO(), cron, metav1.CreateOptions{}) - } - }() - - removeCDI() + By("Deleting CDI operator") + err = 
f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Delete(context.TODO(), "cdi-operator", metav1.DeleteOptions{}) + Expect(err).ToNot(HaveOccurred()) - By("Verify no DataImportCrons are found") - Eventually(func() bool { - _, err = f.CdiClient.CdiV1beta1().DataImportCrons(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}) - return err != nil && errors.IsNotFound(err) - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) + By("Waiting for CDI operator deployment to be deleted") + Eventually(func() bool { return cdiOperatorDeploymentGone(f) }, 5*time.Minute, 2*time.Second).Should(BeTrue()) + } - By("Verify no cronjobs left") - Eventually(func() bool { - cronjobs, err := f.K8sClient.BatchV1().CronJobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + ensureCDI := func(cr *cdiv1.CDI) { + By("Re-creating CDI (CR and deployment)") + _, err := f.CdiClient.CdiV1beta1().CDIs().Create(context.TODO(), cr, metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) - return len(cronjobs.Items) == 0 - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - By("Verify no jobs left") - Eventually(func() bool { - jobs, err := f.K8sClient.BatchV1().Jobs(f.CdiInstallNs).List(context.TODO(), metav1.ListOptions{}) + By("Recreating CDI operator") + _, err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Create(context.TODO(), restoreCdiOperatorDeployment, metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) - return len(jobs.Items) == 0 - }, 1*time.Minute, 2*time.Second).Should(BeTrue()) - }) - }) - var _ = Describe("[rfe_id:4784][crit:high] CDI Operator deployment + CDI CR delete tests", func() { - var restoreCdiCr *cdiv1.CDI - var restoreCdiOperatorDeployment *appsv1.Deployment - f := framework.NewFramework("operator-delete-cdi-test") + By("Verifying CDI apiserver, deployment, uploadproxy exist, before continuing") + Eventually(func() bool { return infraDeploymentAvailable(f, restoreCdiCr) }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI deployments") - removeCDI := func() { - By("Deleting CDI CR") - err := f.CdiClient.CdiV1beta1().CDIs().Delete(context.TODO(), restoreCdiCr.Name, metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Verifying CDI config object exists, before continuing") + Eventually(func() bool { + _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false + } + Expect(err).ToNot(HaveOccurred(), "Unable to read CDI Config, %v, expect more failures", err) + return true + }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI Config, expect more failures") + } - By("Waiting for CDI CR and infra deployments to be deleted after CDI CR was removed") - Eventually(func() bool { return infraDeploymentGone(f) && crGone(f, restoreCdiCr) }, 15*time.Minute, 2*time.Second).Should(BeTrue()) + BeforeEach(func() { + currentCR := getCDI(f) + restoreCdiCr = &cdiv1.CDI{ + ObjectMeta: metav1.ObjectMeta{ + Name: currentCR.Name, + }, + Spec: currentCR.Spec, + } - By("Deleting CDI operator") - err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Delete(context.TODO(), "cdi-operator", metav1.DeleteOptions{}) - Expect(err).ToNot(HaveOccurred()) + currentCdiOperatorDeployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), "cdi-operator", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - By("Waiting for CDI operator deployment to be deleted") - Eventually(func() bool { return 
cdiOperatorDeploymentGone(f) }, 5*time.Minute, 2*time.Second).Should(BeTrue()) - } + restoreCdiOperatorDeployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cdi-operator", + Namespace: f.CdiInstallNs, + Labels: currentCdiOperatorDeployment.Labels, + }, + Spec: currentCdiOperatorDeployment.Spec, + } - ensureCDI := func(cr *cdiv1.CDI) { - By("Re-creating CDI (CR and deployment)") - _, err := f.CdiClient.CdiV1beta1().CDIs().Create(context.TODO(), cr, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) + removeCDI() + }) - By("Recreating CDI operator") - _, err = f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Create(context.TODO(), restoreCdiOperatorDeployment, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) + AfterEach(func() { + removeCDI() + ensureCDI(restoreCdiCr) + }) - By("Verifying CDI apiserver, deployment, uploadproxy exist, before continuing") - Eventually(func() bool { return infraDeploymentAvailable(f, restoreCdiCr) }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI deployments") + It("[test_id:4782] Should install CDI infrastructure pods with node placement", func() { + By("Creating modified CDI CR, with infra nodePlacement") + localSpec := restoreCdiCr.Spec.DeepCopy() + nodePlacement := f.TestNodePlacementValues() - By("Verifying CDI config object exists, before continuing") - Eventually(func() bool { - _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false - } - Expect(err).ToNot(HaveOccurred(), "Unable to read CDI Config, %v, expect more failures", err) - return true - }, CompletionTimeout, assertionPollInterval).Should(BeTrue(), "Timeout reading CDI Config, expect more failures") - } + localSpec.Infra.NodePlacement = nodePlacement - BeforeEach(func() { - currentCR := getCDI(f) - restoreCdiCr = &cdiv1.CDI{ - ObjectMeta: metav1.ObjectMeta{ - Name: currentCR.Name, - }, - Spec: currentCR.Spec, - } + tempCdiCr := &cdiv1.CDI{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoreCdiCr.Name, + }, + Spec: *localSpec, + } - currentCdiOperatorDeployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), "cdi-operator", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + ensureCDI(tempCdiCr) - restoreCdiOperatorDeployment = &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cdi-operator", - Namespace: f.CdiInstallNs, - Labels: currentCdiOperatorDeployment.Labels, - }, - Spec: currentCdiOperatorDeployment.Spec, - } + By("Testing all infra deployments have the chosen node placement") + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + deployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verify the deployment has nodeSelector") + Expect(deployment.Spec.Template.Spec.NodeSelector).To(Equal(framework.NodeSelectorTestValue)) - removeCDI() - }) + By("Verify the deployment has affinity") + checkAntiAffinity(deploymentName, deployment.Spec.Template.Spec.Affinity) - AfterEach(func() { - removeCDI() - ensureCDI(restoreCdiCr) + By("Verify the deployment has tolerations") + Expect(deployment.Spec.Template.Spec.Tolerations).To(ContainElement(framework.TolerationsTestValue[0])) + } + }) }) - It("[test_id:4782] Should install CDI infrastructure pods with node placement", func() { - By("Creating modified CDI CR, with infra nodePlacement") 
- localSpec := restoreCdiCr.Spec.DeepCopy() - nodePlacement := f.TestNodePlacementValues() + var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]Strict Reconciliation tests", func() { + f := framework.NewFramework("strict-reconciliation-test") - localSpec.Infra.NodePlacement = nodePlacement + It("[test_id:5573]cdi-deployment replicas back to original value on attempt to scale", func() { + By("Overwrite number of replicas with 10") + deploymentName := "cdi-deployment" + originalReplicaVal := scaleDeployment(f, deploymentName, 10) - tempCdiCr := &cdiv1.CDI{ - ObjectMeta: metav1.ObjectMeta{ - Name: restoreCdiCr.Name, - }, - Spec: *localSpec, - } + By("Ensuring original value of replicas restored & extra deployment pod was cleaned up") + Eventually(func() bool { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) + return *depl.Spec.Replicas == originalReplicaVal && err == nil + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + }) - ensureCDI(tempCdiCr) + It("[test_id:5574]Service spec.selector restored on overwrite attempt", func() { + service, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + originalSelectorVal := service.Spec.Selector[common.CDIComponentLabel] - By("Testing all infra deployments have the chosen node placement") - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - deployment, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + By("Overwrite spec.selector with empty string") + service.Spec.Selector[common.CDIComponentLabel] = "" + _, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Update(context.TODO(), service, metav1.UpdateOptions{}) Expect(err).ToNot(HaveOccurred()) - By("Verify the deployment has nodeSelector") - Expect(deployment.Spec.Template.Spec.NodeSelector).To(Equal(framework.NodeSelectorTestValue)) - By("Verify the deployment has affinity") - checkAntiAffinity(deploymentName, deployment.Spec.Template.Spec.Affinity) + Eventually(func() bool { + svc, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By(fmt.Sprintf("Waiting until original spec.selector value: %s\n Matches current: %s\n", originalSelectorVal, svc.Spec.Selector[common.CDIComponentLabel])) + return svc.Spec.Selector[common.CDIComponentLabel] == originalSelectorVal + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - By("Verify the deployment has tolerations") - Expect(deployment.Spec.Template.Spec.Tolerations).To(ContainElement(framework.TolerationsTestValue[0])) - } - }) - }) - - var _ = Describe("[vendor:cnv-qe@redhat.com][level:component]Strict Reconciliation tests", func() { - f := framework.NewFramework("strict-reconciliation-test") - - It("[test_id:5573]cdi-deployment replicas back to original value on attempt to scale", func() { - By("Overwrite number of replicas with 10") - deploymentName := "cdi-deployment" - originalReplicaVal := scaleDeployment(f, deploymentName, 10) - - By("Ensuring original value of replicas restored & extra deployment pod was cleaned up") - Eventually(func() bool { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), 
deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDILabelSelector) - return *depl.Spec.Replicas == originalReplicaVal && err == nil - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - }) - - It("[test_id:5574]Service spec.selector restored on overwrite attempt", func() { - service, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - originalSelectorVal := service.Spec.Selector[common.CDIComponentLabel] - - By("Overwrite spec.selector with empty string") - service.Spec.Selector[common.CDIComponentLabel] = "" - _, err = f.K8sClient.CoreV1().Services(f.CdiInstallNs).Update(context.TODO(), service, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - Eventually(func() bool { - svc, err := f.K8sClient.CoreV1().Services(f.CdiInstallNs).Get(context.TODO(), "cdi-api", metav1.GetOptions{}) + It("[test_id:5575]ClusterRole verb restored on deletion attempt", func() { + clusterRole, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Waiting until original spec.selector value: %s\n Matches current: %s\n", originalSelectorVal, svc.Spec.Selector[common.CDIComponentLabel])) - return svc.Spec.Selector[common.CDIComponentLabel] == originalSelectorVal - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) - - It("[test_id:5575]ClusterRole verb restored on deletion attempt", func() { - clusterRole, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Remove list verb") - clusterRole.Rules = []rbacv1.PolicyRule{ - { - APIGroups: []string{ - "cdi.kubevirt.io", - }, - Resources: []string{ - "cdiconfigs", - }, - Verbs: []string{ - "get", - // "list", - "watch", + By("Remove list verb") + clusterRole.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "cdi.kubevirt.io", + }, + Resources: []string{ + "cdiconfigs", + }, + Verbs: []string{ + "get", + // "list", + "watch", + }, }, - }, - } - - _, err = f.K8sClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - Eventually(func() bool { - role, err := f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Waiting until list verb exists") - for _, verb := range role.Rules[0].Verbs { - if verb == "list" { - return true - } } - return false - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) - - It("[test_id:5576]ServiceAccount values restored on update attempt", func() { - serviceAccount, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Change one of ServiceAccount labels") - serviceAccount.Labels[common.CDIComponentLabel] = "somebadvalue" + _, err = f.K8sClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + role, err := 
f.K8sClient.RbacV1().ClusterRoles().Get(context.TODO(), "cdi.kubevirt.io:config-reader", metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Waiting until list verb exists") + for _, verb := range role.Rules[0].Verbs { + if verb == "list" { + return true + } + } + return false + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - sa, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) + It("[test_id:5576]ServiceAccount values restored on update attempt", func() { + serviceAccount, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - By("Waiting until label value restored") - return sa.Labels[common.CDIComponentLabel] == "" - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) - It("[test_id:5577]Certificate restored to ConfigMap on deletion attempt", func() { - configMap, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Change one of ServiceAccount labels") + serviceAccount.Labels[common.CDIComponentLabel] = "somebadvalue" - By("Empty ConfigMap's data") - configMap.Data = map[string]string{} + _, err = f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Update(context.TODO(), serviceAccount, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + sa, err := f.K8sClient.CoreV1().ServiceAccounts(f.CdiInstallNs).Get(context.TODO(), common.ControllerServiceAccountName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Waiting until label value restored") + return sa.Labels[common.CDIComponentLabel] == "" + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - cm, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) + It("[test_id:5577]Certificate restored to ConfigMap on deletion attempt", func() { + configMap, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - By("Waiting until ConfigMap's data is not empty") - return len(cm.Data) != 0 - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) - It("[test_id:5578]Cant enable featureGate by editing CDIConfig resource", func() { - feature := "nonExistantFeature" - cdiConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + By("Empty ConfigMap's data") + configMap.Data = map[string]string{} - By("Enable non existent featureGate") - cdiConfig.Spec = cdiv1.CDIConfigSpec{ - FeatureGates: []string{feature}, - } + _, err = f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Update(context.TODO(), configMap, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Update(context.TODO(), cdiConfig, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + cm, err := f.K8sClient.CoreV1().ConfigMaps(f.CdiInstallNs).Get(context.TODO(), "cdi-apiserver-signer-bundle", metav1.GetOptions{}) + 
Expect(err).ToNot(HaveOccurred()) + By("Waiting until ConfigMap's data is not empty") + return len(cm.Data) != 0 + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - Eventually(func() bool { - config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) + It("[test_id:5578]Cant enable featureGate by editing CDIConfig resource", func() { + feature := "nonExistantFeature" + cdiConfig, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - By(fmt.Sprintf("Waiting until %s featureGate doesn't exist", feature)) - for _, fgate := range config.Spec.FeatureGates { - if fgate == feature { - return false - } + + By("Enable non existent featureGate") + cdiConfig.Spec = cdiv1.CDIConfigSpec{ + FeatureGates: []string{feature}, } - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - }) - It("SCC priority always reset to default", func() { - if !utils.IsOpenshift(f.K8sClient) { - Skip("This test is OpenShift specific") - } + _, err = f.CdiClient.CdiV1beta1().CDIConfigs().Update(context.TODO(), cdiConfig, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - secClient, err := secclient.NewForConfig(f.RestConfig) - Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + config, err := f.CdiClient.CdiV1beta1().CDIConfigs().Get(context.TODO(), common.ConfigName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + By(fmt.Sprintf("Waiting until %s featureGate doesn't exist", feature)) + for _, fgate := range config.Spec.FeatureGates { + if fgate == feature { + return false + } + } + return true + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) - scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + It("SCC priority always reset to default", func() { + if !utils.IsOpenshift(f.K8sClient) { + Skip("This test is OpenShift specific") + } - By("Overwrite priority of SCC") - scc.Priority = ptr.To[int32](10) - _, err = secClient.SecurityV1().SecurityContextConstraints().Update(context.TODO(), scc, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + secClient, err := secclient.NewForConfig(f.RestConfig) + Expect(err).ToNot(HaveOccurred()) - Eventually(func() *int32 { scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - return scc.Priority - }, 2*time.Minute, 1*time.Second).Should(BeNil()) - }) - It("[test_id:4785] Should update infra pod number when modify the replica in CDI CR", func() { - By("Modify the replica separately") - cdi := getCDI(f) - apiserverTmpReplica := int32(2) - deploymentTmpReplica := int32(3) - uploadproxyTmpReplica := int32(4) - - cdi.Spec.Infra.APIServerReplicas = &apiserverTmpReplica - cdi.Spec.Infra.DeploymentReplicas = &deploymentTmpReplica - cdi.Spec.Infra.UploadProxyReplicas = &uploadproxyTmpReplica - - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + By("Overwrite priority of SCC") + scc.Priority = ptr.To[int32](10) + _, err = 
secClient.SecurityV1().SecurityContextConstraints().Update(context.TODO(), scc, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() *int32 { + scc, err := secClient.SecurityV1().SecurityContextConstraints().Get(context.TODO(), "containerized-data-importer", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - if err != nil || *depl.Spec.Replicas == 1 { - return false + return scc.Priority + }, 2*time.Minute, 1*time.Second).Should(BeNil()) + }) + It("[test_id:4785] Should update infra pod number when modify the replica in CDI CR", func() { + By("Modify the replica separately") + cdi := getCDI(f) + apiserverTmpReplica := int32(2) + deploymentTmpReplica := int32(3) + uploadproxyTmpReplica := int32(4) + + cdi.Spec.Infra.APIServerReplicas = &apiserverTmpReplica + cdi.Spec.Infra.DeploymentReplicas = &deploymentTmpReplica + cdi.Spec.Infra.UploadProxyReplicas = &uploadproxyTmpReplica + + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + if err != nil || *depl.Spec.Replicas == 1 { + return false + } } - } - By("Replicas in deployments update complete") - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + By("Replicas in deployments update complete") + return true + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Verify the replica of cdi-apiserver") + By("Verify the replica of cdi-apiserver") - Eventually(func() bool { - return getPodNumByPrefix(f, "cdi-apiserver") == 2 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-apiserver") == 2 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Verify the replica of cdi-deployment") - Eventually(func() bool { - return getPodNumByPrefix(f, "cdi-deployment") == 3 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + By("Verify the replica of cdi-deployment") + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-deployment") == 3 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Verify the replica of cdi-uploadproxy") - Eventually(func() bool { - return getPodNumByPrefix(f, "cdi-uploadproxy") == 4 - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + By("Verify the replica of cdi-uploadproxy") + Eventually(func() bool { + return getPodNumByPrefix(f, "cdi-uploadproxy") == 4 + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - By("Reset replica for CDI CR") - cdi = getCDI(f) - cdi.Spec.Infra.APIServerReplicas = nil - cdi.Spec.Infra.DeploymentReplicas = nil - cdi.Spec.Infra.UploadProxyReplicas = nil + By("Reset replica for CDI CR") + cdi = getCDI(f) + cdi.Spec.Infra.APIServerReplicas = nil + cdi.Spec.Infra.DeploymentReplicas = nil + cdi.Spec.Infra.UploadProxyReplicas = nil - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - By("Replica should be 1 when replica dosen't set in CDI CR") + By("Replica should be 1 when replica dosen't set in CDI CR") - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", 
"cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) - if err != nil || *depl.Spec.Replicas != 1 { - return false - } + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) + if err != nil || *depl.Spec.Replicas != 1 { + return false + } - } - return true + } + return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - }) - It("Should update infra deployments when modify customizeComponents in CDI Cr", func() { - By("Modify the customizeComponents separately") - cdi := getCDI(f) - testJSONPatch := "test-json-patch" - testStrategicPatch := "test-strategic-patch" - testMergePatch := "test-merge-patch" - cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{ - Patches: []cdiv1.CustomizeComponentsPatch{ - { - ResourceName: "cdi-apiserver", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`[{"op":"add","path":"/metadata/annotations/%s","value":"%s"}]`, testJSONPatch, testJSONPatch), - Type: cdiv1.JSONPatchType, - }, - { - ResourceName: "cdi-deployment", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testStrategicPatch, testStrategicPatch), - Type: cdiv1.StrategicMergePatchType, + }) + It("Should update infra deployments when modify customizeComponents in CDI Cr", func() { + By("Modify the customizeComponents separately") + cdi := getCDI(f) + testJSONPatch := "test-json-patch" + testStrategicPatch := "test-strategic-patch" + testMergePatch := "test-merge-patch" + cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{ + Patches: []cdiv1.CustomizeComponentsPatch{ + { + ResourceName: "cdi-apiserver", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`[{"op":"add","path":"/metadata/annotations/%s","value":"%s"}]`, testJSONPatch, testJSONPatch), + Type: cdiv1.JSONPatchType, + }, + { + ResourceName: "cdi-deployment", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testStrategicPatch, testStrategicPatch), + Type: cdiv1.StrategicMergePatchType, + }, + { + ResourceName: "cdi-uploadproxy", + ResourceType: "Deployment", + Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testMergePatch, testMergePatch), + Type: cdiv1.MergePatchType, + }, }, - { - ResourceName: "cdi-uploadproxy", - ResourceType: "Deployment", - Patch: fmt.Sprintf(`{"metadata": {"annotations": {"%s": "%s"}}}`, testMergePatch, testMergePatch), - Type: cdiv1.MergePatchType, + Flags: &cdiv1.Flags{ + API: map[string]string{"v": "5", "skip_headers": ""}, + Controller: map[string]string{"v": "6", "skip_headers": ""}, + UploadProxy: map[string]string{"v": "7", "skip_headers": ""}, }, - }, - Flags: &cdiv1.Flags{ - API: map[string]string{"v": "5", "skip_headers": ""}, - Controller: map[string]string{"v": "6", "skip_headers": ""}, - UploadProxy: map[string]string{"v": "7", "skip_headers": ""}, - }, - } - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, 
metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - - if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] == "" { - return false - } } - By("Patches applied") - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - verifyPatches := func(deployment, annoKey, annoValue string, desiredArgs ...string) { - By(fmt.Sprintf("Verify patches of %s", deployment)) Eventually(func() bool { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deployment, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - args := strings.Join(depl.Spec.Template.Spec.Containers[0].Args, " ") - for _, a := range desiredArgs { - if !strings.Contains(args, a) { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + + if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] == "" { return false } } - return depl.GetAnnotations()[annoKey] == annoValue + By("Patches applied") + return true }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - } - verifyPatches("cdi-apiserver", testJSONPatch, testJSONPatch, "-v 5", "-skip_headers") - verifyPatches("cdi-deployment", testStrategicPatch, testStrategicPatch, "-v 6", "-skip_headers") - verifyPatches("cdi-uploadproxy", testMergePatch, testMergePatch, "-v 7", "-skip_headers") - - By("Reset CustomizeComponents for CDI CR") - cdi = getCDI(f) - - cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{} - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - Eventually(func() bool { - for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { - depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) - _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) - if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] != "" { - return false - } + verifyPatches := func(deployment, annoKey, annoValue string, desiredArgs ...string) { + By(fmt.Sprintf("Verify patches of %s", deployment)) + Eventually(func() bool { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deployment, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + args := strings.Join(depl.Spec.Template.Spec.Containers[0].Args, " ") + for _, a := range desiredArgs { + if !strings.Contains(args, a) { + return false + } + } + return depl.GetAnnotations()[annoKey] == annoValue + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) } - return true - }, 5*time.Minute, 1*time.Second).Should(BeTrue()) + verifyPatches("cdi-apiserver", testJSONPatch, testJSONPatch, "-v 5", "-skip_headers") + verifyPatches("cdi-deployment", testStrategicPatch, testStrategicPatch, "-v 6", "-skip_headers") + 
verifyPatches("cdi-uploadproxy", testMergePatch, testMergePatch, "-v 7", "-skip_headers") - }) - }) + By("Reset CustomizeComponents for CDI CR") + cdi = getCDI(f) - var _ = Describe("Operator cert config tests", func() { - var cdi *cdiv1.CDI - f := framework.NewFramework("operator-cert-config-test") + cdi.Spec.CustomizeComponents = cdiv1.CustomizeComponents{} + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cdi, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + Eventually(func() bool { + for _, deploymentName := range []string{"cdi-apiserver", "cdi-deployment", "cdi-uploadproxy"} { + depl, err := f.K8sClient.AppsV1().Deployments(f.CdiInstallNs).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) - BeforeEach(func() { - cdi = getCDI(f) - }) + _, err = utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, deploymentName, common.CDIComponentLabel+"="+deploymentName) + if err != nil || depl.GetAnnotations()[cc.AnnCdiCustomizeComponentHash] != "" { + return false + } + } + return true + }, 5*time.Minute, 1*time.Second).Should(BeTrue()) - AfterEach(func() { - if cdi == nil { - return - } + }) + }) - cr, err := f.CdiClient.CdiV1beta1().CDIs().Get(context.TODO(), cdi.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred()) + var _ = Describe("Operator cert config tests", func() { + var cdi *cdiv1.CDI + f := framework.NewFramework("operator-cert-config-test") - cr.Spec.CertConfig = cdi.Spec.CertConfig + BeforeEach(func() { + cdi = getCDI(f) + }) - _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - }) + AfterEach(func() { + if cdi == nil { + return + } - getSecrets := func(secrets []string) []corev1.Secret { - var result []corev1.Secret - for _, s := range secrets { - s, err := f.K8sClient.CoreV1().Secrets(f.CdiInstallNs).Get(context.TODO(), s, metav1.GetOptions{}) + cr, err := f.CdiClient.CdiV1beta1().CDIs().Get(context.TODO(), cdi.Name, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - result = append(result, *s) - } - return result - } - - validateCertConfig := func(obj metav1.Object, lifetime, refresh string) { - fmt.Fprintf(GinkgoWriter, "validateCertConfig") - cca, ok := obj.GetAnnotations()["operator.cdi.kubevirt.io/certConfig"] - Expect(ok).To(BeTrue()) - certConfig := make(map[string]interface{}) - err := json.Unmarshal([]byte(cca), &certConfig) - Expect(err).ToNot(HaveOccurred()) - l, ok := certConfig["lifetime"] - Expect(ok).To(BeTrue()) - Expect(l.(string)).To(Equal(lifetime)) - r, ok := certConfig["refresh"] - Expect(ok).To(BeTrue()) - Expect(r.(string)).To(Equal(refresh)) - } - It("should allow update", func() { - caSecretNames := []string{"cdi-apiserver-signer", "cdi-uploadproxy-signer", "cdi-uploadserver-client-signer"} - serverSecretNames := []string{"cdi-apiserver-server-cert", "cdi-uploadproxy-server-cert"} - clientSecretNames := []string{"cdi-uploadserver-client-cert"} + cr.Spec.CertConfig = cdi.Spec.CertConfig - ts := time.Now() - // Time comparison here is in seconds, so make sure there is an interval - time.Sleep(2 * time.Second) + _, err = f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + }) - Eventually(func() bool { - cr := getCDI(f) - cr.Spec.CertConfig = &cdiv1.CDICertConfig{ - CA: &cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 20}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 5}, - }, - Server: 
&cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 5}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 2}, - }, - Client: &cdiv1.CertConfig{ - Duration: &metav1.Duration{Duration: time.Minute * 2}, - RenewBefore: &metav1.Duration{Duration: time.Minute * 1}, - }, - } - newCR, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - if errors.IsConflict(err) { - return false + getSecrets := func(secrets []string) []corev1.Secret { + var result []corev1.Secret + for _, s := range secrets { + s, err := f.K8sClient.CoreV1().Secrets(f.CdiInstallNs).Get(context.TODO(), s, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + result = append(result, *s) } + return result + } + + validateCertConfig := func(obj metav1.Object, lifetime, refresh string) { + fmt.Fprintf(GinkgoWriter, "validateCertConfig") + cca, ok := obj.GetAnnotations()["operator.cdi.kubevirt.io/certConfig"] + Expect(ok).To(BeTrue()) + certConfig := make(map[string]interface{}) + err := json.Unmarshal([]byte(cca), &certConfig) Expect(err).ToNot(HaveOccurred()) - Expect(newCR.Spec.CertConfig).To(Equal(cr.Spec.CertConfig)) - By("Cert config update complete") - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + l, ok := certConfig["lifetime"] + Expect(ok).To(BeTrue()) + Expect(l.(string)).To(Equal(lifetime)) + r, ok := certConfig["refresh"] + Expect(ok).To(BeTrue()) + Expect(r.(string)).To(Equal(refresh)) + } - Eventually(func() bool { - caSecrets := getSecrets(caSecretNames) - serverSecrets := getSecrets(serverSecretNames) - clientSecrets := getSecrets(clientSecretNames) + It("should allow update", func() { + caSecretNames := []string{"cdi-apiserver-signer", "cdi-uploadproxy-signer", "cdi-uploadserver-client-signer"} + serverSecretNames := []string{"cdi-apiserver-server-cert", "cdi-uploadproxy-server-cert"} + clientSecretNames := []string{"cdi-uploadserver-client-cert"} - for _, s := range append(caSecrets, append(serverSecrets, clientSecrets...)...) 
{ - fmt.Fprintf(GinkgoWriter, "Comparing not-before to time.Now() for all\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - if ts.After(t) { - fmt.Fprintf(GinkgoWriter, "%s is after\n", s.Name) - return false - } - } + ts := time.Now() + // Time comparison here is in seconds, so make sure there is an interval + time.Sleep(2 * time.Second) - for _, s := range caSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for caSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) - Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*20 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s)\n", nba, naa) - return false + Eventually(func() bool { + cr := getCDI(f) + cr.Spec.CertConfig = &cdiv1.CDICertConfig{ + CA: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 20}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 5}, + }, + Server: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 5}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 2}, + }, + Client: &cdiv1.CertConfig{ + Duration: &metav1.Duration{Duration: time.Minute * 2}, + RenewBefore: &metav1.Duration{Duration: time.Minute * 1}, + }, } - if t2.Sub(t)-(time.Minute*20) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + newCR, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + if errors.IsConflict(err) { return false } - // 20m - 5m = 15m - validateCertConfig(&s, "20m0s", "15m0s") - } - - for _, s := range serverSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for serverSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*5 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s)\n", nba, naa) - return false + Expect(newCR.Spec.CertConfig).To(Equal(cr.Spec.CertConfig)) + By("Cert config update complete") + return true + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + + Eventually(func() bool { + caSecrets := getSecrets(caSecretNames) + serverSecrets := getSecrets(serverSecretNames) + clientSecrets := getSecrets(clientSecretNames) + + for _, s := range append(caSecrets, append(serverSecrets, clientSecrets...)...) 
{ + fmt.Fprintf(GinkgoWriter, "Comparing not-before to time.Now() for all\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + if ts.After(t) { + fmt.Fprintf(GinkgoWriter, "%s is after\n", s.Name) + return false + } } - if t2.Sub(t)-(time.Minute*5) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) - return false + + for _, s := range caSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for caSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*20 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s)\n", nba, naa) + return false + } + if t2.Sub(t)-(time.Minute*20) > time.Second { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 20 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + return false + } + // 20m - 5m = 15m + validateCertConfig(&s, "20m0s", "15m0s") } - // 5m - 2m = 3m - validateCertConfig(&s, "5m0s", "3m0s") - } - for _, s := range clientSecrets { - fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for clientSecrets\n") - nba := s.Annotations["auth.openshift.io/certificate-not-before"] - t, err := time.Parse(time.RFC3339, nba) - Expect(err).ToNot(HaveOccurred()) - naa := s.Annotations["auth.openshift.io/certificate-not-after"] - t2, err := time.Parse(time.RFC3339, naa) - Expect(err).ToNot(HaveOccurred()) - if t2.Sub(t) < time.Minute*2 { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s)\n", nba, naa) - return false + for _, s := range serverSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for serverSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*5 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s)\n", nba, naa) + return false + } + if t2.Sub(t)-(time.Minute*5) > time.Second { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 5 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + return false + } + // 5m - 2m = 3m + validateCertConfig(&s, "5m0s", "3m0s") } - if t2.Sub(t)-(time.Minute*2) > time.Second { - fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) - return false + + for _, s := range clientSecrets { + fmt.Fprintf(GinkgoWriter, "Comparing not-before/not-after for clientSecrets\n") + nba := s.Annotations["auth.openshift.io/certificate-not-before"] + t, err := time.Parse(time.RFC3339, nba) + Expect(err).ToNot(HaveOccurred()) + naa := s.Annotations["auth.openshift.io/certificate-not-after"] + t2, err := time.Parse(time.RFC3339, naa) + Expect(err).ToNot(HaveOccurred()) + if t2.Sub(t) < time.Minute*2 { + fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s)\n", nba, naa) + return false + } + if t2.Sub(t)-(time.Minute*2) > time.Second { + 
fmt.Fprintf(GinkgoWriter, "Not-Before (%s) should be 2 minutes before Not-After (%s) with 1 second toleration\n", nba, naa) + return false + } + // 2m - 1m = 1m + validateCertConfig(&s, "2m0s", "1m0s") } - // 2m - 1m = 1m - validateCertConfig(&s, "2m0s", "1m0s") - } - return true - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + return true + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + }) }) - }) - var _ = Describe("Priority class tests", func() { - var ( - cdi *cdiv1.CDI - cdiPods *corev1.PodList - systemClusterCritical = cdiv1.CDIPriorityClass("system-cluster-critical") - osUserCrit = &schedulev1.PriorityClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourcesutils.CDIPriorityClass, - }, - Value: 10000, + var _ = Describe("Priority class tests", func() { + var ( + cdi *cdiv1.CDI + cdiPods *corev1.PodList + systemClusterCritical = cdiv1.CDIPriorityClass("system-cluster-critical") + osUserCrit = &schedulev1.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourcesutils.CDIPriorityClass, + }, + Value: 10000, + } + ) + f := framework.NewFramework("operator-priority-class-test") + verifyPodPriorityClass := func(prefix, priorityClassName, labelSelector string) { + Eventually(func() string { + controllerPod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, prefix, labelSelector) + if err != nil { + return "" + } + return controllerPod.Spec.PriorityClassName + }, 2*time.Minute, 1*time.Second).Should(BeEquivalentTo(priorityClassName)) } - ) - f := framework.NewFramework("operator-priority-class-test") - verifyPodPriorityClass := func(prefix, priorityClassName, labelSelector string) { - Eventually(func() string { - controllerPod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, prefix, labelSelector) - if err != nil { - return "" + + BeforeEach(func() { + cdiPods = getCDIPods(f) + cdi = getCDI(f) + if cdi.Spec.PriorityClass != nil { + By(fmt.Sprintf("Current priority class is: [%s]", *cdi.Spec.PriorityClass)) } - return controllerPod.Spec.PriorityClassName - }, 2*time.Minute, 1*time.Second).Should(BeEquivalentTo(priorityClassName)) - } + }) - BeforeEach(func() { - cdiPods = getCDIPods(f) - cdi = getCDI(f) - if cdi.Spec.PriorityClass != nil { - By(fmt.Sprintf("Current priority class is: [%s]", *cdi.Spec.PriorityClass)) - } - }) + AfterEach(func() { + if cdi == nil { + return + } - AfterEach(func() { - if cdi == nil { - return - } + cr := getCDI(f) + cr.Spec.PriorityClass = cdi.Spec.PriorityClass + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) - cr := getCDI(f) - cr.Spec.PriorityClass = cdi.Spec.PriorityClass - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) + if !utils.IsOpenshift(f.K8sClient) { + Eventually(func() bool { + return errors.IsNotFound(f.K8sClient.SchedulingV1().PriorityClasses().Delete(context.TODO(), osUserCrit.Name, metav1.DeleteOptions{})) + }, 2*time.Minute, 1*time.Second).Should(BeTrue()) + } + By("Ensuring the CDI priority class is restored") + prioClass := "" + if cr.Spec.PriorityClass != nil { + prioClass = string(*cr.Spec.PriorityClass) + } else if utils.IsOpenshift(f.K8sClient) { + prioClass = osUserCrit.Name + } + // Deployment + verifyPodPriorityClass(cdiDeploymentPodPrefix, prioClass, common.CDILabelSelector) + // API server + verifyPodPriorityClass(cdiAPIServerPodPrefix, prioClass, common.CDILabelSelector) + // Upload server + 
verifyPodPriorityClass(cdiUploadProxyPodPrefix, prioClass, common.CDILabelSelector) + By("Verifying there is just a single cdi controller pod") + Eventually(func() error { + _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) + return err + }, 2*time.Minute, 1*time.Second).Should(BeNil()) - if !utils.IsOpenshift(f.K8sClient) { - Eventually(func() bool { - return errors.IsNotFound(f.K8sClient.SchedulingV1().PriorityClasses().Delete(context.TODO(), osUserCrit.Name, metav1.DeleteOptions{})) - }, 2*time.Minute, 1*time.Second).Should(BeTrue()) - } - By("Ensuring the CDI priority class is restored") - prioClass := "" - if cr.Spec.PriorityClass != nil { - prioClass = string(*cr.Spec.PriorityClass) - } else if utils.IsOpenshift(f.K8sClient) { - prioClass = osUserCrit.Name - } - // Deployment - verifyPodPriorityClass(cdiDeploymentPodPrefix, prioClass, common.CDILabelSelector) - // API server - verifyPodPriorityClass(cdiAPIServerPodPrefix, prioClass, common.CDILabelSelector) - // Upload server - verifyPodPriorityClass(cdiUploadProxyPodPrefix, prioClass, common.CDILabelSelector) - By("Verifying there is just a single cdi controller pod") - Eventually(func() error { - _, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) - return err - }, 2*time.Minute, 1*time.Second).Should(BeNil()) - - pod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) - Expect(err).ToNot(HaveOccurred()) + pod, err := utils.FindPodByPrefix(f.K8sClient, f.CdiInstallNs, cdiDeploymentPodPrefix, common.CDILabelSelector) + Expect(err).ToNot(HaveOccurred()) - By("Ensuring this pod is the leader") - Eventually(func() (string, error) { - out, err := f.K8sClient.CoreV1(). - Pods(f.CdiInstallNs). - GetLogs(pod.Name, &corev1.PodLogOptions{SinceTime: &metav1.Time{Time: CurrentSpecReport().StartTime}}). - DoRaw(context.Background()) - return string(out), err - }, 2*time.Minute, time.Second).Should(ContainSubstring("successfully acquired lease")) + By("Ensuring this pod is the leader") + Eventually(func() (string, error) { + out, err := f.K8sClient.CoreV1(). + Pods(f.CdiInstallNs). + GetLogs(pod.Name, &corev1.PodLogOptions{SinceTime: &metav1.Time{Time: CurrentSpecReport().StartTime}}). 
+ DoRaw(context.Background()) + return string(out), err + }, 2*time.Minute, time.Second).Should(ContainSubstring("successfully acquired lease")) - waitCDI(f, cr, cdiPods) - }) + waitCDI(f, cr, cdiPods) + }) - It("should use kubernetes priority class if set", func() { - cr := getCDI(f) - By("Setting the priority class to system cluster critical, which is known to exist") - cr.Spec.PriorityClass = &systemClusterCritical - _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verifying the CDI deployment is updated") - verifyPodPriorityClass(cdiDeploymentPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - By("Verifying the CDI api server is updated") - verifyPodPriorityClass(cdiAPIServerPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - By("Verifying the CDI upload proxy server is updated") - verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), common.CDILabelSelector) - }) + It("should use kubernetes priority class if set", func() { + cr := getCDI(f) + By("Setting the priority class to system cluster critical, which is known to exist") + cr.Spec.PriorityClass = &systemClusterCritical + _, err := f.CdiClient.CdiV1beta1().CDIs().Update(context.TODO(), cr, metav1.UpdateOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verifying the CDI deployment is updated") + verifyPodPriorityClass(cdiDeploymentPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + By("Verifying the CDI api server is updated") + verifyPodPriorityClass(cdiAPIServerPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + By("Verifying the CDI upload proxy server is updated") + verifyPodPriorityClass(cdiUploadProxyPodPrefix, string(systemClusterCritical), common.CDILabelSelector) + }) - It("should use openshift priority class if not set and available", func() { - if utils.IsOpenshift(f.K8sClient) { - Skip("This test is not needed in OpenShift") - } - getCDI(f) - _, err := f.K8sClient.SchedulingV1().PriorityClasses().Create(context.TODO(), osUserCrit, metav1.CreateOptions{}) - Expect(err).ToNot(HaveOccurred()) - By("Verifying the CDI control plane is updated") - // Deployment - verifyPodPriorityClass(cdiDeploymentPodPrefix, osUserCrit.Name, common.CDILabelSelector) - // API server - verifyPodPriorityClass(cdiAPIServerPodPrefix, osUserCrit.Name, common.CDILabelSelector) - // Upload server - verifyPodPriorityClass(cdiUploadProxyPodPrefix, osUserCrit.Name, common.CDILabelSelector) + It("should use openshift priority class if not set and available", func() { + if utils.IsOpenshift(f.K8sClient) { + Skip("This test is not needed in OpenShift") + } + getCDI(f) + _, err := f.K8sClient.SchedulingV1().PriorityClasses().Create(context.TODO(), osUserCrit, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + By("Verifying the CDI control plane is updated") + // Deployment + verifyPodPriorityClass(cdiDeploymentPodPrefix, osUserCrit.Name, common.CDILabelSelector) + // API server + verifyPodPriorityClass(cdiAPIServerPodPrefix, osUserCrit.Name, common.CDILabelSelector) + // Upload server + verifyPodPriorityClass(cdiUploadProxyPodPrefix, osUserCrit.Name, common.CDILabelSelector) + }) }) }) - }) func getCDIPods(f *framework.Framework) *corev1.PodList { From d47f80facea5973df039771d45c4eb5181d908da Mon Sep 17 00:00:00 2001 From: Thomas-David Griedel Date: Mon, 8 Sep 2025 00:46:30 +0200 Subject: [PATCH 6/7] Update Destructive Context to use Label filter in operator tests 
Signed-off-by: Thomas-David Griedel <griedel911@gmail.com>
---
 tests/operator_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/operator_test.go b/tests/operator_test.go
index 4901d281f8..bd656c878c 100644
--- a/tests/operator_test.go
+++ b/tests/operator_test.go
@@ -42,7 +42,7 @@ import (
 )

 var _ = Describe("ALL Operator tests", func() {
-	Context("[Destructive]", Serial, func() {
+	Context("[Destructive]", Label("Destructive"), Serial, func() {
 		var _ = Describe("Operator tests", func() {
 			f := framework.NewFramework("operator-test")

From 9b510aeb2ed9975c7f7525933baa1545407910bf Mon Sep 17 00:00:00 2001
From: Thomas-David Griedel <griedel911@gmail.com>
Date: Mon, 8 Sep 2025 02:16:07 +0200
Subject: [PATCH 7/7] fix syntax

Signed-off-by: Thomas-David Griedel <griedel911@gmail.com>
---
 automation/test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/automation/test.sh b/automation/test.sh
index c17ea88d62..ac0b42ded3 100755
--- a/automation/test.sh
+++ b/automation/test.sh
@@ -77,7 +77,7 @@ kubectl() { cluster-up/kubectl.sh "$@"; }
 export CDI_NAMESPACE="${CDI_NAMESPACE:-cdi}"
 # Skip destructive tests by default
-export CDI_LABEL_FILTER="${CDI_LABEL_FILTER:-'!Destructive'}"
+export CDI_LABEL_FILTER="${CDI_LABEL_FILTER:-!Destructive}"

 make cluster-down

 # Create .bazelrc to use remote cache
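
The quoting change in PATCH 7/7 is easy to miss but matters: when the parameter expansion sits inside double quotes, bash keeps the single quotes of the default word as literal characters, so the old default expanded to the string '!Destructive' including the quotes, and a label filter built from it would match nothing. The bang needs no extra quoting here because history expansion is inactive in non-interactive scripts. A minimal shell sketch of the behavior; the ginkgo invocation at the end is only an assumed illustration of how the variable is presumably consumed, not a line from automation/test.sh:

    #!/bin/bash
    unset CDI_LABEL_FILTER

    # Old default: the inner single quotes survive quote removal inside "${...:-...}"
    # and become part of the value.
    old="${CDI_LABEL_FILTER:-'!Destructive'}"
    echo "$old"    # prints: '!Destructive'

    # Fixed default: no inner quoting, so the value is exactly the filter expression.
    new="${CDI_LABEL_FILTER:-!Destructive}"
    echo "$new"    # prints: !Destructive

    # Assumed downstream use (illustrative only): with the old default, the stray
    # quotes would make the Ginkgo label filter match no spec.
    # ginkgo --label-filter="$new" ./tests/...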