# - kind (https://github.com/kubernetes-sigs/kind) installed
# - optional: Go already installed

+set -x
+
RELEASE_TOOLS_ROOT="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
REPO_DIR="$(pwd)"

@@ -381,6 +383,9 @@ default_csi_snapshotter_version () {
}
configvar CSI_SNAPSHOTTER_VERSION "$(default_csi_snapshotter_version)" "external-snapshotter version tag"

+# Enable installing VolumeGroupSnapshot CRDs (on by default, can be set to false in prow jobs)
+configvar CSI_PROW_ENABLE_GROUP_SNAPSHOT "true" "Enable the VolumeGroupSnapshot tests"
+
# Some tests are known to be unusable in a KinD cluster. For example,
# stopping kubelet with "ssh <node IP> systemctl stop kubelet" simply
# doesn't work. Such tests should be written in a way that they verify
@@ -794,6 +799,37 @@ install_snapshot_crds() {
    done
}

+# Installs VolumeGroupSnapshot CRDs (VolumeGroupSnapshot, VolumeGroupSnapshotContent, VolumeGroupSnapshotClass)
+install_volumegroupsnapshot_crds () {
+    local crd_base_dir="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/client/config/crd"
+
+    # If we are running inside the external-snapshotter repo, use local files instead of GitHub
+    if [[ ${REPO_DIR} == *"external-snapshotter"* ]]; then
+        crd_base_dir="${REPO_DIR}/client/config/crd"
+    fi
+
+    echo "Installing VolumeGroupSnapshot CRDs from ${crd_base_dir}"
+    kubectl apply -f "${crd_base_dir}/groupsnapshot.storage.k8s.io_volumegroupsnapshotclasses.yaml" --validate=false
+    kubectl apply -f "${crd_base_dir}/groupsnapshot.storage.k8s.io_volumegroupsnapshotcontents.yaml" --validate=false
+    kubectl apply -f "${crd_base_dir}/groupsnapshot.storage.k8s.io_volumegroupsnapshots.yaml" --validate=false
+
+    local cnt=0
+    until kubectl get volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io \
+        && kubectl get volumegroupsnapshots.groupsnapshot.storage.k8s.io \
+        && kubectl get volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io; do
+        if [ $cnt -gt 30 ]; then
+            echo >&2 "ERROR: VolumeGroupSnapshot CRDs not ready after 60s"
+            exit 1
+        fi
+        echo "$(date +%H:%M:%S)" "waiting for VolumeGroupSnapshot CRDs, attempt #$cnt"
+        cnt=$((cnt + 1))
+        sleep 2
+    done
+
+    echo "VolumeGroupSnapshot CRDs installed and ready"
+}
+
+
# Install snapshot controller and associated RBAC, retrying until the pod is running.
install_snapshot_controller () {
    CONTROLLER_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}"
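The readiness loop above polls the three new API groups with kubectl get until they respond. An equivalent check, shown here only as a sketch and not something the patch uses, is to wait for the Established condition on the CRDs:

    # sketch: wait for the group snapshot CRDs to be established (equivalent to the polling loop)
    kubectl wait --for=condition=Established --timeout=60s \
        crd/volumegroupsnapshotclasses.groupsnapshot.storage.k8s.io \
        crd/volumegroupsnapshotcontents.groupsnapshot.storage.k8s.io \
        crd/volumegroupsnapshots.groupsnapshot.storage.k8s.io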
@@ -880,8 +916,15 @@ install_snapshot_controller() {
            exit 1
        fi
    else
-        echo "kubectl apply -f $SNAPSHOT_CONTROLLER_YAML"
-        kubectl apply -f "$SNAPSHOT_CONTROLLER_YAML"
+        if [ "${CSI_PROW_ENABLE_GROUP_SNAPSHOT}" = "true" ]; then
+            echo "Deploying snapshot-controller with CSIVolumeGroupSnapshot feature gate enabled"
+            curl -s "$SNAPSHOT_CONTROLLER_YAML" | \
+                awk '/--leader-election=true/ {print; print "            - \"--feature-gates=CSIVolumeGroupSnapshot=true\""; next}1' | \
+                kubectl apply -f - || die "failed to deploy snapshot-controller with feature gate"
+        else
+            echo "kubectl apply -f $SNAPSHOT_CONTROLLER_YAML"
+            kubectl apply -f "$SNAPSHOT_CONTROLLER_YAML"
+        fi
    fi

    cnt=0
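The awk filter in the new branch re-prints every manifest line (the trailing 1) and, on the line containing --leader-election=true, emits an extra args entry that enables the CSIVolumeGroupSnapshot feature gate. Its effect can be checked in isolation; the two-line args snippet below is fabricated for the demo, not copied from the real controller manifest:

    # demo of the awk insertion on a fabricated args snippet
    printf '%s\n' '            - "--v=5"' '            - "--leader-election=true"' |
        awk '/--leader-election=true/ {print; print "            - \"--feature-gates=CSIVolumeGroupSnapshot=true\""; next}1'
    # output: both input lines, with the feature-gate entry inserted after --leader-election=true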
@@ -1028,6 +1071,7 @@ run_e2e () (
    # Rename, merge and filter JUnit files. Necessary in case that we run the E2E suite again
    # and to avoid the large number of "skipped" tests that we get from using
    # the full Kubernetes E2E testsuite while only running a few tests.
+    # shellcheck disable=SC2329
    move_junit () {
        if ls "${ARTIFACTS}"/junit_[0-9]*.xml 2>/dev/null >/dev/null; then
            mkdir -p "${ARTIFACTS}/junit/${name}" &&
@@ -1038,6 +1082,11 @@ run_e2e () (
    }
    trap move_junit EXIT

+    if ${CSI_PROW_ENABLE_GROUP_SNAPSHOT}; then
+        yq -i '.DriverInfo.Capabilities.groupSnapshot = true' "${CSI_PROW_WORK}"/test-driver.yaml
+        cat "${CSI_PROW_WORK}"/test-driver.yaml
+    fi
+
    if [ "${name}" == "local" ]; then
        cd "${GOPATH}/src/${CSI_PROW_SIDECAR_E2E_PATH}" &&
            run_with_loggers env KUBECONFIG="$KUBECONFIG" KUBE_TEST_REPO_LIST="$(if [ -e "${CSI_PROW_WORK}/e2e-repo-list" ]; then echo "${CSI_PROW_WORK}/e2e-repo-list"; fi)" ginkgo --timeout="${CSI_PROW_GINKGO_TIMEOUT}" -v "$@" "${CSI_PROW_WORK}/e2e-local.test" -- -report-dir "${ARTIFACTS}" -report-prefix local
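The yq edit marks the driver as group-snapshot capable in the generated test-driver.yaml, which is what lets the Kubernetes E2E framework run the VolumeGroupSnapshot cases instead of skipping them. A throwaway demonstration of the expression, assuming mikefarah yq v4 and a fabricated stand-in for the manifest that the script normally generates:

    # sketch: run the same yq expression against a stand-in test-driver.yaml
    printf 'DriverInfo:\n  Name: hostpath.csi.k8s.io\n  Capabilities:\n    snapshotDataSource: true\n' > /tmp/test-driver-demo.yaml
    yq -i '.DriverInfo.Capabilities.groupSnapshot = true' /tmp/test-driver-demo.yaml
    cat /tmp/test-driver-demo.yaml   # Capabilities now also lists groupSnapshot: true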
@@ -1383,6 +1432,12 @@ main () {
        install_snapshot_crds
        install_snapshot_controller

+        # TODO: Remove the condition once VolumeGroupSnapshot goes GA
+        if ${CSI_PROW_ENABLE_GROUP_SNAPSHOT}; then
+            install_volumegroupsnapshot_crds
+        fi
+
+
        # Installing the driver might be disabled.
        if ${CSI_PROW_DRIVER_INSTALL} "$images"; then
            collect_cluster_info
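A note on the guard used here and in run_e2e: the test "if ${CSI_PROW_ENABLE_GROUP_SNAPSHOT}; then" expands the variable and executes its value as a command, so the strings "true" and "false" resolve to the shell builtins of the same name. A standalone illustration of the idiom:

    # the value of the variable is executed as a command
    CSI_PROW_ENABLE_GROUP_SNAPSHOT=false
    if ${CSI_PROW_ENABLE_GROUP_SNAPSHOT}; then echo "runs only when the value is true"; fi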