diff --git a/templates/test/ci/cluster-template-prow-azl3.yaml b/templates/test/ci/cluster-template-prow-azl3.yaml new file mode 100644 index 00000000000..1aa76e2f4f7 --- /dev/null +++ b/templates/test/ci/cluster-template-prow-azl3.yaml @@ -0,0 +1,482 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure} + cni: calico + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + subnets: + - name: control-plane-subnet + role: control-plane + - name: node-subnet + role: node + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: {} + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Install ca-certificates packages for Azure Linux + tdnf install -y ca-certificates ca-certificates-legacy + update-ca-trust + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Kubernetes API Server (port 6443) - bound to all IPv6 interfaces, needs external access + iptables -A INPUT -p tcp --dport 6443 -j ACCEPT + + # etcd server communication + iptables -A INPUT -p tcp --dport 2379 -j ACCEPT + iptables -A INPUT -p tcp --dport 2380 -j ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Allow traffic to/from Calico pod network + iptables -A OUTPUT -d 192.168.0.0/24 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/24 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + owner: root:root + path: 
/tmp/azl3-setup.sh + permissions: "0744" + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh + verbosity: 10 + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT:=2} + selector: {} + template: + metadata: + labels: + nodepool: pool1 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + identity: UserAssigned + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Allow localhost traffic + iptables -A INPUT -i lo -j ACCEPT + iptables -A OUTPUT -o lo -j ACCEPT + + # Allow established and related connections + iptables -A INPUT 
-m state --state ESTABLISHED,RELATED -j ACCEPT + iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + + # SSH (port 22) + # iptables -A INPUT -p tcp --dport 22 -j ACCEPT + + # Kubelet API (port 10250) + iptables -A INPUT -p tcp --dport 10250 -j ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from Calico pod network (192.168.0.0/16) + iptables -A OUTPUT -d 192.168.0.0/16 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/16 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Calico networking requirements + # Calico Typha (port 5473) + iptables -A INPUT -p tcp --dport 5473 -j ACCEPT + + # VXLAN for overlay networking (port 4789 UDP) + iptables -A INPUT -p udp --dport 4789 -j ACCEPT + + # BGP for node-to-node communication (port 179) + iptables -A INPUT -p tcp --dport 179 -j ACCEPT + + # DNS (port 53) + iptables -A INPUT -p udp --dport 53 -j ACCEPT + iptables -A OUTPUT -p udp --dport 53 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + owner: root:root + path: /tmp/azl3-setup.sh + permissions: "0744" + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: | + installation: + cni: + type: Calico + ipam: + type: Calico + calicoNetwork: + bgp: Disabled + windowsDataplane: HNS + mtu: 1350 + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + typhaDeployment: + spec: + template: + spec: + # By default, typha tolerates all NoSchedule taints. This breaks + # scale-ins when it continuously gets scheduled onto an + # out-of-date Node that is being deleted. Tolerate only the + # NoSchedule taints that are expected.
+ tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + registry: capzcicommunity.azurecr.io + serviceCIDRs: + - 10.96.0.0/12 # must match cluster service CIDR (this is the default) + # Image and registry configuration for the tigera/operator pod + tigeraOperator: + image: tigera/operator + registry: capzcicommunity.azurecr.io + calicoctl: + image: capzcicommunity.azurecr.io/calico/ctl + # when kubernetesServiceEndpoint (required for windows) is added + # DNS configuration is needed to look up the api server name properly + # https://github.com/projectcalico/calico/issues/9536 + dnsConfig: + nameservers: + - 127.0.0.53 + options: + - name: edns0 + - name: trust-ad + kubernetesServiceEndpoint: + host: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" + port: "{{ .Cluster.spec.controlPlaneEndpoint.port }}" + # By default, tigera tolerates all NoSchedule taints. This breaks upgrades + # when it continuously gets scheduled onto an out-of-date Node that is being + # deleted. Tolerate only the NoSchedule taints that are expected. + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure-ci + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} + cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: 
"${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" diff --git a/templates/test/ci/cluster-template-prow-ci-version-azl3.yaml b/templates/test/ci/cluster-template-prow-ci-version-azl3.yaml new file mode 100644 index 00000000000..2d988188f15 --- /dev/null +++ b/templates/test/ci/cluster-template-prow-ci-version-azl3.yaml @@ -0,0 +1,1438 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure} + cni: calico + cni-windows: ${CLUSTER_NAME}-calico + containerd-logger: enabled + csi-proxy: enabled + metrics-server: enabled + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + subnets: + - name: control-plane-subnet + role: control-plane + - name: node-subnet + role: node + vnet: + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + feature-gates: ${K8S_FEATURE_GATES:-""} + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + kubernetesVersion: ci/${CI_VERSION} + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Install ca-certificates packages for Azure Linux + tdnf install -y ca-certificates ca-certificates-legacy + update-ca-trust + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Kubernetes API Server (port 6443) - bound to all IPv6 interfaces, needs external access + iptables -A INPUT -p tcp --dport 6443 -j ACCEPT + + # etcd server communication + iptables -A INPUT -p tcp --dport 2379 -j ACCEPT + iptables -A INPUT -p tcp --dport 2380 -j 
ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Allow traffic to/from Calico pod network + iptables -A OUTPUT -d 192.168.0.0/24 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/24 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + owner: root:root + path: /tmp/azl3-setup.sh + permissions: "0744" + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # Run the az login command with managed identity + if az login --identity > /dev/null 2>&1; then + echo "Logged in Azure with managed identity" + echo "Use OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" -f /var/lib/kubelet/credential-provider/acr-credential-provider --auth-mode login + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f /var/lib/kubelet/credential-provider-config.yaml --auth-mode login + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + else + echo "Using curl to download the OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + fi + owner: root:root + path: /tmp/oot-cred-provider.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. 
+ LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. + VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget --inet4-only "$$CI_URL/$$CI_PACKAGE" -nv -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + IMAGE_REGISTRY_PREFIX=registry.k8s.io + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget --inet4-only "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -nv -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + systemctl restart kubelet + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: 
[] + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh + - bash -c /tmp/oot-cred-provider.sh + - bash -c /tmp/kubeadm-bootstrap.sh + verbosity: 5 + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + additionalTags: + monitoring: virtualmachine + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT:=2} + selector: {} + template: + metadata: + labels: + nodepool: pool1 + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + additionalTags: + monitoring: virtualmachine + identity: UserAssigned + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmExtensions: + - name: CustomScript + protectedSettings: + commandToExecute: | + #!/bin/sh + echo "This script is a no-op used for extension testing purposes ..." 
+ touch test_file + publisher: Microsoft.Azure.Extensions + version: "2.1" + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Allow localhost traffic + iptables -A INPUT -i lo -j ACCEPT + iptables -A OUTPUT -o lo -j ACCEPT + + # Allow established and related connections + iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + + # SSH (port 22) + # iptables -A INPUT -p tcp --dport 22 -j ACCEPT + + # Kubelet API (port 10250) + iptables -A INPUT -p tcp --dport 10250 -j ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from Calico pod network (192.168.0.0/16) + iptables -A OUTPUT -d 192.168.0.0/16 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/16 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Calico networking requirements + # Calico Typha (port 5473) + iptables -A INPUT -p tcp --dport 5473 -j ACCEPT + + # VXLAN for overlay networking (port 4789 UDP) + iptables -A INPUT -p udp --dport 4789 -j ACCEPT + + # BGP for node-to-node communication (port 179) + iptables -A INPUT -p tcp --dport 179 -j ACCEPT + + # DNS (port 53) + iptables -A INPUT -p udp --dport 53 -j ACCEPT + iptables -A OUTPUT -p udp --dport 53 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + owner: root:root + path: /tmp/azl3-setup.sh + permissions: "0744" + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # Run the az login command with managed identity + if az login --identity > /dev/null 2>&1; then + echo "Logged in Azure with managed identity" + echo "Use OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" -f /var/lib/kubelet/credential-provider/acr-credential-provider --auth-mode login + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f /var/lib/kubelet/credential-provider-config.yaml --auth-mode login + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + else + echo "Using curl to download the OOT credential provider" + mkdir -p /var/lib/kubelet/credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider/acr-credential-provider
"https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider" + chmod 755 /var/lib/kubelet/credential-provider/acr-credential-provider + curl --retry 10 --retry-delay 5 -w "response status code is %{http_code}" -Lo /var/lib/kubelet/credential-provider-config.yaml "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" + chmod 644 /var/lib/kubelet/credential-provider-config.yaml + fi + owner: root:root + path: /tmp/oot-cred-provider.sh + permissions: "0744" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + CI_VERSION=${CI_VERSION} + if [[ "$${CI_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p $$CI_DIR + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing CI version $$CI_VERSION" + # Check for semver + if [[ "$${CI_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + VERSION_WITHOUT_PREFIX="${CI_VERSION#v}" + DEBIAN_FRONTEND=noninteractive apt-get install -y apt-transport-https curl + curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /" | tee /etc/apt/sources.list.d/kubernetes.list + apt-get update + # replace . with \. 
+ VERSION_REGEX="${VERSION_WITHOUT_PREFIX//./\\.}" + PACKAGE_VERSION="$(apt-cache madison kubelet|grep $${VERSION_REGEX}- | head -n1 | cut -d '|' -f 2 | tr -d '[:space:]')" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* installing package: $$CI_PACKAGE $${PACKAGE_VERSION}" + DEBIAN_FRONTEND=noninteractive apt-get install -y $$CI_PACKAGE=$$PACKAGE_VERSION + done + else + CI_URL="https://storage.googleapis.com/k8s-release-dev/ci/$${CI_VERSION}/bin/linux/amd64" + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + echo "* downloading binary: $$CI_URL/$$CI_PACKAGE" + wget --inet4-only "$$CI_URL/$$CI_PACKAGE" -nv -O "$$CI_DIR/$$CI_PACKAGE" + chmod +x "$$CI_DIR/$$CI_PACKAGE" + mv "$$CI_DIR/$$CI_PACKAGE" "/usr/bin/$$CI_PACKAGE" + done + IMAGE_REGISTRY_PREFIX=registry.k8s.io + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" + wget --inet4-only "$$CI_URL/$$CI_CONTAINER.$$CONTAINER_EXT" -nv -O "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" + $${SUDO} ctr -n k8s.io images import "$$CI_DIR/$$CI_CONTAINER.$$CONTAINER_EXT" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag $$IMAGE_REGISTRY_PREFIX/$$CI_CONTAINER-amd64:"$${CI_VERSION//+/_}" gcr.io/k8s-staging-ci-images/$$CI_CONTAINER:"$${CI_VERSION//+/_}" + done + fi + systemctl restart kubelet + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh + - bash -c /tmp/oot-cred-provider.sh + - bash -c /tmp/kubeadm-bootstrap.sh + verbosity: 5 +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WINDOWS_WORKER_MACHINE_COUNT:-0} + selector: {} + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-win + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-win + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + annotations: + runtime: containerd + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + metadata: + annotations: + runtime: containerd + windowsServerVersion: ${WINDOWS_SERVER_VERSION:=""} + spec: + identity: UserAssigned + image: + marketplace: + offer: capi-windows + publisher: cncf-upstream + sku: ${WINDOWS_SERVER_VERSION:=windows-2019}-containerd-gen1 + version: latest + osDisk: + diskSizeGB: 128 + managedDisk: + storageAccountType: Premium_LRS + osType: Windows + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + 
userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-win + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-win-azure-json + owner: root:root + path: c:/k/azure.json + permissions: "0644" + - content: |- + Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico.exe + Add-MpPreference -ExclusionProcess C:/opt/cni/bin/calico-ipam.exe + path: C:/defender-exclude-calico.ps1 + permissions: "0744" + - content: | + # /tmp is assumed created and required for upstream e2e tests to pass + New-Item -ItemType Directory -Force -Path C:\tmp\ + path: C:/create-temp-folder.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference = 'Stop' + + $$CONTAINERD_URL="${WINDOWS_CONTAINERD_URL}" + if($$CONTAINERD_URL -ne ""){ + # Kubelet service depends on containerd service so make a best effort attempt to stop it + Stop-Service kubelet -Force -ErrorAction SilentlyContinue + Stop-Service containerd -Force + echo "downloading containerd: $$CONTAINERD_URL" + curl.exe --retry 10 --retry-delay 5 -L "$$CONTAINERD_URL" --output "c:/k/containerd.tar.gz" + tar.exe -zxvf c:/k/containerd.tar.gz -C "c:/Program Files/containerd" --strip-components 1 + + Start-Service containerd + } + + containerd.exe --version + containerd-shim-runhcs-v1.exe --version + path: C:/replace-containerd.ps1 + permissions: "0744" + - content: | + mkdir -Force c:/localdumps + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpCount /t REG_DWORD /d 50 /f + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpType /t REG_DWORD /d 2 /f + reg.exe add "HKLM\Software\Microsoft\Windows\Windows Error Reporting\LocalDumps" /V DumpFolder /t REG_EXPAND_SZ /d "c:/LocalDumps" /f + # Enable sftp so we can copy crash dump files via sftp during log collection + $sshd_config = "$env:ProgramData\ssh\sshd_config" + if (-not (Test-Path $sshd_config)) { mkdir -Force $sshd_config } + Add-Content -Path $sshd_config "Subsystem sftp sftp-server.exe" + sc.exe stop sshd + sc.exe start sshd + path: C:/collect-hns-crashes.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference = 'Stop' + + Write-Host "Attempting to log in to Azure with managed identity" + az login --identity > $null 2>&1 + if ($LASTEXITCODE -eq 0) { + Write-Host "Logged in Azure with managed identity" + Write-Host "Use OOT credential provider" + mkdir C:\var\lib\kubelet\credential-provider + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" -f C:\var\lib\kubelet\credential-provider\acr-credential-provider --auth-mode login + cp C:\var\lib\kubelet\credential-provider\acr-credential-provider C:\var\lib\kubelet\credential-provider\acr-credential-provider.exe + az storage blob download --blob-url "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" -f C:\var\lib\kubelet\credential-provider-config.yaml --auth-mode login + } else { + Write-Host "Using curl to download the OOT credential provider" + mkdir
C:\var\lib\kubelet\credential-provider + curl.exe --retry 10 --retry-delay 5 -L "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe" --output C:\var\lib\kubelet\credential-provider\acr-credential-provider + cp C:\var\lib\kubelet\credential-provider\acr-credential-provider C:\var\lib\kubelet\credential-provider\acr-credential-provider.exe + curl.exe --retry 10 --retry-delay 5 -L "https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_BLOB_CONTAINER_NAME}/${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml" --output C:\var\lib\kubelet\credential-provider-config.yaml + } + path: C:/oot-cred-provider.ps1 + permissions: "0744" + - content: | + $ErrorActionPreference = 'Stop' + + Stop-Service kubelet -Force + + $$CI_VERSION="${CI_VERSION}" + if($$CI_VERSION -ne "") + { + $$binaries=@("kubeadm", "kubectl", "kubelet", "kube-proxy") + $$ci_url="https://storage.googleapis.com/k8s-release-dev/ci/$$CI_VERSION/bin/windows/amd64" + foreach ( $$binary in $$binaries ) + { + echo "downloading binary: $$ci_url/$$binary.exe" + curl.exe --retry 10 --retry-delay 5 "$$ci_url/$$binary.exe" --output "c:/k/$$binary.exe" + } + } + + # Tag it to the ci version. The image knows how to use the local copy with the configmap + # that is applied at this stage (windows-kubeproxy-ci.yaml) + ctr.exe -n k8s.io images pull docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess + ctr.exe -n k8s.io images tag docker.io/sigwindowstools/kube-proxy:v1.23.1-calico-hostprocess "docker.io/sigwindowstools/kube-proxy:${CI_VERSION/+/_}-calico-hostprocess" + + kubeadm.exe version -o=short + kubectl.exe version --client=true + kubelet.exe --version + kube-proxy.exe --version + path: C:/replace-ci-binaries.ps1 + permissions: "0744" + joinConfiguration: + nodeRegistration: + criSocket: npipe:////./pipe/containerd-containerd + kubeletExtraArgs: + cloud-provider: external + feature-gates: ${NODE_FEATURE_GATES:-""} + image-credential-provider-bin-dir: /var/lib/kubelet/credential-provider + image-credential-provider-config: /var/lib/kubelet/credential-provider-config.yaml + v: "2" + windows-priorityclass: ABOVE_NORMAL_PRIORITY_CLASS + name: '{{ ds.meta_data["local_hostname"] }}' + postKubeadmCommands: + - nssm set kubelet start SERVICE_AUTO_START + - powershell C:/defender-exclude-calico.ps1 + preKubeadmCommands: + - powershell C:/create-temp-folder.ps1 + - powershell C:/replace-containerd.ps1 + - powershell C:/collect-hns-crashes.ps1 + - powershell C:/oot-cred-provider.ps1 + - powershell C:/replace-ci-binaries.ps1 + users: + - groups: Administrators + name: capi + sshAuthorizedKeys: + - ${AZURE_SSH_PUBLIC_KEY:=""} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + maxUnhealthy: 100% + selector: + matchLabels: + cluster.x-k8s.io/control-plane: "" + unhealthyConditions: + - status: Unknown + timeout: 300s + type: Ready + - status: "False" + timeout: 300s + type: Ready +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: ${CLUSTER_NAME}-mhc-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + maxUnhealthy: 100% + selector: + matchLabels: + nodepool: pool1 + unhealthyConditions: + - status: "True" + timeout: 30s + type: E2ENodeUnhealthy +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: 
${CLUSTER_NAME}-calico-windows + namespace: default +spec: + clusterSelector: + matchLabels: + cni-windows: ${CLUSTER_NAME}-calico + resources: + - kind: ConfigMap + name: cni-${CLUSTER_NAME}-calico-windows + strategy: ApplyOnce +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: csi-proxy + namespace: default +spec: + clusterSelector: + matchLabels: + csi-proxy: enabled + resources: + - kind: ConfigMap + name: csi-proxy-addon + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: containerd-logger-${CLUSTER_NAME} + namespace: default +spec: + clusterSelector: + matchLabels: + containerd-logger: enabled + resources: + - kind: ConfigMap + name: containerd-logger-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: | + installation: + cni: + type: Calico + ipam: + type: Calico + calicoNetwork: + bgp: Disabled + windowsDataplane: HNS + mtu: 1350 + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + typhaDeployment: + spec: + template: + spec: + # By default, typha tolerates all NoSchedule taints. This breaks + # scale-ins when it continuously gets scheduled onto an + # out-of-date Node that is being deleted. Tolerate only the + # NoSchedule taints that are expected. + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + registry: capzcicommunity.azurecr.io + serviceCIDRs: + - 10.96.0.0/12 # must match cluster service CIDR (this is the default) + # Image and registry configuration for the tigera/operator pod + tigeraOperator: + image: tigera/operator + registry: capzcicommunity.azurecr.io + calicoctl: + image: capzcicommunity.azurecr.io/calico/ctl + # when kubernetesServiceEndpoint (required for windows) is added + # DNS configuration is needed to look up the api server name properly + # https://github.com/projectcalico/calico/issues/9536 + dnsConfig: + nameservers: + - 127.0.0.53 + options: + - name: edns0 + - name: trust-ad + kubernetesServiceEndpoint: + host: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" + port: "{{ .Cluster.spec.controlPlaneEndpoint.port }}" + # By default, tigera tolerates all NoSchedule taints. This breaks upgrades + # when it continuously gets scheduled onto an out-of-date Node that is being + # deleted. Tolerate only the NoSchedule taints that are expected. 
+ tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure-ci + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} + cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: "${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" +--- +apiVersion: v1 +data: + kube-proxy-patch: |- + apiVersion: v1 + kind: ConfigMap + metadata: + name: windows-kubeproxy-ci + namespace: kube-system + data: + KUBEPROXY_PATH: "c:/k/kube-proxy.exe" + proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: kube-proxy + name: kube-proxy-windows + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: kube-proxy-windows + template: + metadata: + labels: + k8s-app: kube-proxy-windows + spec: + serviceAccountName: kube-proxy + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + priorityClassName: system-node-critical + containers: + - image: sigwindowstools/kube-proxy:${KUBERNETES_VERSION/+/_}-calico-hostprocess + args: ["$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/start.ps1"] + workingDir: "$env:CONTAINER_SANDBOX_MOUNT_POINT/kube-proxy/" + name: kube-proxy + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: POD_IP + 
valueFrom: + fieldRef: + fieldPath: status.podIP + - name: KUBEPROXY_PATH + valueFrom: + configMapKeyRef: + name: windows-kubeproxy-ci + key: KUBEPROXY_PATH + optional: true + volumeMounts: + - mountPath: /var/lib/kube-proxy + name: kube-proxy + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: kube-proxy + name: kube-proxy + updateStrategy: + type: RollingUpdate + windows-cni: "# strictAffinity required for windows\napiVersion: crd.projectcalico.org/v1\nkind: + IPAMConfig\nmetadata:\n name: default\nspec:\n autoAllocateBlocks: true\n strictAffinity: + true\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n name: calico-static-rules\n + \ namespace: calico-system\n labels:\n tier: node\n app: calico\ndata:\n + \ static-rules.json: |\n {\n \"Provider\": \"azure\",\n \"Version\": + \"0.1\",\n \"Rules\": [\n {\n \"Name\": \"EndpointPolicy\",\n + \ \"Rule\": {\n \"Id\": \"wireserver\",\n \"Type\": + \"ACL\",\n \"Protocol\": 6,\n \"Action\": \"Block\",\n + \ \"Direction\": \"Out\",\n \"RemoteAddresses\": \"168.63.129.16/32\",\n + \ \"RemotePorts\": \"80\",\n \"Priority\": 200,\n \"RuleType\": + \"Switch\"\n }\n }\n ]\n } \n---\n" +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: cni-${CLUSTER_NAME}-calico-windows + namespace: default +--- +apiVersion: v1 +data: + csi-proxy: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: csi-proxy + name: csi-proxy + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: csi-proxy + template: + metadata: + labels: + k8s-app: csi-proxy + spec: + nodeSelector: + "kubernetes.io/os": windows + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\SYSTEM" + hostNetwork: true + containers: + - name: csi-proxy + image: ghcr.io/kubernetes-sigs/sig-windows/csi-proxy:v1.0.2 +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: csi-proxy-addon + namespace: default +--- +apiVersion: v1 +data: + containerd-windows-logger: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: containerd-logger + name: containerd-logger + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: containerd-logger + template: + metadata: + labels: + k8s-app: containerd-logger + spec: + securityContext: + windowsOptions: + hostProcess: true + runAsUserName: "NT AUTHORITY\\system" + hostNetwork: true + containers: + - image: ghcr.io/kubernetes-sigs/sig-windows/eventflow-logger:v0.1.0 + args: [ "config.json" ] + name: containerd-logger + imagePullPolicy: Always + volumeMounts: + - name: containerd-logger-config + mountPath: /config.json + subPath: config.json + nodeSelector: + kubernetes.io/os: windows + tolerations: + - key: CriticalAddonsOnly + operator: Exists + - operator: Exists + volumes: + - configMap: + name: containerd-logger-config + name: containerd-logger-config + updateStrategy: + type: RollingUpdate + --- + kind: ConfigMap + apiVersion: v1 + metadata: + name: containerd-logger-config + namespace: kube-system + data: + config.json: | + { + "inputs": [ + { + "type": "ETW", + "sessionNamePrefix": "containerd", + "cleanupOldSessions": true, + "reuseExistingSession": true, + "providers": [ + { + "providerName": "Microsoft.Virtualization.RunHCS", + "providerGuid": "0B52781F-B24D-5685-DDF6-69830ED40EC3", + "level": "Verbose" + }, + { + "providerName": "ContainerD", + "providerGuid": 
"2acb92c0-eb9b-571a-69cf-8f3410f383ad", + "level": "Verbose" + } + ] + } + ], + "filters": [ + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::LayerID && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == hcsshim::NameToGuid && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.Stats && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == containerd.task.v2.Task.State && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetProcessProperties && hasnoproperty error" + }, + { + "type": "drop", + "include": "ProviderName == Microsoft.Virtualization.RunHCS && name == HcsGetComputeSystemProperties && hasnoproperty error" + } + ], + "outputs": [ + { + "type": "StdOutput" + } + ], + "schemaVersion": "2016-08-11" + } +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: containerd-logger-${CLUSTER_NAME} + namespace: default +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: metrics-server-${CLUSTER_NAME} + namespace: default +spec: + clusterSelector: + matchLabels: + metrics-server: enabled + resources: + - kind: ConfigMap + name: metrics-server-${CLUSTER_NAME} + strategy: ApplyOnce +--- +apiVersion: v1 +data: + metrics-server: | + apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader + rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server + rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + 
name: system:metrics-server + subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system + --- + apiVersion: v1 + kind: Service + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls + image: registry.k8s.io/metrics-server/metrics-server:v0.6.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - emptyDir: {} + name: tmp-dir + --- + apiVersion: apiregistration.k8s.io/v1 + kind: APIService + metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io + spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 +kind: ConfigMap +metadata: + annotations: + note: generated + labels: + type: generated + name: metrics-server-${CLUSTER_NAME} + namespace: default diff --git a/templates/test/ci/cluster-template-prow-dalec-custom-builds.yaml b/templates/test/ci/cluster-template-prow-dalec-custom-builds.yaml new file mode 100644 index 00000000000..b0750a09d1e --- /dev/null +++ b/templates/test/ci/cluster-template-prow-dalec-custom-builds.yaml @@ -0,0 +1,696 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cloud-provider: ${CLOUD_PROVIDER_AZURE_LABEL:=azure} + cni: calico + name: ${CLUSTER_NAME} + namespace: default +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: ${CLUSTER_NAME}-control-plane + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureCluster +metadata: + name: ${CLUSTER_NAME} + namespace: default +spec: + additionalTags: + buildProvenance: ${BUILD_PROVENANCE} + creationTimestamp: ${TIMESTAMP} + jobName: ${JOB_NAME} + identityRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + 
kind: AzureClusterIdentity + name: ${CLUSTER_IDENTITY_NAME} + location: ${AZURE_LOCATION} + networkSpec: + apiServerLB: + frontendIPs: + - name: ${CLUSTER_NAME}-api-lb + publicIP: + dnsName: ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com + name: ${CLUSTER_NAME}-api-lb + - name: ${CLUSTER_NAME}-internal-lb-private-ip + privateIP: ${AZURE_INTERNAL_LB_PRIVATE_IP} + subnets: + - cidrBlocks: + - ${AZURE_CP_SUBNET_CIDR} + name: control-plane-subnet + role: control-plane + - cidrBlocks: + - ${AZURE_NODE_SUBNET_CIDR} + name: node-subnet + role: node + vnet: + cidrBlocks: + - ${AZURE_VNET_CIDR} + name: ${AZURE_VNET_NAME:=${CLUSTER_NAME}-vnet} + resourceGroup: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}} + subscriptionID: ${AZURE_SUBSCRIPTION_ID} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: {} + timeoutForControlPlane: 20m + controllerManager: + extraArgs: + allocate-node-cidrs: "false" + cloud-provider: external + cluster-name: ${CLUSTER_NAME} + v: "4" + etcd: + local: + dataDir: /var/lib/etcddisk/etcd + extraArgs: + quota-backend-bytes: "8589934592" + diskSetup: + filesystems: + - device: /dev/disk/azure/scsi1/lun0 + extraOpts: + - -E + - lazy_itable_init=1,lazy_journal_init=1 + filesystem: ext4 + label: etcd_disk + - device: ephemeral0.1 + filesystem: ext4 + label: ephemeral0 + replaceFS: ntfs + partitions: + - device: /dev/disk/azure/scsi1/lun0 + layout: true + overwrite: false + tableType: gpt + files: + - contentFrom: + secret: + key: control-plane-azure.json + name: ${CLUSTER_NAME}-control-plane-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + + # Custom image handling + if [[ -n "${KUBE_APISERVER_IMAGE:-}" ]]; then + echo "* Using custom kube-apiserver image: ${KUBE_APISERVER_IMAGE}" + # Remove existing image if present + echo "* Removing existing kube-apiserver image if present" + $${SUDO} ctr -n k8s.io images rm "registry.k8s.io/kube-apiserver:${KUBERNETES_VERSION}" 2>/dev/null || true + echo "* Pulling kube-apiserver: ${KUBE_APISERVER_IMAGE}" + $${SUDO} ctr -n k8s.io images pull "${KUBE_APISERVER_IMAGE}" + echo "* Tagging kube-apiserver: ${KUBE_APISERVER_IMAGE} as registry.k8s.io/kube-apiserver:${KUBERNETES_VERSION}" + $${SUDO} ctr -n k8s.io images tag "${KUBE_APISERVER_IMAGE}" "registry.k8s.io/kube-apiserver:${KUBERNETES_VERSION}" + else + echo "* Custom kube-apiserver image environment variable not set, using default" + fi + + if [[ -n "${KUBE_CONTROLLER_MANAGER_IMAGE:-}" ]]; then + echo "* Using custom kube-controller-manager image: ${KUBE_CONTROLLER_MANAGER_IMAGE}" + # Remove existing image if present + echo "* Removing existing kube-controller-manager image if present" + $${SUDO} ctr -n k8s.io images rm "registry.k8s.io/kube-controller-manager:${KUBERNETES_VERSION}" 2>/dev/null || true + echo "* Pulling kube-controller-manager: ${KUBE_CONTROLLER_MANAGER_IMAGE}" + $${SUDO} ctr -n k8s.io images pull "${KUBE_CONTROLLER_MANAGER_IMAGE}" + echo "* Tagging kube-controller-manager: ${KUBE_CONTROLLER_MANAGER_IMAGE} as registry.k8s.io/kube-controller-manager:${KUBERNETES_VERSION}" + $${SUDO} ctr -n k8s.io images tag 
"${KUBE_CONTROLLER_MANAGER_IMAGE}" "registry.k8s.io/kube-controller-manager:${KUBERNETES_VERSION}" + else + echo "* Custom kube-controller-manager image environment variable not set, using default" + fi + + if [[ -n "${KUBE_SCHEDULER_IMAGE:-}" ]]; then + echo "* Using custom kube-scheduler image: ${KUBE_SCHEDULER_IMAGE}" + # Remove existing image if present + echo "* Removing existing kube-scheduler image if present" + $${SUDO} ctr -n k8s.io images rm "registry.k8s.io/kube-scheduler:${KUBERNETES_VERSION}" 2>/dev/null || true + echo "* Pulling kube-scheduler: ${KUBE_SCHEDULER_IMAGE}" + $${SUDO} ctr -n k8s.io images pull "${KUBE_SCHEDULER_IMAGE}" + echo "* Tagging kube-scheduler: ${KUBE_SCHEDULER_IMAGE} as registry.k8s.io/kube-scheduler:${KUBERNETES_VERSION}" + $${SUDO} ctr -n k8s.io images tag "${KUBE_SCHEDULER_IMAGE}" "registry.k8s.io/kube-scheduler:${KUBERNETES_VERSION}" + else + echo "* Custom kube-scheduler image environment variable not set, using default" + fi + + if [[ -n "${KUBE_PROXY_IMAGE:-}" ]]; then + echo "* Using custom kube-proxy image: ${KUBE_PROXY_IMAGE}" + # Remove existing image if present + echo "* Removing existing kube-proxy image if present" + $${SUDO} ctr -n k8s.io images rm "registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" 2>/dev/null || true + echo "* Pulling kube-proxy: ${KUBE_PROXY_IMAGE}" + $${SUDO} ctr -n k8s.io images pull "${KUBE_PROXY_IMAGE}" + echo "* Tagging kube-proxy: ${KUBE_PROXY_IMAGE} as registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" + $${SUDO} ctr -n k8s.io images tag "${KUBE_PROXY_IMAGE}" "registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" + else + echo "* Custom kube-proxy image environment variable not set, using default" + fi + + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + - content: | + #!/bin/bash + set -o nounset + set -o pipefail + set -o errexit + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + # Define the base URL and version + BASE_URL="https://kubernetesreleases.blob.core.windows.net/dalec-packages" + VERSION="${KUBERNETES_VERSION}" + VERSION=$${VERSION#v} + OS_VERSION="ubuntu24.04" + ARCH="amd64" + for BINARY in "$${BINARIES[@]}"; do + if [[ "$${BINARY}" == "kubelet" ]]; then + DEB_VERSION="ubuntu24.04u1" + else + DEB_VERSION="ubuntu24.04u2" + fi + echo "* downloading and extracting binary: $${BINARY} $${VERSION} with deb version $${DEB_VERSION}" + DEB_FILE="/tmp/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + DEB_URL="$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + echo "Downloading from: $${DEB_URL}" + curl -L --retry 10 --retry-delay 5 "$${DEB_URL}" --output "$${DEB_FILE}" + # Extract just the binary from the deb package directly to /usr/bin + echo "Extracting $${BINARY} binary to /usr/bin" + dpkg-deb --fsys-tarfile "$${DEB_FILE}" | tar -xf - --strip-components=3 -C /usr/bin ./usr/bin/$${BINARY} + chmod +x "/usr/bin/$${BINARY}" + # Clean up the downloaded deb file + rm -f "$${DEB_FILE}" + done + systemctl restart kubelet + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + 
permissions: "0744" + initConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + mounts: + - - LABEL=etcd_disk + - /var/lib/etcddisk + postKubeadmCommands: [] + preKubeadmCommands: + - bash -c /tmp/kubeadm-bootstrap.sh + - bash -c /tmp/replace-k8s-binaries.sh + verbosity: 5 + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-control-plane + replicas: ${CONTROL_PLANE_MACHINE_COUNT:=1} + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +spec: + template: + spec: + dataDisks: + - diskSizeGB: 256 + lun: 0 + nameSuffix: etcddisk + identity: UserAssigned + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_CONTROL_PLANE_MACHINE_TYPE} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${WORKER_MACHINE_COUNT:=2} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + identity: UserAssigned + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + userAssignedIdentities: + - providerID: /subscriptions/${AZURE_SUBSCRIPTION_ID}/resourceGroups/${CI_RG:=capz-ci}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/${USER_IDENTITY:=cloud-provider-user-identity} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 + namespace: default +spec: + template: + spec: + files: + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + LINE_SEPARATOR="*************************************************" + echo "$$LINE_SEPARATOR" + + # Custom image handling + if [[ -n "${KUBE_PROXY_IMAGE:-}" ]]; then + echo "* Using custom kube-proxy image: ${KUBE_PROXY_IMAGE}" + # Remove existing image if present + echo "* Removing existing kube-proxy image if present" + $${SUDO} ctr -n k8s.io images rm "registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" 2>/dev/null || true + echo "* Pulling kube-proxy: ${KUBE_PROXY_IMAGE}" + $${SUDO} ctr -n k8s.io images pull "${KUBE_PROXY_IMAGE}" + echo "* Tagging kube-proxy: 
${KUBE_PROXY_IMAGE} as registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" + $${SUDO} ctr -n k8s.io images tag "${KUBE_PROXY_IMAGE}" "registry.k8s.io/kube-proxy:${KUBERNETES_VERSION}" + else + echo "* Custom kube-proxy image environment variable not set, using default" + fi + echo "* checking binary versions" + echo "ctr version: " $(ctr version) + echo "kubeadm version: " $(kubeadm version -o=short) + echo "kubectl version: " $(kubectl version --client=true) + echo "kubelet version: " $(kubelet --version) + echo "$$LINE_SEPARATOR" + owner: root:root + path: /tmp/kubeadm-bootstrap.sh + permissions: "0744" + - content: | + #!/bin/bash + set -o nounset + set -o pipefail + set -o errexit + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + # Define the base URL and version + BASE_URL="https://kubernetesreleases.blob.core.windows.net/dalec-packages" + VERSION="${KUBERNETES_VERSION}" + VERSION=$${VERSION#v} + OS_VERSION="ubuntu24.04" + ARCH="amd64" + for BINARY in "$${BINARIES[@]}"; do + if [[ "$${BINARY}" == "kubelet" ]]; then + DEB_VERSION="ubuntu24.04u1" + else + DEB_VERSION="ubuntu24.04u2" + fi + echo "* downloading and extracting binary: $${BINARY} $${VERSION} with deb version $${DEB_VERSION}" + DEB_FILE="/tmp/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + DEB_URL="$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + echo "Downloading from: $${DEB_URL}" + curl -L --retry 10 --retry-delay 5 "$${DEB_URL}" --output "$${DEB_FILE}" + # Extract just the binary from the deb package directly to /usr/bin + echo "Extracting $${BINARY} binary to /usr/bin" + dpkg-deb --fsys-tarfile "$${DEB_FILE}" | tar -xf - --strip-components=3 -C /usr/bin ./usr/bin/$${BINARY} + chmod +x "/usr/bin/$${BINARY}" + # Clean up the downloaded deb file + rm -f "$${DEB_FILE}" + done + systemctl restart kubelet + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + owner: root:root + path: /tmp/replace-k8s-binaries.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - echo '${AZURE_INTERNAL_LB_PRIVATE_IP} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts + - bash -c /tmp/kubeadm-bootstrap.sh + - bash -c /tmp/replace-k8s-binaries.sh + verbosity: 5 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureClusterIdentity +metadata: + labels: + clusterctl.cluster.x-k8s.io/move-hierarchy: "true" + name: ${CLUSTER_IDENTITY_NAME} + namespace: default +spec: + allowedNamespaces: {} + clientID: ${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY} + tenantID: ${AZURE_TENANT_ID} + type: ${CLUSTER_IDENTITY_TYPE:=WorkloadIdentity} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: calico + namespace: default +spec: + chartName: tigera-operator + clusterSelector: + matchLabels: + cni: calico + namespace: tigera-operator + releaseName: projectcalico + repoURL: https://docs.tigera.io/calico/charts + valuesTemplate: | + installation: + cni: + type: Calico + ipam: + type: Calico + calicoNetwork: + bgp: Disabled + windowsDataplane: HNS + mtu: 1350 + ipPools:{{range $i, $cidr := .Cluster.spec.clusterNetwork.pods.cidrBlocks }} + - cidr: {{ $cidr }} + encapsulation: VXLAN{{end}} + typhaDeployment: + spec: + template: + spec: + # 
By default, typha tolerates all NoSchedule taints. This breaks + # scale-ins when it continuously gets scheduled onto an + # out-of-date Node that is being deleted. Tolerate only the + # NoSchedule taints that are expected. + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + preference: + matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + registry: capzcicommunity.azurecr.io + serviceCIDRs: + - 10.96.0.0/12 # must match cluster service CIDR (this is the default) + # Image and registry configuration for the tigera/operator pod + tigeraOperator: + image: tigera/operator + registry: capzcicommunity.azurecr.io + calicoctl: + image: capzcicommunity.azurecr.io/calico/ctl + # when kubernetesServiceEndpoint (required for windows) is added + # DNS configuration is needed to look up the api server name properly + # https://github.com/projectcalico/calico/issues/9536 + dnsConfig: + nameservers: + - 127.0.0.53 + options: + - name: edns0 + - name: trust-ad + kubernetesServiceEndpoint: + host: "{{ .Cluster.spec.controlPlaneEndpoint.host }}" + port: "{{ .Cluster.spec.controlPlaneEndpoint.port }}" + # By default, tigera tolerates all NoSchedule taints. This breaks upgrades + # when it continuously gets scheduled onto an out-of-date Node that is being + # deleted. Tolerate only the NoSchedule taints that are expected. + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + version: ${CALICO_VERSION} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: azuredisk-csi-driver-chart + namespace: default +spec: + chartName: azuredisk-csi-driver + clusterSelector: + matchLabels: + azuredisk-csi: "true" + namespace: kube-system + releaseName: azuredisk-csi-driver-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/azuredisk-csi-driver/master/charts + valuesTemplate: |- + controller: + replicas: 1 + runOnControlPlane: true + windows: + useHostProcessContainers: {{ hasKey .Cluster.metadata.labels "cni-windows" }} +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 +--- +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci + namespace: default +spec: + chartName: cloud-provider-azure + clusterSelector: + matchLabels: + cloud-provider: azure-ci + releaseName: cloud-provider-azure-oot + repoURL: https://raw.githubusercontent.com/kubernetes-sigs/cloud-provider-azure/master/helm/repo + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} 
+ cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: "${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME}-azl3-md-0 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${AZL3_WORKER_MACHINE_COUNT:=2} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_NAME}-azl3-md-0 + clusterName: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate + name: ${CLUSTER_NAME}-azl3-md-0 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-azl3-md-0 + namespace: default +spec: + template: + spec: + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION:="1.33.2"} + osDisk: + diskSizeGB: 128 + osType: Linux + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + vmSize: ${AZURE_NODE_MACHINE_TYPE} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_NAME}-azl3-md-0 + namespace: default +spec: + template: + spec: + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Install ca-certificates packages for Azure Linux + tdnf install -y ca-certificates ca-certificates-legacy + update-ca-trust + + # Follow Azure Linux 3 docs exactly - completely permissive for debugging + # Change default policy to ACCEPT (as recommended by AZL3 docs) + iptables -P INPUT ACCEPT + iptables -P FORWARD ACCEPT + iptables -P OUTPUT ACCEPT + + ip6tables -P INPUT ACCEPT + ip6tables -P FORWARD ACCEPT + ip6tables -P OUTPUT ACCEPT + + # Flush any rules which would filter packets + iptables -F + ip6tables -F + + iptables-save > /etc/systemd/scripts/ip4save + ip6tables-save > /etc/systemd/scripts/ip6save + owner: root:root + path: /tmp/azl3-setup.sh + permissions: "0744" + - contentFrom: + secret: + key: worker-node-azure.json + name: ${CLUSTER_NAME}-azl3-md-0-azure-json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: "#!/bin/bash\n\nset -o nounset\nset -o pipefail\nset -o errexit\n\nsystemctl + stop kubelet\n\ndeclare -a BINARIES=(\"kubeadm\" \"kubectl\" \"kubelet\")\n\n# + Define the base URL and version\nBASE_URL=\"https://kubernetesreleases.blob.core.windows.net/dalec-packages\"\nVERSION=\"${KUBERNETES_VERSION}\"\nVERSION=$${VERSION#v}\nOS_VERSION=\"azl3\"\nARCH=\"x86_64\"\n\nfor + BINARY in \"$${BINARIES[@]}\"; do\n echo \"* downloading and extracting + binary: $${BINARY} $${VERSION}\"\n RPM_FILE=\"/tmp/$${BINARY}-$${VERSION}-1.$${OS_VERSION}.$${ARCH}.rpm\"\n + \ RPM_URL=\"$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}-$${VERSION}-1.$${OS_VERSION}.$${ARCH}.rpm\"\n\n + \ echo \"Downloading from: $${RPM_URL}\"\n curl -L --retry 10 --retry-delay + 5 \"$${RPM_URL}\" --output \"$${RPM_FILE}\"\n\n # Extract 
binary from RPM + package in a temporary directory\n echo \"Extracting $${BINARY} binary + to /usr/bin\"\n TEMP_DIR=\"/tmp/$${BINARY}-extract-$$\"\n mkdir -p \"$${TEMP_DIR}\"\n + \ cd \"$${TEMP_DIR}\"\n rpm2cpio \"$${RPM_FILE}\" | cpio -idmv\n \n # + Move the binary to the correct location\n if [ -f \"./usr/bin/$${BINARY}\" + ]; then\n mv \"./usr/bin/$${BINARY}\" \"/usr/bin/$${BINARY}\"\n chmod + +x \"/usr/bin/$${BINARY}\"\n else\n echo \"Error: Binary $${BINARY} + not found in RPM package\"\n exit 1\n fi\n\n # Clean up\n cd /\n rm + -rf \"$${TEMP_DIR}\"\n rm -f \"$${RPM_FILE}\"\ndone\n\nsystemctl restart + kubelet\n\necho \"kubeadm version: $(kubeadm version -o=short)\"\necho \"kubectl + version: $(kubectl version --client=true)\"\necho \"kubelet version: $(kubelet + --version)\"\n" + owner: root:root + path: /opt/install-custom-k8s-binaries.sh + permissions: "0744" + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + cloud-provider: external + name: '{{ ds.meta_data["local_hostname"] }}' + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh + - echo '${AZURE_INTERNAL_LB_PRIVATE_IP} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' + >> /etc/hosts + - bash -c /opt/install-custom-k8s-binaries.sh diff --git a/templates/test/ci/prow-azl3/kustomization.yaml b/templates/test/ci/prow-azl3/kustomization.yaml new file mode 100644 index 00000000000..98e08aeb1c2 --- /dev/null +++ b/templates/test/ci/prow-azl3/kustomization.yaml @@ -0,0 +1,36 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- ../../../flavors/default +- ../../../addons/cluster-api-helm/calico.yaml +- ../../../addons/cluster-api-helm/azuredisk-csi-driver.yaml +- ../../../addons/cluster-api-helm/cloud-provider-azure.yaml +- ../../../addons/cluster-api-helm/cloud-provider-azure-ci.yaml +patches: +- path: ../patches/tags.yaml +- path: ../patches/mhc.yaml +- path: ../patches/controller-manager.yaml +- path: ../patches/uami-md-0.yaml +- path: ../patches/uami-control-plane.yaml +- path: ../patches/cluster-label-calico.yaml +- path: ../patches/cluster-label-cloud-provider-azure.yaml +- path: patches/controller-manager.yaml + target: + group: controlplane.cluster.x-k8s.io + kind: KubeadmControlPlane + name: .*-control-plane + version: v1beta1 +- path: patches/kubeadm-config-template-azl3.yaml + target: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + version: v1beta1 +- path: patches/azuremachinetemplate-azl3-image.yaml +- path: patches/cloud-provider-azure-cacertdir.yaml +- path: patches/cloud-provider-azure-ci-cacertdir.yaml + +sortOptions: + order: fifo diff --git a/templates/test/ci/prow-azl3/patches/azuremachinetemplate-azl3-image.yaml b/templates/test/ci/prow-azl3/patches/azuremachinetemplate-azl3-image.yaml new file mode 100644 index 00000000000..c6739de2b6a --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/azuremachinetemplate-azl3-image.yaml @@ -0,0 +1,25 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + image: + computeGallery: + gallery: ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 +spec: + template: + spec: + image: + computeGallery: + gallery: 
ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION} diff --git a/templates/test/ci/prow-azl3/patches/cloud-provider-azure-cacertdir.yaml b/templates/test/ci/prow-azl3/patches/cloud-provider-azure-cacertdir.yaml new file mode 100644 index 00000000000..1a19310be5e --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/cloud-provider-azure-cacertdir.yaml @@ -0,0 +1,12 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart +spec: + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + logVerbosity: 4 diff --git a/templates/test/ci/prow-azl3/patches/cloud-provider-azure-ci-cacertdir.yaml b/templates/test/ci/prow-azl3/patches/cloud-provider-azure-ci-cacertdir.yaml new file mode 100644 index 00000000000..cf26adf88c4 --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/cloud-provider-azure-ci-cacertdir.yaml @@ -0,0 +1,23 @@ +apiVersion: addons.cluster.x-k8s.io/v1alpha1 +kind: HelmChartProxy +metadata: + name: cloud-provider-azure-chart-ci +spec: + valuesTemplate: | + infra: + clusterName: {{ .Cluster.metadata.name }} + cloudControllerManager: + caCertDir: "/etc/pki/tls/certs" + cloudConfig: ${CLOUD_CONFIG:-"/etc/kubernetes/azure.json"} + cloudConfigSecretName: ${CONFIG_SECRET_NAME:-""} + clusterCIDR: {{ .Cluster.spec.clusterNetwork.pods.cidrBlocks | join "," }} + imageName: "${CCM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CCM:-""}" + logVerbosity: ${CCM_LOG_VERBOSITY:-4} + replicas: ${CCM_COUNT:-1} + enableDynamicReloading: ${ENABLE_DYNAMIC_RELOADING:-false} + cloudNodeManager: + imageName: "${CNM_IMAGE_NAME:-""}" + imageRepository: "${IMAGE_REGISTRY:-""}" + imageTag: "${IMAGE_TAG_CNM:-""}" diff --git a/templates/test/ci/prow-azl3/patches/controller-manager.yaml b/templates/test/ci/prow-azl3/patches/controller-manager.yaml new file mode 100644 index 00000000000..c1db3892a74 --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/controller-manager.yaml @@ -0,0 +1,46 @@ +- op: add + path: /spec/kubeadmConfigSpec/files/0 + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Install ca-certificates packages for Azure Linux + tdnf install -y ca-certificates ca-certificates-legacy + update-ca-trust + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Kubernetes API Server (port 6443) - bound to all IPv6 interfaces, needs external access + iptables -A INPUT -p tcp --dport 6443 -j ACCEPT + + # etcd server communication + iptables -A INPUT -p tcp --dport 2379 -j ACCEPT + iptables -A INPUT -p tcp --dport 2380 -j ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Allow traffic to/from Calico pod network + iptables -A OUTPUT -d 192.168.0.0/24 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/24 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + path: /tmp/azl3-setup.sh + owner: "root:root" + permissions: "0744" +- op: add + 
path: /spec/kubeadmConfigSpec/preKubeadmCommands/0 + value: + bash -c /tmp/azl3-setup.sh diff --git a/templates/test/ci/prow-azl3/patches/disable-vm-bootstrap-extension.yaml b/templates/test/ci/prow-azl3/patches/disable-vm-bootstrap-extension.yaml new file mode 100644 index 00000000000..5d06409a84d --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/disable-vm-bootstrap-extension.yaml @@ -0,0 +1,17 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + disableVMBootstrapExtension: true +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 +spec: + template: + spec: + disableVMBootstrapExtension: true diff --git a/templates/test/ci/prow-azl3/patches/kubeadm-config-template-azl3.yaml b/templates/test/ci/prow-azl3/patches/kubeadm-config-template-azl3.yaml new file mode 100644 index 00000000000..e5231fa7a42 --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/kubeadm-config-template-azl3.yaml @@ -0,0 +1,63 @@ +- op: add + path: /spec/template/spec/files/0 + value: + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Allow Azure service IP addresses (required for Azure resources) + iptables -A INPUT -s 168.63.129.16 -j ACCEPT + iptables -A OUTPUT -d 168.63.129.16 -j ACCEPT + + # Allow localhost traffic + iptables -A INPUT -i lo -j ACCEPT + iptables -A OUTPUT -o lo -j ACCEPT + + # Allow established and related connections + iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT + + # SSH (port 22) + # iptables -A INPUT -p tcp --dport 22 -j ACCEPT + + # Kubelet API (port 10250) + iptables -A INPUT -p tcp --dport 10250 -j ACCEPT + + # Allow traffic to Kubernetes service network (10.96.0.0/12) + iptables -A OUTPUT -d 10.96.0.0/12 -j ACCEPT + iptables -A INPUT -s 10.96.0.0/12 -j ACCEPT + + # Allow traffic to/from Calico pod network (192.168.0.0/16) + iptables -A OUTPUT -d 192.168.0.0/16 -j ACCEPT + iptables -A INPUT -s 192.168.0.0/16 -j ACCEPT + + # Allow traffic to/from node network (10.1.0.0/24) + iptables -A OUTPUT -d 10.1.0.0/24 -j ACCEPT + iptables -A INPUT -s 10.1.0.0/24 -j ACCEPT + + # Calico networking requirements + # Calico Typha (port 5473) + iptables -A INPUT -p tcp --dport 5473 -j ACCEPT + + # VXLAN for overlay networking (port 4789 UDP) + iptables -A INPUT -p udp --dport 4789 -j ACCEPT + + # BGP for node-to-node communication (port 179) + iptables -A INPUT -p tcp --dport 179 -j ACCEPT + + # DNS (port 53) + iptables -A INPUT -p udp --dport 53 -j ACCEPT + iptables -A OUTPUT -p udp --dport 53 -j ACCEPT + + # Save the rules following Azure Linux 3 approach + iptables-save > /etc/systemd/scripts/ip4save + path: /tmp/azl3-setup.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/0 + value: + bash -c /tmp/azl3-setup.sh diff --git a/templates/test/ci/prow-azl3/patches/remove-marketplace-image.yaml b/templates/test/ci/prow-azl3/patches/remove-marketplace-image.yaml new file mode 100644 index 00000000000..c44ae2bdf32 --- /dev/null +++ b/templates/test/ci/prow-azl3/patches/remove-marketplace-image.yaml @@ -0,0 +1,19 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + template: + spec: + image: + marketplace: null +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: ${CLUSTER_NAME}-md-0 +spec: + template: + spec: + image: + marketplace: null diff --git a/templates/test/ci/prow-ci-version-azl3/kustomization.yaml b/templates/test/ci/prow-ci-version-azl3/kustomization.yaml new file mode 100644 index 00000000000..eb46c9d12a5 --- /dev/null +++ b/templates/test/ci/prow-ci-version-azl3/kustomization.yaml @@ -0,0 +1,26 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- ../prow-ci-version +patches: +- path: ../prow-azl3/patches/controller-manager.yaml + target: + group: controlplane.cluster.x-k8s.io + kind: KubeadmControlPlane + name: .*-control-plane + version: v1beta1 +- path: ../prow-azl3/patches/kubeadm-config-template-azl3.yaml + target: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfigTemplate + name: .*-md-0 + namespace: default + version: v1beta1 +- path: ../prow-azl3/patches/azuremachinetemplate-azl3-image.yaml +- path: ../prow-azl3/patches/remove-marketplace-image.yaml +- path: ../prow-azl3/patches/cloud-provider-azure-cacertdir.yaml +- path: ../prow-azl3/patches/cloud-provider-azure-ci-cacertdir.yaml + +sortOptions: + order: fifo diff --git a/templates/test/ci/prow-dalec-custom-builds/kustomization.yaml b/templates/test/ci/prow-dalec-custom-builds/kustomization.yaml new file mode 100644 index 00000000000..b496ba70bc0 --- /dev/null +++ b/templates/test/ci/prow-dalec-custom-builds/kustomization.yaml @@ -0,0 +1,31 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: default +resources: +- ../prow-apiserver-ilb-custom-images +- patches/azl3-machine-deployment.yaml +patches: +- path: patches/kubeadm-bootstrap-custom-builds.yaml + target: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfigTemplate + name: ^[^-]*-md-0$ + namespace: default + version: v1beta1 +- path: patches/control-plane-custom-builds.yaml + target: + group: controlplane.cluster.x-k8s.io + kind: KubeadmControlPlane + name: .*-control-plane + version: v1beta1 +- path: patches/delete-machine-health-check.yaml + target: + group: cluster.x-k8s.io + kind: MachineHealthCheck + name: .*-control-plane + namespace: default + version: v1beta1 + +sortOptions: + order: fifo + diff --git a/templates/test/ci/prow-dalec-custom-builds/patches/azl3-machine-deployment.yaml b/templates/test/ci/prow-dalec-custom-builds/patches/azl3-machine-deployment.yaml new file mode 100644 index 00000000000..badd21faa37 --- /dev/null +++ b/templates/test/ci/prow-dalec-custom-builds/patches/azl3-machine-deployment.yaml @@ -0,0 +1,152 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-azl3-md-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${AZL3_WORKER_MACHINE_COUNT:=2} + selector: + matchLabels: + template: + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-azl3-md-0" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-azl3-md-0" + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: AzureMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: AzureMachineTemplate +metadata: + name: "${CLUSTER_NAME}-azl3-md-0" +spec: + template: + spec: + vmSize: ${AZURE_NODE_MACHINE_TYPE} + osDisk: + osType: "Linux" + diskSizeGB: 128 + sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""} + image: + computeGallery: + gallery: 
ClusterAPI-f72ceb4f-5159-4c26-a0fe-2ea738f0d019 + name: capi-azurelinux-3 + version: ${AZL3_VERSION:="1.33.2"} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-azl3-md-0" +spec: + template: + spec: + files: + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + # Install ca-certificates packages for Azure Linux + tdnf install -y ca-certificates ca-certificates-legacy + update-ca-trust + + # Follow Azure Linux 3 docs exactly - completely permissive for debugging + # Change default policy to ACCEPT (as recommended by AZL3 docs) + iptables -P INPUT ACCEPT + iptables -P FORWARD ACCEPT + iptables -P OUTPUT ACCEPT + + ip6tables -P INPUT ACCEPT + ip6tables -P FORWARD ACCEPT + ip6tables -P OUTPUT ACCEPT + + # Flush any rules which would filter packets + iptables -F + ip6tables -F + + iptables-save > /etc/systemd/scripts/ip4save + ip6tables-save > /etc/systemd/scripts/ip6save + path: /tmp/azl3-setup.sh + owner: "root:root" + permissions: "0744" + - contentFrom: + secret: + name: ${CLUSTER_NAME}-azl3-md-0-azure-json + key: worker-node-azure.json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" + - content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + systemctl stop kubelet + + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + + # Define the base URL and version + BASE_URL="https://kubernetesreleases.blob.core.windows.net/dalec-packages" + VERSION="${KUBERNETES_VERSION}" + VERSION=$${VERSION#v} + OS_VERSION="azl3" + ARCH="x86_64" + + for BINARY in "$${BINARIES[@]}"; do + echo "* downloading and extracting binary: $${BINARY} $${VERSION}" + RPM_FILE="/tmp/$${BINARY}-$${VERSION}-1.$${OS_VERSION}.$${ARCH}.rpm" + RPM_URL="$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}-$${VERSION}-1.$${OS_VERSION}.$${ARCH}.rpm" + + echo "Downloading from: $${RPM_URL}" + curl -L --retry 10 --retry-delay 5 "$${RPM_URL}" --output "$${RPM_FILE}" + + # Extract binary from RPM package in a temporary directory + echo "Extracting $${BINARY} binary to /usr/bin" + TEMP_DIR="/tmp/$${BINARY}-extract-$$" + mkdir -p "$${TEMP_DIR}" + cd "$${TEMP_DIR}" + rpm2cpio "$${RPM_FILE}" | cpio -idmv + + # Move the binary to the correct location + if [ -f "./usr/bin/$${BINARY}" ]; then + mv "./usr/bin/$${BINARY}" "/usr/bin/$${BINARY}" + chmod +x "/usr/bin/$${BINARY}" + else + echo "Error: Binary $${BINARY} not found in RPM package" + exit 1 + fi + + # Clean up + cd / + rm -rf "$${TEMP_DIR}" + rm -f "$${RPM_FILE}" + done + + systemctl restart kubelet + + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + path: /opt/install-custom-k8s-binaries.sh + owner: "root:root" + permissions: "0744" + preKubeadmCommands: + - bash -c /tmp/azl3-setup.sh + - echo '${AZURE_INTERNAL_LB_PRIVATE_IP} ${CLUSTER_NAME}-${APISERVER_LB_DNS_SUFFIX}.${AZURE_LOCATION}.cloudapp.azure.com' >> /etc/hosts + - bash -c /opt/install-custom-k8s-binaries.sh + joinConfiguration: + nodeRegistration: + name: '{{ ds.meta_data["local_hostname"] }}' + kubeletExtraArgs: + cloud-provider: external diff --git a/templates/test/ci/prow-dalec-custom-builds/patches/azl3-worker-azure-json.yaml b/templates/test/ci/prow-dalec-custom-builds/patches/azl3-worker-azure-json.yaml new file mode 100644 index 00000000000..7c7fcf30f59 --- /dev/null +++ 
b/templates/test/ci/prow-dalec-custom-builds/patches/azl3-worker-azure-json.yaml @@ -0,0 +1,10 @@ +- op: add + path: /spec/template/spec/files/- + value: + contentFrom: + secret: + name: ${CLUSTER_NAME}-azl3-md-0-azure-json + key: worker-node-azure.json + owner: root:root + path: /etc/kubernetes/azure.json + permissions: "0644" diff --git a/templates/test/ci/prow-dalec-custom-builds/patches/control-plane-custom-builds.yaml b/templates/test/ci/prow-dalec-custom-builds/patches/control-plane-custom-builds.yaml new file mode 100644 index 00000000000..4352e2497a3 --- /dev/null +++ b/templates/test/ci/prow-dalec-custom-builds/patches/control-plane-custom-builds.yaml @@ -0,0 +1,46 @@ +- op: add + path: /spec/kubeadmConfigSpec/files/- + value: + content: | + #!/bin/bash + set -o nounset + set -o pipefail + set -o errexit + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + # Define the base URL and version + BASE_URL="https://kubernetesreleases.blob.core.windows.net/dalec-packages" + VERSION="${KUBERNETES_VERSION}" + VERSION=$${VERSION#v} + OS_VERSION="ubuntu24.04" + ARCH="amd64" + for BINARY in "$${BINARIES[@]}"; do + if [[ "$${BINARY}" == "kubelet" ]]; then + DEB_VERSION="ubuntu24.04u1" + else + DEB_VERSION="ubuntu24.04u2" + fi + echo "* downloading and extracting binary: $${BINARY} $${VERSION} with deb version $${DEB_VERSION}" + DEB_FILE="/tmp/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + DEB_URL="$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + echo "Downloading from: $${DEB_URL}" + curl -L --retry 10 --retry-delay 5 "$${DEB_URL}" --output "$${DEB_FILE}" + # Extract just the binary from the deb package directly to /usr/bin + echo "Extracting $${BINARY} binary to /usr/bin" + dpkg-deb --fsys-tarfile "$${DEB_FILE}" | tar -xf - --strip-components=3 -C /usr/bin ./usr/bin/$${BINARY} + chmod +x "/usr/bin/$${BINARY}" + # Clean up the downloaded deb file + rm -f "$${DEB_FILE}" + done + systemctl restart kubelet + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + path: /tmp/replace-k8s-binaries.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/kubeadmConfigSpec/preKubeadmCommands/- + value: + bash -c /tmp/replace-k8s-binaries.sh + \ No newline at end of file diff --git a/templates/test/ci/prow-dalec-custom-builds/patches/delete-machine-health-check.yaml b/templates/test/ci/prow-dalec-custom-builds/patches/delete-machine-health-check.yaml new file mode 100644 index 00000000000..3a64707e022 --- /dev/null +++ b/templates/test/ci/prow-dalec-custom-builds/patches/delete-machine-health-check.yaml @@ -0,0 +1,6 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: ${CLUSTER_NAME}-control-plane + namespace: default +$patch: delete diff --git a/templates/test/ci/prow-dalec-custom-builds/patches/kubeadm-bootstrap-custom-builds.yaml b/templates/test/ci/prow-dalec-custom-builds/patches/kubeadm-bootstrap-custom-builds.yaml new file mode 100644 index 00000000000..9068d7b5eda --- /dev/null +++ b/templates/test/ci/prow-dalec-custom-builds/patches/kubeadm-bootstrap-custom-builds.yaml @@ -0,0 +1,45 @@ +- op: add + path: /spec/template/spec/files/- + value: + content: | + #!/bin/bash + set -o nounset + set -o pipefail + set -o errexit + systemctl stop kubelet + declare -a BINARIES=("kubeadm" "kubectl" "kubelet") + # Define the base URL and version + 
BASE_URL="https://kubernetesreleases.blob.core.windows.net/dalec-packages" + VERSION="${KUBERNETES_VERSION}" + VERSION=$${VERSION#v} + OS_VERSION="ubuntu24.04" + ARCH="amd64" + for BINARY in "$${BINARIES[@]}"; do + if [[ "$${BINARY}" == "kubelet" ]]; then + DEB_VERSION="ubuntu24.04u1" + else + DEB_VERSION="ubuntu24.04u2" + fi + echo "* downloading and extracting binary: $${BINARY} $${VERSION} with deb version $${DEB_VERSION}" + DEB_FILE="/tmp/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + DEB_URL="$${BASE_URL}/$${BINARY}/$${VERSION}/$${OS_VERSION}/$${ARCH}/$${BINARY}_$${VERSION}-$${DEB_VERSION}_$${ARCH}.deb" + echo "Downloading from: $${DEB_URL}" + curl -L --retry 10 --retry-delay 5 "$${DEB_URL}" --output "$${DEB_FILE}" + # Extract just the binary from the deb package directly to /usr/bin + echo "Extracting $${BINARY} binary to /usr/bin" + dpkg-deb --fsys-tarfile "$${DEB_FILE}" | tar -xf - --strip-components=3 -C /usr/bin ./usr/bin/$${BINARY} + chmod +x "/usr/bin/$${BINARY}" + # Clean up the downloaded deb file + rm -f "$${DEB_FILE}" + done + systemctl restart kubelet + echo "kubeadm version: $(kubeadm version -o=short)" + echo "kubectl version: $(kubectl version --client=true)" + echo "kubelet version: $(kubelet --version)" + path: /tmp/replace-k8s-binaries.sh + owner: "root:root" + permissions: "0744" +- op: add + path: /spec/template/spec/preKubeadmCommands/- + value: + bash -c /tmp/replace-k8s-binaries.sh diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go index b5f4f3c134f..cedb71d3742 100644 --- a/test/e2e/azure_logcollector.go +++ b/test/e2e/azure_logcollector.go @@ -443,15 +443,15 @@ func linuxLogs(execToPathFn func(outputFileName string, command string, args ... ), execToPathFn( "cloud-init.log", - "cat", "/var/log/cloud-init.log", + "sudo", "sh", "-c", "if [ -f /var/log/cloud-init.log ]; then sudo cat /var/log/cloud-init.log; else echo 'cloud-init.log not found'; fi", ), execToPathFn( "cloud-init-output.log", - "cat", "/var/log/cloud-init-output.log", + "sudo", "sh", "-c", "echo 'Waiting for cloud-init to complete before collecting output log...' && timeout 60 cloud-init status --wait || echo 'Cloud-init wait timed out, proceeding with log collection...' 
&& if [ -f /var/log/cloud-init-output.log ]; then echo 'Found cloud-init-output.log, reading contents:' && sudo cat /var/log/cloud-init-output.log; else echo 'cloud-init-output.log not found'; fi", ), execToPathFn( "sentinel-file-dir.txt", - "ls", "/run/cluster-api/", + "ls", "-la", "/run/cluster-api/", ), execToPathFn( "cni.log", diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index 72862998509..466f5d44b4e 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -24,6 +24,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/Azure/azure-service-operator/v2/pkg/common/config" @@ -1287,5 +1288,94 @@ var _ = Describe("Workload cluster creation", func() { }) }) + Context("Creating a highly-available cluster with Azure Linux 3 [OPTIONAL]", func() { + It("with three control plane nodes and two worker nodes", func() { + clusterName = getClusterName(clusterNamePrefix, "azl3") + kubernetesVersion := e2eConfig.MustGetVariable(capi_e2e.KubernetesVersion) + kubernetesVersion = strings.TrimPrefix(kubernetesVersion, "v") + Expect(os.Setenv("AZL3_VERSION", kubernetesVersion)).To(Succeed()) + skipResourceGroupCheck = true + + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( + specName, + withNamespace(namespace.Name), + withClusterName(clusterName), + withFlavor("azl3"), + withControlPlaneMachineCount(3), + withWorkerMachineCount(2), + withControlPlaneInterval(specName, "wait-control-plane-ha"), + withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ + WaitForControlPlaneInitialized: EnsureControlPlaneInitialized, + }), + withPostMachinesProvisioned(func() { + EnsureDaemonsets(ctx, func() DaemonsetsSpecInput { + return DaemonsetsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }), + ), result) + + By("Verifying expected VM extensions are present on the node", func() { + AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { + return AzureVMExtensionsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }) + + By("Verifying security rules are deleted on azure side", func() { + AzureSecurityGroupsSpec(ctx, func() AzureSecurityGroupsSpecInput { + return AzureSecurityGroupsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + Cluster: result.Cluster, + WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-nsg-update"), + } + }) + }) + + By("Validating failure domains", func() { + AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput { + return AzureFailureDomainsSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Namespace: namespace, + ClusterName: clusterName, + } + }) + }) + + By("Creating an accessible load balancer", func() { + AzureLBSpec(ctx, func() AzureLBSpecInput { + return AzureLBSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + SkipCleanup: skipCleanup, + } + }) + }) + + By("Validating network policies", func() { + AzureNetPolSpec(ctx, func() AzureNetPolSpecInput { + return AzureNetPolSpecInput{ + BootstrapClusterProxy: bootstrapClusterProxy, + Namespace: namespace, + ClusterName: clusterName, + SkipCleanup: skipCleanup, + } + }) + }) + + By("PASSED!") + }) + }) + // TODO: add a same test as above for a windows cluster }) diff --git a/test/e2e/config/azure-dev.yaml index 
c868a09592e..1973c5025bd 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -148,6 +148,8 @@ providers: targetName: "cluster-template-conformance-presubmit-artifacts-dra.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-ci-version-dra.yaml" targetName: "cluster-template-conformance-ci-artifacts-dra.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-ci-version-azl3.yaml" + targetName: "cluster-template-conformance-ci-artifacts-azl3.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-machine-pool-flex.yaml" targetName: "cluster-template-machine-pool-flex.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-aks.yaml" @@ -184,6 +186,10 @@ providers: targetName: "cluster-template-apiserver-ilb.yaml" - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-apiserver-ilb-custom-images.yaml" targetName: "cluster-template-apiserver-ilb-custom-images.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-dalec-custom-builds.yaml" + targetName: "cluster-template-dalec-custom-builds.yaml" + - sourcePath: "${PWD}/templates/test/ci/cluster-template-prow-azl3.yaml" + targetName: "cluster-template-azl3.yaml" replacements: - old: "--v=0" new: "--v=2"
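
Note on rendering these templates outside CI: the `${VAR}` placeholders above are substituted at generation time, and the e2e framework selects a template by its flavor name. A minimal sketch of generating a cluster from the new azl3 template with clusterctl is below; the values are illustrative assumptions, not the CI defaults, and the template's remaining variables without defaults (for example BUILD_PROVENANCE, TIMESTAMP, JOB_NAME, AZURE_SUBSCRIPTION_ID) must also be exported before clusterctl will render it.

    #!/bin/bash
    # Illustrative only: assumes a management cluster with CAPZ installed and a
    # capi-azurelinux-3 image published at this version in the compute gallery.
    export AZURE_SUBSCRIPTION_ID="<subscription-id>"
    export AZURE_LOCATION="eastus"
    export AZL3_VERSION="1.33.2"   # compute gallery image version

    # Render the template locally and apply it to the management cluster.
    clusterctl generate cluster azl3-test \
      --from templates/test/ci/cluster-template-prow-azl3.yaml \
      --kubernetes-version v1.33.2 \
      --control-plane-machine-count 3 \
      --worker-machine-count 2 | kubectl apply -f -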