diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 401dd765e5..102217c078 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -21,7 +21,7 @@ spec: - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" - --v=4 - - "--feature-gates=MultiNetworks=${EXP_MULTI_NETWORKS:=false},NodeAntiAffinity=${EXP_NODE_ANTI_AFFINITY:=false},NamespaceScopedZones=${EXP_NAMESPACE_SCOPED_ZONES:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false}" + - "--feature-gates=MultiNetworks=${EXP_MULTI_NETWORKS:=false},NodeAntiAffinity=${EXP_NODE_ANTI_AFFINITY:=false},NamespaceScopedZones=${EXP_NAMESPACE_SCOPED_ZONES:=false},NodeAutoPlacement=${EXP_NODE_AUTO_PLACEMENT:=false},PriorityQueue=${EXP_PRIORITY_QUEUE:=false}" image: controller:latest imagePullPolicy: IfNotPresent name: manager diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ff4613da71..c57a326fb9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -249,6 +249,7 @@ rules: - apiGroups: - vmoperator.vmware.com resources: + - virtualmachinegroups - virtualmachineimages - virtualmachineimages/status - virtualmachines @@ -264,6 +265,12 @@ rules: - patch - update - watch +- apiGroups: + - vmoperator.vmware.com + resources: + - virtualmachinegroups/status + verbs: + - get - apiGroups: - vmware.com resources: diff --git a/controllers/vmware/virtualmachinegroup_controller.go b/controllers/vmware/virtualmachinegroup_controller.go new file mode 100644 index 0000000000..edfc5d0211 --- /dev/null +++ b/controllers/vmware/virtualmachinegroup_controller.go @@ -0,0 +1,91 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vmware + +import ( + "context" + + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" + apitypes "k8s.io/apimachinery/pkg/types" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters/status,verbs=get +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachinegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=vmoperator.vmware.com,resources=virtualmachinegroups/status,verbs=get +// +kubebuilder:rbac:groups=vmware.infrastructure.cluster.x-k8s.io,resources=vsphereclusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=vmware.infrastructure.cluster.x-k8s.io,resources=vspheremachines,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch + +// AddVirtualMachineGroupControllerToManager adds the VirtualMachineGroup controller to the provided +// manager. +func AddVirtualMachineGroupControllerToManager(ctx context.Context, controllerManagerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, options controller.Options) error { + predicateLog := ctrl.LoggerFrom(ctx).WithValues("controller", "virtualmachinegroup") + + reconciler := &VirtualMachineGroupReconciler{ + Client: controllerManagerCtx.Client, + Recorder: mgr.GetEventRecorderFor("virtualmachinegroup-controller"), + } + + // Predicate: only allow VMG with the cluster-name label + hasClusterNameLabel := predicate.NewPredicateFuncs(func(obj ctrlclient.Object) bool { + labels := obj.GetLabels() + if labels == nil { + return false + } + _, ok := labels[clusterv1.ClusterNameLabel] + return ok + }) + + builder := ctrl.NewControllerManagedBy(mgr). + For(&vmoprv1.VirtualMachineGroup{}). + WithOptions(options). + WithEventFilter(hasClusterNameLabel). + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(reconciler.ClusterToVirtualMachineGroup), + ). + WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, controllerManagerCtx.WatchFilterValue)) + + return builder.Complete(reconciler) +} + +func (r VirtualMachineGroupReconciler) ClusterToVirtualMachineGroup(ctx context.Context, a ctrlclient.Object) []reconcile.Request { + cluster, ok := a.(*clusterv1.Cluster) + if !ok { + return nil + } + + // Always enqueue a request for the "would-be VMG" + return []reconcile.Request{{ + NamespacedName: apitypes.NamespacedName{ + Namespace: cluster.Namespace, + Name: cluster.Name, + }, + }} +} diff --git a/controllers/vmware/virtualmachinegroup_reconciler.go b/controllers/vmware/virtualmachinegroup_reconciler.go new file mode 100644 index 0000000000..aa65ba62a4 --- /dev/null +++ b/controllers/vmware/virtualmachinegroup_reconciler.go @@ -0,0 +1,488 @@ +/* +Copyright 2025 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package vmware contains the VirtualMachineGroup Reconciler. +package vmware + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) + +const ( + reconciliationDelay = 10 * time.Second +) + +// VirtualMachineGroupReconciler reconciles VirtualMachineGroup. +type VirtualMachineGroupReconciler struct { + Client client.Client + Recorder record.EventRecorder +} + +func (r *VirtualMachineGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Fetch the Cluster instance. + cluster := &clusterv1.Cluster{} + if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil { + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + log = log.WithValues("Cluster", klog.KObj(cluster)) + // If Cluster is deleted, just return as VirtualMachineGroup will be GCed and no extra processing needed. + if !cluster.DeletionTimestamp.IsZero() { + return reconcile.Result{}, nil + } + + vmg := &vmoprv1.VirtualMachineGroup{} + key := &client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Name, + } + + if err := r.Client.Get(ctx, *key, vmg); err != nil { + if !apierrors.IsNotFound(err) { + log.Error(err, "failed to get VirtualMachineGroup") + return ctrl.Result{}, err + } + vmg = &vmoprv1.VirtualMachineGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: key.Name, + Namespace: key.Namespace, + }, + } + } + + // // Proceed only if multiple zones are available. + // // If there is only one zone(default), node automatic placement is unnecessary + // // because all Machine Deployments will be scheduled into that single zone. + // // The VSphereCluster resource discovers the underlying zones, + // // which we treat as the source of truth. 
+ // vsphereClusterList := &vmwarev1.VSphereClusterList{} + // labelKey := clusterv1.ClusterNameLabel + // if err := r.Client.List(ctx, vsphereClusterList, + // client.InNamespace(cluster.Namespace), + // client.MatchingLabels(map[string]string{labelKey: cluster.Name}), + // ); err != nil { + // return reconcile.Result{}, fmt.Errorf("failed to list VSphereClusters in namespace %s: %w", cluster.Namespace, err) + // } + + // vsphereCluster := &vmwarev1.VSphereCluster{} + // switch len(vsphereClusterList.Items) { + // case 0: + // return reconcile.Result{}, fmt.Errorf("no VSphereCluster found with label %s=%s in namespace %s", labelKey, cluster.Name, cluster.Namespace) + // case 1: + // vsphereCluster = &vsphereClusterList.Items[0] + // default: + // return reconcile.Result{}, fmt.Errorf("found %d VSphereClusters with label %s=%s in namespace %s; expected exactly 1", len(vsphereClusterList.Items), labelKey, cluster.Name, cluster.Namespace) + // } + + // // Fetch the VSphereCluster instance. + // if vsphereCluster.Status.Ready != true { + // log.Info("Waiting for VSphereCluster to be ready with failure domain discovered") + // return reconcile.Result{RequeueAfter: reconciliationDelay}, nil + + // } + + // if len(vsphereCluster.Status.FailureDomains) <= 1 { + // log.Info("Single or no zone detected; skipping node automatic placement") + // return reconcile.Result{}, nil + // } + + // If ControlPlane haven't initialized, requeue it since VSphereMachines of MachineDeployment will only be created after + // ControlPlane is initialized. + if !conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { + log.Info("Waiting for Cluster ControlPlaneInitialized") + return reconcile.Result{RequeueAfter: reconciliationDelay}, nil + } + + // Continue with the main logic. + return r.createOrUpdateVMG(ctx, cluster, vmg) + +} + +// createOrUpdateVMG Create or Update VirtualMachineGroup +func (r *VirtualMachineGroupReconciler) createOrUpdateVMG(ctx context.Context, cluster *clusterv1.Cluster, desiredVMG *vmoprv1.VirtualMachineGroup) (_ reconcile.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + + // Calculate expected Machines of all MachineDeployments. + expectedMachines := getExpectedMachines(cluster) + if expectedMachines == 0 { + log.Info("none of MachineDeployments specifies replica and node auto replacement doesn't support this scenario") + return reconcile.Result{}, nil + } + + // Calculate current Machines of all MachineDeployments. + currentVSphereMachines, err := getCurrentVSphereMachines(ctx, r.Client, cluster.Namespace, cluster.Name) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to get current VSphereMachine of cluster %s/%s", + cluster.Name, cluster.Namespace) + } + + // Wait until all VSphereMachines are create, this could happen during initial deployment or day-2 like cluster update. + current := int32(len(currentVSphereMachines)) + if current < expectedMachines { + // Only check timeout if VMG doesn't exist. 
+ // if desiredVMG.CreationTimestamp.IsZero() { + // if _, err := r.isMDDefined(ctx, cluster); err != nil { + // log.Error(err, "cluster MachineDeployments are not defined") + // return reconcile.Result{}, nil + // } + + // mdList := &clusterv1.MachineDeploymentList{} + // if err := r.Client.List(ctx, mdList, + // client.InNamespace(cluster.Namespace), + // client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name}, + // ); err != nil { + // return reconcile.Result{}, errors.Errorf("failed to list MachineDeployments: %w", err) + // } + + // // If no deployments exist, report error + // if len(mdList.Items) == 0 { + // return reconcile.Result{}, errors.Errorf("no MachineDeployments found for cluster %s/%s", cluster.Namespace, cluster.Name) + // } + + // // Check one MachineDeployment's creation timestamp + // firstMD := mdList.Items[0] + // if time.Since(firstMD.CreationTimestamp.Time) > 1*time.Minute { + // log.Error(errors.New("timeout waiting for VSphereMachines"), "1 minute timeout after MachineDeployment creation", + // "MachineDeployment", firstMD.Name, "Cluster", cluster.Namespace+"/"+cluster.Name) + + // return reconcile.Result{}, nil + // } + // } + + log.Info("current VSphereMachines do not match expected", "Expected:", expectedMachines, + "Current:", current, "ClusterName", cluster.Name, "Namespace", cluster.Namespace) + return reconcile.Result{RequeueAfter: reconciliationDelay}, nil + } + + // Generate VM names according to the naming strategy set on the VSphereMachine. + vmNames := make([]string, 0, len(currentVSphereMachines)) + for _, machine := range currentVSphereMachines { + name, err := GenerateVirtualMachineName(machine.Name, machine.Spec.NamingStrategy) + if err != nil { + return reconcile.Result{}, err + } + vmNames = append(vmNames, name) + } + // Sort the VM names alphabetically for consistent ordering + sort.Slice(vmNames, func(i, j int) bool { + return vmNames[i] < vmNames[j] + }) + + members := make([]vmoprv1.GroupMember, 0, len(currentVSphereMachines)) + for _, name := range vmNames { + members = append(members, vmoprv1.GroupMember{ + Name: name, + Kind: "VirtualMachine", + }) + } + + // Get all the names of MachineDeployments of the Cluster. + if !cluster.Spec.Topology.IsDefined() { + return reconcile.Result{}, errors.Errorf("Cluster Topology is not defined %s/%s", + cluster.Namespace, cluster.Name) + } + machineDeployments := &clusterv1.MachineDeploymentList{} + if err := r.Client.List(ctx, machineDeployments, + client.InNamespace(cluster.Namespace), + client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name}); err != nil { + return reconcile.Result{}, err + } + mdNames := []string{} + for _, md := range machineDeployments.Items { + mdNames = append(mdNames, md.Name) + } + + // Use CreateOrPatch to create or update the VirtualMachineGroup. + _, err = controllerutil.CreateOrPatch(ctx, r.Client, desiredVMG, func() error { + // Set the desired labels + if desiredVMG.Labels == nil { + desiredVMG.Labels = make(map[string]string) + // Set Cluster name label + desiredVMG.Labels[clusterv1.ClusterNameLabel] = cluster.Name + } + + // Add per-md-zone label for day-2 operations once placement of a VM belongs to MachineDeployment is done + // Do not update per-md-zone label once set, as placement decision should not change without user explicitly + // ask. 
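+		// In other words, the per-MachineDeployment zone label is sticky: keys that already
+		// exist on the VirtualMachineGroup are skipped below, so a later placement result
+		// never overwrites an earlier decision.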
+ placementDecisionLabels, err := GenerateVMGPlacementLabels(ctx, desiredVMG, mdNames) + if err != nil { + return err + } + if len(placementDecisionLabels) > 0 { + for k, v := range placementDecisionLabels { + if _, exists := desiredVMG.Labels[k]; exists { + // Skip if the label already exists + continue + } + desiredVMG.Labels[k] = v + } + } + + // Compose bootOrder. + desiredVMG.Spec.BootOrder = []vmoprv1.VirtualMachineGroupBootOrderGroup{ + { + Members: members, + }, + } + + // Make sure the Cluster owns the VM Operator VirtualMachineGroup. + if err = controllerutil.SetControllerReference(cluster, desiredVMG, r.Client.Scheme()); err != nil { + return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s", + cluster.GroupVersionKind(), + cluster.Namespace, + cluster.Name, + desiredVMG.GroupVersionKind(), + desiredVMG.Namespace, + desiredVMG.Name) + } + + return nil + }) + + return reconcile.Result{}, err +} + +// isMDDefined checks if there are any MachineDeployments for the given cluster +// by listing objects with the cluster.x-k8s.io/cluster-name label. +func (r *VirtualMachineGroupReconciler) isMDDefined(ctx context.Context, cluster *clusterv1.Cluster) (bool, error) { + mdList := &clusterv1.MachineDeploymentList{} + if err := r.Client.List(ctx, mdList, client.InNamespace(cluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name}); err != nil { + return false, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", + cluster.Namespace, cluster.Name) + } + + if len(mdList.Items) == 0 { + return false, errors.Errorf("no MachineDeployments found for cluster %s/%s", + cluster.Namespace, cluster.Name) + } + + return true, nil +} + +// isExplicitPlacement checks if any MachineDeployment has an explicit failure domain set. +func (r *VirtualMachineGroupReconciler) isExplicitPlacement(cluster *clusterv1.Cluster) (bool, error) { + // First, ensure MachineDeployments are defined + mdDefined, err := r.isMDDefined(context.Background(), cluster) + if !mdDefined { + return false, err + } + + // Iterate through MachineDeployments to find if an explicit failure domain is set. + mds := cluster.Spec.Topology.Workers.MachineDeployments + for _, md := range mds { + // If a failure domain is specified for any MachineDeployment, it indicates + // explicit placement is configured, so return true. + if md.FailureDomain != "" { + return true, nil + } + } + + return false, nil +} + +// getExpectedMachines returns the total number of replicas across all +// MachineDeployments in the Cluster's Topology.Workers. +func getExpectedMachines(cluster *clusterv1.Cluster) int32 { + if !cluster.Spec.Topology.IsDefined() { + return 0 + } + + var total int32 + for _, md := range cluster.Spec.Topology.Workers.MachineDeployments { + if md.Replicas != nil { + total += *md.Replicas + } + } + return total +} + +func getCurrentVSphereMachines(ctx context.Context, kubeClient client.Client, clusterNamespace, clusterName string) ([]vmwarev1.VSphereMachine, error) { + log := ctrl.LoggerFrom(ctx) + + // // List MachineDeployments for the cluster. 
+ // var mdList clusterv1.MachineDeploymentList + // if err := kubeClient.List(ctx, &mdList, + // client.InNamespace(clusterNamespace), + // client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}, + // ); err != nil { + // return nil, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", clusterNamespace, clusterName) + // } + // validMDs := make(map[string]struct{}) + // for _, md := range mdList.Items { + // validMDs[md.Name] = struct{}{} + // } + // log.V(6).Info("Identified active MachineDeployments", "count", len(validMDs)) + + // // List MachineSets and filter those owned by a valid MachineDeployment. + // var msList clusterv1.MachineSetList + // if err := kubeClient.List(ctx, &msList, + // client.InNamespace(clusterNamespace), + // client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}, + // ); err != nil { + // return nil, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", clusterNamespace, clusterName) + // } + // validMS := make(map[string]struct{}) + // for _, ms := range msList.Items { + // for _, owner := range ms.OwnerReferences { + // if owner.Kind == "MachineDeployment" && owner.APIVersion == clusterv1.GroupVersion.String() { + // if _, ok := validMDs[owner.Name]; ok { + // validMS[ms.Name] = struct{}{} + // break + // } + // } + // } + // } + // log.V(6).Info("Filtered MachineSets owned by valid MachineDeployments", "count", len(validMS)) + + // // List Machines and filter those owned by valid MachineSets (skip control plane). + // var machineList clusterv1.MachineList + // if err := kubeClient.List(ctx, &machineList, + // client.InNamespace(clusterNamespace), + // client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}, + // ); err != nil { + // return nil, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", clusterNamespace, clusterName) + // } + + // workerMachines := make(map[string]struct{}) + // for _, m := range machineList.Items { + // if _, isControlPlane := m.Labels[clusterv1.MachineControlPlaneLabel]; isControlPlane { + // continue + // } + // for _, owner := range m.OwnerReferences { + // if owner.Kind == "MachineSet" && owner.APIVersion == clusterv1.GroupVersion.String() { + // if _, ok := validMS[owner.Name]; ok { + // workerMachines[m.Name] = struct{}{} + // break + // } + // } + // } + // } + // log.V(5).Info("Identified worker Machines linked to MachineSets", "count", len(workerMachines)) + + // List VSphereMachine objects + var vsMachineList vmwarev1.VSphereMachineList + if err := kubeClient.List(ctx, &vsMachineList, + client.InNamespace(clusterNamespace), + client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}, + client.HasLabels{clusterv1.MachineDeploymentNameLabel}, + ); err != nil { + return nil, errors.Wrapf(err, "failed to list VSphereMachines in namespace %s", clusterNamespace) + } + + var result []vmwarev1.VSphereMachine + for _, vs := range vsMachineList.Items { + if vs.DeletionTimestamp.IsZero() { + result = append(result, vs) + } + } + log.V(4).Info("Final list of VSphereMachines for VMG member generation", "count", len(result)) + + return result, nil +} + +// GenerateVMGPlacementLabels returns labels per MachineDeployment which contain zone info for placed VMs for day-2 operations. 
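+// For example, given MachineDeployments ["np-1", "np-2"] whose members have been
+// placed, the returned map could look like (zone values are illustrative):
+//
+//	zone.cluster.x-k8s.io/np-1: zone-a
+//	zone.cluster.x-k8s.io/np-2: zone-b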
+func GenerateVMGPlacementLabels(ctx context.Context, vmg *vmoprv1.VirtualMachineGroup, machineDeployments []string) (map[string]string, error) {
+	log := ctrl.LoggerFrom(ctx)
+	labels := make(map[string]string)
+
+	// Inspect each member reported in the group's status.
+	for _, member := range vmg.Status.Members {
+		// Return an error if the member is not a VirtualMachine.
+		if member.Kind != "VirtualMachine" {
+			return nil, errors.Errorf("VirtualMachineGroup %s/%s contains a non-VirtualMachine member of kind %s", vmg.Namespace, vmg.Name, member.Kind)
+		}
+
+		// Once a member VM is placed, VirtualMachineGroupMemberConditionPlacementReady is set to true.
+		if !conditions.IsTrue(&member, vmoprv1.VirtualMachineGroupMemberConditionPlacementReady) {
+			continue
+		}
+
+		// Check if this VM belongs to any of our target MachineDeployments.
+		// Use the machine deployment name as the label key.
+		for _, md := range machineDeployments {
+			// Skip if we already found placement for this MachineDeployment.
+			labelKey := fmt.Sprintf("zone.cluster.x-k8s.io/%s", md)
+			if _, found := labels[labelKey]; found {
+				log.Info(fmt.Sprintf("Skipping Machine Deployment %s, placement already found", md))
+				continue
+			}
+
+			// Check if the VM belongs to a MachineDeployment by name (e.g. cluster-1-np-1-vm-xxx contains np-1).
+			// TODO: Establish membership via the machine deployment name label
+			if strings.Contains(member.Name, md) {
+				// Get the VM placement information from the member status.
+				// VMs that have not yet undergone placement do not have Placement info set; skip them.
+				if member.Placement == nil {
+					log.V(4).Info("VM in VMG has no placement info. Placement is nil", "VM", member.Name, "VMG", vmg.Name, "Namespace", vmg.Namespace)
+					continue
+				}
+
+				// Skip to the next member if Zone is empty.
+				zone := member.Placement.Zone
+				if zone == "" {
+					log.V(4).Info("VM in VMG has no placement info. Zone is empty", "VM", member.Name, "VMG", vmg.Name, "Namespace", vmg.Namespace)
+					continue
+				}
+
+				log.Info(fmt.Sprintf("VM %s in VMG %s/%s has been placed in zone %s", member.Name, vmg.Namespace, vmg.Name, zone))
+				labels[labelKey] = zone
+			}
+		}
+	}
+
+	return labels, nil
+}
+
+// TODO: de-dup this logic with vmopmachine.go
+// GenerateVirtualMachineName generates the name of a VirtualMachine based on the naming strategy.
+func GenerateVirtualMachineName(machineName string, namingStrategy *vmwarev1.VirtualMachineNamingStrategy) (string, error) {
+	// By default the name of the VirtualMachine should be equal to the Machine name (this is the same as "{{ .machine.name }}")
+	if namingStrategy == nil || namingStrategy.Template == nil {
+		// Note: No need to trim to max length in this case as valid Machine names will also be valid VirtualMachine names.
+ return machineName, nil + } + + name, err := infrautilv1.GenerateMachineNameFromTemplate(machineName, namingStrategy.Template) + if err != nil { + return "", errors.Wrap(err, "failed to generate name for VirtualMachine") + } + + return name, nil +} diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index 39d40ebea2..7ea7b164ae 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -427,6 +427,7 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste failureDomains := clusterv1beta1.FailureDomains{} for _, zone := range deploymentZoneList.Items { if zone.Spec.Server != clusterCtx.VSphereCluster.Spec.Server { + continue } diff --git a/feature/feature.go b/feature/feature.go index a233d351c7..1799aaeb68 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -44,6 +44,11 @@ const ( // alpha: v1.11 NamespaceScopedZones featuregate.Feature = "NamespaceScopedZones" + // NodeAutoPlacement is a feature gate for the NodeAutoPlacement functionality for supervisor. + // + // alpha: v1.15 + NodeAutoPlacement featuregate.Feature = "NodeAutoPlacement" + // PriorityQueue is a feature gate that controls if the controller uses the controller-runtime PriorityQueue // instead of the default queue implementation. // @@ -61,6 +66,7 @@ var defaultCAPVFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ // Every feature should be initiated here: NodeAntiAffinity: {Default: false, PreRelease: featuregate.Alpha}, NamespaceScopedZones: {Default: false, PreRelease: featuregate.Alpha}, + NodeAutoPlacement: {Default: false, PreRelease: featuregate.Alpha}, PriorityQueue: {Default: false, PreRelease: featuregate.Alpha}, MultiNetworks: {Default: false, PreRelease: featuregate.Alpha}, } diff --git a/go.mod b/go.mod index d8a4b971c4..a31af1cac0 100644 --- a/go.mod +++ b/go.mod @@ -4,13 +4,13 @@ go 1.24.0 replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.11.1 -replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v0.0.0-20240404200847-de75746a9505 +replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v1.9.1-0.20250908141901-a9e1dfbc0045 require ( github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d // The version of vm-operator should be kept in sync with the manifests at: config/deployments/integration-tests - github.com/vmware-tanzu/vm-operator/api v1.8.6 + github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7 github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 github.com/vmware/govmomi v0.51.0 ) diff --git a/go.sum b/go.sum index 39b1d7d876..f5db01ff34 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,8 @@ github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d h1:c github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d/go.mod h1:JbFOh22iDsT5BowJe0GgpMI5e2/S7cWaJlv9LdURVQM= github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d h1:z9lrzKVtNlujduv9BilzPxuge/LE2F0N1ms3TP4JZvw= github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d/go.mod h1:Q4JzNkNMvjo7pXtlB5/R3oME4Nhah7fAObWgghVmtxk= -github.com/vmware-tanzu/vm-operator/api v1.8.6 
h1:NIndORjcnSmIlQsCMIewpIwg/ocRVDh2lYjOroTVLrU= -github.com/vmware-tanzu/vm-operator/api v1.8.6/go.mod h1:HHA2SNI9B5Yqtyp5t+Gt9WTWBi/fIkM6+MukDDSf11A= +github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7 h1:VlnaiDKI1H1buwBOgL8R3HRB3EQNN96xMdz25vE5FUo= +github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7/go.mod h1:nWTPpxfe4gHuuYuFcrs86+NMxfkqPk3a3IlvI8TCWak= github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 h1:y4wXx1FUFqqSgJ/xUOEM1DLS2Uu0KaeLADWpzpioGTU= github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505/go.mod h1:5rqRJ9zGR+KnKbkGx373WgN8xJpvAj99kHnfoDYRO5I= github.com/vmware/govmomi v0.51.0 h1:n3RLS9aw/irTOKbiIyJzAb6rOat4YOVv/uDoRsNTSQI= diff --git a/main.go b/main.go index b92f48d25a..6d6ea9e011 100644 --- a/main.go +++ b/main.go @@ -94,6 +94,7 @@ var ( vSphereVMConcurrency int vSphereClusterIdentityConcurrency int vSphereDeploymentZoneConcurrency int + virtualMachineGroupConcurrency int skipCRDMigrationPhases []string managerOptions = capiflags.ManagerOptions{} @@ -141,6 +142,9 @@ func InitFlags(fs *pflag.FlagSet) { fs.IntVar(&vSphereDeploymentZoneConcurrency, "vspheredeploymentzone-concurrency", 10, "Number of vSphere deployment zones to process simultaneously") + fs.IntVar(&virtualMachineGroupConcurrency, "virtualmachinegroup-concurrency", 10, + "Number of virtual machine group to process simultaneously") + fs.StringVar( &managerOpts.PodName, "pod-name", @@ -482,6 +486,12 @@ func setupSupervisorControllers(ctx context.Context, controllerCtx *capvcontext. return err } + if feature.Gates.Enabled(feature.NamespaceScopedZones) && feature.Gates.Enabled(feature.NodeAutoPlacement) { + if err := vmware.AddVirtualMachineGroupControllerToManager(ctx, controllerCtx, mgr, concurrency(virtualMachineGroupConcurrency)); err != nil { + return err + } + } + return vmware.AddServiceDiscoveryControllerToManager(ctx, controllerCtx, mgr, clusterCache, concurrency(serviceDiscoveryConcurrency)) } diff --git a/packaging/go.sum b/packaging/go.sum index d2e2f4a52b..40d9ac5465 100644 --- a/packaging/go.sum +++ b/packaging/go.sum @@ -134,8 +134,8 @@ github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d h1:c github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d/go.mod h1:JbFOh22iDsT5BowJe0GgpMI5e2/S7cWaJlv9LdURVQM= github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d h1:z9lrzKVtNlujduv9BilzPxuge/LE2F0N1ms3TP4JZvw= github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d/go.mod h1:Q4JzNkNMvjo7pXtlB5/R3oME4Nhah7fAObWgghVmtxk= -github.com/vmware-tanzu/vm-operator/api v1.8.6 h1:NIndORjcnSmIlQsCMIewpIwg/ocRVDh2lYjOroTVLrU= -github.com/vmware-tanzu/vm-operator/api v1.8.6/go.mod h1:HHA2SNI9B5Yqtyp5t+Gt9WTWBi/fIkM6+MukDDSf11A= +github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045 h1:zME8crazIAWVJGboJpSLl+qcRYQ8yA6hPQojz28gY5M= +github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045/go.mod h1:hkc/QZCSHcosWWMPS6VWWR12WenZcNE3BaTJ/8A8sNE= github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 h1:y4wXx1FUFqqSgJ/xUOEM1DLS2Uu0KaeLADWpzpioGTU= github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505/go.mod h1:5rqRJ9zGR+KnKbkGx373WgN8xJpvAj99kHnfoDYRO5I= github.com/vmware/govmomi v0.51.0 h1:n3RLS9aw/irTOKbiIyJzAb6rOat4YOVv/uDoRsNTSQI= diff --git a/pkg/services/network/netop_provider.go 
b/pkg/services/network/netop_provider.go index fa1c1860fa..e13de3bd4d 100644 --- a/pkg/services/network/netop_provider.go +++ b/pkg/services/network/netop_provider.go @@ -136,7 +136,7 @@ func (np *netopNetworkProvider) ConfigureVirtualMachine(ctx context.Context, clu // Set the VM primary interface vm.Spec.Network.Interfaces = append(vm.Spec.Network.Interfaces, vmoprv1.VirtualMachineNetworkInterfaceSpec{ Name: PrimaryInterfaceName, - Network: vmoprv1common.PartialObjectRef{ + Network: &vmoprv1common.PartialObjectRef{ TypeMeta: metav1.TypeMeta{ Kind: NetworkGVKNetOperator.Kind, APIVersion: NetworkGVKNetOperator.GroupVersion().String(), diff --git a/pkg/services/network/nsxt_provider.go b/pkg/services/network/nsxt_provider.go index 96a0450bb7..90885cb568 100644 --- a/pkg/services/network/nsxt_provider.go +++ b/pkg/services/network/nsxt_provider.go @@ -223,7 +223,7 @@ func (np *nsxtNetworkProvider) ConfigureVirtualMachine(_ context.Context, cluste } vm.Spec.Network.Interfaces = append(vm.Spec.Network.Interfaces, vmoprv1.VirtualMachineNetworkInterfaceSpec{ Name: fmt.Sprintf("eth%d", len(vm.Spec.Network.Interfaces)), - Network: vmoprv1common.PartialObjectRef{ + Network: &vmoprv1common.PartialObjectRef{ TypeMeta: metav1.TypeMeta{ Kind: NetworkGVKNSXT.Kind, APIVersion: NetworkGVKNSXT.GroupVersion().String(), diff --git a/pkg/services/network/nsxt_vpc_provider.go b/pkg/services/network/nsxt_vpc_provider.go index 0c3533a37c..9b2c8defa0 100644 --- a/pkg/services/network/nsxt_vpc_provider.go +++ b/pkg/services/network/nsxt_vpc_provider.go @@ -224,7 +224,7 @@ func (vp *nsxtVPCNetworkProvider) ConfigureVirtualMachine(_ context.Context, clu networkName := clusterCtx.VSphereCluster.Name vm.Spec.Network.Interfaces = append(vm.Spec.Network.Interfaces, vmoprv1.VirtualMachineNetworkInterfaceSpec{ Name: PrimaryInterfaceName, - Network: vmoprv1common.PartialObjectRef{ + Network: &vmoprv1common.PartialObjectRef{ TypeMeta: metav1.TypeMeta{ Kind: NetworkGVKNSXTVPCSubnetSet.Kind, APIVersion: NetworkGVKNSXTVPCSubnetSet.GroupVersion().String(), @@ -243,7 +243,7 @@ func (vp *nsxtVPCNetworkProvider) ConfigureVirtualMachine(_ context.Context, clu } vmInterface := vmoprv1.VirtualMachineNetworkInterfaceSpec{ Name: PrimaryInterfaceName, - Network: vmoprv1common.PartialObjectRef{ + Network: &vmoprv1common.PartialObjectRef{ TypeMeta: metav1.TypeMeta{ Kind: primary.Network.Kind, APIVersion: primary.Network.APIVersion, @@ -281,7 +281,7 @@ func setVMSecondaryInterfaces(machine *vmwarev1.VSphereMachine, vm *vmoprv1.Virt } vmInterface := vmoprv1.VirtualMachineNetworkInterfaceSpec{ Name: secondaryInterface.Name, - Network: vmoprv1common.PartialObjectRef{ + Network: &vmoprv1common.PartialObjectRef{ TypeMeta: metav1.TypeMeta{ Kind: secondaryInterface.Network.Kind, APIVersion: secondaryInterface.Network.APIVersion, diff --git a/pkg/services/vmoperator/constants.go b/pkg/services/vmoperator/constants.go index 011082a06c..37ca556fc6 100644 --- a/pkg/services/vmoperator/constants.go +++ b/pkg/services/vmoperator/constants.go @@ -18,8 +18,6 @@ limitations under the License. package vmoperator const ( - kubeTopologyZoneLabelKey = "topology.kubernetes.io/zone" - // ControlPlaneVMClusterModuleGroupName is the name used for the control plane Cluster Module. ControlPlaneVMClusterModuleGroupName = "control-plane-group" // ClusterModuleNameAnnotationKey is key for the Cluster Module annotation. 
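The constants.go hunk above removes the local kubeTopologyZoneLabelKey constant; the rest of this change switches to the upstream well-known label constant from k8s.io/api. A minimal sketch of the equivalence (illustrative, using the conventional corev1 import alias):

	import corev1 "k8s.io/api/core/v1"

	// corev1.LabelTopologyZone is the well-known "topology.kubernetes.io/zone" key,
	// i.e. the exact string the removed constant hard-coded, so behavior is unchanged.
	var zoneLabelKey = corev1.LabelTopologyZone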
diff --git a/pkg/services/vmoperator/control_plane_endpoint.go b/pkg/services/vmoperator/control_plane_endpoint.go index e0070188e3..3b500711d7 100644 --- a/pkg/services/vmoperator/control_plane_endpoint.go +++ b/pkg/services/vmoperator/control_plane_endpoint.go @@ -189,7 +189,7 @@ func newVirtualMachineService(ctx *vmware.ClusterContext) *vmoprv1.VirtualMachin Namespace: ctx.Cluster.Namespace, }, TypeMeta: metav1.TypeMeta{ - APIVersion: vmoprv1.SchemeGroupVersion.String(), + APIVersion: vmoprv1.GroupVersion.String(), Kind: "VirtualMachineService", }, } diff --git a/pkg/services/vmoperator/vmopmachine.go b/pkg/services/vmoperator/vmopmachine.go index 840b166406..18e4aae3e7 100644 --- a/pkg/services/vmoperator/vmopmachine.go +++ b/pkg/services/vmoperator/vmopmachine.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "fmt" + "sort" "github.com/pkg/errors" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" @@ -41,6 +42,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/feature" capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" @@ -163,6 +165,13 @@ func (v *VmopMachineService) SyncFailureReason(_ context.Context, machineCtx cap return supervisorMachineCtx.VSphereMachine.Status.FailureReason != nil || supervisorMachineCtx.VSphereMachine.Status.FailureMessage != nil, nil } +// affinityInfo is an internal to store VM affinity information. +type affinityInfo struct { + affinitySpec *vmoprv1.AffinitySpec + vmGroupName string + failureDomain *string +} + // ReconcileNormal reconciles create and update events for VM Operator VMs. func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) { log := ctrl.LoggerFrom(ctx) @@ -171,10 +180,6 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap return false, errors.New("received unexpected SupervisorMachineContext type") } - if supervisorMachineCtx.Machine.Spec.FailureDomain != "" { - supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain) - } - // If debug logging is enabled, report the number of vms in the cluster before and after the reconcile if log.V(5).Enabled() { vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx) @@ -188,6 +193,106 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap // Set the VM state. Will get reset throughout the reconcile supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending + var affInfo affinityInfo + if feature.Gates.Enabled(feature.NodeAutoPlacement) && + !infrautilv1.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) { + vmOperatorVMGroup := &vmoprv1.VirtualMachineGroup{} + key := client.ObjectKey{ + Namespace: supervisorMachineCtx.Cluster.Namespace, + Name: supervisorMachineCtx.Cluster.Name, + } + err := v.Client.Get(ctx, key, vmOperatorVMGroup) + if err != nil { + if !apierrors.IsNotFound(err) { + return false, err + } + if apierrors.IsNotFound(err) { + log.V(4).Info("VirtualMachineGroup not found, requeueing") + return true, nil + } + } + + // Proceed only if the machine is a member of the VirtualMachineGroup. 
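+		// Membership is derived from the group's first boot-order group, which the
+		// VirtualMachineGroup controller populates with one entry per worker
+		// VSphereMachine of the cluster; until this machine appears there, we requeue.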
+ if !v.checkVirtualMachineGroupMembership(vmOperatorVMGroup, supervisorMachineCtx) { + log.V(4).Info("Waiting for VirtualMachineGroup membership, requeueing") + return true, nil + } + + affInfo = affinityInfo{ + vmGroupName: vmOperatorVMGroup.Name, + } + + // Reuse the label from the node pool -> zone mapping. + nodePool := supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel] + if zone, ok := vmOperatorVMGroup.Labels[fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)]; ok && zone != "" { + affInfo.failureDomain = ptr.To(zone) + } + + // Fetch machine deployments without explicit failureDomain specified + // to use when setting the anti-affinity rules. + machineDeployments := &clusterv1.MachineDeploymentList{} + if err := v.Client.List(ctx, machineDeployments, + client.InNamespace(supervisorMachineCtx.Cluster.Namespace), + client.MatchingLabels{clusterv1.ClusterNameLabel: supervisorMachineCtx.Cluster.Name}); err != nil { + return false, err + } + mdNames := []string{} + for _, machineDeployment := range machineDeployments.Items { + if machineDeployment.Spec.Template.Spec.FailureDomain == "" && machineDeployment.Name != nodePool { + mdNames = append(mdNames, machineDeployment.Name) + } + } + sort.Strings(mdNames) + + affInfo.affinitySpec = &vmoprv1.AffinitySpec{ + VMAffinity: &vmoprv1.VMAffinitySpec{ + RequiredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.MachineDeploymentNameLabel: nodePool, + }, + }, + TopologyKey: corev1.LabelTopologyZone, + }, + }, + }, + VMAntiAffinity: &vmoprv1.VMAntiAffinitySpec{ + PreferredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.MachineDeploymentNameLabel: nodePool, + }, + }, + TopologyKey: corev1.LabelHostname, + }, + }, + }, + } + if len(mdNames) > 0 { + affInfo.affinitySpec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution = append( + affInfo.affinitySpec.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution, + vmoprv1.VMAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: clusterv1.MachineDeploymentNameLabel, + Operator: metav1.LabelSelectorOpIn, + Values: mdNames, + }, + }, + }, + TopologyKey: corev1.LabelTopologyZone, + }, + ) + } + } + + if supervisorMachineCtx.Machine.Spec.FailureDomain != "" { + supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain) + } + // Check for the presence of an existing object vmOperatorVM := &vmoprv1.VirtualMachine{} key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy) @@ -208,7 +313,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap } // Reconcile the VM Operator VirtualMachine. 
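+	// affInfo carries the VirtualMachineGroup name, any zone already recorded for this
+	// machine's MachineDeployment, and the affinity/anti-affinity terms built above;
+	// reconcileVMOperatorVM applies them to the VirtualMachine spec and labels.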
- if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil { + if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM, &affInfo); err != nil { v1beta1conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, "failed to create or update VirtualMachine: %v", err) v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ @@ -378,7 +483,7 @@ func (v *VmopMachineService) GetHostInfo(ctx context.Context, machineCtx capvcon return vmOperatorVM.Status.Host, nil } -func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error { +func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine, affinityInfo *affinityInfo) error { // All Machine resources should define the version of Kubernetes to use. if supervisorMachineCtx.Machine.Spec.Version == "" { return errors.Errorf( @@ -472,7 +577,7 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis } // Assign the VM's labels. - vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels) + vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels, affinityInfo) addResourcePolicyAnnotations(supervisorMachineCtx, vmOperatorVM) @@ -494,6 +599,15 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis vmOperatorVM = typedModified } + if affinityInfo != nil && affinityInfo.affinitySpec != nil { + if vmOperatorVM.Spec.Affinity == nil { + vmOperatorVM.Spec.Affinity = affinityInfo.affinitySpec + } + if vmOperatorVM.Spec.GroupName == "" { + vmOperatorVM.Spec.GroupName = affinityInfo.vmGroupName + } + } + // Make sure the VSphereMachine owns the VM Operator VirtualMachine. if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, v.Client.Scheme()); err != nil { return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s", @@ -735,7 +849,7 @@ func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCt if zone := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; zonal && zone != nil { topology := []map[string]string{ - {kubeTopologyZoneLabelKey: *zone}, + {corev1.LabelTopologyZone: *zone}, } b, err := json.Marshal(topology) if err != nil { @@ -777,7 +891,7 @@ func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCt } // getVMLabels returns the labels applied to a VirtualMachine. -func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string) map[string]string { +func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string, affinityInfo *affinityInfo) map[string]string { if vmLabels == nil { vmLabels = map[string]string{} } @@ -791,7 +905,11 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels // Get the labels that determine the VM's placement inside of a stretched // cluster. 
- topologyLabels := getTopologyLabels(supervisorMachineCtx) + var failureDomain *string + if affinityInfo != nil && affinityInfo.failureDomain != nil { + failureDomain = affinityInfo.failureDomain + } + topologyLabels := getTopologyLabels(supervisorMachineCtx, failureDomain) for k, v := range topologyLabels { vmLabels[k] = v } @@ -800,6 +918,11 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels // resources associated with the target cluster. vmLabels[clusterv1.ClusterNameLabel] = supervisorMachineCtx.GetClusterContext().Cluster.Name + // Ensure the VM has the machine deployment name label + if !infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine) { + vmLabels[clusterv1.MachineDeploymentNameLabel] = supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel] + } + return vmLabels } @@ -809,10 +932,16 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels // // and thus the code is optimized as such. However, in the future // this function may return a more diverse topology. -func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) map[string]string { +func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, failureDomain *string) map[string]string { + // TODO: Make it so that we always set the zone label, might require enquiring the zones present (when unset) if fd := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; fd != nil && *fd != "" { return map[string]string{ - kubeTopologyZoneLabelKey: *fd, + corev1.LabelTopologyZone: *fd, + } + } + if failureDomain != nil && *failureDomain != "" { + return map[string]string{ + corev1.LabelTopologyZone: *failureDomain, } } return nil @@ -823,3 +952,16 @@ func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) ma func getMachineDeploymentNameForCluster(cluster *clusterv1.Cluster) string { return fmt.Sprintf("%s-workers-0", cluster.Name) } + +// checkVirtualMachineGroupMembership checks if the machine is in the first boot order group +// and performs logic if a match is found. 
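+// Only the first boot-order group is inspected because the VirtualMachineGroup
+// controller puts all worker VMs of a cluster into a single boot-order group.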
+func (v *VmopMachineService) checkVirtualMachineGroupMembership(vmOperatorVMGroup *vmoprv1.VirtualMachineGroup, supervisorMachineCtx *vmware.SupervisorMachineContext) bool { + if len(vmOperatorVMGroup.Spec.BootOrder) > 0 { + for _, member := range vmOperatorVMGroup.Spec.BootOrder[0].Members { + if member.Name == supervisorMachineCtx.Machine.Name { + return true + } + } + } + return false +} diff --git a/pkg/services/vmoperator/vmopmachine_test.go b/pkg/services/vmoperator/vmopmachine_test.go index aa91556341..8d4ca34510 100644 --- a/pkg/services/vmoperator/vmopmachine_test.go +++ b/pkg/services/vmoperator/vmopmachine_test.go @@ -18,6 +18,8 @@ package vmoperator import ( "context" + "fmt" + "slices" "testing" "time" @@ -32,6 +34,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -40,6 +43,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/feature" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/network" @@ -65,6 +69,49 @@ func updateReconciledVMStatus(ctx context.Context, vmService VmopMachineService, Expect(err).ShouldNot(HaveOccurred()) } +func verifyVMAffinityRules(vmopVM *vmoprv1.VirtualMachine, machineDeploymentName string) { + Expect(vmopVM.Spec.Affinity.VMAffinity).ShouldNot(BeNil()) + Expect(vmopVM.Spec.Affinity.VMAffinity.RequiredDuringSchedulingPreferredDuringExecution).To(HaveLen(1)) + + vmAffinityTerm := vmopVM.Spec.Affinity.VMAffinity.RequiredDuringSchedulingPreferredDuringExecution[0] + Expect(vmAffinityTerm.LabelSelector.MatchLabels).To(HaveKeyWithValue(clusterv1.MachineDeploymentNameLabel, machineDeploymentName)) + Expect(vmAffinityTerm.TopologyKey).To(Equal(corev1.LabelTopologyZone)) +} + +func verifyVMAntiAffinityRules(vmopVM *vmoprv1.VirtualMachine, machineDeploymentName string, extraMDs ...string) { + Expect(vmopVM.Spec.Affinity.VMAntiAffinity).ShouldNot(BeNil()) + + expectedNumAntiAffinityTerms := 1 + if len(extraMDs) > 0 { + expectedNumAntiAffinityTerms = 2 + } + + antiAffinityTerms := vmopVM.Spec.Affinity.VMAntiAffinity.PreferredDuringSchedulingPreferredDuringExecution + Expect(antiAffinityTerms).To(HaveLen(expectedNumAntiAffinityTerms)) + + // First anti-affinity constraint - same machine deployment, different hosts + antiAffinityTerm1 := antiAffinityTerms[0] + Expect(antiAffinityTerm1.LabelSelector.MatchLabels).To(HaveKeyWithValue(clusterv1.MachineDeploymentNameLabel, machineDeploymentName)) + Expect(antiAffinityTerm1.TopologyKey).To(Equal(corev1.LabelHostname)) + + // Second anti-affinity term - different machine deployments + if len(extraMDs) > 0 { + isSortedAlphabetically := func(actual []string) (bool, error) { + return slices.IsSorted(actual), nil + } + antiAffinityTerm2 := antiAffinityTerms[1] + Expect(antiAffinityTerm2.LabelSelector.MatchExpressions).To(HaveLen(1)) + Expect(antiAffinityTerm2.LabelSelector.MatchExpressions[0].Key).To(Equal(clusterv1.MachineDeploymentNameLabel)) + Expect(antiAffinityTerm2.LabelSelector.MatchExpressions[0].Operator).To(Equal(metav1.LabelSelectorOpIn)) + + 
Expect(antiAffinityTerm2.LabelSelector.MatchExpressions[0].Values).To(HaveLen(len(extraMDs))) + Expect(antiAffinityTerm2.LabelSelector.MatchExpressions[0].Values).To( + WithTransform(isSortedAlphabetically, BeTrue()), + "Expected extra machine deployments to be sorted alphabetically", + ) + } +} + const ( machineName = "test-machine" clusterName = "test-cluster" @@ -81,6 +128,32 @@ const ( clusterNameLabel = clusterv1.ClusterNameLabel ) +func createMachineDeployment(name, namespace, clusterName, failureDomain string) *clusterv1.MachineDeployment { + md := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterName, + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + // FailureDomain will be set conditionally below + }, + }, + }, + } + + // Only set failure domain if it's provided and not empty + if failureDomain != "" { + md.Spec.Template.Spec.FailureDomain = failureDomain + } + + return md +} + var _ = Describe("VirtualMachine tests", func() { var ( @@ -655,6 +728,304 @@ var _ = Describe("VirtualMachine tests", func() { Expect(vmopVM.Spec.Volumes[i]).To(BeEquivalentTo(vmVolume)) } }) + + Context("With auto placement feature gate enabled", func() { + BeforeEach(func() { + t := GinkgoT() + featuregatetesting.SetFeatureGateDuringTest(t, feature.Gates, feature.NodeAutoPlacement, true) + }) + + // control plane machine is the machine with the control plane label set + Specify("Reconcile valid control plane Machine", func() { + // Control plane machines should not have auto placement logic applied + expectReconcileError = false + expectVMOpVM = true + expectedImageName = imageName + expectedRequeue = true + + // Provide valid bootstrap data + By("bootstrap data is created") + secretName := machine.GetName() + "-data" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: machine.GetNamespace(), + }, + Data: map[string][]byte{ + "value": []byte(bootstrapData), + }, + } + Expect(vmService.Client.Create(ctx, secret)).To(Succeed()) + + machine.Spec.Bootstrap.DataSecretName = &secretName + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ + Type: infrav1.VMProvisionedCondition, + Status: corev1.ConditionFalse, + Reason: vmwarev1.VMProvisionStartedReason, + Message: "", + }) + + By("VirtualMachine is created") + requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext) + verifyOutput(supervisorMachineContext) + + By("Verify that control plane machine does not have affinity spec set") + vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext) + Expect(vmopVM).ShouldNot(BeNil()) + Expect(vmopVM.Spec.Affinity).To(BeNil()) + + By("Verify that control plane machine has correct labels") + Expect(vmopVM.Labels[nodeSelectorKey]).To(Equal(roleControlPlane)) + + By("Verify that machine-deployment label is not set for control plane") + Expect(vmopVM.Labels).ToNot(HaveKey(clusterv1.MachineDeploymentNameLabel)) + }) + + Context("For worker machine", func() { + var ( + machineDeploymentName string + vmGroup *vmoprv1.VirtualMachineGroup + ) + + BeforeEach(func() { + // Create a worker machine (no control plane label) + machineDeploymentName = "test-md" + workerMachineName := "test-worker-machine" + machine = util.CreateMachine(workerMachineName, clusterName, k8sVersion, false) + machine.Labels[clusterv1.MachineDeploymentNameLabel] = 
machineDeploymentName + + vsphereMachine = util.CreateVSphereMachine(workerMachineName, clusterName, className, imageName, storageClass, false) + + clusterContext, controllerManagerContext := util.CreateClusterContext(cluster, vsphereCluster) + supervisorMachineContext = util.CreateMachineContext(clusterContext, machine, vsphereMachine) + supervisorMachineContext.ControllerManagerContext = controllerManagerContext + + // Create a VirtualMachineGroup for the cluster + vmGroup = &vmoprv1.VirtualMachineGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: corev1.NamespaceDefault, + }, + Spec: vmoprv1.VirtualMachineGroupSpec{ + BootOrder: []vmoprv1.VirtualMachineGroupBootOrderGroup{ + { + Members: []vmoprv1.GroupMember{ + { + Name: workerMachineName, + Kind: "VirtualMachine", + }, + }, + }, + }, + }, + } + Expect(vmService.Client.Create(ctx, vmGroup)).To(Succeed()) + + // Create a MachineDeployment for the worker + machineDeployment := createMachineDeployment(machineDeploymentName, corev1.NamespaceDefault, clusterName, "") + Expect(vmService.Client.Create(ctx, machineDeployment)).To(Succeed()) + }) + + Specify("Reconcile valid Machine with no failure domain set", func() { + expectReconcileError = false + expectVMOpVM = true + expectedImageName = imageName + expectedRequeue = true + + // Provide valid bootstrap data + By("bootstrap data is created") + secretName := machine.GetName() + "-data" + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: machine.GetNamespace(), + }, + Data: map[string][]byte{ + "value": []byte(bootstrapData), + }, + } + Expect(vmService.Client.Create(ctx, secret)).To(Succeed()) + + machine.Spec.Bootstrap.DataSecretName = &secretName + + By("VirtualMachine is created") + requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext) + Expect(err).ShouldNot(HaveOccurred()) + Expect(requeue).Should(BeTrue()) + + By("Verify that worker machine has affinity spec set") + vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext) + Expect(vmopVM).ShouldNot(BeNil()) + Expect(vmopVM.Spec.Affinity).ShouldNot(BeNil()) + + By("Verify VM affinity rules are set correctly") + verifyVMAffinityRules(vmopVM, machineDeploymentName) + + By("Verify VM anti-affinity rules are set correctly") + verifyVMAntiAffinityRules(vmopVM, machineDeploymentName) + + By("Verify that worker machine has machine deploymet label set") + Expect(vmopVM.Labels[clusterv1.MachineDeploymentNameLabel]).To(Equal(machineDeploymentName)) + + By("Verify that GroupName is set from VirtualMachineGroup") + Expect(vmopVM.Spec.GroupName).To(Equal(clusterName)) + }) + + Specify("Reconcile machine with failure domain set", func() { + expectReconcileError = false + expectVMOpVM = true + expectedImageName = imageName + expectedRequeue = true + + failureDomainName := "zone-1" + machineDeploymentName := "test-md-with-fd" + workerMachineName := "test-worker-machine-with-fd" + fdClusterName := "test-cluster-fd" + + // Create a separate cluster for this test to avoid VirtualMachineGroup conflicts + fdCluster := util.CreateCluster(fdClusterName) + fdVSphereCluster := util.CreateVSphereCluster(fdClusterName) + fdVSphereCluster.Status.ResourcePolicyName = resourcePolicyName + + // Create a worker machine with failure domain + machine = util.CreateMachine(workerMachineName, fdClusterName, k8sVersion, false) + machine.Labels[clusterv1.MachineDeploymentNameLabel] = machineDeploymentName + machine.Spec.FailureDomain = failureDomainName + + vsphereMachine = 
+
+					fdClusterContext, fdControllerManagerContext := util.CreateClusterContext(fdCluster, fdVSphereCluster)
+					supervisorMachineContext = util.CreateMachineContext(fdClusterContext, machine, vsphereMachine)
+					supervisorMachineContext.ControllerManagerContext = fdControllerManagerContext
+
+					// Create a VirtualMachineGroup for the cluster with zone label
+					vmGroup := &vmoprv1.VirtualMachineGroup{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: fdClusterName,
+							Namespace: corev1.NamespaceDefault,
+							Labels: map[string]string{
+								fmt.Sprintf("zone.cluster.x-k8s.io/%s", machineDeploymentName): failureDomainName,
+							},
+						},
+						Spec: vmoprv1.VirtualMachineGroupSpec{
+							BootOrder: []vmoprv1.VirtualMachineGroupBootOrderGroup{
+								{
+									Members: []vmoprv1.GroupMember{
+										{
+											Name: workerMachineName,
+											Kind: "VirtualMachine",
+										},
+									},
+								},
+							},
+						},
+					}
+					Expect(vmService.Client.Create(ctx, vmGroup)).To(Succeed())
+
+					// Create a MachineDeployment for the worker with no explicit failure domain
+					machineDeployment := createMachineDeployment(machineDeploymentName, corev1.NamespaceDefault, fdClusterName, "")
+					Expect(vmService.Client.Create(ctx, machineDeployment)).To(Succeed())
+
+					// Provide valid bootstrap data
+					By("bootstrap data is created")
+					secretName := machine.GetName() + "-data"
+					secret := &corev1.Secret{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: secretName,
+							Namespace: machine.GetNamespace(),
+						},
+						Data: map[string][]byte{
+							"value": []byte(bootstrapData),
+						},
+					}
+					Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
+
+					machine.Spec.Bootstrap.DataSecretName = &secretName
+
+					By("VirtualMachine is created with auto placement and failure domain")
+					requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+					Expect(err).ShouldNot(HaveOccurred())
+					Expect(requeue).Should(BeTrue())
+
+					By("Verify that worker machine has affinity spec set")
+					vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
+					Expect(vmopVM).ShouldNot(BeNil())
+					Expect(vmopVM.Spec.Affinity).ShouldNot(BeNil())
+
+					By("Verify VM affinity rules are set correctly")
+					verifyVMAffinityRules(vmopVM, machineDeploymentName)
+
+					By("Verify VM anti-affinity rules are set correctly")
+					verifyVMAntiAffinityRules(vmopVM, machineDeploymentName)
+
+					By("Verify that worker machine has correct labels including topology")
+					Expect(vmopVM.Labels[clusterv1.MachineDeploymentNameLabel]).To(Equal(machineDeploymentName))
+					Expect(vmopVM.Labels[corev1.LabelTopologyZone]).To(Equal(failureDomainName))
+
+					By("Verify that GroupName is set from VirtualMachineGroup")
+					Expect(vmopVM.Spec.GroupName).To(Equal(fdClusterName))
+				})
+
+				Context("For multiple machine deployments", func() {
+					const (
+						otherMdName1 = "other-md-1"
+						otherMdName2 = "other-md-2"
+					)
+
+					BeforeEach(func() {
+						otherMd1 := createMachineDeployment(otherMdName1, corev1.NamespaceDefault, clusterName, "")
+						Expect(vmService.Client.Create(ctx, otherMd1)).To(Succeed())
+
+						otherMd2 := createMachineDeployment(otherMdName2, corev1.NamespaceDefault, clusterName, "")
+						Expect(vmService.Client.Create(ctx, otherMd2)).To(Succeed())
+
+						// Create a MachineDeployment with failure domain
+						otherMdWithFd := createMachineDeployment("other-md-with-fd", corev1.NamespaceDefault, clusterName, "zone-1")
+						Expect(vmService.Client.Create(ctx, otherMdWithFd)).To(Succeed())
+					})
+
+					Specify("Reconcile valid machine with additional anti-affinity term added", func() {
+						expectReconcileError = false
+						expectVMOpVM = true
+						expectedImageName = imageName
+						expectedRequeue = true
+
+						// Provide valid bootstrap data
+						By("bootstrap data is created")
+						secretName := machine.GetName() + "-data"
+						secret := &corev1.Secret{
+							ObjectMeta: metav1.ObjectMeta{
+								Name: secretName,
+								Namespace: machine.GetNamespace(),
+							},
+							Data: map[string][]byte{
+								"value": []byte(bootstrapData),
+							},
+						}
+						Expect(vmService.Client.Create(ctx, secret)).To(Succeed())
+
+						machine.Spec.Bootstrap.DataSecretName = &secretName
+
+						By("VirtualMachine is created")
+						requeue, err = vmService.ReconcileNormal(ctx, supervisorMachineContext)
+						Expect(err).ShouldNot(HaveOccurred())
+						Expect(requeue).Should(BeTrue())
+
+						By("Verify that worker machine has affinity spec set")
+						vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext)
+						Expect(vmopVM).ShouldNot(BeNil())
+						Expect(vmopVM.Spec.Affinity).ShouldNot(BeNil())
+
+						By("Verify VM affinity rules are set correctly")
+						verifyVMAffinityRules(vmopVM, machineDeploymentName)
+
+						By("Verify VM anti-affinity rules are set correctly")
+						verifyVMAntiAffinityRules(vmopVM, machineDeploymentName, otherMdName1, otherMdName2)
+					})
+				})
+			})
+
+		})
 	})

 	Context("Delete tests", func() {
diff --git a/test/go.mod b/test/go.mod
index bdf2f03505..04b01cb2fd 100644
--- a/test/go.mod
+++ b/test/go.mod
@@ -8,13 +8,14 @@ replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.11.1

 replace sigs.k8s.io/cluster-api-provider-vsphere => ../

-replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v0.0.0-20240404200847-de75746a9505
+replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v1.9.1-0.20250908141901-a9e1dfbc0045

 require (
 	github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d
 	// The version of vm-operator should be kept in sync with the manifests at: config/deployments/integration-tests
-	github.com/vmware-tanzu/vm-operator/api v1.8.6
+	github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045
 	github.com/vmware/govmomi v0.51.0
+
 )

 require (
diff --git a/test/go.sum b/test/go.sum
index 78fe7f6a5a..2959ebfbd8 100644
--- a/test/go.sum
+++ b/test/go.sum
@@ -338,8 +338,8 @@ github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d h1:c
 github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d/go.mod h1:JbFOh22iDsT5BowJe0GgpMI5e2/S7cWaJlv9LdURVQM=
 github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d h1:z9lrzKVtNlujduv9BilzPxuge/LE2F0N1ms3TP4JZvw=
 github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d/go.mod h1:Q4JzNkNMvjo7pXtlB5/R3oME4Nhah7fAObWgghVmtxk=
-github.com/vmware-tanzu/vm-operator/api v1.8.6 h1:NIndORjcnSmIlQsCMIewpIwg/ocRVDh2lYjOroTVLrU=
-github.com/vmware-tanzu/vm-operator/api v1.8.6/go.mod h1:HHA2SNI9B5Yqtyp5t+Gt9WTWBi/fIkM6+MukDDSf11A=
+github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045 h1:zME8crazIAWVJGboJpSLl+qcRYQ8yA6hPQojz28gY5M=
+github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045/go.mod h1:hkc/QZCSHcosWWMPS6VWWR12WenZcNE3BaTJ/8A8sNE=
 github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 h1:y4wXx1FUFqqSgJ/xUOEM1DLS2Uu0KaeLADWpzpioGTU=
 github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505/go.mod h1:5rqRJ9zGR+KnKbkGx373WgN8xJpvAj99kHnfoDYRO5I=
 github.com/vmware/govmomi v0.51.0 h1:n3RLS9aw/irTOKbiIyJzAb6rOat4YOVv/uDoRsNTSQI=