Commit fa7432f

Initial impl for VSphereMachine AAF changes

- Updates VMOP API dependency

Misc VMG fixes:
- Use namingStrategy to calculate VM names
- Use MachineDeployment names for VMG placement label
- Include all MachineDeployments to generate the node-pool -> zone mapping

Fixes VMG webhook validation error:
- Adds cluster-name label to the affinity/anti-affinity spec
- Re-adds zone topology key to the anti-affinity spec

Signed-off-by: Sagar Muchhal <[email protected]>

1 parent 1e7dc55 commit fa7432f
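For context on the namingStrategy change: the VM name is produced by expanding a Go template against the Machine name, with "{{ .machine.name }}" as the default (per the comment on the new GenerateVirtualMachineName helper in the diff below). A minimal sketch of that expansion, assuming standard text/template semantics; renderVMName and the template data layout are hypothetical stand-ins for infrautilv1.GenerateMachineNameFromTemplate:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderVMName is a hypothetical stand-in for the provider's template
// expansion: it exposes the Machine name to the template as
// {{ .machine.name }} and returns the rendered string.
func renderVMName(machineName, tmpl string) (string, error) {
	t, err := template.New("vm-name").Parse(tmpl)
	if err != nil {
		return "", err
	}
	data := map[string]interface{}{
		"machine": map[string]string{"name": machineName},
	}
	var buf bytes.Buffer
	if err := t.Execute(&buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	// With the default template the VM name equals the Machine name;
	// a custom template like this one appends a suffix.
	name, _ := renderVMName("cluster-1-np-1-abcde", "{{ .machine.name }}-vm")
	fmt.Println(name) // cluster-1-np-1-abcde-vm
}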

5 files changed: +205, -28 lines

controllers/vmware/virtualmachinegroup_reconciler.go

Lines changed: 43 additions & 11 deletions
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"

 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )

@@ -191,16 +192,24 @@ func (r *VirtualMachineGroupReconciler) createOrUpdateVMG(ctx context.Context, c
 		return reconcile.Result{RequeueAfter: reconciliationDelay}, nil
 	}

-	// Generate all the members of the VirtualMachineGroup.
-	members := make([]vmoprv1.GroupMember, 0, len(currentVSphereMachines))
-	// Sort the VSphereMachines by name for consistent ordering
-	sort.Slice(currentVSphereMachines, func(i, j int) bool {
-		return currentVSphereMachines[i].Name < currentVSphereMachines[j].Name
+	// Generate VM names according to the naming strategy set on the VSphereMachine.
+	vmNames := make([]string, 0, len(currentVSphereMachines))
+	for _, machine := range currentVSphereMachines {
+		name, err := GenerateVirtualMachineName(machine.Name, machine.Spec.NamingStrategy)
+		if err != nil {
+			return reconcile.Result{}, err
+		}
+		vmNames = append(vmNames, name)
+	}
+	// Sort the VM names alphabetically for consistent ordering
+	sort.Slice(vmNames, func(i, j int) bool {
+		return vmNames[i] < vmNames[j]
 	})

-	for _, vm := range currentVSphereMachines {
+	members := make([]vmoprv1.GroupMember, 0, len(currentVSphereMachines))
+	for _, name := range vmNames {
 		members = append(members, vmoprv1.GroupMember{
-			Name: vm.Name,
+			Name: name,
 			Kind: "VirtualMachine",
 		})
 	}

@@ -210,9 +219,14 @@ func (r *VirtualMachineGroupReconciler) createOrUpdateVMG(ctx context.Context, c
 		return reconcile.Result{}, errors.Errorf("Cluster Topology is not defined %s/%s",
 			cluster.Namespace, cluster.Name)
 	}
-	mds := cluster.Spec.Topology.Workers.MachineDeployments
-	mdNames := make([]string, 0, len(mds))
-	for _, md := range mds {
+	machineDeployments := &clusterv1.MachineDeploymentList{}
+	if err := r.Client.List(ctx, machineDeployments,
+		client.InNamespace(cluster.Namespace),
+		client.MatchingLabels{clusterv1.ClusterNameLabel: cluster.Name}); err != nil {
+		return reconcile.Result{}, err
+	}
+	mdNames := []string{}
+	for _, md := range machineDeployments.Items {
 		mdNames = append(mdNames, md.Name)
 	}

@@ -401,7 +415,7 @@ func getCurrentVSphereMachines(ctx context.Context, kubeClient client.Client, cl
 	return result, nil
 }

-// GenerateVMGPlacementLabels returns labels per MachineDeployment which contain zone info for placed VMs for day-2 operationss
+// GenerateVMGPlacementLabels returns labels per MachineDeployment which contain zone info for placed VMs for day-2 operations.
 func GenerateVMGPlacementLabels(ctx context.Context, vmg *vmoprv1.VirtualMachineGroup, machineDeployments []string) (map[string]string, error) {
 	log := ctrl.LoggerFrom(ctx)
 	labels := make(map[string]string)

@@ -428,6 +442,7 @@ func GenerateVMGPlacementLabels(ctx context.Context, vmg *vmoprv1.VirtualMachine
 		}

 		// Check if VM belongs to a Machine Deployment by name (e.g. cluster-1-np-1-vm-xxx contains np-1)
+		// TODO: Establish membership via the machine deployment name label
 		if strings.Contains(member.Name, md) {
 			// Get the VM placement information by member status.
 			if member.Placement == nil {

@@ -448,3 +463,20 @@ func GenerateVMGPlacementLabels(ctx context.Context, vmg *vmoprv1.VirtualMachine

 	return labels, nil
 }
+
+// TODO: de-dup this logic with vmopmachine.go
+// GenerateVirtualMachineName generates the name of a VirtualMachine based on the naming strategy.
+func GenerateVirtualMachineName(machineName string, namingStrategy *vmwarev1.VirtualMachineNamingStrategy) (string, error) {
+	// Per default the name of the VirtualMachine should be equal to the Machine name (this is the same as "{{ .machine.name }}")
+	if namingStrategy == nil || namingStrategy.Template == nil {
+		// Note: No need to trim to max length in this case as valid Machine names will also be valid VirtualMachine names.
+		return machineName, nil
+	}
+
+	name, err := infrautilv1.GenerateMachineNameFromTemplate(machineName, namingStrategy.Template)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to generate name for VirtualMachine")
+	}
+
+	return name, nil
+}
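Net effect of the reconciler changes above: VirtualMachineGroup members are keyed by the generated VM name, and the MachineDeployment list is discovered via the cluster-name label rather than read from the Cluster topology. The MachineDeployment names feed placement labels whose key format matches the lookup in vmopmachine.go further down; a hypothetical resulting label set (pool and zone names are illustrative):

package main

import "fmt"

func main() {
	// Illustrative only: labels the reconciler attaches to the Cluster's
	// VirtualMachineGroup once placement is decided, mapping each node
	// pool (MachineDeployment) to its chosen zone.
	vmgLabels := map[string]string{
		"zone.cluster.x-k8s.io/np-1": "zone-a", // hypothetical pool/zone
		"zone.cluster.x-k8s.io/np-2": "zone-b", // hypothetical pool/zone
	}
	fmt.Println(vmgLabels)
}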

go.mod

Lines changed: 1 addition & 1 deletion
@@ -10,7 +10,7 @@ require (
 	github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d
 	github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d
 	// The version of vm-operator should be kept in sync with the manifests at: config/deployments/integration-tests
-	github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045
+	github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7
 	github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505
 	github.com/vmware/govmomi v0.51.0
 )

go.sum

Lines changed: 2 additions & 2 deletions
@@ -221,8 +221,8 @@ github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d h1:c
 github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d/go.mod h1:JbFOh22iDsT5BowJe0GgpMI5e2/S7cWaJlv9LdURVQM=
 github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d h1:z9lrzKVtNlujduv9BilzPxuge/LE2F0N1ms3TP4JZvw=
 github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d/go.mod h1:Q4JzNkNMvjo7pXtlB5/R3oME4Nhah7fAObWgghVmtxk=
-github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045 h1:zME8crazIAWVJGboJpSLl+qcRYQ8yA6hPQojz28gY5M=
-github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20250908141901-a9e1dfbc0045/go.mod h1:hkc/QZCSHcosWWMPS6VWWR12WenZcNE3BaTJ/8A8sNE=
+github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7 h1:VlnaiDKI1H1buwBOgL8R3HRB3EQNN96xMdz25vE5FUo=
+github.com/vmware-tanzu/vm-operator/api v1.9.1-0.20251007154704-e2d6e85d9ec7/go.mod h1:nWTPpxfe4gHuuYuFcrs86+NMxfkqPk3a3IlvI8TCWak=
 github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 h1:y4wXx1FUFqqSgJ/xUOEM1DLS2Uu0KaeLADWpzpioGTU=
 github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505/go.mod h1:5rqRJ9zGR+KnKbkGx373WgN8xJpvAj99kHnfoDYRO5I=
 github.com/vmware/govmomi v0.51.0 h1:n3RLS9aw/irTOKbiIyJzAb6rOat4YOVv/uDoRsNTSQI=

pkg/services/vmoperator/constants.go

Lines changed: 0 additions & 2 deletions
@@ -18,8 +18,6 @@ limitations under the License.
 package vmoperator

 const (
-	kubeTopologyZoneLabelKey = "topology.kubernetes.io/zone"
-
 	// ControlPlaneVMClusterModuleGroupName is the name used for the control plane Cluster Module.
 	ControlPlaneVMClusterModuleGroupName = "control-plane-group"
 	// ClusterModuleNameAnnotationKey is key for the Cluster Module annotation.

pkg/services/vmoperator/vmopmachine.go

Lines changed: 159 additions & 12 deletions
@@ -41,6 +41,7 @@ import (

 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-vsphere/feature"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware"
 	infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util"

@@ -163,6 +164,15 @@ func (v *VmopMachineService) SyncFailureReason(_ context.Context, machineCtx cap
 	return supervisorMachineCtx.VSphereMachine.Status.FailureReason != nil || supervisorMachineCtx.VSphereMachine.Status.FailureMessage != nil, nil
 }

+type affinityInfo struct {
+	affinitySpec  *vmoprv1.AffinitySpec
+	vmGroupName   string
+	failureDomain *string
+
+	// TODO: is this needed for the single zone case?
+	// zones []topologyv1.Zone
+}
+
 // ReconcileNormal reconciles create and update events for VM Operator VMs.
 func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx capvcontext.MachineContext) (bool, error) {
 	log := ctrl.LoggerFrom(ctx)

@@ -171,10 +181,6 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 		return false, errors.New("received unexpected SupervisorMachineContext type")
 	}

-	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
-		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
-	}
-
 	// If debug logging is enabled, report the number of vms in the cluster before and after the reconcile
 	if log.V(5).Enabled() {
 		vms, err := v.getVirtualMachinesInCluster(ctx, supervisorMachineCtx)

@@ -188,6 +194,112 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	// Set the VM state. Will get reset throughout the reconcile
 	supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending

+	var affInfo affinityInfo
+	if feature.Gates.Enabled(feature.NodeAutoPlacement) &&
+		!infrautilv1.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) {
+		// Check for the presence of a VirtualMachineGroup with the same name and namespace as the Cluster.
+		vmOperatorVMGroup := &vmoprv1.VirtualMachineGroup{}
+		key := client.ObjectKey{
+			Namespace: supervisorMachineCtx.Cluster.Namespace,
+			Name:      supervisorMachineCtx.Cluster.Name,
+		}
+		err := v.Client.Get(ctx, key, vmOperatorVMGroup)
+		if err != nil {
+			if !apierrors.IsNotFound(err) {
+				return false, err
+			}
+			if apierrors.IsNotFound(err) {
+				log.V(4).Info("VirtualMachineGroup not found, requeueing")
+				return true, nil
+			}
+		}
+
+		// Check if the current machine is a member of the boot order
+		// in the VirtualMachineGroup.
+		if !v.checkVirtualMachineGroupMembership(vmOperatorVMGroup, supervisorMachineCtx) {
+			log.V(4).Info("Waiting for VirtualMachineGroup membership, requeueing")
+			return true, nil
+		}
+
+		// Initialize the affinityInfo for the VM.
+		affInfo = affinityInfo{
+			vmGroupName: vmOperatorVMGroup.Name,
+		}
+
+		// Check for the node pool's zone placement label on the VirtualMachineGroup object.
+		nodePool := supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+		if zone, ok := vmOperatorVMGroup.Labels[fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)]; ok && zone != "" {
+			affInfo.failureDomain = ptr.To(zone)
+		}
+
+		// Fetch machine deployments without an explicit failureDomain specified,
+		// to use when setting the anti-affinity rules.
+		machineDeployments := &clusterv1.MachineDeploymentList{}
+		if err := v.Client.List(ctx, machineDeployments,
+			client.InNamespace(supervisorMachineCtx.Cluster.Namespace),
+			client.MatchingLabels{clusterv1.ClusterNameLabel: supervisorMachineCtx.Cluster.Name}); err != nil {
+			return false, err
+		}
+		mdNames := []string{}
+		for _, machineDeployment := range machineDeployments.Items {
+			// Skip node pools with an explicit failureDomain so that anti-affinity
+			// applies only among node pools using automatic placement.
+			if machineDeployment.Spec.Template.Spec.FailureDomain == "" && machineDeployment.Name != nodePool {
+				mdNames = append(mdNames, machineDeployment.Name)
+			}
+		}
+		// TODO: turn this into a V(4) log
+		log.V(2).Info("Gathered anti-affine MDs", "mdNames", mdNames)

+		affInfo.affinitySpec = &vmoprv1.AffinitySpec{
+			VMAffinity: &vmoprv1.VMAffinitySpec{
+				RequiredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								clusterv1.MachineDeploymentNameLabel: nodePool,
+								clusterv1.ClusterNameLabel:           supervisorMachineCtx.Cluster.Name,
+							},
+						},
+						TopologyKey: corev1.LabelTopologyZone,
+					},
+				},
+			},
+			VMAntiAffinity: &vmoprv1.VMAntiAffinitySpec{
+				PreferredDuringSchedulingPreferredDuringExecution: []vmoprv1.VMAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								clusterv1.MachineDeploymentNameLabel: nodePool,
+								clusterv1.ClusterNameLabel:           supervisorMachineCtx.Cluster.Name,
+							},
+						},
+						TopologyKey: corev1.LabelHostname,
+					},
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{
+								clusterv1.ClusterNameLabel: supervisorMachineCtx.Cluster.Name,
+							},
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      clusterv1.MachineDeploymentNameLabel,
+									Operator: metav1.LabelSelectorOpIn,
+									Values:   mdNames,
+								},
+							},
+						},
+						TopologyKey: corev1.LabelTopologyZone,
+					},
+				},
+			},
+		}
+	}
+
+	if supervisorMachineCtx.Machine.Spec.FailureDomain != "" {
+		supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = ptr.To(supervisorMachineCtx.Machine.Spec.FailureDomain)
+	}
+
 	// Check for the presence of an existing object
 	vmOperatorVM := &vmoprv1.VirtualMachine{}
 	key, err := virtualMachineObjectKey(supervisorMachineCtx.Machine.Name, supervisorMachineCtx.Machine.Namespace, supervisorMachineCtx.VSphereMachine.Spec.NamingStrategy)

@@ -208,7 +320,7 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap
 	}

 	// Reconcile the VM Operator VirtualMachine.
-	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil {
+	if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM, &affInfo); err != nil {
 		v1beta1conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1beta1.ConditionSeverityWarning,
 			"failed to create or update VirtualMachine: %v", err)
 		v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{

@@ -378,7 +490,7 @@ func (v *VmopMachineService) GetHostInfo(ctx context.Context, machineCtx capvcon
 	return vmOperatorVM.Status.Host, nil
 }

-func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error {
+func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine, affinityInfo *affinityInfo) error {
 	// All Machine resources should define the version of Kubernetes to use.
 	if supervisorMachineCtx.Machine.Spec.Version == "" {
 		return errors.Errorf(

@@ -472,7 +584,7 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis
 	}

 	// Assign the VM's labels.
-	vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels)
+	vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels, affinityInfo)

 	addResourcePolicyAnnotations(supervisorMachineCtx, vmOperatorVM)

@@ -494,6 +606,15 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis
 		vmOperatorVM = typedModified
 	}

+	if affinityInfo != nil && affinityInfo.affinitySpec != nil {
+		if vmOperatorVM.Spec.Affinity == nil {
+			vmOperatorVM.Spec.Affinity = affinityInfo.affinitySpec
+		}
+		if vmOperatorVM.Spec.GroupName == "" {
+			vmOperatorVM.Spec.GroupName = affinityInfo.vmGroupName
+		}
+	}
+
 	// Make sure the VSphereMachine owns the VM Operator VirtualMachine.
 	if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, v.Client.Scheme()); err != nil {
 		return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s",

@@ -735,7 +856,7 @@ func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCt

 	if zone := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; zonal && zone != nil {
 		topology := []map[string]string{
-			{kubeTopologyZoneLabelKey: *zone},
+			{corev1.LabelTopologyZone: *zone},
 		}
 		b, err := json.Marshal(topology)
 		if err != nil {

@@ -777,7 +898,7 @@ func (v *VmopMachineService) addVolumes(ctx context.Context, supervisorMachineCt
 }

 // getVMLabels returns the labels applied to a VirtualMachine.
-func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string) map[string]string {
+func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string, affinityInfo *affinityInfo) map[string]string {
 	if vmLabels == nil {
 		vmLabels = map[string]string{}
 	}

@@ -791,7 +912,11 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels

 	// Get the labels that determine the VM's placement inside of a stretched
 	// cluster.
-	topologyLabels := getTopologyLabels(supervisorMachineCtx)
+	var failureDomain *string
+	if affinityInfo != nil && affinityInfo.failureDomain != nil {
+		failureDomain = affinityInfo.failureDomain
+	}
+	topologyLabels := getTopologyLabels(supervisorMachineCtx, failureDomain)
 	for k, v := range topologyLabels {
 		vmLabels[k] = v
 	}

@@ -800,6 +925,9 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 	// resources associated with the target cluster.
 	vmLabels[clusterv1.ClusterNameLabel] = supervisorMachineCtx.GetClusterContext().Cluster.Name

+	// Ensure the VM has the machine deployment name label
+	vmLabels[clusterv1.MachineDeploymentNameLabel] = supervisorMachineCtx.Machine.Labels[clusterv1.MachineDeploymentNameLabel]
+
 	return vmLabels
 }

@@ -809,10 +937,16 @@ func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels
 //
 // and thus the code is optimized as such. However, in the future
 // this function may return a more diverse topology.
-func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) map[string]string {
+func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, failureDomain *string) map[string]string {
+	// TODO: Make it so that we always set the zone label, might require enquiring the zones present (when unset)
 	if fd := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; fd != nil && *fd != "" {
 		return map[string]string{
-			kubeTopologyZoneLabelKey: *fd,
+			corev1.LabelTopologyZone: *fd,
+		}
+	}
+	if failureDomain != nil && *failureDomain != "" {
+		return map[string]string{
+			corev1.LabelTopologyZone: *failureDomain,
 		}
 	}
 	return nil

@@ -823,3 +957,16 @@ func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) ma
 func getMachineDeploymentNameForCluster(cluster *clusterv1.Cluster) string {
 	return fmt.Sprintf("%s-workers-0", cluster.Name)
 }
+
+// checkVirtualMachineGroupMembership reports whether the machine is a member
+// of the first boot order group in the VirtualMachineGroup.
+func (v *VmopMachineService) checkVirtualMachineGroupMembership(vmOperatorVMGroup *vmoprv1.VirtualMachineGroup, supervisorMachineCtx *vmware.SupervisorMachineContext) bool {
+	if len(vmOperatorVMGroup.Spec.BootOrder) > 0 {
+		for _, member := range vmOperatorVMGroup.Spec.BootOrder[0].Members {
+			if member.Name == supervisorMachineCtx.Machine.Name {
+				return true
+			}
+		}
+	}
+	return false
+}
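To make the auto-placement flow concrete: once initial placement stamps a zone label per node pool onto the VirtualMachineGroup, ReconcileNormal pins every later Machine of that pool to the recorded zone via affinityInfo.failureDomain. A condensed sketch of that lookup; the helper name is hypothetical, and vmoprv1/clusterv1 refer to the same packages imported by vmopmachine.go:

// zoneForNodePool returns the zone recorded on the VirtualMachineGroup
// for the Machine's node pool, or nil when no placement has been
// recorded yet (in which case the placement engine is free to choose).
func zoneForNodePool(vmg *vmoprv1.VirtualMachineGroup, machineLabels map[string]string) *string {
	nodePool := machineLabels[clusterv1.MachineDeploymentNameLabel]
	zone, ok := vmg.Labels[fmt.Sprintf("zone.cluster.x-k8s.io/%s", nodePool)]
	if !ok || zone == "" {
		return nil
	}
	return &zone
}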
