diff --git a/README.md b/README.md index 6f1641802e..17b35ce234 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ The [Amazon Elastic Block Store](https://aws.amazon.com/ebs/) Container Storage * **Volume Snapshots** - Create and restore [snapshots](https://kubernetes.io/docs/concepts/storage/volume-snapshots/) taken from a volume in Kubernetes. * **Volume Resizing** - Expand the volume by specifying a new size in the [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims) (PVC). * **Volume Modification** - Change the properties (type, iops, or throughput) [via a `VolumeAttributesClass`](examples/kubernetes/modify-volume). +* **Node-Local Volumes** - Mount pre-attached, node-specific EBS volumes using a single cluster-wide PV/PVC for node-local caching scenarios. ## Container Images @@ -51,6 +52,7 @@ The EBS CSI Driver implements the [Container Storage Interface specification](ht * [Driver Installation](docs/install.md) * [Driver Launch Options](docs/options.md) * [StorageClass Parameters](docs/parameters.md) +* [Node-Local Volumes](docs/node-local-volumes.md) * [Frequently Asked Questions](docs/faq.md) * [Volume Tagging](docs/tagging.md) * [Volume Modification](docs/modify-volume.md) diff --git a/charts/aws-ebs-csi-driver/templates/controller.yaml b/charts/aws-ebs-csi-driver/templates/controller.yaml index 5e9d6b8ae5..5458edea2c 100644 --- a/charts/aws-ebs-csi-driver/templates/controller.yaml +++ b/charts/aws-ebs-csi-driver/templates/controller.yaml @@ -94,6 +94,9 @@ spec: {{- if .Values.controller.batching }} - --batching=true {{- end}} + {{- if .Values.controller.enableNodeLocalVolumes }} + - --enable-node-local-volumes=true + {{- end}} {{- with .Values.controller.loggingFormat }} - --logging-format={{ . }} {{- end }} diff --git a/charts/aws-ebs-csi-driver/values.schema.json b/charts/aws-ebs-csi-driver/values.schema.json index 413c8a59df..01dd4ca8f4 100644 --- a/charts/aws-ebs-csi-driver/values.schema.json +++ b/charts/aws-ebs-csi-driver/values.schema.json @@ -482,6 +482,11 @@ "default": false } } + }, + "enableNodeLocalVolumes": { + "type": "boolean", + "description": "Enable support for node-local volumes that use pre-attached EBS volumes", + "default": false } } }, diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml index cf574b0e87..17e0dc6df8 100644 --- a/charts/aws-ebs-csi-driver/values.yaml +++ b/charts/aws-ebs-csi-driver/values.yaml @@ -199,6 +199,8 @@ controller: batching: true volumeModificationFeature: enabled: false + # Enable support for node-local volumes that use pre-attached EBS volumes + enableNodeLocalVolumes: false # Additional parameters provided by aws-ebs-csi-driver controller. additionalArgs: [] sdkDebugLog: false diff --git a/docs/node-local-volumes.md b/docs/node-local-volumes.md new file mode 100644 index 0000000000..d779f00d00 --- /dev/null +++ b/docs/node-local-volumes.md @@ -0,0 +1,129 @@ +# Node-Local Volumes + +## Overview + +Node-local volumes enable a single cluster-wide PersistentVolume (PV) and PersistentVolumeClaim (PVC) to mount pre-attached, node-specific EBS volumes. When pods reference this PVC, each node independently mounts its own local EBS device, and all pods on that node share the mount. + +This feature is useful for scenarios where: +- Multiple co-located pods need access to a shared dataset (e.g., cached files from S3). +- You want to avoid using `hostPath` volumes for security reasons. 
+- You need to scale workloads across nodes while maintaining node-local caching.
+
+## Prerequisites
+
+- EBS volumes must be pre-attached to each node at a consistent device name (e.g., `/dev/xvdbz`).
+- The driver must be deployed with `controller.enableNodeLocalVolumes=true`.
+
+## Enabling the Feature
+
+### Helm Installation
+
+```bash
+helm upgrade --install aws-ebs-csi-driver \
+  --namespace kube-system \
+  ./charts/aws-ebs-csi-driver \
+  --set controller.enableNodeLocalVolumes=true
+```
+
+## Usage
+
+### 1. Pre-attach EBS Volumes to Nodes
+
+Each node must have an EBS volume attached at the same device name. For example:
+
+**EC2 Launch Template:**
+```json
+{
+  "BlockDeviceMappings": [{
+    "DeviceName": "/dev/xvdbz",
+    "Ebs": {
+      "VolumeSize": 100,
+      "VolumeType": "gp3",
+      "DeleteOnTermination": true
+    }
+  }]
+}
+```
+
+### 2. Create a Cluster-Wide PV and PVC
+
+Create a PersistentVolume whose `volumeHandle` uses the `local-ebs://` prefix:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: node-local-cache-pv
+spec:
+  capacity:
+    storage: 100Gi
+  volumeMode: Filesystem
+  accessModes:
+    - ReadWriteMany
+  persistentVolumeReclaimPolicy: Retain
+  csi:
+    driver: ebs.csi.aws.com
+    volumeHandle: local-ebs://dev/xvdbz
+    volumeAttributes:
+      ebs.csi.aws.com/fsType: "xfs"
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-local-cache-pvc
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 100Gi
+  volumeName: node-local-cache-pv
+```
+
+**Important:** The `volumeHandle` format is `local-ebs://<device-path>`, where `<device-path>` is the device path without the leading slash (e.g., `dev/xvdbz` for `/dev/xvdbz`).
+
+### 3. Use in Workloads
+
+Reference the PVC in your pod or deployment:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+spec:
+  replicas: 10
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+        - name: app
+          image: my-app:latest
+          volumeMounts:
+            - name: cache
+              mountPath: /cache
+      volumes:
+        - name: cache
+          persistentVolumeClaim:
+            claimName: node-local-cache-pvc
+```
+
+## Access Mode Requirements
+
+Node-local volumes may use the `ReadWriteMany` (RWX) access mode. RWX tells the Kubernetes scheduler that it is safe to place pods referencing the PVC on multiple nodes. Each node mounts its own physical EBS volume, so no data is actually shared across nodes.
+
+## Limitations
+
+- Volumes must be statically provisioned and pre-attached at the specified device path.
+- Cross-node data sharing is not supported (each node has its own volume).
+- Volume snapshots and modifications for local cache volumes are not supported.
+- The root device cannot be used as a node-local volume.
+
+## Examples
+
+See [examples/kubernetes/node-local-volumes](../examples/kubernetes/node-local-volumes) for complete working examples.
diff --git a/docs/options.md b/docs/options.md
index c1f68f5df5..42f08e10ce 100644
--- a/docs/options.md
+++ b/docs/options.md
@@ -21,3 +21,4 @@ There are a couple of driver options that can be passed as arguments when starti
 | reserved-volume-attachments | 2 | -1 | Number of volume attachments reserved for system use. Not used when --volume-attach-limit is specified. When -1, the amount of reserved attachments is loaded from instance metadata that captured state at node boot and may include not only system disks but also CSI volumes. |
 | legacy-xfs | true | false | Warning: This option will be removed in a future release.
It is a temporary workaround for users unable to immediately migrate off of older kernel versions. Formats XFS volumes with `bigtime=0,inobtcount=0,reflink=0`, so that they can be mounted onto nodes with linux kernel ≤ v5.4. Volumes formatted with this option may experience issues after 2038, and will be unable to use some XFS features (for example, reflinks). | | metadata-sources | imds | imds,kubernetes,metadalabeler | Dictates which sources are used to retrieve instance metadata. The driver will attempt to rely on each source in order until one succeeds. Valid options include 'imds', 'kubernetes', and (ALPHA)'metadata-labeler'. | +| enable-node-local-volumes | true | false | If set to true, enables support for node-local volumes that use pre-attached EBS volumes. See [node-local-volumes.md](node-local-volumes.md) for details. | diff --git a/examples/kubernetes/node-local-volumes/README.md b/examples/kubernetes/node-local-volumes/README.md new file mode 100644 index 0000000000..a9cc5d80c4 --- /dev/null +++ b/examples/kubernetes/node-local-volumes/README.md @@ -0,0 +1,47 @@ +# Node-Local Volumes Example + +This example demonstrates how to use node-local volumes with the EBS CSI Driver. + +## Prerequisites + +1. Enable the feature in the driver: + ```bash + helm upgrade --install aws-ebs-csi-driver \ + --namespace kube-system \ + ./charts/aws-ebs-csi-driver \ + --set controller.enableNodeLocalVolumes=true + ``` + +2. Pre-attach EBS volumes to each node using an EC2 Launch Template: + ```json + { + "BlockDeviceMappings": [{ + "DeviceName": "/dev/xvdbz", + "Ebs": { + "VolumeSize": 100, + "VolumeType": "gp3", + "DeleteOnTermination": true + } + }] + } + ``` + Apply this launch template to your node group or managed node group. + +## Deploy + +```bash +kubectl apply -f manifests/ +``` + +## Verify + +```bash +# Check pods are running +kubectl get pods -l app=cache-app -o wide +``` + +## Cleanup + +```bash +kubectl delete -f manifests/ +``` diff --git a/examples/kubernetes/node-local-volumes/manifests/deployment.yaml b/examples/kubernetes/node-local-volumes/manifests/deployment.yaml new file mode 100644 index 0000000000..5dee8e5823 --- /dev/null +++ b/examples/kubernetes/node-local-volumes/manifests/deployment.yaml @@ -0,0 +1,41 @@ +# Copyright 2025 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
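+#
+# Note on this example: each replica below appends a UTC timestamp to
+# /cache/out.txt every five seconds. Replicas scheduled onto the same
+# node share that node's pre-attached EBS volume; replicas on other
+# nodes write to their own node's copy, so contents differ per node.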
+ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cache-app +spec: + replicas: 4 + selector: + matchLabels: + app: cache-app + template: + metadata: + labels: + app: cache-app + spec: + containers: + - name: app + image: public.ecr.aws/amazonlinux/amazonlinux + command: ["/bin/sh"] + args: ["-c", "while true; do echo $(date -u) >> /cache/out.txt; sleep 5; done"] + volumeMounts: + - name: cache + mountPath: /cache + volumes: + - name: cache + persistentVolumeClaim: + claimName: node-local-cache-pvc diff --git a/examples/kubernetes/node-local-volumes/manifests/pv-pvc.yaml b/examples/kubernetes/node-local-volumes/manifests/pv-pvc.yaml new file mode 100644 index 0000000000..ac429f7621 --- /dev/null +++ b/examples/kubernetes/node-local-volumes/manifests/pv-pvc.yaml @@ -0,0 +1,43 @@ +# Copyright 2025 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: node-local-cache-pv +spec: + capacity: + storage: 100Gi + volumeMode: Filesystem + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + csi: + driver: ebs.csi.aws.com + volumeHandle: local-ebs://dev/xvdbz + volumeAttributes: + ebs.csi.aws.com/fsType: "xfs" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-local-cache-pvc +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 100Gi + volumeName: node-local-cache-pv diff --git a/pkg/cloud/cloud.go b/pkg/cloud/cloud.go index 45900b6474..bb4a53d9a2 100644 --- a/pkg/cloud/cloud.go +++ b/pkg/cloud/cloud.go @@ -1637,6 +1637,27 @@ func (c *cloud) GetDiskByID(ctx context.Context, volumeID string) (*Disk, error) return disk, nil } +func (c *cloud) GetVolumeIDByNodeAndDevice(ctx context.Context, nodeID string, deviceName string) (string, error) { + instance, err := c.getInstance(ctx, nodeID) + if err != nil { + return "", fmt.Errorf("failed to get instance %s: %w", nodeID, err) + } + + if instance.RootDeviceName != nil && *instance.RootDeviceName == deviceName { + return "", fmt.Errorf("device %s is the root device: %w", deviceName, ErrInvalidRequest) + } + + for _, bdm := range instance.BlockDeviceMappings { + if bdm.DeviceName != nil && *bdm.DeviceName == deviceName { + if bdm.Ebs != nil && bdm.Ebs.VolumeId != nil { + return *bdm.Ebs.VolumeId, nil + } + } + } + + return "", fmt.Errorf("volume not found at device %s on node %s: %w", deviceName, nodeID, ErrNotFound) +} + func isHyperPodNode(nodeID string) bool { return strings.HasPrefix(nodeID, "hyperpod-") } diff --git a/pkg/cloud/cloud_test.go b/pkg/cloud/cloud_test.go index 25e100b061..4c0c38db7b 100644 --- a/pkg/cloud/cloud_test.go +++ b/pkg/cloud/cloud_test.go @@ -5115,6 +5115,119 @@ func createDescribeVolumesOutput(volumeIDs []*string, nodeID, path, state string } } +func TestGetVolumeIDByNodeAndDevice(t *testing.T) { + testCases := []struct { + name string + nodeID string + deviceName string + instance *types.Instance + instanceErr error + expectedVolID string + expectError bool + }{ + { + 
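// happy path: the device name resolves to the volume ID recorded in the instance's block device mappings
+			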
name: "success: volume found at device", + nodeID: "i-1234567890", + deviceName: "/dev/xvdf", + instance: &types.Instance{ + InstanceId: aws.String("i-1234567890"), + RootDeviceName: aws.String("/dev/xvda"), + BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("/dev/xvdf"), + Ebs: &types.EbsInstanceBlockDevice{ + VolumeId: aws.String("vol-test-123"), + }, + }, + }, + }, + expectedVolID: "vol-test-123", + expectError: false, + }, + { + name: "fail: device is root device", + nodeID: "i-1234567890", + deviceName: "/dev/xvda", + instance: &types.Instance{ + InstanceId: aws.String("i-1234567890"), + RootDeviceName: aws.String("/dev/xvda"), + BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("/dev/xvda"), + Ebs: &types.EbsInstanceBlockDevice{ + VolumeId: aws.String("vol-root-123"), + }, + }, + }, + }, + expectError: true, + }, + { + name: "fail: volume not found at device", + nodeID: "i-1234567890", + deviceName: "/dev/xvdz", + instance: &types.Instance{ + InstanceId: aws.String("i-1234567890"), + RootDeviceName: aws.String("/dev/xvda"), + BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ + { + DeviceName: aws.String("/dev/xvdf"), + Ebs: &types.EbsInstanceBlockDevice{ + VolumeId: aws.String("vol-test-123"), + }, + }, + }, + }, + expectError: true, + }, + { + name: "fail: getInstance error", + nodeID: "i-1234567890", + deviceName: "/dev/xvdf", + instanceErr: errors.New("instance not found"), + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + mockEC2 := NewMockEC2API(mockCtrl) + c := newCloud(mockEC2) + + ctx := t.Context() + + expectedInput := &ec2.DescribeInstancesInput{ + InstanceIds: []string{tc.nodeID}, + } + + if tc.instanceErr != nil { + mockEC2.EXPECT().DescribeInstances(t.Context(), gomock.Eq(expectedInput)).Return(nil, tc.instanceErr) + } else if tc.instance != nil { + mockEC2.EXPECT().DescribeInstances(t.Context(), gomock.Eq(expectedInput)).Return( + &ec2.DescribeInstancesOutput{ + Reservations: []types.Reservation{ + { + Instances: []types.Instance{*tc.instance}, + }, + }, + }, nil) + } + + volumeID, err := c.GetVolumeIDByNodeAndDevice(ctx, tc.nodeID, tc.deviceName) + + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedVolID, volumeID) + } + + mockCtrl.Finish() + }) + } +} + func TestCheckIfIopsIncreaseOnExpansion(t *testing.T) { testCases := []struct { name string diff --git a/pkg/cloud/interface.go b/pkg/cloud/interface.go index 2bb4be2a9d..4297b26fbf 100644 --- a/pkg/cloud/interface.go +++ b/pkg/cloud/interface.go @@ -32,6 +32,7 @@ type Cloud interface { IsVolumeInitialized(ctx context.Context, volumeID string) (bool, error) GetDiskByName(ctx context.Context, name string, capacityBytes int64) (disk *Disk, err error) GetDiskByID(ctx context.Context, volumeID string) (disk *Disk, err error) + GetVolumeIDByNodeAndDevice(ctx context.Context, nodeID string, deviceName string) (volumeID string, err error) CreateSnapshot(ctx context.Context, volumeID string, snapshotOptions *SnapshotOptions) (snapshot *Snapshot, err error) DeleteSnapshot(ctx context.Context, snapshotID string) (success bool, err error) GetSnapshotByName(ctx context.Context, name string) (snapshot *Snapshot, err error) diff --git a/pkg/cloud/mock_cloud.go b/pkg/cloud/mock_cloud.go index 6913c715a5..e63591e96e 100644 --- a/pkg/cloud/mock_cloud.go +++ b/pkg/cloud/mock_cloud.go @@ -244,6 +244,21 @@ 
func (mr *MockCloudMockRecorder) GetSnapshotByName(ctx, name interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshotByName", reflect.TypeOf((*MockCloud)(nil).GetSnapshotByName), ctx, name) } +// GetVolumeIDByNodeAndDevice mocks base method. +func (m *MockCloud) GetVolumeIDByNodeAndDevice(ctx context.Context, nodeID, deviceName string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolumeIDByNodeAndDevice", ctx, nodeID, deviceName) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolumeIDByNodeAndDevice indicates an expected call of GetVolumeIDByNodeAndDevice. +func (mr *MockCloudMockRecorder) GetVolumeIDByNodeAndDevice(ctx, nodeID, deviceName interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeIDByNodeAndDevice", reflect.TypeOf((*MockCloud)(nil).GetVolumeIDByNodeAndDevice), ctx, nodeID, deviceName) +} + // IsVolumeInitialized mocks base method. func (m *MockCloud) IsVolumeInitialized(ctx context.Context, volumeID string) (bool, error) { m.ctrl.T.Helper() diff --git a/pkg/driver/constants.go b/pkg/driver/constants.go index 8a9c61ba3c..9d8b60994e 100644 --- a/pkg/driver/constants.go +++ b/pkg/driver/constants.go @@ -27,6 +27,9 @@ const ( // DevicePathKey represents key for device path in PublishContext // devicePath is the device path where the volume is attached to. DevicePathKey = "devicePath" + // VolumeIDKey represents key for volume ID in PublishContext + // Used for node-local volumes to pass the real volume ID. + VolumeIDKey = "volumeID" ) // constants of keys in VolumeContext. @@ -173,6 +176,12 @@ const ( DefaultModifyVolumeRequestHandlerTimeout = 2 * time.Second ) +// constants for node-local volumes. +const ( + // NodeLocalVolumeHandlePrefix is the prefix for node-local volume handles. + NodeLocalVolumeHandlePrefix = "local-ebs://" +) + // constants for fstypes. const ( // FSTypeExt3 represents the ext3 filesystem type. 
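For illustration only (not part of this change): a minimal, self-contained sketch of how a `local-ebs://` handle maps to the device path the controller looks up on the instance. The helper name `deviceFromHandle` and the standalone package are hypothetical; the driver performs this resolution inside `controllerPublishVolumeNodeLocal` below.

```go
package main

import (
	"fmt"
	"strings"
)

const nodeLocalVolumeHandlePrefix = "local-ebs://"

// deviceFromHandle resolves a node-local volume handle to a device path,
// e.g. "local-ebs://dev/xvdbz" -> "/dev/xvdbz". The handle stores the
// device path without its leading slash, so resolution is just a prefix
// strip plus re-adding the slash.
func deviceFromHandle(handle string) (string, error) {
	if !strings.HasPrefix(handle, nodeLocalVolumeHandlePrefix) {
		return "", fmt.Errorf("not a node-local handle: %q", handle)
	}
	device := strings.TrimPrefix(handle, nodeLocalVolumeHandlePrefix)
	if device == "" {
		return "", fmt.Errorf("empty device path in handle: %q", handle)
	}
	return "/" + device, nil
}

func main() {
	path, err := deviceFromHandle("local-ebs://dev/xvdbz")
	fmt.Println(path, err) // "/dev/xvdbz" <nil>
}
```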
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index f0e8ddbc02..436965f4b1 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -473,13 +473,21 @@ func validateDeleteVolumeRequest(req *csi.DeleteVolumeRequest) error { func (d *ControllerService) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { klog.V(4).InfoS("ControllerPublishVolume: called", "args", util.SanitizeRequest(req)) - if err := validateControllerPublishVolumeRequest(req); err != nil { - return nil, err - } volumeID := req.GetVolumeId() nodeID := req.GetNodeId() + if isNodeLocalVolume(volumeID) { + if !d.options.EnableNodeLocalVolumes { + return nil, status.Error(codes.InvalidArgument, "node-local volumes are not enabled") + } + return d.controllerPublishVolumeNodeLocal(ctx, req) + } + + if err := validateControllerPublishVolumeRequest(req); err != nil { + return nil, err + } + if !d.inFlight.Insert(volumeID + nodeID) { return nil, status.Error(codes.Aborted, fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, volumeID)) } @@ -536,6 +544,32 @@ func validateControllerPublishVolumeRequest(req *csi.ControllerPublishVolumeRequ return nil } +func (d *ControllerService) controllerPublishVolumeNodeLocal(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + volumeID := req.GetVolumeId() + nodeID := req.GetNodeId() + + deviceName := strings.TrimPrefix(volumeID, NodeLocalVolumeHandlePrefix) + if deviceName == "" || deviceName == volumeID { + return nil, status.Error(codes.InvalidArgument, "invalid node-local volume handle format") + } + deviceName = "/" + deviceName + realVolumeID, err := d.cloud.GetVolumeIDByNodeAndDevice(ctx, nodeID, deviceName) + if err != nil { + if errors.Is(err, cloud.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "Failed to find volume at device %s on node %s: %v", deviceName, nodeID, err) + } + return nil, status.Errorf(codes.Internal, "Failed to get volume at device %s on node %s: %v", deviceName, nodeID, err) + } + + klog.InfoS("ControllerPublishVolume: resolved node-local volume", "volumeID", volumeID, "realVolumeID", realVolumeID, "nodeID", nodeID, "deviceName", deviceName) + + pvInfo := map[string]string{ + DevicePathKey: deviceName, + VolumeIDKey: realVolumeID, + } + return &csi.ControllerPublishVolumeResponse{PublishContext: pvInfo}, nil +} + func (d *ControllerService) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { klog.V(4).InfoS("ControllerUnpublishVolume: called", "args", util.SanitizeRequest(req)) @@ -546,6 +580,11 @@ func (d *ControllerService) ControllerUnpublishVolume(ctx context.Context, req * volumeID := req.GetVolumeId() nodeID := req.GetNodeId() + if isNodeLocalVolume(volumeID) { + klog.V(2).InfoS("ControllerUnpublishVolume: node-local mode, skipping detach", "volumeID", volumeID, "nodeID", nodeID) + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + if !d.inFlight.Insert(volumeID + nodeID) { return nil, status.Error(codes.Aborted, fmt.Sprintf(internal.VolumeOperationAlreadyExistsErrorMsg, volumeID)) } @@ -615,15 +654,30 @@ func (d *ControllerService) ValidateVolumeCapabilities(ctx context.Context, req return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided") } - if _, err := d.cloud.GetDiskByID(ctx, volumeID); err != nil { - if errors.Is(err, cloud.ErrNotFound) { - return 
nil, status.Error(codes.NotFound, "Volume not found") + // Node-local volumes don't need GetDiskByID validation + if !isNodeLocalVolume(volumeID) { + if _, err := d.cloud.GetDiskByID(ctx, volumeID); err != nil { + if errors.Is(err, cloud.ErrNotFound) { + return nil, status.Error(codes.NotFound, "Volume not found") + } + return nil, status.Errorf(codes.Internal, "Could not get volume with ID %q: %v", volumeID, err) } - return nil, status.Errorf(codes.Internal, "Could not get volume with ID %q: %v", volumeID, err) } var confirmed *csi.ValidateVolumeCapabilitiesResponse_Confirmed - if isValidVolumeCapabilities(volCaps) { + if isNodeLocalVolume(volumeID) { + // For node-local volumes, allow RWX + valid := true + for _, c := range volCaps { + if !isValidCapabilityForNodeLocal(c) { + valid = false + break + } + } + if valid { + confirmed = &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: volCaps} + } + } else if isValidVolumeCapabilities(volCaps) { confirmed = &csi.ValidateVolumeCapabilitiesResponse_Confirmed{VolumeCapabilities: volCaps} } return &csi.ValidateVolumeCapabilitiesResponse{ @@ -638,6 +692,10 @@ func (d *ControllerService) ControllerExpandVolume(ctx context.Context, req *csi return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") } + if isNodeLocalVolume(volumeID) { + return nil, status.Error(codes.InvalidArgument, "node-local volumes cannot be expanded") + } + capRange := req.GetCapacityRange() if capRange == nil { return nil, status.Error(codes.InvalidArgument, "Capacity range not provided") @@ -677,6 +735,10 @@ func (d *ControllerService) ControllerModifyVolume(ctx context.Context, req *csi return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") } + if isNodeLocalVolume(volumeID) { + return nil, status.Error(codes.InvalidArgument, "node-local volumes cannot be modified") + } + options, err := parseModifyVolumeParameters(req.GetMutableParameters()) if err != nil { return nil, err @@ -729,6 +791,15 @@ func isValidCapability(c *csi.VolumeCapability) bool { } } +func isNodeLocalVolume(volumeID string) bool { + return strings.HasPrefix(volumeID, NodeLocalVolumeHandlePrefix) +} + +func isValidCapabilityForNodeLocal(c *csi.VolumeCapability) bool { + accessMode := c.GetAccessMode().GetMode() + return accessMode == SingleNodeWriter || accessMode == MultiNodeMultiWriter +} + func isBlock(capability *csi.VolumeCapability) bool { _, isBlk := capability.GetAccessType().(*csi.VolumeCapability_Block) return isBlk @@ -884,9 +955,14 @@ func validateCreateSnapshotRequest(req *csi.CreateSnapshotRequest) error { return status.Error(codes.InvalidArgument, "Snapshot name not provided") } - if len(req.GetSourceVolumeId()) == 0 { + volumeID := req.GetSourceVolumeId() + if len(volumeID) == 0 { return status.Error(codes.InvalidArgument, "Snapshot volume source ID not provided") } + + if isNodeLocalVolume(volumeID) { + return status.Error(codes.InvalidArgument, "node-local volumes cannot be snapshotted") + } return nil } diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index ef970eeba7..e50fa0b3ec 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -4232,6 +4232,31 @@ func TestCreateSnapshot(t *testing.T) { checkExpectedErrorCode(t, err, codes.ResourceExhausted) }, }, + { + name: "fail with node-local volume", + testFunc: func(t *testing.T) { + t.Helper() + req := &csi.CreateSnapshotRequest{ + Name: "test-snapshot", + Parameters: nil, + SourceVolumeId: NodeLocalVolumeHandlePrefix + "dev/xvdf", + 
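// a node-local handle must fail request validation before any EC2 snapshot call is made
+				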
} + + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockCloud := cloud.NewMockCloud(mockCtl) + + awsDriver := ControllerService{ + cloud: mockCloud, + inFlight: internal.NewInFlight(), + options: &Options{}, + } + _, err := awsDriver.CreateSnapshot(t.Context(), req) + + checkExpectedErrorCode(t, err, codes.InvalidArgument) + }, + }, } for _, tc := range testCases { t.Run(tc.name, tc.testFunc) @@ -4728,6 +4753,52 @@ func TestControllerPublishVolume(t *testing.T) { ControllerService.inFlight.Insert("vol-test" + expInstanceID) }, }, + { + name: "Success with node-local volume", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + nodeID: expInstanceID, + volumeCapability: stdVolCap, + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().GetVolumeIDByNodeAndDevice(gomock.Eq(ctx), gomock.Eq(expInstanceID), gomock.Eq("/dev/xvdf")).Return("vol-real", nil) + }, + expResp: &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{DevicePathKey: "/dev/xvdf", VolumeIDKey: "vol-real"}, + }, + errorCode: codes.OK, + setupFunc: func(ControllerService *ControllerService) { + ControllerService.options.EnableNodeLocalVolumes = true + }, + }, + { + name: "Fail with node-local volume invalid format", + volumeID: NodeLocalVolumeHandlePrefix, + nodeID: expInstanceID, + volumeCapability: stdVolCap, + errorCode: codes.InvalidArgument, + setupFunc: func(ControllerService *ControllerService) { + ControllerService.options.EnableNodeLocalVolumes = true + }, + }, + { + name: "Fail with node-local volume not found", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + nodeID: expInstanceID, + volumeCapability: stdVolCap, + mockAttach: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string, nodeID string) { + mockCloud.EXPECT().GetVolumeIDByNodeAndDevice(gomock.Eq(ctx), gomock.Eq(expInstanceID), gomock.Eq("/dev/xvdf")).Return("", cloud.ErrNotFound) + }, + errorCode: codes.NotFound, + setupFunc: func(ControllerService *ControllerService) { + ControllerService.options.EnableNodeLocalVolumes = true + }, + }, + { + name: "Fail with node-local volume when feature disabled", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + nodeID: expInstanceID, + volumeCapability: stdVolCap, + errorCode: codes.InvalidArgument, + }, } for _, tc := range testCases { @@ -4824,6 +4895,13 @@ func TestControllerUnpublishVolume(t *testing.T) { driver.inFlight.Insert("vol-test" + expInstanceID) }, }, + { + name: "Success with node-local volume skips detach", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + nodeID: expInstanceID, + errorCode: codes.OK, + expResp: &csi.ControllerUnpublishVolumeResponse{}, + }, } for _, tc := range testCases { @@ -4895,6 +4973,16 @@ func TestControllerExpandVolume(t *testing.T) { }, expError: true, }, + { + name: "fail node-local volume cannot be expanded", + req: &csi.ControllerExpandVolumeRequest{ + VolumeId: NodeLocalVolumeHandlePrefix + "dev/xvdf", + CapacityRange: &csi.CapacityRange{ + RequiredBytes: 5 * util.GiB, + }, + }, + expError: true, + }, } for _, tc := range testCases { @@ -4943,6 +5031,178 @@ func TestControllerExpandVolume(t *testing.T) { } } +func TestControllerModifyVolume(t *testing.T) { + testCases := []struct { + name string + req *csi.ControllerModifyVolumeRequest + expError bool + }{ + { + name: "fail node-local volume cannot be modified", + req: &csi.ControllerModifyVolumeRequest{ + VolumeId: NodeLocalVolumeHandlePrefix + "dev/xvdf", + MutableParameters: 
map[string]string{ + "iops": "4000", + }, + }, + expError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := t.Context() + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockCloud := cloud.NewMockCloud(mockCtl) + + awsDriver := ControllerService{ + cloud: mockCloud, + inFlight: internal.NewInFlight(), + options: &Options{}, + modifyVolumeCoalescer: newModifyVolumeCoalescer(mockCloud, &Options{}), + } + + _, err := awsDriver.ControllerModifyVolume(ctx, tc.req) + if tc.expError { + require.Error(t, err) + assert.Equal(t, codes.InvalidArgument, status.Code(err)) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateVolumeCapabilities(t *testing.T) { + stdVolCap := []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + } + multiWriterVolCap := []*csi.VolumeCapability{ + { + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + }, + } + + testCases := []struct { + name string + volumeID string + volCaps []*csi.VolumeCapability + setupFunc func(*ControllerService) + mockFunc func(*cloud.MockCloud, context.Context, string) + expected bool + }{ + { + name: "Success with regular volume", + volumeID: "vol-test", + volCaps: stdVolCap, + mockFunc: func(mockCloud *cloud.MockCloud, ctx context.Context, volumeID string) { + mockCloud.EXPECT().GetDiskByID(gomock.Eq(ctx), gomock.Eq(volumeID)).Return(&cloud.Disk{}, nil) + }, + expected: true, + }, + { + name: "Success with node-local volume and RWO", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + volCaps: stdVolCap, + expected: true, + }, + { + name: "Success with node-local volume and RWX", + volumeID: NodeLocalVolumeHandlePrefix + "dev/xvdf", + volCaps: multiWriterVolCap, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := &csi.ValidateVolumeCapabilitiesRequest{ + VolumeId: tc.volumeID, + VolumeCapabilities: tc.volCaps, + } + ctx := t.Context() + + awsDriver, mockCtl, mockCloud := createControllerService(t) + defer mockCtl.Finish() + + if tc.setupFunc != nil { + tc.setupFunc(&awsDriver) + } + + if tc.mockFunc != nil { + tc.mockFunc(mockCloud, ctx, req.GetVolumeId()) + } + + resp, err := awsDriver.ValidateVolumeCapabilities(ctx, req) + require.NoError(t, err) + if tc.expected { + assert.NotNil(t, resp.GetConfirmed()) + } else { + assert.Nil(t, resp.GetConfirmed()) + } + }) + } +} + +func TestIsNodeLocalVolume(t *testing.T) { + testCases := []struct { + name string + volumeID string + expected bool + }{ + {"Regular volume", "vol-test", false}, + {"Node-local volume", NodeLocalVolumeHandlePrefix + "dev/xvdf", true}, + {"Empty string", "", false}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := isNodeLocalVolume(tc.volumeID) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestIsValidCapabilityForNodeLocal(t *testing.T) { + testCases := []struct { + name string + accessMode csi.VolumeCapability_AccessMode_Mode + expected bool + }{ + {"SINGLE_NODE_WRITER", csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, true}, + {"MULTI_NODE_MULTI_WRITER", csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, true}, + 
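// the remaining access modes are rejected for node-local volumes
+		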
{"SINGLE_NODE_READER_ONLY", csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, false}, + {"MULTI_NODE_READER_ONLY", csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY, false}, + {"MULTI_NODE_SINGLE_WRITER", csi.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER, false}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + volCap := &csi.VolumeCapability{ + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: tc.accessMode, + }, + } + result := isValidCapabilityForNodeLocal(volCap) + assert.Equal(t, tc.expected, result) + }) + } +} + func checkExpectedErrorCode(t *testing.T, err error, expectedCode codes.Code) { t.Helper() if err == nil { diff --git a/pkg/driver/node.go b/pkg/driver/node.go index 1a1521cd31..c2fa2cd627 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -120,8 +120,14 @@ func (d *NodeService) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") } - if !isValidVolumeCapabilities([]*csi.VolumeCapability{volCap}) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if isNodeLocalVolume(volumeID) { + if !isValidCapabilityForNodeLocal(volCap) { + return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + } + } else { + if !isValidVolumeCapabilities([]*csi.VolumeCapability{volCap}) { + return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + } } volumeContext := req.GetVolumeContext() if isValidVolumeContext := isValidVolumeContext(volumeContext); !isValidVolumeContext { @@ -199,7 +205,14 @@ func (d *NodeService) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol } } - source, err := d.mounter.FindDevicePath(devicePath, volumeID, partition, d.metadata.GetRegion()) + effectiveVolumeID := volumeID + if isNodeLocalVolume(volumeID) { + if realVolumeID, ok := req.GetPublishContext()[VolumeIDKey]; ok && realVolumeID != "" { + effectiveVolumeID = realVolumeID + } + } + + source, err := d.mounter.FindDevicePath(devicePath, effectiveVolumeID, partition, d.metadata.GetRegion()) if err != nil { return nil, status.Errorf(codes.NotFound, "Failed to find device path %s. 
%v", devicePath, err) } @@ -432,8 +445,14 @@ func (d *NodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublis return nil, status.Error(codes.InvalidArgument, "Volume capability not provided") } - if !isValidVolumeCapabilities([]*csi.VolumeCapability{volCap}) { - return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + if isNodeLocalVolume(volumeID) { + if !isValidCapabilityForNodeLocal(volCap) { + return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + } + } else { + if !isValidVolumeCapabilities([]*csi.VolumeCapability{volCap}) { + return nil, status.Error(codes.InvalidArgument, "Volume capability not supported") + } } if ok := d.inFlight.Insert(volumeID); !ok { @@ -631,7 +650,14 @@ func (d *NodeService) nodePublishVolumeForBlock(req *csi.NodePublishVolumeReques } } - source, err := d.mounter.FindDevicePath(devicePath, volumeID, partition, d.metadata.GetRegion()) + effectiveVolumeID := volumeID + if isNodeLocalVolume(volumeID) { + if realVolumeID, ok := req.GetPublishContext()[VolumeIDKey]; ok && realVolumeID != "" { + effectiveVolumeID = realVolumeID + } + } + + source, err := d.mounter.FindDevicePath(devicePath, effectiveVolumeID, partition, d.metadata.GetRegion()) if err != nil { return status.Errorf(codes.NotFound, "Failed to find device path %s. %v", devicePath, err) } diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 0288f0818c..678ac36ad0 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -1002,6 +1002,62 @@ func TestNodeStageVolume(t *testing.T) { options: &Options{LegacyXFSProgs: true}, expectedErr: nil, }, + { + name: "node_local_volume_success", + req: &csi.NodeStageVolumeRequest{ + VolumeId: "local-ebs://dev/xvdba", + StagingTargetPath: "/staging/path", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + FsType: "ext4", + }, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + DevicePathKey: "/dev/xvdba", + VolumeIDKey: "vol-real", + }, + }, + mounterMock: func(ctrl *gomock.Controller) *mounter.MockMounter { + m := mounter.NewMockMounter(ctrl) + m.EXPECT().FindDevicePath(gomock.Eq("/dev/xvdba"), gomock.Eq("vol-real"), gomock.Eq(""), gomock.Eq("us-west-2")).Return("/dev/xvdba", nil) + m.EXPECT().PathExists(gomock.Any()).Return(true, nil) + m.EXPECT().GetDeviceNameFromMount(gomock.Any()).Return("", 1, nil) + m.EXPECT().FormatAndMountSensitiveWithFormatOptions(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + m.EXPECT().NeedResize(gomock.Any(), gomock.Any()).Return(false, nil) + return m + }, + metadataMock: func(ctrl *gomock.Controller) *metadata.MockMetadataService { + m := metadata.NewMockMetadataService(ctrl) + m.EXPECT().GetRegion().Return("us-west-2") + return m + }, + expectedErr: nil, + }, + { + name: "node_local_volume_unsupported_capability", + req: &csi.NodeStageVolumeRequest{ + VolumeId: "local-ebs://dev/xvdba", + StagingTargetPath: "/staging/path", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{ + FsType: "ext4", + }, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, + }, + }, + }, + mounterMock: nil, + metadataMock: nil, + expectedErr: status.Error(codes.InvalidArgument, 
"Volume capability not supported"), + }, } for _, tc := range testCases { @@ -1686,6 +1742,60 @@ func TestNodePublishVolume(t *testing.T) { }, expectedErr: status.Error(codes.NotFound, "Failed to find device path /dev/xvdba. device path error"), }, + { + name: "node_local_volume_block_success", + req: &csi.NodePublishVolumeRequest{ + VolumeId: "local-ebs://dev/xvdba", + StagingTargetPath: "/staging/path", + TargetPath: "/target/path", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + PublishContext: map[string]string{ + DevicePathKey: "/dev/xvdba", + VolumeIDKey: "vol-real", + }, + }, + mounterMock: func(ctrl *gomock.Controller) *mounter.MockMounter { + m := mounter.NewMockMounter(ctrl) + m.EXPECT().FindDevicePath(gomock.Eq("/dev/xvdba"), gomock.Eq("vol-real"), gomock.Eq(""), gomock.Eq("us-west-2")).Return("/dev/xvdba", nil) + m.EXPECT().PathExists(gomock.Any()).Return(true, nil) + m.EXPECT().MakeFile(gomock.Any()).Return(nil) + m.EXPECT().IsLikelyNotMountPoint(gomock.Any()).Return(true, nil) + m.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + return m + }, + metadataMock: func(ctrl *gomock.Controller) *metadata.MockMetadataService { + m := metadata.NewMockMetadataService(ctrl) + m.EXPECT().GetRegion().Return("us-west-2") + return m + }, + }, + { + name: "node_local_volume_unsupported_capability", + req: &csi.NodePublishVolumeRequest{ + VolumeId: "local-ebs://dev/xvdba", + StagingTargetPath: "/staging/path", + TargetPath: "/target/path", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, + }, + }, + PublishContext: map[string]string{ + DevicePathKey: "/dev/xvdba", + }, + }, + expectedErr: status.Error(codes.InvalidArgument, "Volume capability not supported"), + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/driver/options.go b/pkg/driver/options.go index 4336eb17cd..d8b79161b6 100644 --- a/pkg/driver/options.go +++ b/pkg/driver/options.go @@ -71,6 +71,8 @@ type Options struct { ModifyVolumeRequestHandlerTimeout time.Duration // flag to enable deprecated metrics DeprecatedMetrics bool + // flag to enable node-local volume support + EnableNodeLocalVolumes bool // #### Node options ##### @@ -123,6 +125,7 @@ func (o *Options) AddFlags(f *flag.FlagSet) { f.BoolVar(&o.Batching, "batching", false, "To enable batching of API calls. This is especially helpful for improving performance in workloads that are sensitive to EC2 rate limits.") f.DurationVar(&o.ModifyVolumeRequestHandlerTimeout, "modify-volume-request-handler-timeout", DefaultModifyVolumeRequestHandlerTimeout, "Timeout for the window in which volume modification calls must be received in order for them to coalesce into a single volume modification call to AWS. This must be lower than the csi-resizer and volumemodifier timeouts") f.BoolVar(&o.DeprecatedMetrics, "deprecated-metrics", false, "DEPRECATED: To enable deprecated metrics. 
This parameter is only for backward compatibility and may be removed in a future release.") + f.BoolVar(&o.EnableNodeLocalVolumes, "enable-node-local-volumes", false, "Enable support for node-local volumes that use pre-attached EBS volumes.") } // Node options if o.Mode == AllMode || o.Mode == NodeMode { diff --git a/pkg/driver/options_test.go b/pkg/driver/options_test.go index 4508b5ea25..ae37d36599 100644 --- a/pkg/driver/options_test.go +++ b/pkg/driver/options_test.go @@ -77,6 +77,9 @@ func TestAddFlags(t *testing.T) { if err := f.Set("legacy-xfs", "true"); err != nil { t.Errorf("error setting legacy-xfs: %v", err) } + if err := f.Set("enable-node-local-volumes", "true"); err != nil { + t.Errorf("error setting enable-node-local-volumes: %v", err) + } if err := f.Set("csi-mount-point-prefix", "/var/lib/kubelet"); err != nil { t.Errorf("error setting csi-mount-point-prefix: %v", err) @@ -121,6 +124,9 @@ func TestAddFlags(t *testing.T) { if !o.LegacyXFSProgs { t.Errorf("unexpected LegacyXFSProgs: got false, want true") } + if !o.EnableNodeLocalVolumes { + t.Error("unexpected EnableNodeLocalVolumes: got false, want true") + } } func TestValidateAttachmentLimits(t *testing.T) { diff --git a/tests/sanity/fake_sanity_cloud.go b/tests/sanity/fake_sanity_cloud.go index 688a39541f..2992db5300 100644 --- a/tests/sanity/fake_sanity_cloud.go +++ b/tests/sanity/fake_sanity_cloud.go @@ -229,3 +229,7 @@ func (d *fakeCloud) IsVolumeInitialized(ctx context.Context, volumeID string) (b func (d *fakeCloud) DryRun(ctx context.Context) error { return nil } + +func (d *fakeCloud) GetVolumeIDByNodeAndDevice(ctx context.Context, nodeID, deviceName string) (string, error) { + return "", cloud.ErrNotFound +}