From 5c834521e3ecec83769412ca63145aa64577c642 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Wed, 7 May 2025 23:11:09 +0530 Subject: [PATCH 1/9] Added `raw_disk_is_available()` using `blkid` for reliable raw disk checks. Added docstring explaining what this new method does vs `disk_is_available`. Added type hints. Signed-off-by: Rushikesh Jadhav --- lib/host.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/lib/host.py b/lib/host.py index 29ffd9b07..e5da2defb 100644 --- a/lib/host.py +++ b/lib/host.py @@ -516,7 +516,22 @@ def disks(self): disks.sort() return disks - def disk_is_available(self, disk): + def raw_disk_is_available(self, disk: str) -> bool: + """ + Check if a raw disk (without any identifiable filesystem or partition label) is available. + This suggests the disk is "raw", likely unformatted, and thus available. + """ + return self.ssh_with_result(['blkid', '/dev/' + disk]).returncode == 2 + + def disk_is_available(self, disk: str) -> bool: + """ + Check if a disk is unmounted and appears available for use. + It may or may not contain an identifiable filesystem or partition label. + If there are no mountpoints, it is assumed that the disk is not in use. + + Warning: This function may misclassify LVM_member disks (e.g. in XOSTOR, RAID, ZFS) as "available". + Such disks may not have mountpoints but still be in use. + """ return len(self.ssh(['lsblk', '-n', '-o', 'MOUNTPOINT', '/dev/' + disk]).strip()) == 0 def available_disks(self, blocksize=512):
From 2f72fd8aa89f4f61dfb87fb799d665baa393caba Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Mon, 10 Mar 2025 21:21:54 +0530 Subject: [PATCH 2/9] tests/storage/linstor: Added `--expansion-sr-disk` pytest option for specifying or auto-detecting Linstor SR expansion disks. Signed-off-by: Rushikesh Jadhav --- conftest.py | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/conftest.py b/conftest.py index e4d9f5ef7..3931e6077 100644 --- a/conftest.py +++ b/conftest.py @@ -75,6 +75,13 @@ def pytest_addoption(parser): "4KiB blocksize to be formatted and used in storage tests. " "Set it to 'auto' to let the fixtures auto-detect available disks." ) + parser.addoption( + "--expansion-sr-disk", + action="append", + default=[], + help="Name of an available disk (e.g. sdc) or partition device (e.g. sdc2) to be formatted and used for SR expansion in storage tests. " + "Set it to 'auto' to let the fixtures auto-detect available disks." + ) def pytest_configure(config): global_config.ignore_ssh_banner = config.getoption('--ignore-ssh-banner')
From e32681f49d4523b1bcc5c228d50acef451d1ca39 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Mon, 10 Mar 2025 21:27:31 +0530 Subject: [PATCH 3/9] Enhanced LVM cleanup with `pvremove` on all volume group devices.
Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/storage/linstor/conftest.py b/tests/storage/linstor/conftest.py index afb09da4d..7f78b1a65 100644 --- a/tests/storage/linstor/conftest.py +++ b/tests/storage/linstor/conftest.py @@ -37,9 +37,10 @@ def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type): yield devices for host in hosts: + devices = host.ssh('vgs ' + GROUP_NAME + ' -o pv_name --no-headings').split("\n") host.ssh(['vgremove', '-f', GROUP_NAME]) for device in devices: - host.ssh(['pvremove', device]) + host.ssh(['pvremove', '-ff', '-y', device.strip()]) @pytest.fixture(scope="package") def storage_pool_name(provisioning_type): From f84c8bcfe5da1911c7d570375450013a6d2d8dc3 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Mon, 19 May 2025 12:36:22 +0530 Subject: [PATCH 4/9] Added fixtures to use in `test_linstor_sr_expand_disk` and `test_linstor_sr_expand_host` Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/conftest.py | 50 +++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/tests/storage/linstor/conftest.py b/tests/storage/linstor/conftest.py index 7f78b1a65..fde2d02d6 100644 --- a/tests/storage/linstor/conftest.py +++ b/tests/storage/linstor/conftest.py @@ -109,3 +109,53 @@ def vm_on_linstor_sr(host, linstor_sr, vm_ref): yield vm logging.info("<< Destroy VM") vm.destroy(verify=True) + +@pytest.fixture(scope='module') +def prepare_linstor_packages(hostB1): + if not hostB1.is_package_installed(LINSTOR_PACKAGE): + logging.info("Installing %s on host %s", LINSTOR_PACKAGE, hostB1) + hostB1.yum_install([LINSTOR_RELEASE_PACKAGE]) + hostB1.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing") + # Needed because the linstor driver is not in the xapi sm-plugins list + # before installing the LINSTOR packages. + hostB1.ssh(["systemctl", "restart", "multipathd"]) + hostB1.restart_toolstack(verify=True) + yield + hostB1.yum_remove([LINSTOR_PACKAGE]) # Package cleanup + +@pytest.fixture(scope='module') +def setup_lvm_on_host(hostB1): + # Ensure that the host has disks available to use, we do not care about disks symmetry across pool + # We need the disk to be "raw" (non LVM_member etc) to use + disks = [d for d in hostB1.available_disks() if hostB1.raw_disk_is_available(d)] + assert disks, "hostB1 requires at least one raw disk" + devices = [f"/dev/{d}" for d in disks] + + for disk in devices: + logging.info("Found Disk %s", disk) + hostB1.ssh(['pvcreate', disk]) + hostB1.ssh(['vgcreate', GROUP_NAME] + devices) + + yield "linstor_group", devices + +@pytest.fixture(scope='module') +def join_host_to_pool(host, hostB1): + assert len(hostB1.pool.hosts) == 1, "This test requires second host to be a single host" + original_pool = hostB1.pool + logging.info("Joining host %s to pool %s", hostB1, host) + hostB1.join_pool(host.pool) + yield + host.pool.eject_host(hostB1) + hostB1.pool = original_pool + +@pytest.fixture(scope='module') +def vm_with_reboot_check(vm_on_linstor_sr): + vm = vm_on_linstor_sr + vm.start() + vm.wait_for_os_booted() + yield vm + vm.shutdown(verify=True) + # Ensure VM is able to start and shutdown on modified SR + vm.start() + vm.wait_for_os_booted() + vm.shutdown(verify=True) From 43e4fe4925b90cc1f835d0622ebbbbf3113b549d Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Mon, 10 Mar 2025 21:31:04 +0530 Subject: [PATCH 5/9] Added `expand_lvm_on_hosts` to expand LVM of Linstor SR across hosts. 
Added `test_linstor_sr_expand_disk` to detect & assert change in SR size. The test: - Ensures `--expansion-sr-disk` disk(s) are available on the hosts. - Configures LVM onto it and integrates into Linstor SR pool. - Expands SR capacity and validates the increase. Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/test_linstor_sr.py | 59 +++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/tests/storage/linstor/test_linstor_sr.py b/tests/storage/linstor/test_linstor_sr.py index 7dc6f4597..145975296 100644 --- a/tests/storage/linstor/test_linstor_sr.py +++ b/tests/storage/linstor/test_linstor_sr.py @@ -2,7 +2,7 @@ import pytest import time -from .conftest import LINSTOR_PACKAGE +from .conftest import GROUP_NAME, LINSTOR_PACKAGE from lib.commands import SSHCommandFailed from lib.common import wait_for, vm_image from tests.storage import vdi_is_open @@ -86,6 +86,29 @@ def test_snapshot(self, vm_on_linstor_sr): finally: vm.shutdown(verify=True) + @pytest.mark.small_vm + def test_linstor_sr_expand_disk(self, linstor_sr, provisioning_type, storage_pool_name, + pytestconfig, vm_with_reboot_check): + """ + This test demonstrates online expansion of a LINSTOR SR while a VM is actively running on it. + + It identifies hosts within the same pool, detects free raw disks, and expands the LVM to grow the SR. + A VM is started before the expansion, and its functionality is verified through a shutdown and restart + after the expansion completes successfully. + """ + sr = linstor_sr + sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'}) + + resized = _expand_lvm_on_hosts(sr, provisioning_type, storage_pool_name, pytestconfig) + + # Need to ensure that linstor is healthy/up-to-date before moving ahead. + time.sleep(30) # Wait time for Linstor node communications to restore. + sr.scan() + new_sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'}) + assert int(new_sr_size) > int(sr_size) and resized is True, \ + f"Expected SR size to increase but got old size: {sr_size}, new size: {new_sr_size}" + logging.info("SR expansion completed") + # *** tests with reboots (longer tests). 
@pytest.mark.reboot @@ -133,6 +156,40 @@ def test_linstor_missing(self, linstor_sr, host): # *** End of tests with reboots +def _expand_lvm_on_hosts(sr, provisioning_type, storage_pool_name, pytestconfig): + from lib.commands import SSHCommandFailed + resized = False + for h in sr.pool.hosts: + logging.info(f"Checking for available disks on host: {h.hostname_or_ip}") + available_disks = [d for d in h.available_disks() if h.raw_disk_is_available(d)] + + disks = [] + expansion_sr_disk = pytestconfig.getoption("expansion_sr_disk") + if expansion_sr_disk: + assert len(expansion_sr_disk) == 1, "Only one --expansion-sr-disk should be provided" + if expansion_sr_disk[0] == "auto": + disks = available_disks + else: + assert expansion_sr_disk[0] in available_disks, "The specified expansion disk is unavailable" + disks = expansion_sr_disk + else: + disks = available_disks + + for disk in disks: + device = f"/dev/{disk}" + try: + h.ssh(['pvcreate', device]) + h.ssh(['vgextend', GROUP_NAME, device]) + if provisioning_type == "thin": + h.ssh(['lvextend', '-l', '+100%FREE', storage_pool_name]) + else: + h.ssh(['systemctl', 'restart', 'linstor-satellite.service']) + resized = True + logging.info("LVM extended on host %s using device %s", h.hostname_or_ip, device) + except SSHCommandFailed as e: + raise RuntimeError(f"Disk expansion failed on {h.hostname_or_ip}: {e}") + return resized + # --- Test diskless resources -------------------------------------------------- def _get_diskful_hosts(host, controller_option, volume_name): From b55028318958d9394e946a5804306ba75f3636a7 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Mon, 10 Mar 2025 21:34:13 +0530 Subject: [PATCH 6/9] Added `test_linstor_sr_expand_host` to validate the expansion of Linstor SR by integrating a new host into the storage pool. The test: - Ensures the new host has available disks and installs required Linstor packages. - Configures LVM and integrates it into Linstor SR pool. - Expands SR capacity and validates the increase. - Handles cleanup by removing the node. Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/test_linstor_sr.py | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/tests/storage/linstor/test_linstor_sr.py b/tests/storage/linstor/test_linstor_sr.py index 145975296..97c326784 100644 --- a/tests/storage/linstor/test_linstor_sr.py +++ b/tests/storage/linstor/test_linstor_sr.py @@ -109,6 +109,68 @@ def test_linstor_sr_expand_disk(self, linstor_sr, provisioning_type, storage_poo f"Expected SR size to increase but got old size: {sr_size}, new size: {new_sr_size}" logging.info("SR expansion completed") + @pytest.mark.small_vm + def test_linstor_sr_expand_host(self, linstor_sr, vm_with_reboot_check, prepare_linstor_packages, + join_host_to_pool, setup_lvm_on_host, host, hostB1, storage_pool_name, + provisioning_type): + """ + This test validates expansion of a LINSTOR SR by dynamically adding a new host with local storage to the pool. + A VM is started on the SR before expansion begins to ensure the SR is in active use during the process. + + It performs the following steps: + - Installs LINSTOR packages on the new host (if missing). + - Detects and prepares raw disks using LVM commands. + - Joins the host (hostB1) to the existing pool and registers it with LINSTOR as a node. + - Creates a new LINSTOR storage pool on the added host (LVM or LVM-thin, based on provisioning type). + - Confirms SR expansion by verifying increased physical size. 
+ - Ensures SR functionality by rebooting the VM running on the SR. + + Finally, the test cleans up by deleting the LINSTOR node, ejecting the host from the pool, + and removing packages and LVM metadata. + """ + sr = linstor_sr + sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'}) + resized = False + + # TODO: This section could be moved into a separate fixture for modularity. + # However, capturing the SR size before expansion is critical to the test logic, + # so it's intentionally kept inline to preserve control over the measurement point. + + sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_") + hostname = hostB1.xe('host-param-get', {'uuid': hostB1.uuid, 'param-name': 'name-label'}) + controller_option = "--controllers=" + ",".join([m.hostname_or_ip for m in host.pool.hosts]) + + logging.info("Current list of linstor nodes:") + logging.info(host.ssh_with_result(["linstor", controller_option, "node", "list"]).stdout) + + logging.info("Creating linstor node") + host.ssh(["linstor", controller_option, "node", "create", "--node-type", "combined", + "--communication-type", "plain", hostname, hostB1.hostname_or_ip]) + hostB1.ssh(['systemctl', 'restart', 'linstor-satellite.service']) + time.sleep(45) + + logging.info("New list of linstor nodes:") + logging.info(host.ssh_with_result(["linstor", controller_option, "node", "list"]).stdout) + logging.info("Expanding with linstor node") + + if provisioning_type == "thin": + hostB1.ssh(['lvcreate', '-l', '+100%FREE', '-T', storage_pool_name]) + host.ssh(["linstor", controller_option, "storage-pool", "create", "lvmthin", + hostname, sr_group_name, storage_pool_name]) + else: + host.ssh(["linstor", controller_option, "storage-pool", "create", "lvm", + hostname, sr_group_name, storage_pool_name]) + + sr.scan() + resized = True + new_sr_size = sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'}) + assert int(new_sr_size) > int(sr_size) and resized is True, \ + f"Expected SR size to increase but got old size: {sr_size}, new size: {new_sr_size}" + logging.info("SR expansion completed from size %s to %s", sr_size, new_sr_size) + + # Cleanup + host.ssh(["linstor", controller_option, "node", "delete", hostname]) + # *** tests with reboots (longer tests). @pytest.mark.reboot From c59d2eb2f757c832a202ada40337f837941dad06 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Tue, 11 Mar 2025 21:01:36 +0530 Subject: [PATCH 7/9] Added `test_linstor_sr_reduce_disk` to test SR size reduction by removing a disk. The test: - Does not apply to "thin" type SR due to thin LVM limitations. - Identifies disks used for LVM, chooses a disk and empty it. - Once the disk is removed from LVM, linstor service is restarted to recognise the SR size change. 
Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/test_linstor_sr.py | 41 ++++++++++++++++++++++++ 1 file changed, 41 insertions(+)
diff --git a/tests/storage/linstor/test_linstor_sr.py b/tests/storage/linstor/test_linstor_sr.py index 97c326784..b970dda3d 100644 --- a/tests/storage/linstor/test_linstor_sr.py +++ b/tests/storage/linstor/test_linstor_sr.py @@ -171,6 +171,47 @@ def test_linstor_sr_expand_host(self, linstor_sr, vm_with_reboot_check, prepare_ # Cleanup host.ssh(["linstor", controller_option, "node", "delete", hostname])
+ @pytest.mark.small_vm + def test_linstor_sr_reduce_disk(self, linstor_sr, vm_with_reboot_check, provisioning_type): + """ + Identify hosts within the same pool, detect used disks, modify LVM, and rescan the LINSTOR SR. + """ + if provisioning_type == "thin": + logging.info(f"* SR reduction by removing a device is not supported for {provisioning_type} type *") + return + sr = linstor_sr + sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + resized = False + + for h in sr.pool.hosts: + logging.info("Working on %s", h.hostname_or_ip) + devices = h.ssh('vgs ' + GROUP_NAME + ' -o pv_name --no-headings').split("\n") + assert len(devices) > 1, f"This test requires {GROUP_NAME} to have more than 1 disk or partition" + eject_device = devices[-1].strip() + logging.info("Attempting to remove device: %s", eject_device) + try: + h.ssh(['pvmove', eject_device]) # Choosing last device from list, assuming it's the least filled + h.ssh(['vgreduce', GROUP_NAME, eject_device]) + h.ssh(['pvremove', eject_device]) + except SSHCommandFailed as e: + if "No data to move for" in e.stdout: + h.ssh(['vgreduce', GROUP_NAME, eject_device]) + h.ssh(['pvremove', eject_device]) + else: + pytest.fail(f"Failed to empty device {eject_device}: {e}") + h.ssh('systemctl restart linstor-satellite.service') + resized = True + + # Need to ensure that linstor is healthy/up-to-date before moving ahead. + time.sleep(30) # Wait time for Linstor node communications to restore after service restart. + + sr.scan() + + new_sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + assert new_sr_size < sr_size and resized, \ + f"Expected SR size to decrease but got old size: {sr_size}, new size: {new_sr_size}" + logging.info("SR reduction by removing disk is completed from %s to %s", sr_size, new_sr_size) + # *** tests with reboots (longer tests). @pytest.mark.reboot
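A note on the device-selection assumption above: `test_linstor_sr_reduce_disk` ejects the last PV reported by `vgs` and assumes it is the least filled. A possible refinement, shown here only as a sketch reusing the `h.ssh` helper from the test, would be to pick the PV with the least allocated space before running `pvmove`; the `pvs` fields and `--select` filter used here are an assumption to verify on the XCP-ng hosts:

def _least_used_pv(h, vg_name):
    # List the PVs backing the VG together with their used bytes, e.g. "/dev/sdc 1048576B"
    out = h.ssh(['pvs', '--noheadings', '--units', 'b', '-o', 'pv_name,pv_used',
                 '--select', 'vg_name=' + vg_name])
    rows = [line.split() for line in out.strip().splitlines() if line.strip()]
    # Strip the trailing "B" from pv_used and return the PV with the least data to move
    return min(rows, key=lambda r: int(r[1].rstrip('B')))[0]

Choosing the least-used PV would keep the pvmove duration short and make the "No data to move for" branch the common case.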
From 18f46718f6a14a7cd745e13f581096d0a5406043 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Tue, 11 Mar 2025 21:04:09 +0530 Subject: [PATCH 8/9] Added fixtures to use in `test_linstor_sr_reduce_host`. Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/conftest.py | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+)
diff --git a/tests/storage/linstor/conftest.py b/tests/storage/linstor/conftest.py index fde2d02d6..dcd4e1e84 100644 --- a/tests/storage/linstor/conftest.py +++ b/tests/storage/linstor/conftest.py @@ -159,3 +159,70 @@ def vm_with_reboot_check(vm_on_linstor_sr): vm.start() vm.wait_for_os_booted() vm.shutdown(verify=True)
+ +@pytest.fixture(scope='module') +def evacuate_host_and_prepare_removal(host, hostA2, vm_with_reboot_check): + assert len(host.pool.hosts) >= 3, "This test requires the pool to have at least 3 hosts" + + vm = vm_with_reboot_check + try: + host.ssh(f'xe host-evacuate uuid={hostA2.uuid}') + except Exception as e: + logging.warning("Host evacuation failed: %s", e) + if "lacks the feature" in getattr(e, "stdout", ""): + vm.shutdown(verify=True, force_if_fails=True) + host.ssh(f'xe host-evacuate uuid={hostA2.uuid}') + available_hosts = [h.uuid for h in host.pool.hosts if h.uuid != hostA2.uuid] + if available_hosts: + vm.start(on=available_hosts[0]) + yield
+ +@pytest.fixture(scope='module') +def remove_host_from_linstor(host, hostA2, linstor_sr, evacuate_host_and_prepare_removal): + import time + # Find the host currently running the LINSTOR controller (port 3370) + linstor_controller_host = None + for h in host.pool.hosts: + if h.ssh_with_result(["ss -tuln | grep :3370"]).returncode == 0: + linstor_controller_host = h + break + + # If the controller is running on the host to be ejected (hostA2), stop the services first + if linstor_controller_host and linstor_controller_host.uuid == hostA2.uuid: + logging.info("Host to be ejected is running the LINSTOR controller, stopping services first.") + hostA2.ssh("systemctl stop linstor-controller.service") + hostA2.ssh("systemctl stop drbd-reactor.service") + hostA2.ssh("systemctl stop drbd-graceful-shutdown.service") + time.sleep(30) # Give time for services to stop + + ejecting_host = hostA2.xe('host-param-get', {'uuid': hostA2.uuid, 'param-name': 'name-label'}) + controller_option = "--controllers=" + ",".join([m.hostname_or_ip for m in host.pool.hosts]) + + hostA2.ssh("systemctl stop linstor-satellite.service") + + pbd = host.xe('pbd-list', {'sr-uuid': linstor_sr.uuid, 'host-uuid': hostA2.uuid}, minimal=True) + host.xe('pbd-unplug', {'uuid': pbd}) + + logging.info(host.ssh_with_result(["linstor", controller_option, "node", "delete", ejecting_host]).stdout) + host.pool.eject_host(hostA2) + + yield + + logging.info("Rejoining hostA2 to the pool after test") + hostA2.join_pool(host.pool) + # We don't want linstor services to be running on a deleted node + hostA2.ssh("systemctl stop linstor-satellite.service") + hostA2.ssh("systemctl stop drbd-graceful-shutdown.service") + # TODO: Package list is not retained in teardown + # hostA2.saved_packages_list = hostA2.packages() + # hostA2.saved_rollback_id = hostA2.get_last_yum_history_tid()
+ +@pytest.fixture(scope='module') +def get_sr_size(linstor_sr): + sr = linstor_sr + sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + logging.info("SR Size: %s", sr_size) + yield + new_sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + logging.info("New SR Size vs Old SR Size: %s vs %s", new_sr_size, sr_size) + assert new_sr_size != sr_size, "SR size did not change"
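The fixtures above rely on fixed time.sleep() delays after stopping services and deleting the LINSTOR node. An alternative, sketched below and not part of the patches, would poll the controller with the same `linstor ... node list` invocation already used in these fixtures until the ejected node disappears; it assumes `wait_for` from lib.common (imported by the test module) accepts a `timeout_secs` keyword as used elsewhere in this test suite:

def _wait_for_node_removal(host, controller_option, node_name, timeout=300):
    def node_gone():
        # Reuse the same controller_option string built by the fixture above
        out = host.ssh_with_result(["linstor", controller_option, "node", "list"]).stdout
        return node_name not in out
    wait_for(node_gone, f"LINSTOR node {node_name} removed from node list", timeout_secs=timeout)

Whether drbd-reactor re-elects a controller quickly enough for these queries to succeed would still need to be validated on a real pool.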
From fc8a26dd07fb335b573e7e854f84160e1807f218 Mon Sep 17 00:00:00 2001 From: Rushikesh Jadhav Date: Tue, 20 May 2025 19:14:26 +0530 Subject: [PATCH 9/9] Added `test_linstor_sr_reduce_host` to test SR size reduction by removing a host. The test: - Selects a non-master host to evacuate and eject from the pool. - Ensures that the selected host is not the linstor-controller; if it is, moves the controller services to another host. - Evacuates the selected host, stops linstor services and unplugs its PBD. - Deletes the selected node from the linstor pool. - Restarts services to recognise the SR size change. Signed-off-by: Rushikesh Jadhav --- tests/storage/linstor/test_linstor_sr.py | 26 ++++++++++++++++++++++++ 1 file changed, 26 insertions(+)
diff --git a/tests/storage/linstor/test_linstor_sr.py b/tests/storage/linstor/test_linstor_sr.py index b970dda3d..e64628e9b 100644 --- a/tests/storage/linstor/test_linstor_sr.py +++ b/tests/storage/linstor/test_linstor_sr.py @@ -212,6 +212,32 @@ def test_linstor_sr_reduce_disk(self, linstor_sr, vm_with_reboot_check, provisio f"Expected SR size to decrease but got old size: {sr_size}, new size: {new_sr_size}" logging.info("SR reduction by removing disk is completed from %s to %s", sr_size, new_sr_size)
+ @pytest.mark.small_vm + def test_linstor_sr_reduce_host(self, linstor_sr, get_sr_size, vm_with_reboot_check, host, hostA2, + remove_host_from_linstor): + """ + Remove a non-master host from the pool and from the LINSTOR SR. + TODO: should we measure the time taken by the system to rebalance after host removal? + TODO: should the host be gracefully emptied or force-removed? + """ + sr = linstor_sr + sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + resized = False + + # Restart satellite services for a clean state. This can be optional. + for h in host.pool.hosts: + h.ssh(['systemctl', 'restart', 'linstor-satellite.service']) + + time.sleep(30) # Wait until all services are back to normal + + resized = True + sr.scan() + new_sr_size = int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'})) + assert new_sr_size < sr_size and resized, \ + f"Expected SR size to decrease but got old size: {sr_size}, new size: {new_sr_size}" + logging.info("SR reduction by removing host is completed from %s to %s", sr_size, new_sr_size) + # *** tests with reboots (longer tests). @pytest.mark.reboot
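All four expansion/reduction tests read the SR's physical size through the same `xe sr-param-get` call. A small helper along these lines (a sketch only, not part of the patches, reusing the `sr.pool.master.xe` accessor already used above) could factor that out and make the before/after assertions easier to read:

def sr_physical_size(sr):
    # Physical size of the SR in bytes, as reported by XAPI
    return int(sr.pool.master.xe('sr-param-get', {'uuid': sr.uuid, 'param-name': 'physical-size'}))

The tests would then record `sr_size = sr_physical_size(linstor_sr)` before the change and assert `sr_physical_size(linstor_sr) > sr_size` (or `<` for the reduction tests) after `sr.scan()`.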