From 500f788b66a08d96deb41e33f7372c4e4d8fb7cf Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Fri, 13 Jun 2025 11:46:18 +0200 Subject: [PATCH 01/10] feat(tests): add disk performance benchmark tests Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/__init__.py | 0 tests/storage/benchmarks/conftest.py | 60 ++++++++++ tests/storage/benchmarks/helpers.py | 48 ++++++++ tests/storage/benchmarks/test_disk_perf.py | 123 +++++++++++++++++++++ 4 files changed, 231 insertions(+) create mode 100644 tests/storage/benchmarks/__init__.py create mode 100644 tests/storage/benchmarks/conftest.py create mode 100644 tests/storage/benchmarks/helpers.py create mode 100644 tests/storage/benchmarks/test_disk_perf.py diff --git a/tests/storage/benchmarks/__init__.py b/tests/storage/benchmarks/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py new file mode 100644 index 000000000..db0d259d7 --- /dev/null +++ b/tests/storage/benchmarks/conftest.py @@ -0,0 +1,60 @@ +import pytest +import tempfile +import os +import logging + +from helpers import load_results_from_csv + +@pytest.fixture(scope='package') +def ext_sr(host, sr_disk): + sr = host.sr_create('ext', "EXT-local-SR-test", {'device': '/dev/' + sr_disk}) + yield sr + # teardown + sr.destroy() + +@pytest.fixture(scope='module', params=['raw', 'vhd', 'qcow2']) +def disk_on_ext_sr(request, ext_sr): + disk_type = request.param + disk = {} + if disk_type == 'raw': + ... + elif disk_type == 'vhd': + ... + elif disk_type == 'qcow2': + ... + else: + raise ValueError(f"Unsupported disk type: {disk_type}") + + yield disk + + # teardown + ... 
+ +@pytest.fixture(scope='module') +def vm_on_ext_sr(host, ext_sr, vm_ref): + vm = host.import_vm(vm_ref, sr_uuid=ext_sr.uuid) + yield vm + # teardown + logging.info("<< Destroy VM") + vm.destroy(verify=True) + +@pytest.fixture +def temp_dir(): + with tempfile.TemporaryDirectory() as tmpdir: + yield tmpdir + +def pytest_addoption(parser): + parser.addoption( + "--prev-csv", + action="store", + default=None, + help="Path to previous CSV results file for comparison", + ) + +@pytest.fixture(scope="session") +def prev_results(request): + csv_path = request.config.getoption("--prev-csv") + results = {} + if csv_path and os.path.exists(csv_path): + load_results_from_csv(csv_path) + return results \ No newline at end of file diff --git a/tests/storage/benchmarks/helpers.py b/tests/storage/benchmarks/helpers.py new file mode 100644 index 000000000..dadacca2d --- /dev/null +++ b/tests/storage/benchmarks/helpers.py @@ -0,0 +1,48 @@ +import os +import statistics +import csv +from datetime import datetime + + +def log_result_csv(test_type, rw_mode, result_json, csv_path): + job = result_json["jobs"][0] + op_data = job[rw_mode.replace("rand", "")] + bw_kbps = op_data["bw"] + iops = op_data["iops"] + latency = op_data["lat_ns"]["mean"] + + result = { + "timestamp": datetime.now().isoformat(), + "test": test_type, + "mode": rw_mode, + "bw_MBps": round(bw_kbps / 1024, 2), + "IOPS": round(iops, 2), + "latency": round(latency, 2), + } + + file_exists = os.path.exists(csv_path) + with open(csv_path, "a", newline="") as f: + writer = csv.DictWriter(f, fieldnames=result.keys()) + if not file_exists: + writer.writeheader() + writer.writerow(result) + + return result + + +def load_results_from_csv(csv_path): + results = {} + with open(csv_path, newline="") as f: + reader = csv.DictReader(f) + for row in reader: + key = (row["test"], row["mode"]) + if key not in results: + results[key] = [] + results[key].append(row) + return results + + +def mean(data, key): + return statistics.mean( + 
[float(x[key]) for x in data if key in x] + ) \ No newline at end of file diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py new file mode 100644 index 000000000..f179cccb3 --- /dev/null +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -0,0 +1,123 @@ +import itertools +import os +import json +import statistics +import subprocess +import pytest +import logging +from datetime import datetime + +from helpers import load_results_from_csv, log_result_csv, mean + +### Tests default settings ### + +CSV_FILE = f"/tmp/results_{datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.csv" + +DEFAULT_SAMPLES_NUM = 10 +DEFAULT_SIZE = "1G" +DEFAULT_BS = "4k" +DEFAULT_IODEPTH = 1 +DEFAULT_FILE = "fio-testfile" + +### Tests parameters + +system_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') + +block_sizes = ("4k", "16k", "64k", "1M") +file_sizes = ("1G", "4G", f"{(system_memory/(1024.**3))*2}G") +modes = ( + "read", + "randread", + "write", + "randwrite" +) + +test_types = { + "read": "seq_read", + "randread": "rand_read", + "write": "seq_write", + "randwrite": "rand_write" +} + +### End of tests parameters ### + +def run_fio( + test_name, + rw_mode, + temp_dir, + bs=DEFAULT_BS, + iodepth=DEFAULT_IODEPTH, + size=DEFAULT_SIZE, + file_path="", +): + json_output_path = os.path.join(temp_dir, f"{test_name}.json") + if not file_path: + file_path = os.path.join(temp_dir, DEFAULT_FILE) + fio_cmd = [ + "fio", + f"--name={test_name}", + f"--rw={rw_mode}", + f"--bs={bs}", + f"--iodepth={iodepth}", + f"--size={size}", + f"--filename={file_path}", + "--direct=1", + "--end_fsync=1", + "--fsync_on_close=1", + "--numjobs=1", + "--group_reporting", + "--output-format=json", + f"--output={json_output_path}" + ] + + result = subprocess.run(fio_cmd, capture_output=True, text=True) + if result.returncode != 0: + raise RuntimeError(f"fio failed for {test_name}:\n{result.stderr}") + + with open(json_output_path) as f: + return json.load(f) + 
+def assert_performance_not_degraded(current, previous, threshold=10): + diffs = {} + for metric in ("bw_MBps", "IOPS", "latency"): + try: + curr = mean(current, metric) + prev = mean(previous, metric) + except statistics.StatisticsError: + logging.info(f"Missing metric ({metric}), skipping comparison") + continue + diff = (curr-prev if metric == "latency" else prev-curr) / (prev * 100) + assert diff <= threshold, \ + f"{metric} changed by {diff:.2f}% (allowed {threshold}%)" + diffs[metric] = diff + + logging.info("Performance difference summary:") + for k, v in diffs.items(): + sign = "+" if v < 0 else "-" + logging.info(f"- {k}: {sign}{abs(v):.2f}%") + + +class TestDiskPerfDestroy: ... + + +class TestDiskPerf: + test_cases = itertools.product(block_sizes, file_sizes, modes) + + @pytest.mark.parametrize("block_size,file_size,rw_mode", test_cases) + def test_disk_benchmark( + self, + temp_dir, + prev_results, + block_size, + file_size, + rw_mode + ): + test_type = test_types[rw_mode] + for i in range(DEFAULT_SAMPLES_NUM): + result = run_fio(test_type, rw_mode, temp_dir) + summary = log_result_csv(test_type, rw_mode, result, CSV_FILE) + assert summary["IOPS"] > 0 + results = load_results_from_csv(CSV_FILE) + key = (test_type, rw_mode) + if prev_results and key in prev_results: + assert_performance_not_degraded(results[key], prev_results[key]) From f43d5ebead1e3117f0183e114c4592022f091e80 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Fri, 13 Jun 2025 16:44:33 +0200 Subject: [PATCH 02/10] feat(tests): integrate with xcp-ng fixtures - fix(disk_perf): add configurable numjob - fix(disk_perf): styling and readability - misc(disk_perf): simplify fixtures - misc(disk_perf): styling - fix(disk_perf): usage of double quote in nested string Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 104 +++++++++++++++------ tests/storage/benchmarks/helpers.py | 2 +- tests/storage/benchmarks/test_disk_perf.py | 86 ++++++++++------- 3 files changed, 
128 insertions(+), 64 deletions(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index db0d259d7..82a9db8fd 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -3,46 +3,90 @@ import os import logging -from helpers import load_results_from_csv +from lib.commands import SSHCommandFailed +from .helpers import load_results_from_csv -@pytest.fixture(scope='package') -def ext_sr(host, sr_disk): - sr = host.sr_create('ext', "EXT-local-SR-test", {'device': '/dev/' + sr_disk}) - yield sr - # teardown - sr.destroy() - -@pytest.fixture(scope='module', params=['raw', 'vhd', 'qcow2']) -def disk_on_ext_sr(request, ext_sr): - disk_type = request.param - disk = {} - if disk_type == 'raw': - ... - elif disk_type == 'vhd': - ... - elif disk_type == 'qcow2': - ... +MAX_LENGTH = 64 * (1024**3) # 64GiB + +# use vhd, qcow2, raw... when image_format support will be available +@pytest.fixture(scope="module", params=['vdi']) +def image_format(request): + return request.param + +@pytest.fixture(scope="module") +def running_unix_vm_with_fio(running_unix_vm): + vm = running_unix_vm + install_cmds = ( + ("command -v apt", "apt update && apt install -y fio", "apt remove -y fio"), + ("command -v dnf", "dnf install -y fio", "dnf remove -y fio"), + ("command -v yum", "yum install -y fio", "yum remove -y fio"), + ("command -v apk", "apk add fio", "apk del fio") + ) + + for check_cmd, install_cmd, remove in install_cmds: + try: + vm.ssh(check_cmd, check=True) + logging.info(f">> Installing fio with {install_cmd}") + vm.ssh(install_cmd, check=True) + remove_cmd = remove + break + except SSHCommandFailed: + ... else: - raise ValueError(f"Unsupported disk type: {disk_type}") + raise RuntimeError("Unsupported package manager: could not install fio") - yield disk + yield vm # teardown - ... 
+ logging.info(f"<< Removing fio with {remove_cmd}") + vm.ssh(remove_cmd, check=False) + + +@pytest.fixture(scope="module") +def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): + sr = local_sr_on_hostA1 + vdi = sr.create_vdi("testVDI", MAX_LENGTH) + vdi.image_format = image_format + logging.info(f">> Created VDI {vdi.uuid} of type {image_format}") + + yield vdi -@pytest.fixture(scope='module') -def vm_on_ext_sr(host, ext_sr, vm_ref): - vm = host.import_vm(vm_ref, sr_uuid=ext_sr.uuid) - yield vm # teardown - logging.info("<< Destroy VM") - vm.destroy(verify=True) + logging.info(f"<< Destroying VDI {vdi.uuid}") + vdi.destroy() -@pytest.fixture -def temp_dir(): +@pytest.fixture(scope="module") +def plugged_vbd(vdi_on_local_sr, running_unix_vm_with_fio): + vm = running_unix_vm_with_fio + vdi = vdi_on_local_sr + vbd = vm.create_vbd("autodetect", vdi.uuid) + + logging.info(f">> Plugging VDI {vdi.uuid} on VM {vm.uuid}") + vbd.plug() + + yield vbd + + # teardown + logging.info(f"<< Unplugging VDI {vdi.uuid} from VM {vm.uuid}") + vbd.unplug() + vbd.destroy() + +@pytest.fixture(scope="module") +def local_temp_dir(): with tempfile.TemporaryDirectory() as tmpdir: yield tmpdir +@pytest.fixture(scope="module") +def temp_dir(running_unix_vm_with_fio): + vm = running_unix_vm_with_fio + tempdir = vm.ssh("mktemp -d") + + yield tempdir + + # teardown + vm.ssh(f"rm -r {tempdir}") + + def pytest_addoption(parser): parser.addoption( "--prev-csv", @@ -57,4 +101,4 @@ def prev_results(request): results = {} if csv_path and os.path.exists(csv_path): load_results_from_csv(csv_path) - return results \ No newline at end of file + return results diff --git a/tests/storage/benchmarks/helpers.py b/tests/storage/benchmarks/helpers.py index dadacca2d..071cad7cf 100644 --- a/tests/storage/benchmarks/helpers.py +++ b/tests/storage/benchmarks/helpers.py @@ -45,4 +45,4 @@ def load_results_from_csv(csv_path): def mean(data, key): return statistics.mean( [float(x[key]) for x in data if key in x] 
- ) \ No newline at end of file + ) diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py index f179cccb3..db17f24b3 100644 --- a/tests/storage/benchmarks/test_disk_perf.py +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -2,55 +2,54 @@ import os import json import statistics -import subprocess import pytest import logging from datetime import datetime -from helpers import load_results_from_csv, log_result_csv, mean +from lib.commands import SSHCommandFailed +from .helpers import load_results_from_csv, log_result_csv, mean -### Tests default settings ### +# Tests default settings # -CSV_FILE = f"/tmp/results_{datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.csv" +CSV_FILE = f"/tmp/results_{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.csv" DEFAULT_SAMPLES_NUM = 10 DEFAULT_SIZE = "1G" DEFAULT_BS = "4k" DEFAULT_IODEPTH = 1 +DEFAULT_NUMJOBS = 1 DEFAULT_FILE = "fio-testfile" -### Tests parameters +# Tests parameters # system_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') block_sizes = ("4k", "16k", "64k", "1M") -file_sizes = ("1G", "4G", f"{(system_memory/(1024.**3))*2}G") +file_sizes = ("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G") + modes = ( - "read", - "randread", - "write", - "randwrite" + "read", + "randread", + "write", + "randwrite" ) -test_types = { - "read": "seq_read", - "randread": "rand_read", - "write": "seq_write", - "randwrite": "rand_write" -} - -### End of tests parameters ### +# End of tests parameters # def run_fio( + vm, test_name, rw_mode, temp_dir, + local_temp_dir, bs=DEFAULT_BS, iodepth=DEFAULT_IODEPTH, size=DEFAULT_SIZE, + numjobs=DEFAULT_NUMJOBS, file_path="", ): json_output_path = os.path.join(temp_dir, f"{test_name}.json") + local_json_path = os.path.join(local_temp_dir, f"{test_name}.json") if not file_path: file_path = os.path.join(temp_dir, DEFAULT_FILE) fio_cmd = [ @@ -64,17 +63,19 @@ def run_fio( "--direct=1", "--end_fsync=1", "--fsync_on_close=1", - 
"--numjobs=1", + f"--numjobs={numjobs}", "--group_reporting", "--output-format=json", f"--output={json_output_path}" ] - - result = subprocess.run(fio_cmd, capture_output=True, text=True) - if result.returncode != 0: - raise RuntimeError(f"fio failed for {test_name}:\n{result.stderr}") - - with open(json_output_path) as f: + logging.debug(f"Running {fio_cmd}") + try: + vm.ssh(fio_cmd, check=True) + except SSHCommandFailed as e: + raise RuntimeError(f"fio failed for {test_name}:{e}") + vm.scp(json_output_path, local_json_path, local_dest=True) + logging.debug(f"Stored json at {local_json_path}") + with open(local_json_path) as f: return json.load(f) def assert_performance_not_degraded(current, previous, threshold=10): @@ -86,7 +87,7 @@ def assert_performance_not_degraded(current, previous, threshold=10): except statistics.StatisticsError: logging.info(f"Missing metric ({metric}), skipping comparison") continue - diff = (curr-prev if metric == "latency" else prev-curr) / (prev * 100) + diff = (curr - prev if metric == "latency" else prev - curr) / (prev * 100) assert diff <= threshold, \ f"{metric} changed by {diff:.2f}% (allowed {threshold}%)" diffs[metric] = diff @@ -97,9 +98,6 @@ def assert_performance_not_degraded(current, previous, threshold=10): logging.info(f"- {k}: {sign}{abs(v):.2f}%") -class TestDiskPerfDestroy: ... 
- - class TestDiskPerf: test_cases = itertools.product(block_sizes, file_sizes, modes) @@ -107,17 +105,39 @@ class TestDiskPerf: def test_disk_benchmark( self, temp_dir, + local_temp_dir, prev_results, block_size, file_size, - rw_mode + rw_mode, + running_unix_vm_with_fio, + plugged_vbd, + image_format ): - test_type = test_types[rw_mode] + vm = running_unix_vm_with_fio + vbd = plugged_vbd + device = f"/dev/{vbd.param_get(param_name='device')}" + test_type = "{}-{}-{}-{}".format( + block_size, + file_size, + rw_mode, + image_format + ) + for i in range(DEFAULT_SAMPLES_NUM): - result = run_fio(test_type, rw_mode, temp_dir) + result = run_fio( + vm, + test_type, + rw_mode, + temp_dir, + local_temp_dir, + file_path=device, + bs=block_size, + size=file_size + ) summary = log_result_csv(test_type, rw_mode, result, CSV_FILE) assert summary["IOPS"] > 0 - results = load_results_from_csv(CSV_FILE) key = (test_type, rw_mode) if prev_results and key in prev_results: + results = load_results_from_csv(CSV_FILE) assert_performance_not_degraded(results[key], prev_results[key]) From 577eff22acaaf168cd212c53573a9d3f088352ab Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Mon, 16 Jun 2025 11:16:00 +0200 Subject: [PATCH 03/10] feat(disk_perf): add support of uri for previous csv file - style(disk_perf): sort imports and minor style fixes Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 42 ++++++++---- tests/storage/benchmarks/helpers.py | 6 +- tests/storage/benchmarks/test_disk_perf.py | 75 ++++++++++------------ 3 files changed, 66 insertions(+), 57 deletions(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 82a9db8fd..596893f4d 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -1,18 +1,25 @@ -import pytest -import tempfile -import os import logging +import os +import tempfile +import urllib.request +from urllib.parse import urlparse +from uuid import uuid4 
+ +import pytest from lib.commands import SSHCommandFailed + from .helpers import load_results_from_csv -MAX_LENGTH = 64 * (1024**3) # 64GiB +MAX_LENGTH = 64 * (1024**3) # 64GiB + # use vhd, qcow2, raw... when image_format support will be available -@pytest.fixture(scope="module", params=['vdi']) +@pytest.fixture(scope="module", params=["vdi"]) def image_format(request): return request.param + @pytest.fixture(scope="module") def running_unix_vm_with_fio(running_unix_vm): vm = running_unix_vm @@ -20,7 +27,7 @@ def running_unix_vm_with_fio(running_unix_vm): ("command -v apt", "apt update && apt install -y fio", "apt remove -y fio"), ("command -v dnf", "dnf install -y fio", "dnf remove -y fio"), ("command -v yum", "yum install -y fio", "yum remove -y fio"), - ("command -v apk", "apk add fio", "apk del fio") + ("command -v apk", "apk add fio", "apk del fio"), ) for check_cmd, install_cmd, remove in install_cmds: @@ -55,6 +62,7 @@ def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): logging.info(f"<< Destroying VDI {vdi.uuid}") vdi.destroy() + @pytest.fixture(scope="module") def plugged_vbd(vdi_on_local_sr, running_unix_vm_with_fio): vm = running_unix_vm_with_fio @@ -71,11 +79,13 @@ def plugged_vbd(vdi_on_local_sr, running_unix_vm_with_fio): vbd.unplug() vbd.destroy() + @pytest.fixture(scope="module") def local_temp_dir(): with tempfile.TemporaryDirectory() as tmpdir: yield tmpdir + @pytest.fixture(scope="module") def temp_dir(running_unix_vm_with_fio): vm = running_unix_vm_with_fio @@ -92,13 +102,19 @@ def pytest_addoption(parser): "--prev-csv", action="store", default=None, - help="Path to previous CSV results file for comparison", + help="Path/URI to previous CSV results file for comparison", ) + @pytest.fixture(scope="session") -def prev_results(request): - csv_path = request.config.getoption("--prev-csv") - results = {} - if csv_path and os.path.exists(csv_path): - load_results_from_csv(csv_path) - return results +def prev_results(pytestconfig): + csv_uri = 
pytestconfig.getoption("--prev-csv") + if not csv_uri: + return {} + csv_path = csv_uri + if urlparse(csv_uri).scheme != "": + csv_path = f"{uuid4()}.csv" + urllib.request.urlretrieve(csv_uri, csv_path) + if not os.path.exists(csv_path): + raise FileNotFoundError(csv_path) + return load_results_from_csv(csv_path) diff --git a/tests/storage/benchmarks/helpers.py b/tests/storage/benchmarks/helpers.py index 071cad7cf..468cb6d3c 100644 --- a/tests/storage/benchmarks/helpers.py +++ b/tests/storage/benchmarks/helpers.py @@ -1,6 +1,6 @@ +import csv import os import statistics -import csv from datetime import datetime @@ -43,6 +43,4 @@ def load_results_from_csv(csv_path): def mean(data, key): - return statistics.mean( - [float(x[key]) for x in data if key in x] - ) + return statistics.mean([float(x[key]) for x in data if key in x]) diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py index db17f24b3..ee45dbee6 100644 --- a/tests/storage/benchmarks/test_disk_perf.py +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -1,12 +1,14 @@ import itertools -import os import json -import statistics -import pytest import logging +import os +import statistics from datetime import datetime +import pytest + from lib.commands import SSHCommandFailed + from .helpers import load_results_from_csv, log_result_csv, mean # Tests default settings # @@ -22,31 +24,27 @@ # Tests parameters # -system_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') +system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") block_sizes = ("4k", "16k", "64k", "1M") file_sizes = ("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G") -modes = ( - "read", - "randread", - "write", - "randwrite" -) +modes = ("read", "randread", "write", "randwrite") # End of tests parameters # + def run_fio( - vm, - test_name, - rw_mode, - temp_dir, - local_temp_dir, - bs=DEFAULT_BS, - iodepth=DEFAULT_IODEPTH, - size=DEFAULT_SIZE, - 
numjobs=DEFAULT_NUMJOBS, - file_path="", + vm, + test_name, + rw_mode, + temp_dir, + local_temp_dir, + bs=DEFAULT_BS, + iodepth=DEFAULT_IODEPTH, + size=DEFAULT_SIZE, + numjobs=DEFAULT_NUMJOBS, + file_path="", ): json_output_path = os.path.join(temp_dir, f"{test_name}.json") local_json_path = os.path.join(local_temp_dir, f"{test_name}.json") @@ -66,7 +64,7 @@ def run_fio( f"--numjobs={numjobs}", "--group_reporting", "--output-format=json", - f"--output={json_output_path}" + f"--output={json_output_path}", ] logging.debug(f"Running {fio_cmd}") try: @@ -78,6 +76,7 @@ def run_fio( with open(local_json_path) as f: return json.load(f) + def assert_performance_not_degraded(current, previous, threshold=10): diffs = {} for metric in ("bw_MBps", "IOPS", "latency"): @@ -88,8 +87,9 @@ def assert_performance_not_degraded(current, previous, threshold=10): logging.info(f"Missing metric ({metric}), skipping comparison") continue diff = (curr - prev if metric == "latency" else prev - curr) / (prev * 100) - assert diff <= threshold, \ - f"{metric} changed by {diff:.2f}% (allowed {threshold}%)" + assert ( + diff <= threshold + ), f"{metric} changed by {diff:.2f}% (allowed {threshold}%)" diffs[metric] = diff logging.info("Performance difference summary:") @@ -103,26 +103,21 @@ class TestDiskPerf: @pytest.mark.parametrize("block_size,file_size,rw_mode", test_cases) def test_disk_benchmark( - self, - temp_dir, - local_temp_dir, - prev_results, - block_size, - file_size, - rw_mode, - running_unix_vm_with_fio, - plugged_vbd, - image_format + self, + temp_dir, + local_temp_dir, + prev_results, + block_size, + file_size, + rw_mode, + running_unix_vm_with_fio, + plugged_vbd, + image_format, ): vm = running_unix_vm_with_fio vbd = plugged_vbd device = f"/dev/{vbd.param_get(param_name='device')}" - test_type = "{}-{}-{}-{}".format( - block_size, - file_size, - rw_mode, - image_format - ) + test_type = "{}-{}-{}-{}".format(block_size, file_size, rw_mode, image_format) for i in 
range(DEFAULT_SAMPLES_NUM): result = run_fio( @@ -133,7 +128,7 @@ def test_disk_benchmark( local_temp_dir, file_path=device, bs=block_size, - size=file_size + size=file_size, ) summary = log_result_csv(test_type, rw_mode, result, CSV_FILE) assert summary["IOPS"] > 0 From e58c4596c72963a79e6eea213a49dcaf0b771315 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Mon, 16 Jun 2025 11:59:35 +0200 Subject: [PATCH 04/10] feat(disk_perf): parameterize most test variables - fix(disk_perf): csv paths and add logging Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 52 +++++++++++++++++++++- tests/storage/benchmarks/helpers.py | 6 +++ tests/storage/benchmarks/test_disk_perf.py | 18 ++------ 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 596893f4d..94536b817 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -1,3 +1,4 @@ +import itertools import logging import os import tempfile @@ -9,7 +10,7 @@ from lib.commands import SSHCommandFailed -from .helpers import load_results_from_csv +from .helpers import load_results_from_csv, str_to_tuple MAX_LENGTH = 64 * (1024**3) # 64GiB @@ -98,12 +99,57 @@ def temp_dir(running_unix_vm_with_fio): def pytest_addoption(parser): + system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") + parser.addoption( "--prev-csv", action="store", default=None, help="Path/URI to previous CSV results file for comparison", ) + parser.addoption( + "--block-sizes", + action="store", + type=lambda value: str_to_tuple(value, sep=","), + default=("4k", "16k", "64k", "1M"), + help="Comma separated values of block sizes to test in disk benchmarks", + ) + parser.addoption( + "--file-sizes", + action="store", + type=lambda value: str_to_tuple(value, sep=","), + default=("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G"), + help="Comma separated values of file sizes to test 
in disk benchmarks", + ) + parser.addoption( + "--modes", + action="store", + type=lambda value: str_to_tuple(value, sep=","), + default=("read", "randread", "write", "randwrite"), + help="Comma separated values of rw_modes to test in disk benchmarks", + ) + parser.addoption( + "--numjobs", + action="store", + default=1, + help="Mapped to fio's --numjobs", + ) + parser.addoption( + "--iodepth", + action="store", + default=1, + help="Mapped to fio's --iodepth", + ) + + +def pytest_generate_tests(metafunc): + if {"block_size", "file_size", "rw_mode"} <= set(metafunc.fixturenames): + block_sizes = metafunc.config.getoption("block_sizes") + file_sizes = metafunc.config.getoption("file_sizes") + modes = metafunc.config.getoption("modes") + + test_cases = list(itertools.product(block_sizes, file_sizes, modes)) + metafunc.parametrize("block_size,file_size,rw_mode", test_cases) @pytest.fixture(scope="session") @@ -113,8 +159,10 @@ def prev_results(pytestconfig): return {} csv_path = csv_uri if urlparse(csv_uri).scheme != "": - csv_path = f"{uuid4()}.csv" + logging.info("Detected CSV path as an url") + csv_path = f"/tmp/{uuid4()}.csv" urllib.request.urlretrieve(csv_uri, csv_path) + logging.info(f"Fetching CSV file from {csv_uri} to {csv_path}") if not os.path.exists(csv_path): raise FileNotFoundError(csv_path) return load_results_from_csv(csv_path) diff --git a/tests/storage/benchmarks/helpers.py b/tests/storage/benchmarks/helpers.py index 468cb6d3c..ce34796ce 100644 --- a/tests/storage/benchmarks/helpers.py +++ b/tests/storage/benchmarks/helpers.py @@ -3,6 +3,8 @@ import statistics from datetime import datetime +system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") + def log_result_csv(test_type, rw_mode, result_json, csv_path): job = result_json["jobs"][0] @@ -44,3 +46,7 @@ def load_results_from_csv(csv_path): def mean(data, key): return statistics.mean([float(x[key]) for x in data if key in x]) + + +def str_to_tuple(value, sep=","): + return 
tuple(item.strip() for item in value.split(sep)) diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py index ee45dbee6..73ceea7bc 100644 --- a/tests/storage/benchmarks/test_disk_perf.py +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -1,4 +1,3 @@ -import itertools import json import logging import os @@ -13,7 +12,7 @@ # Tests default settings # -CSV_FILE = f"/tmp/results_{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.csv" +CSV_FILE = f"/tmp/results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.csv" DEFAULT_SAMPLES_NUM = 10 DEFAULT_SIZE = "1G" @@ -22,17 +21,6 @@ DEFAULT_NUMJOBS = 1 DEFAULT_FILE = "fio-testfile" -# Tests parameters # - -system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") - -block_sizes = ("4k", "16k", "64k", "1M") -file_sizes = ("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G") - -modes = ("read", "randread", "write", "randwrite") - -# End of tests parameters # - def run_fio( vm, @@ -99,11 +87,11 @@ def assert_performance_not_degraded(current, previous, threshold=10): class TestDiskPerf: - test_cases = itertools.product(block_sizes, file_sizes, modes) @pytest.mark.parametrize("block_size,file_size,rw_mode", test_cases) def test_disk_benchmark( self, + pytestconfig, temp_dir, local_temp_dir, prev_results, @@ -129,6 +117,8 @@ def test_disk_benchmark( file_path=device, bs=block_size, size=file_size, + iodepth=pytestconfig.getoption("iodepth"), + numjobs=pytestconfig.getoption("numjobs"), ) summary = log_result_csv(test_type, rw_mode, result, CSV_FILE) assert summary["IOPS"] > 0 From efa4b26476f67fd35d90b6a208ef30e8ac7940b6 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Mon, 16 Jun 2025 12:08:37 +0200 Subject: [PATCH 05/10] misc(disk_perf): add marks and job definition Signed-off-by: Mathieu Labourier --- jobs.py | 13 +++++++++++++ tests/storage/benchmarks/test_disk_perf.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/jobs.py b/jobs.py index 
7ee238229..80038e6c0 100755 --- a/jobs.py +++ b/jobs.py @@ -143,6 +143,19 @@ "markers": "quicktest and not sr_disk_4k", "name_filter": "not linstor and not zfsvol", }, + "storage-benchmarks": { + "description": "runs disk benchmark tests", + "requirements": [ + "A local SR on host A1" + "A small VM that can be imported on the SR", + "Enough storage space to store the largest test file (numjobs*memory*2)G" + ], + "nb_pools": 1, + "params": { + "--vm": "single/small_vm", + }, + "paths": ["tests/storage"], + }, "linstor-main": { "description": "tests the linstor storage driver, but avoids migrations and reboots", "requirements": [ diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py index 73ceea7bc..993fa6ffa 100644 --- a/tests/storage/benchmarks/test_disk_perf.py +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -88,7 +88,7 @@ def assert_performance_not_degraded(current, previous, threshold=10): class TestDiskPerf: - @pytest.mark.parametrize("block_size,file_size,rw_mode", test_cases) + @pytest.mark.small_vm def test_disk_benchmark( self, pytestconfig, From 377d577ab02ecc3d705702a2c5c306afca3147a3 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Thu, 19 Jun 2025 10:26:59 +0200 Subject: [PATCH 06/10] fix(benchmarks): implement short changes from review - change job path - set "vhd" as the placeholder image_format - remove leftover code Signed-off-by: Mathieu Labourier --- jobs.py | 2 +- tests/storage/benchmarks/conftest.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/jobs.py b/jobs.py index 80038e6c0..9b24c4eab 100755 --- a/jobs.py +++ b/jobs.py @@ -154,7 +154,7 @@ "params": { "--vm": "single/small_vm", }, - "paths": ["tests/storage"], + "paths": ["tests/storage/benchmarks"], }, "linstor-main": { "description": "tests the linstor storage driver, but avoids migrations and reboots", diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 
94536b817..8b3615728 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -16,7 +16,7 @@ # use vhd, qcow2, raw... when image_format support will be available -@pytest.fixture(scope="module", params=["vdi"]) +@pytest.fixture(scope="module", params=["vhd"]) def image_format(request): return request.param @@ -53,8 +53,7 @@ def running_unix_vm_with_fio(running_unix_vm): @pytest.fixture(scope="module") def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): sr = local_sr_on_hostA1 - vdi = sr.create_vdi("testVDI", MAX_LENGTH) - vdi.image_format = image_format + vdi = sr.create_vdi("testVDI", MAX_LENGTH, image_format=image_format) logging.info(f">> Created VDI {vdi.uuid} of type {image_format}") yield vdi From df0b40fdc8abd0c22157165c6d38e690d11f2d9c Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Thu, 19 Jun 2025 10:49:21 +0200 Subject: [PATCH 07/10] fix(benchmarks): use lib to find package manager and snapshot to revert install Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 39 ++++++++++++++-------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 8b3615728..0038b608b 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -9,6 +9,7 @@ import pytest from lib.commands import SSHCommandFailed +from lib.common import PackageManagerEnum from .helpers import load_results_from_csv, str_to_tuple @@ -24,36 +25,34 @@ def image_format(request): @pytest.fixture(scope="module") def running_unix_vm_with_fio(running_unix_vm): vm = running_unix_vm - install_cmds = ( - ("command -v apt", "apt update && apt install -y fio", "apt remove -y fio"), - ("command -v dnf", "dnf install -y fio", "dnf remove -y fio"), - ("command -v yum", "yum install -y fio", "yum remove -y fio"), - ("command -v apk", "apk add fio", "apk del fio"), - ) - - for check_cmd, install_cmd, 
remove in install_cmds: - try: - vm.ssh(check_cmd, check=True) - logging.info(f">> Installing fio with {install_cmd}") - vm.ssh(install_cmd, check=True) - remove_cmd = remove - break - except SSHCommandFailed: - ... - else: + snapshot = vm.snapshot() + + package_cmds = { + PackageManagerEnum.APT_GET.value: "apt-get update && apt install -y fio", + PackageManagerEnum.RPM.value: "yum install -y fio", + PackageManagerEnum.UNKNOWN.value: "apk add fio", + } + + package_manager = vm.detect_package_manager().value + try: + vm.ssh(package_cmds[package_manager]) + logging.info(f">> Installing fio with {package_cmds[package_manager]}") + except SSHCommandFailed: raise RuntimeError("Unsupported package manager: could not install fio") yield vm # teardown - logging.info(f"<< Removing fio with {remove_cmd}") - vm.ssh(remove_cmd, check=False) + try: + snapshot.revert() + finally: + snapshot.destroy() @pytest.fixture(scope="module") def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): sr = local_sr_on_hostA1 - vdi = sr.create_vdi("testVDI", MAX_LENGTH, image_format=image_format) + vdi = sr.create_vdi("testVDI", MAX_LENGTH) # , image_format=image_format) logging.info(f">> Created VDI {vdi.uuid} of type {image_format}") yield vdi From 65157ae8b42e23611f08812f12856bacab7d35fe Mon Sep 17 00:00:00 2001 From: Millefeuille Date: Fri, 20 Jun 2025 16:01:31 +0200 Subject: [PATCH 08/10] misc: add comment explaining the default file size calculation Co-authored-by: Anthoine Signed-off-by: Millefeuille --- tests/storage/benchmarks/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 0038b608b..e9362f2e1 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -116,7 +116,7 @@ def pytest_addoption(parser): "--file-sizes", action="store", type=lambda value: str_to_tuple(value, sep=","), - default=("1G", "4G", f"{int((system_memory // 
(1024.**3)) * 2)}G"), + default=("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G"), # (2*Memory) GiB help="Comma separated values of file sizes to test in disk benchmarks", ) parser.addoption( From 63e2cf5c1c2a73b8278ae5b27626c3e7ed0832a1 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Tue, 24 Jun 2025 10:59:50 +0200 Subject: [PATCH 09/10] misc(benchmarks): implement suggestions from reviews - add improvement threshold - adjust wording in messages - add additional information in test_type - fix percentage calculation - styling Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 14 +++++++++- tests/storage/benchmarks/test_disk_perf.py | 30 ++++++++++++++++------ 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index e9362f2e1..094be59a6 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -52,7 +52,7 @@ def running_unix_vm_with_fio(running_unix_vm): @pytest.fixture(scope="module") def vdi_on_local_sr(host, local_sr_on_hostA1, image_format): sr = local_sr_on_hostA1 - vdi = sr.create_vdi("testVDI", MAX_LENGTH) # , image_format=image_format) + vdi = sr.create_vdi("testVDI", MAX_LENGTH) # , image_format=image_format) logging.info(f">> Created VDI {vdi.uuid} of type {image_format}") yield vdi @@ -138,6 +138,18 @@ def pytest_addoption(parser): default=1, help="Mapped to fio's --iodepth", ) + parser.addoption( + "--regression_threshold", + action="store", + default=10, + help="Percentage of regression that will cause the test to fail", + ) + parser.addoption( + "--improvement_threshold", + action="store", + default=10, + help="Minimum percentage of improvement considered significant enough to report", + ) def pytest_generate_tests(metafunc): diff --git a/tests/storage/benchmarks/test_disk_perf.py b/tests/storage/benchmarks/test_disk_perf.py index 993fa6ffa..075e9b09e 100644 --- 
a/tests/storage/benchmarks/test_disk_perf.py +++ b/tests/storage/benchmarks/test_disk_perf.py @@ -10,8 +10,6 @@ from .helpers import load_results_from_csv, log_result_csv, mean -# Tests default settings # - CSV_FILE = f"/tmp/results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.csv" DEFAULT_SAMPLES_NUM = 10 @@ -65,7 +63,9 @@ return json.load(f) -def assert_performance_not_degraded(current, previous, threshold=10): +def assert_performance_not_degraded( + current, previous, regression_threshold=10, improvement_threshold=10 +): diffs = {} for metric in ("bw_MBps", "IOPS", "latency"): try: @@ -74,10 +74,12 @@ except statistics.StatisticsError: logging.info(f"Missing metric ({metric}), skipping comparison") continue - diff = (curr - prev if metric == "latency" else prev - curr) / (prev * 100) + diff = (curr - prev if metric == "latency" else prev - curr) / prev * 100 + if diff <= -improvement_threshold: + logging.info(f"{metric} improved by {abs(diff):.2f}%") assert ( - diff <= threshold - ), f"{metric} changed by {diff:.2f}% (allowed {threshold}%)" + diff <= regression_threshold + ), f"{metric} regressed by {diff:.2f}% (allowed {regression_threshold}%)" diffs[metric] = diff logging.info("Performance difference summary:") @@ -105,7 +107,14 @@ def test_disk_benchmark( vm = running_unix_vm_with_fio vbd = plugged_vbd device = f"/dev/{vbd.param_get(param_name='device')}" - test_type = "{}-{}-{}-{}".format(block_size, file_size, rw_mode, image_format) + test_type = "bench-fio-{}-{}-{}-{}-{}-{}".format( + block_size, + file_size, + pytestconfig.getoption("iodepth"), + pytestconfig.getoption("numjobs"), + rw_mode, + image_format, + ) for i in range(DEFAULT_SAMPLES_NUM): result = run_fio( @@ -125,4 +134,9 @@ key = (test_type, rw_mode) if prev_results and key in prev_results: results = load_results_from_csv(CSV_FILE) - assert_performance_not_degraded(results[key], 
prev_results[key]) + assert_performance_not_degraded( + results[key], + prev_results[key], + regression_threshold=pytestconfig.getoption("regression_threshold"), + improvement_threshold=pytestconfig.getoption("improvement_threshold"), + ) From 718b41f883d33d050ffedb3df17ee495a5dd2529 Mon Sep 17 00:00:00 2001 From: Mathieu Labourier Date: Wed, 13 Aug 2025 14:08:40 +0200 Subject: [PATCH 10/10] fix: add TODO label for image_format fixture Signed-off-by: Mathieu Labourier --- tests/storage/benchmarks/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/storage/benchmarks/conftest.py b/tests/storage/benchmarks/conftest.py index 094be59a6..86a058d48 100644 --- a/tests/storage/benchmarks/conftest.py +++ b/tests/storage/benchmarks/conftest.py @@ -16,7 +16,7 @@ MAX_LENGTH = 64 * (1024**3) # 64GiB -# use vhd, qcow2, raw... when image_format support will be available +# TODO: use vhd, qcow2, raw... when image_format support will be available @pytest.fixture(scope="module", params=["vhd"]) def image_format(request): return request.param