diff --git a/qubes/tests/__init__.py b/qubes/tests/__init__.py
index 456107529..db69003e3 100644
--- a/qubes/tests/__init__.py
+++ b/qubes/tests/__init__.py
@@ -1832,6 +1832,7 @@ def load_tests(loader, tests, pattern):  # pylint: disable=unused-argument
         "qubes.tests.integ.devices_block",
         "qubes.tests.integ.devices_pci",
         "qubes.tests.integ.qrexec",
+        "qubes.tests.integ.dispvm_perf",
         "qubes.tests.integ.qrexec_perf",
         "qubes.tests.integ.storage_perf",
         "qubes.tests.integ.dom0_update",
diff --git a/qubes/tests/integ/dispvm_perf.py b/qubes/tests/integ/dispvm_perf.py
new file mode 100644
index 000000000..083f50b83
--- /dev/null
+++ b/qubes/tests/integ/dispvm_perf.py
@@ -0,0 +1,253 @@
+#
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2025 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+
+import asyncio
+import os
+import sys
+import time
+
+import qubes.tests
+
+
+class TC_00_DispVMPerfMixin:
+    def setUp(self: qubes.tests.SystemTestCase):
+        super().setUp()
+        if "whonix-g" in self.template:
+            self.skipTest(
+                "whonix gateway is not supported as DisposableVM Template"
+            )
+        self.dvm = self.app.add_new_vm(
+            "AppVM",
+            name=self.make_vm_name("dvm"),
+            label="red",
+            template=self.app.domains[self.template],
+            template_for_dispvms=True,
+        )
+        self.vm1 = self.app.add_new_vm(
+            "AppVM",
+            name=self.make_vm_name("vm1"),
+            label="red",
+            template=self.app.domains[self.template],
+            default_dispvm=self.dvm,
+        )
+        self.vm2 = self.app.add_new_vm(
+            "AppVM",
+            name=self.make_vm_name("vm2"),
+            label="red",
+            template=self.app.domains[self.template],
+            default_dispvm=self.dvm,
+        )
+        self.loop.run_until_complete(
+            asyncio.gather(
+                self.dvm.create_on_disk(),
+                self.vm1.create_on_disk(),
+                self.vm2.create_on_disk(),
+            )
+        )
+        start_tasks = [self.vm1.start()]
+        if self._testMethodName.startswith("vm"):
+            start_tasks.append(self.vm2.start())
+        self.loop.run_until_complete(asyncio.gather(*start_tasks))
+
+    def tearDown(self: qubes.tests.SystemTestCase):
+        # Shut down vm2 before the generic cleanup removes the test qubes;
+        # after super().tearDown() the domain no longer exists.
+        if self.vm2.is_running():
+            self.loop.run_until_complete(self.vm2.shutdown())
+        super().tearDown()
+        if not os.getenv("QUBES_TEST_SKIP_TEARDOWN_SLEEP"):
+            # Avoid previous test load interfering with the next test.
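+            # vm-vm tests leave less background cleanup running than the
+            # disposable tests, so a shorter settle time suffices.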
+            if self._testMethodName.startswith("vm"):
+                delay = 5
+            else:
+                delay = 15
+            time.sleep(delay)
+
+    def run_test(self, name):
+        dvm = self.dvm.name
+        vm1 = self.vm1.name
+        vm2 = ""
+        if name.startswith("vm"):
+            vm2 = self.vm2.name
+        cmd = [
+            "/usr/lib/qubes/tests/dispvm_perf.py",
+            f"--dvm={dvm}",
+            f"--vm1={vm1}",
+            f"--vm2={vm2}",
+            name,
+        ]
+        p = self.loop.run_until_complete(asyncio.create_subprocess_exec(*cmd))
+        self.loop.run_until_complete(p.wait())
+        if p.returncode:
+            self.fail(f"'{' '.join(cmd)}' failed: {p.returncode}")
+
+    def test_000_dispvm(self):
+        """Latency of vm-dispvm calls"""
+        self.run_test("dispvm")
+
+    def test_001_dispvm_gui(self):
+        """Latency of vm-dispvm GUI calls"""
+        self.run_test("dispvm-gui")
+
+    def test_002_dispvm_concurrent(self):
+        """Latency of vm-dispvm concurrent calls"""
+        self.run_test("dispvm-concurrent")
+
+    def test_003_dispvm_gui_concurrent(self):
+        """Latency of vm-dispvm concurrent GUI calls"""
+        self.run_test("dispvm-gui-concurrent")
+
+    def test_006_dispvm_from_dom0(self):
+        """Latency of dom0-dispvm calls"""
+        self.run_test("dispvm-dom0")
+
+    def test_007_dispvm_from_dom0_gui(self):
+        """Latency of dom0-dispvm GUI calls"""
+        self.run_test("dispvm-dom0-gui")
+
+    def test_008_dispvm_from_dom0_concurrent(self):
+        """Latency of dom0-dispvm concurrent calls"""
+        self.run_test("dispvm-dom0-concurrent")
+
+    def test_009_dispvm_from_dom0_gui_concurrent(self):
+        """Latency of dom0-dispvm concurrent GUI calls"""
+        self.run_test("dispvm-dom0-gui-concurrent")
+
+    def test_020_dispvm_preload(self):
+        """Latency of vm-dispvm (preload) calls"""
+        self.run_test("dispvm-preload")
+
+    def test_021_dispvm_preload_gui(self):
+        """Latency of vm-dispvm (preload) GUI calls"""
+        self.run_test("dispvm-preload-gui")
+
+    def test_022_dispvm_preload_concurrent(self):
+        """Latency of vm-dispvm (preload) concurrent calls"""
+        self.run_test("dispvm-preload-concurrent")
+
+    def test_023_dispvm_preload_gui_concurrent(self):
+        """Latency of vm-dispvm (preload) concurrent GUI calls"""
+        self.run_test("dispvm-preload-gui-concurrent")
+
+    def test_026_dispvm_from_dom0_preload(self):
+        """Latency of dom0-dispvm (preload) calls"""
+        self.run_test("dispvm-preload-dom0")
+
+    def test_027_dispvm_from_dom0_preload_gui(self):
+        """Latency of dom0-dispvm (preload) GUI calls"""
+        self.run_test("dispvm-preload-dom0-gui")
+
+    def test_028_dispvm_from_dom0_preload_concurrent(self):
+        """Latency of dom0-dispvm (preload) concurrent calls"""
+        self.run_test("dispvm-preload-dom0-concurrent")
+
+    def test_029_dispvm_from_dom0_preload_gui_concurrent(self):
+        """Latency of dom0-dispvm (preload) concurrent GUI calls"""
+        self.run_test("dispvm-preload-dom0-gui-concurrent")
+
+    def test_400_dispvm_api(self):
+        """Latency of dom0-dispvm API calls"""
+        self.run_test("dispvm-api")
+
+    def test_401_dispvm_gui_api(self):
+        """Latency of dom0-dispvm GUI API calls"""
+        self.run_test("dispvm-gui-api")
+
+    def test_402_dispvm_concurrent_api(self):
+        """Latency of dom0-dispvm concurrent API calls"""
+        self.run_test("dispvm-concurrent-api")
+
+    def test_403_dispvm_gui_concurrent_api(self):
+        """Latency of dom0-dispvm concurrent GUI API calls"""
+        self.run_test("dispvm-gui-concurrent-api")
+
+    def test_404_dispvm_preload_more_api(self):
+        """Latency of dom0-dispvm (preload more) API calls"""
+        self.run_test("dispvm-preload-more-api")
+
+    def test_404_dispvm_preload_less_api(self):
+        """Latency of dom0-dispvm (preload less) API calls"""
+        self.run_test("dispvm-preload-less-api")
+
+    def test_404_dispvm_preload_api(self):
+        """Latency of dom0-dispvm (preload) API calls"""
+        self.run_test("dispvm-preload-api")
+
+    def test_405_dispvm_preload_gui_api(self):
+        """Latency of dom0-dispvm (preload) GUI API calls"""
+        self.run_test("dispvm-preload-gui-api")
+
+    def test_406_dispvm_preload_concurrent_api(self):
+        """Latency of dom0-dispvm (preload) concurrent API calls"""
+        self.run_test("dispvm-preload-concurrent-api")
+
+    def test_407_dispvm_preload_gui_concurrent_api(self):
+        """Latency of dom0-dispvm (preload) concurrent GUI API calls"""
+        self.run_test("dispvm-preload-gui-concurrent-api")
+
+    def test_900_vm(self):
+        """Latency of vm-vm calls"""
+        self.run_test("vm")
+
+    def test_901_vm_gui(self):
+        """Latency of vm-vm GUI calls"""
+        self.run_test("vm-gui")
+
+    def test_902_vm_concurrent(self):
+        """Latency of vm-vm concurrent calls"""
+        self.run_test("vm-concurrent")
+
+    def test_903_vm_gui_concurrent(self):
+        """Latency of vm-vm concurrent GUI calls"""
+        self.run_test("vm-gui-concurrent")
+
+    def test_904_vm_api(self):
+        """Latency of dom0-vm API calls"""
+        self.run_test("vm-api")
+
+    def test_905_vm_gui_api(self):
+        """Latency of dom0-vm GUI API calls"""
+        self.run_test("vm-gui-api")
+
+    def test_906_vm_concurrent_api(self):
+        """Latency of dom0-vm concurrent API calls"""
+        self.run_test("vm-concurrent-api")
+
+    def test_907_vm_gui_concurrent_api(self):
+        """Latency of dom0-vm concurrent GUI API calls"""
+        self.run_test("vm-gui-concurrent-api")
+
+
+def create_testcases_for_templates():
+    return qubes.tests.create_testcases_for_templates(
+        "TC_00_DispVMPerf",
+        TC_00_DispVMPerfMixin,
+        qubes.tests.SystemTestCase,
+        module=sys.modules[__name__],
+    )
+
+
+def load_tests(loader, tests, pattern):  # pylint: disable=unused-argument
+    tests.addTests(loader.loadTestsFromNames(create_testcases_for_templates()))
+    return tests
+
+
+qubes.tests.maybe_create_testcases_on_import(create_testcases_for_templates)
diff --git a/rpm_spec/core-dom0.spec.in b/rpm_spec/core-dom0.spec.in
index fdfe526f4..83e66aca6 100644
--- a/rpm_spec/core-dom0.spec.in
+++ b/rpm_spec/core-dom0.spec.in
@@ -527,6 +527,7 @@ done
 %{python3_sitelib}/qubes/tests/integ/devices_block.py
 %{python3_sitelib}/qubes/tests/integ/devices_pci.py
 %{python3_sitelib}/qubes/tests/integ/dispvm.py
+%{python3_sitelib}/qubes/tests/integ/dispvm_perf.py
 %{python3_sitelib}/qubes/tests/integ/dom0_update.py
 %{python3_sitelib}/qubes/tests/integ/vm_update.py
 %{python3_sitelib}/qubes/tests/integ/mime.py
@@ -560,6 +561,7 @@ done
 /usr/lib/qubes/cleanup-dispvms
 /usr/lib/qubes/fix-dir-perms.sh
 /usr/lib/qubes/startup-misc.sh
+/usr/lib/qubes/tests/dispvm_perf.py
 /usr/lib/qubes/tests/qrexec_perf.py
 /usr/lib/qubes/tests/storage_perf.py
 %{_unitdir}/lvm2-pvscan@.service.d/30_qubes.conf
diff --git a/tests/dispvm_perf.py b/tests/dispvm_perf.py
new file mode 100755
index 000000000..f62b20068
--- /dev/null
+++ b/tests/dispvm_perf.py
@@ -0,0 +1,575 @@
+#!/usr/bin/python3
+#
+# The Qubes OS Project, https://www.qubes-os.org/
+#
+# Copyright (C) 2025 Marek Marczykowski-Górecki
+#
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+
+"""
+Test disposable qube call latency
+
+The tests report call latency in seconds (float) in the second field. The
+remaining fields are key=value pairs of the test configuration and call
+results.
+"""
+
+import argparse
+import asyncio
+import concurrent.futures
+import dataclasses
+import os
+import subprocess
+import time
+
+import qubesadmin
+
+
+@dataclasses.dataclass
+class TestConfig:
+    """
+    Test configuration.
+
+    :param str name: test name
+    :param bool gui: wait for GUI session
+    :param bool concurrent: issue requests concurrently
+    :param bool from_dom0: initiate call from dom0
+    :param int preload_max: number of disposables to preload
+    :param bool non_dispvm: target a non-disposable qube
+    :param bool admin_api: use the Admin API directly
+
+    Notes
+    -----
+    Source-Target:
+    - dom0-dispvm:
+      - qvm-run: Menu items and scripts.
+      - qubesadmin: Raw call to measure phases. Doesn't represent the most
+        realistic output, as users would most likely interact with it via
+        wrappers such as qvm-run.
+    - vm-dispvm:
+      - qrexec-client-vm: Scripts.
+    - *-vm: Fast way to check whether the tests themselves are working.
+    GUI VS non-GUI:
+    - GUI tests workflows that use disposables to open untrusted files or
+      run programs that require graphics. Instead of relying on what
+      xdg-open would do on the target, we simply measure the time until the
+      GUI session starts on the target.
+    - non-GUI tests a workflow that uses disposables to run untrusted code.
+    Sequential VS Concurrent:
+    - Sequential calls represent a result closer to the end-user workflow,
+      as it is the simplest to achieve.
+    - Concurrent calls are multiple requests made without regard to the
+      completion of previous requests.
+    - The average time under concurrency is skewed, as there are multiple
+      simultaneous calls.
+    Normal VS Preloaded:
+    - Improving normal qube startup will shorten preload usage time, but
+      the reverse is not true. Normal disposables are a control group for
+      preloaded disposables.
+    - Preloading more than 2 is not useful for sequential calls, as long as
+      a preloaded disposable has completed by the time of the next call.
+    """
+
+    name: str
+    gui: bool = False
+    concurrent: bool = False
+    from_dom0: bool = False
+    preload_max: int = 0
+    non_dispvm: bool = False
+    admin_api: bool = False
+
+
+POLICY_FILE = "/run/qubes/policy.d/10-test-dispvm-perf.policy"
+# MAX_PRELOAD is the value that neither overpreloads nor underpreloads (best
+# performance) for sequential calls, per the tests
+# "dispvm-preload(-(more|less))-api" (measured on fedora-42-xfce). Machines
+# with different hardware, or domains that boot faster or slower, can
+# theoretically have a different best value.
+MAX_PRELOAD = 2
+# The preload number is set to MAX_CONCURRENCY for concurrent calls. This
+# number is also used by non-preloaded disposables to set the maximum
+# workers/jobs.
+MAX_CONCURRENCY = MAX_PRELOAD * 2
+# How was this amazing algorithm chosen? Yes.
+ITERATIONS = MAX_CONCURRENCY * 4
+# A small rounding precision excludes noise. It is also used to zero-pad (as
+# a string) so that fields align.
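+# For example, with a precision of 3, a measured 1.2345678 s is reported as
+# "1.235".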
+ROUND_PRECISION = 3
+
+ALL_TESTS = [
+    TestConfig("vm", non_dispvm=True),
+    TestConfig("vm-gui", gui=True, non_dispvm=True),
+    TestConfig("vm-concurrent", concurrent=True, non_dispvm=True),
+    TestConfig("vm-gui-concurrent", gui=True, concurrent=True, non_dispvm=True),
+    TestConfig("vm-api", non_dispvm=True, admin_api=True),
+    TestConfig("vm-gui-api", gui=True, non_dispvm=True, admin_api=True),
+    TestConfig(
+        "vm-concurrent-api", concurrent=True, non_dispvm=True, admin_api=True
+    ),
+    TestConfig(
+        "vm-gui-concurrent-api",
+        gui=True,
+        concurrent=True,
+        non_dispvm=True,
+        admin_api=True,
+    ),
+    TestConfig("dispvm"),
+    TestConfig("dispvm-gui", gui=True),
+    TestConfig("dispvm-concurrent", concurrent=True),
+    TestConfig("dispvm-gui-concurrent", gui=True, concurrent=True),
+    TestConfig("dispvm-dom0", from_dom0=True),
+    TestConfig("dispvm-dom0-gui", gui=True, from_dom0=True),
+    TestConfig("dispvm-dom0-concurrent", concurrent=True, from_dom0=True),
+    TestConfig(
+        "dispvm-dom0-gui-concurrent", gui=True, concurrent=True, from_dom0=True
+    ),
+    TestConfig("dispvm-preload", preload_max=MAX_PRELOAD),
+    TestConfig("dispvm-preload-gui", gui=True, preload_max=MAX_PRELOAD),
+    TestConfig(
+        "dispvm-preload-concurrent",
+        concurrent=True,
+        preload_max=MAX_CONCURRENCY,
+    ),
+    TestConfig(
+        "dispvm-preload-gui-concurrent",
+        gui=True,
+        concurrent=True,
+        preload_max=MAX_CONCURRENCY,
+    ),
+    TestConfig("dispvm-preload-dom0", from_dom0=True, preload_max=MAX_PRELOAD),
+    TestConfig(
+        "dispvm-preload-dom0-gui",
+        gui=True,
+        from_dom0=True,
+        preload_max=MAX_PRELOAD,
+    ),
+    TestConfig(
+        "dispvm-preload-dom0-concurrent",
+        concurrent=True,
+        from_dom0=True,
+        preload_max=MAX_CONCURRENCY,
+    ),
+    TestConfig(
+        "dispvm-preload-dom0-gui-concurrent",
+        gui=True,
+        concurrent=True,
+        from_dom0=True,
+        preload_max=MAX_CONCURRENCY,
+    ),
+    TestConfig("dispvm-api", admin_api=True),
+    TestConfig("dispvm-concurrent-api", concurrent=True, admin_api=True),
+    TestConfig("dispvm-gui-api", gui=True, admin_api=True),
+    TestConfig(
+        "dispvm-gui-concurrent-api", gui=True, concurrent=True, admin_api=True
+    ),
+    TestConfig(
+        "dispvm-preload-more-api",
+        preload_max=MAX_PRELOAD + 1,
+        admin_api=True,
+    ),
+    TestConfig(
+        "dispvm-preload-less-api",
+        preload_max=MAX_PRELOAD - 1,
+        admin_api=True,
+    ),
+    TestConfig("dispvm-preload-api", preload_max=MAX_PRELOAD, admin_api=True),
+    TestConfig(
+        "dispvm-preload-concurrent-api",
+        concurrent=True,
+        preload_max=MAX_PRELOAD,
+        admin_api=True,
+    ),
+    TestConfig(
+        "dispvm-preload-gui-api",
+        gui=True,
+        preload_max=MAX_PRELOAD,
+        admin_api=True,
+    ),
+    TestConfig(
+        "dispvm-preload-gui-concurrent-api",
+        gui=True,
+        concurrent=True,
+        preload_max=MAX_PRELOAD,
+        admin_api=True,
+    ),
+]
+
+
+def get_time():
+    return time.clock_gettime(time.CLOCK_MONOTONIC)
+
+
+class TestRun:
+    def __init__(self, dom0, dvm, vm1, vm2):
+        self.dom0 = dom0
+        self.dvm = dvm
+        self.vm1 = vm1
+        self.vm2 = vm2
+        self.iterations = ITERATIONS
+
+    async def wait_preload(
+        self,
+        preload_max,
+        appvm=None,
+        wait_completion=True,
+        fail_on_timeout=True,
+        timeout=60,
+    ):
+        """Waiting for completion avoids coroutine objects leaking."""
+        if not appvm:
+            appvm = self.dvm
+        for _ in range(timeout):
+            preload_dispvm = appvm.features.get("preload-dispvm", "")
+            # Note: "".split(" ") yields [""], not [], so guard explicitly.
+            preload_dispvm = preload_dispvm.split(" ") if preload_dispvm else []
+            if len(preload_dispvm) == preload_max:
+                break
+            await asyncio.sleep(1)
+        else:
+            if fail_on_timeout:
+                raise Exception("didn't preload in time")
+        if not wait_completion:
+            return
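+        # Second phase: wait until every preloaded disposable reports the
+        # "preload-dispvm-completed" feature.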
+        preload_dispvm = appvm.features.get("preload-dispvm", "")
+        preload_dispvm = preload_dispvm.split(" ") if preload_dispvm else []
+        preload_unfinished = preload_dispvm
+        for _ in range(timeout):
+            for name in preload_unfinished.copy():
+                self.dom0.app.domains.refresh_cache(force=True)
+                qube = self.dom0.app.domains[name]
+                completed = qube.features.get("preload-dispvm-completed")
+                if completed:
+                    preload_unfinished.remove(name)
+                    continue
+            if not preload_unfinished:
+                break
+            await asyncio.sleep(1)
+        else:
+            if fail_on_timeout:
+                raise Exception("last preloaded didn't complete in time")
+
+    def wait_for_dispvm_destroy(self, dispvm_names):
+        timeout = 60
+        while True:
+            self.dom0.app.domains.refresh_cache(force=True)
+            if set(dispvm_names).isdisjoint(self.dom0.app.domains):
+                break
+            time.sleep(1)
+            timeout -= 1
+            if timeout <= 0:
+                raise Exception("didn't destroy dispvm(s) in time")
+
+    def run_latency_calls(self, test):
+        if test.gui:
+            service = "qubes.WaitForSession"
+        else:
+            service = "qubes.WaitForRunningSystem"
+
+        if test.concurrent:
+            term = "&"
+            timeout = self.iterations / MAX_CONCURRENCY * 30
+        else:
+            term = ";"
+            timeout = self.iterations * 30
+
+        if test.from_dom0:
+            caller = "qvm-run -p --service --filter-escape-chars "
+            caller += "--no-color-output --no-color-stderr "
+            if test.gui:
+                caller += "--gui "
+            else:
+                caller += "--no-gui "
+            caller += f"--dispvm={self.dvm.name} "
+            cmd = f"{caller} -- {service}"
+        else:
+            if test.non_dispvm:
+                cmd = f"qrexec-client-vm -- {self.vm2.name} {service}"
+            else:
+                cmd = f"qrexec-client-vm -- @dispvm {service}"
+
+        code = (
+            "set -eu --; "
+            f'max_concurrency="{MAX_CONCURRENCY}"; '
+            f"for i in $(seq {self.iterations}); do "
+            f"    out=$({cmd}) {term}"
+            '    pid="${!-}"; '
+            '    if test -n "${pid}"; then '
+            '        set -- "${@}" "${pid}"; '
+            '        if test "${#}" = "${max_concurrency}" && test -n "${1}"; then'
+            '            wait "${1}"; shift; '
+            "        fi; "
+            "    fi; "
+            "done; "
+            'wait "${@}"'
+        )
+
+        start_time = get_time()
+        try:
+            if test.from_dom0:
+                subprocess.run(code, shell=True, check=True, timeout=timeout)
+            else:
+                self.vm1.run(code, timeout=timeout)
+        except subprocess.CalledProcessError as e:
+            raise Exception(
+                f"service '{cmd}' failed ({e.returncode}):"
+                f" {e.stdout},"
+                f" {e.stderr}"
+            )
+        except subprocess.TimeoutExpired:
+            raise Exception(f"service '{cmd}' failed: timeout expired")
+        end_time = get_time()
+        return round(end_time - start_time, ROUND_PRECISION)
+
+    def call_api(self, test, service, qube):
+        start_time = get_time()
+        app = qubesadmin.Qubes()
+        domains = app.domains
+        # Index by name; qube may be passed as a VM object.
+        appvm = domains[str(qube)]
+        domain_time = get_time()
+        if test.non_dispvm:
+            # Use the resolved qube (vm2), matching the CLI variant.
+            target_qube = appvm
+        else:
+            target_qube = qubesadmin.vm.DispVM.from_appvm(app, appvm)
+        target_time = get_time()
+        try:
+            target_qube.run_service_for_stdio(service, timeout=60)
+        except subprocess.CalledProcessError as e:
+            raise Exception(
+                f"service '{service}' failed ({e.returncode}):"
+                f" {e.stdout},"
+                f" {e.stderr}"
+            )
+        except subprocess.TimeoutExpired:
+            raise Exception(f"service '{service}' failed: timeout expired")
+        run_service_time = get_time()
+        if not test.non_dispvm:
+            target_qube.cleanup()
+            cleanup_time = get_time()
+            end_time = cleanup_time
+        else:
+            end_time = get_time()
+        runtime = {}
+        runtime["dom"] = round(domain_time - start_time, ROUND_PRECISION)
+        if not test.non_dispvm:
+            runtime["disp"] = round(target_time - domain_time, ROUND_PRECISION)
+        runtime["exec"] = round(run_service_time - target_time, ROUND_PRECISION)
+        if not test.non_dispvm:
+            runtime["clean"] = round(
+                cleanup_time - run_service_time, ROUND_PRECISION
+            )
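+        # "total" spans all phases above, starting before the Admin API
+        # connection is set up.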
+        runtime["total"] = round(end_time - start_time, ROUND_PRECISION)
+        return runtime
+
+    async def api_thread(self, test, service, qube):
+        tasks = []
+        loop = asyncio.get_running_loop()
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=MAX_CONCURRENCY
+        ) as executor:
+            exec_args = self.call_api, test, service, qube
+            for _ in range(self.iterations):
+                future = loop.run_in_executor(executor, *exec_args)
+                tasks.append(future)
+            all_results = await asyncio.gather(*tasks)
+        return all_results
+
+    def run_latency_api_calls(self, test):
+        if test.gui:
+            service = "qubes.WaitForSession"
+        else:
+            service = "qubes.WaitForRunningSystem"
+        if test.non_dispvm:
+            qube = self.vm2
+        else:
+            qube = self.dvm
+
+        results = {}
+        start_time = get_time()
+        if test.concurrent:
+            all_results = asyncio.run(self.api_thread(test, service, qube))
+            for i in range(1, self.iterations + 1):
+                results[i] = all_results[i - 1]
+        else:
+            for i in range(1, self.iterations + 1):
+                results[i] = self.call_api(
+                    test=test, service=service, qube=qube
+                )
+        end_time = get_time()
+
+        sample_keys = list(results[1].keys())
+        value_keys = [k for k in sample_keys if k != "total"]
+        headers = (
+            ["iter"]
+            + list(value_keys)
+            + ["total"]
+            + [f"{k}%" for k in value_keys]
+        )
+        rows = []
+        for key, values in results.items():
+            total = values.get("total", 0)
+            row_values = [str(key)]
+            for k in value_keys:
+                row_values.append(f"{values.get(k, 0):.{ROUND_PRECISION}f}")
+            row_values.append(f"{total:.{ROUND_PRECISION}f}")
+            for k in value_keys:
+                pct = (values.get(k, 0) / total * 100) if total != 0 else 0
+                row_values.append(f"{pct:.0f}%")
+            rows.append(row_values)
+        col_widths = [len(h) for h in headers]
+        for row in rows:
+            for i, value in enumerate(row):
+                col_widths[i] = max(col_widths[i], len(value))
+        header_row = " ".join(
+            h.rjust(col_widths[i]) for i, h in enumerate(headers)
+        )
+
+        print()
+        print(header_row)
+        for row in rows:
+            print(
+                " ".join(val.rjust(col_widths[i]) for i, val in enumerate(row))
+            )
+
+        total_time = round(end_time - start_time, ROUND_PRECISION)
+        return total_time, results
+
+    def report_result(self, test, result):
+        items = " ".join(
+            "{}={}".format(key, value) for key, value in vars(test).items()
+        )
+        if test.admin_api:
+            total_time = result[0]
+            average = round(total_time / self.iterations, ROUND_PRECISION)
+            pretty_average = f"{average:.{ROUND_PRECISION}f}"
+            compiled_result = []
+            for key, value in result[1].items():
+                individual_result = (
+                    f"{key}=("
+                    + ",".join(
+                        f"{k}={v:.{ROUND_PRECISION}f}" for k, v in value.items()
+                    )
+                    + ")"
+                )
+                compiled_result.append(individual_result)
+            items += f" iterations={self.iterations} average={pretty_average} "
+            items += " ".join(compiled_result)
+        else:
+            total_time = result
+            average = total_time / self.iterations
+            pretty_average = f"{average:.{ROUND_PRECISION}f}"
+            items += f" iterations={self.iterations} average={pretty_average}"
+        pretty_total_time = f"{total_time:.{ROUND_PRECISION}f}"
+        final_result = pretty_total_time + " " + items
+        pretty_items = "iterations=" + str(self.iterations)
+        pretty_items += " average=" + pretty_average
+        print(f"Run time ({pretty_items}): {pretty_total_time}s")
+        results_file = os.environ.get("QUBES_TEST_PERF_FILE")
+        if not results_file:
+            return
+        try:
+            if self.vm2 and self.vm1.template != self.vm2.template:
+                name_prefix = f"{self.vm1.template!s}_{self.vm2.template!s}_"
+            else:
+                name_prefix = f"{self.vm1.template!s}_"
+        except AttributeError:
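+            # Triggered e.g. when vm2 is the empty string (no second qube),
+            # which has no "template" attribute.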
+            if self.vm2:
+                name_prefix = f"{self.vm1!s}_{self.vm2!s}_"
+            else:
+                name_prefix = f"{self.vm1!s}_"
+        with open(results_file, "a", encoding="ascii") as file:
+            file.write(name_prefix + test.name + " " + final_result + "\n")
+
+    def run_test(self, test: TestConfig):
+        with open(POLICY_FILE, "w", encoding="ascii") as policy:
+            gui_prefix = f"qubes.WaitForSession * {self.vm1.name}"
+            nogui_prefix = f"qubes.WaitForRunningSystem * {self.vm1.name}"
+            if test.non_dispvm:
+                target = self.vm2.name
+            else:
+                target = "@dispvm"
+            policy.write(
+                f"{gui_prefix} {target} allow\n"
+                f"{nogui_prefix} {target} allow\n"
+            )
+        if test.preload_max:
+            orig_preload_max = self.dom0.features.get("preload-dispvm-max")
+            if orig_preload_max is not None:
+                del self.dom0.features["preload-dispvm-max"]
+        try:
+            if test.preload_max:
+                preload_max = test.preload_max
+                self.dvm.features["preload-dispvm-max"] = str(preload_max)
+                asyncio.run(self.wait_preload(preload_max))
+            if test.admin_api:
+                result = self.run_latency_api_calls(test)
+            else:
+                result = self.run_latency_calls(test)
+            self.report_result(test, result)
+        finally:
+            if test.preload_max:
+                old_preload_max = int(
+                    self.dvm.features.get("preload-dispvm-max", 0) or 0
+                )
+                asyncio.run(self.wait_preload(old_preload_max))
+                old_preload = self.dvm.features.get("preload-dispvm", "")
+                old_preload = old_preload.split(" ") if old_preload else []
+                del self.dvm.features["preload-dispvm-max"]
+                self.wait_for_dispvm_destroy(old_preload)
+                if orig_preload_max is not None:
+                    self.dom0.features["preload-dispvm-max"] = orig_preload_max
+                    # Feature values are strings; wait on the numeric value.
+                    if int(orig_preload_max or 0) != 0:
+                        asyncio.run(
+                            self.wait_preload(int(orig_preload_max or 0))
+                        )
+            os.unlink(POLICY_FILE)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        epilog="You can set the QUBES_TEST_PERF_FILE env variable to a path "
+        "where machine-readable results should be saved."
+    )
+    parser.add_argument("--dvm", required=True)
+    parser.add_argument("--vm1", required=True)
+    parser.add_argument("--vm2", required=True)
+    parser.add_argument(
+        "--iterations",
+        default=os.environ.get("QUBES_TEST_ITERATIONS", ITERATIONS),
+        type=int,
+    )
+    parser.add_argument("test", choices=[t.name for t in ALL_TESTS] + ["all"])
+    args = parser.parse_args()
+    app = qubesadmin.Qubes()
+
+    if args.test == "all":
+        tests = ALL_TESTS
+    else:
+        tests = [t for t in ALL_TESTS if t.name == args.test]
+
+    run = TestRun(
+        dom0=app.domains["dom0"],
+        dvm=app.domains[args.dvm],
+        vm1=app.domains[args.vm1],
+        vm2="" if not args.vm2 else app.domains[args.vm2],
+    )
+    if args.iterations:
+        run.iterations = args.iterations
+
+    for test in tests:
+        run.run_test(test)
+
+
+if __name__ == "__main__":
+    main()