# Copyright (C) 2025 Intel Corporation
# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
# See LICENSE.TXT
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

import json
import os
import shutil
import subprocess
import tempfile
import unittest
from collections import namedtuple

# oneAPI has to be installed and sourced for the SYCL benchmark tests.
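# The tests invoke ./devops/scripts/benchmarks/main.py via a relative path,
# so they are expected to be run from the repository root.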

DataJson = namedtuple("DataJson", ["runs", "metadata", "tags", "names"])
DataJsonRun = namedtuple("DataJsonRun", ["name", "results"])
DataJsonResult = namedtuple(
    "DataJsonResult", ["name", "label", "suite", "value", "unit"]
)
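# Lightweight views over the JSON payload that main.py saves; they are
# populated in App.get_output() below.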


class App:
    def __init__(self):
        self.OUTPUT_DIR = None
        self.RESULTS_DIR = None
        self.WORKDIR_DIR = None

    def prepare_dirs(self):
        self.OUTPUT_DIR = tempfile.mkdtemp()
        self.RESULTS_DIR = tempfile.mkdtemp()
        self.WORKDIR_DIR = tempfile.mkdtemp()

        # If the test should not build compute-benchmarks from scratch, a
        # prebuilt build directory can be provided via COMPUTE_BENCHMARKS_BUILD_PATH.
        cb_targetpath = os.environ.get("COMPUTE_BENCHMARKS_BUILD_PATH")
        if cb_targetpath and os.path.isdir(cb_targetpath):
            cb_build_dir = os.path.join(self.WORKDIR_DIR, "compute-benchmarks-build")
            os.symlink(cb_targetpath, cb_build_dir)
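            # Stamp the workdir version so that main.py treats the directory
            # as already initialized (presumably matching
            # main.INTERNAL_WORKDIR_VERSION, see the TODO below).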
            with open(
                os.path.join(self.WORKDIR_DIR, "BENCH_WORKDIR_VERSION"), "w"
            ) as f:
                f.write("2.0")  # TODO: take from main.INTERNAL_WORKDIR_VERSION

    def remove_dirs(self):
        for d in [self.RESULTS_DIR, self.OUTPUT_DIR, self.WORKDIR_DIR]:
            if d is not None:
                shutil.rmtree(d, ignore_errors=True)

    def run_main(self, *args):
        # TODO: not yet tested: "--detect-version", "sycl,compute_runtime"
        # Add "--verbose" to the arguments below for debug logs.
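        # "--timestamp-override" pins the run's timestamp, and the huge
        # "--stddev-threshold" effectively disables noise rejection; both keep
        # the run deterministic for testing.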
        return subprocess.run(
            [
                "./devops/scripts/benchmarks/main.py",
                self.WORKDIR_DIR,
                "--sycl",
                os.environ["CMPLR_ROOT"],  # fails early if oneAPI is not sourced
                "--save",
                "testfile",
                "--output-html",
                "remote",
                "--results-dir",
                self.RESULTS_DIR,
                "--output-dir",
                self.OUTPUT_DIR,
                "--preset",
                "Minimal",
                "--timestamp-override",
                "20240102_030405",
                "--stddev-threshold",
                "999999999.9",
                "--exit-on-failure",
                *args,
            ]
        )

    def get_output(self):
        # "--output-html remote" is expected to leave the consolidated results
        # in a data.json file somewhere under OUTPUT_DIR; search for it rather
        # than hard-coding the exact directory layout.
        output_file = None
        for root, _dirs, files in os.walk(self.OUTPUT_DIR):
            if "data.json" in files:
                output_file = os.path.join(root, "data.json")
                break
        if output_file is None:
            raise FileNotFoundError(f"data.json not found under {self.OUTPUT_DIR}")
        with open(output_file) as f:
            out = json.load(f)
        return DataJson(
            runs=[
                DataJsonRun(
                    name=run["name"],
                    results=[
                        DataJsonResult(
                            name=r["name"],
                            label=r["label"],
                            suite=r["suite"],
                            value=r["value"],
                            unit=r["unit"],
                        )
                        for r in run["results"]
                    ],
                )
                for run in out["benchmarkRuns"]
            ],
            metadata=out["benchmarkMetadata"],
            tags=out["benchmarkTags"],
            names=out["defaultCompareNames"],
        )


class TestE2E(unittest.TestCase):
    def setUp(self):
        # Start each test from freshly created input/output directories.
        self.app = App()
        self.app.remove_dirs()
        self.app.prepare_dirs()

    def tearDown(self):
        self.app.remove_dirs()

    def test_record_and_replay(self):
        case_name = "L0 RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec"
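        # The trailing "$" anchors the filter regex so only this exact case runs.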
        run_result = self.app.run_main("--filter", case_name + "$")
        self.assertEqual(run_result.returncode, 0, "Subprocess did not exit cleanly")

        out = self.app.get_output()

        self.assertIn(case_name, [r.name for r in out.runs[0].results])

        metadata = out.metadata[case_name]
        self.assertEqual(metadata["type"], "benchmark")
        self.assertEqual(set(metadata["tags"]), {"L0"})

    def test_submit_kernel(self):
        case_name = "SubmitKernel out of order with measure completion KernelExecTime=20"
        run_result = self.app.run_main("--filter", case_name + "$")
        self.assertEqual(run_result.returncode, 0, "Subprocess did not exit cleanly")

        out = self.app.get_output()

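        # Result names are reported with the benchmark binary name as a prefix.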
        test_name = "api_overhead_benchmark_l0 " + case_name
        self.assertIn(test_name, [r.name for r in out.runs[0].results])

        metadata = out.metadata[test_name]
        self.assertEqual(metadata["type"], "benchmark")
        self.assertEqual(set(metadata["tags"]), {"L0", "latency", "micro", "submit"})


if __name__ == "__main__":
    unittest.main()