
Commit fb1858a

[Benchmarks] add unittest, add rr benchmarks
1 parent c238ace commit fb1858a

File tree

4 files changed: 206 additions & 1 deletion


devops/scripts/benchmarks/benches/compute.py

Lines changed: 89 additions & 0 deletions
@@ -269,6 +269,56 @@ def benchmarks(self) -> list[Benchmark]:
                 )
             )
 
+        record_and_reply_params = product(list(PROFILERS), [0, 1], [0, 1])
+        for profiler_type, emulate, instantiate in record_and_reply_params:
+            benches += [
+                RecordAndReply(
+                    self,
+                    RUNTIMES.LEVEL_ZERO,
+                    profiler_type,
+                    nForksInLvl=2,
+                    nLvls=4,
+                    nCmdSetsInLvl=10,
+                    nInstantiations=10,
+                    nAppendKern=10,
+                    nAppendCopy=1,
+                    mRec=1,
+                    mInst=instantiate,
+                    mDest=0,
+                    emulate=emulate,
+                ),
+                RecordAndReply(
+                    self,
+                    RUNTIMES.LEVEL_ZERO,
+                    profiler_type,
+                    nForksInLvl=1,
+                    nLvls=1,
+                    nCmdSetsInLvl=10,
+                    nInstantiations=10,
+                    nAppendKern=10,
+                    nAppendCopy=10,
+                    mRec=1,
+                    mInst=instantiate,
+                    mDest=0,
+                    emulate=emulate,
+                ),
+                RecordAndReply(
+                    self,
+                    RUNTIMES.LEVEL_ZERO,
+                    profiler_type,
+                    nForksInLvl=1,
+                    nLvls=4,
+                    nCmdSetsInLvl=1,
+                    nInstantiations=0,
+                    nAppendKern=1,
+                    nAppendCopy=0,
+                    mRec=1,
+                    mInst=instantiate,
+                    mDest=0,
+                    emulate=emulate,
+                ),
+            ]
+
         # Add UR-specific benchmarks
         benches += [
             # TODO: multithread_benchmark_ur fails with segfault
@@ -646,6 +696,45 @@ def bin_args(self, run_trace: TracingType = TracingType.NONE) -> list[str]:
             f"--profilerType={self.profiler_type.value}",
         ]
 
+class RecordAndReply(ComputeBenchmark):
+    def __init__(self, bench, runtime: RUNTIMES, profiler_type, **kwargs):
+        self.rr_params = kwargs
+        self.iterations_regular = 1000
+        self.iterations_trace = 10
+        super().__init__(
+            bench,
+            f"record_and_replay_benchmark_{runtime.value}",
+            "RecordGraph",
+            runtime,
+            profiler_type,
+        )
+
+    def name(self):
+        ret = []
+        for k, v in self.rr_params.items():
+            if k[0] == 'n':  # numeric parameter
+                ret.append(f"{k[1:]} {v}")
+            elif k[0] == 'm':
+                if v != 0:  # measure parameter
+                    ret.append(f"{k[1:]}")
+            else:  # boolean parameter
+                if v != 0:
+                    ret.append(k)
+        ret.sort()
+        return f"{self.bench_name} {self.test} " + ", ".join(ret)
+
+    def display_name(self) -> str:
+        return self.name()
+
+    def description(self) -> str:
+        return f"{self.runtime.value} Graphs record and replay"
+
+    def get_tags(self):
+        return ["L0", "micro"]
+
+    def bin_args(self, run_trace: TracingType = TracingType.NONE) -> list[str]:
+        return [f"--{k}={v}" for k, v in self.rr_params.items()]
+
 
 class QueueInOrderMemcpy(ComputeBenchmark):
     def __init__(self, bench, isCopyOnly, source, destination, size, profiler_type):
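
The name() method above builds the human-readable benchmark name from the constructor's keyword arguments: keys starting with "n" keep their numeric value, keys starting with "m" are listed by name only when non-zero, and any remaining key (such as emulate) appears only when truthy; the pieces are then sorted and comma-joined after the bench name and test name. A minimal standalone sketch of that rule (format_rr_name is a hypothetical helper written for illustration, not part of the commit):

def format_rr_name(bench_name: str, test: str, **rr_params) -> str:
    # mirrors RecordAndReply.name(): "n*" params keep their value,
    # "m*" flags appear by name when set, any other flag by key when set
    parts = []
    for k, v in rr_params.items():
        if k[0] == "n":        # numeric parameter, e.g. nLvls=4 -> "Lvls 4"
            parts.append(f"{k[1:]} {v}")
        elif k[0] == "m":      # measure flag, e.g. mRec=1 -> "Rec"
            if v != 0:
                parts.append(k[1:])
        elif v != 0:           # plain boolean flag, e.g. emulate=1 -> "emulate"
            parts.append(k)
    parts.sort()
    return f"{bench_name} {test} " + ", ".join(parts)

# the first RecordAndReply in the loop above, with emulate=0 and instantiate=0
print(format_rr_name(
    "record_and_replay_benchmark_l0", "RecordGraph",
    nForksInLvl=2, nLvls=4, nCmdSetsInLvl=10, nInstantiations=10,
    nAppendKern=10, nAppendCopy=1, mRec=1, mInst=0, mDest=0, emulate=0,
))
# -> record_and_replay_benchmark_l0 RecordGraph AppendCopy 1, AppendKern 10,
#    CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec

This is exactly the run name that the new end-to-end test at the bottom of this commit asserts on.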

devops/scripts/benchmarks/git_project.py

Lines changed: 6 additions & 1 deletion
@@ -2,7 +2,7 @@
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
+import os
 from pathlib import Path
 import shutil
 
@@ -167,6 +167,11 @@ def _setup_repo(self) -> bool:
         Returns:
             bool: True if the repository was cloned or updated, False if it was already up-to-date.
         """
+        if os.environ.get("LLVM_BENCHMARKS_UNIT_TESTING") == "1":
+            log.debug(
+                f"Skipping git operations during unit testing of {self._name} (LLVM_BENCHMARKS_UNIT_TESTING=1)."
+            )
+            return False
         if not self.src_dir.exists():
             self._git_clone()
             return True
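
With this guard in place, _setup_repo() becomes a no-op while the unit tests drive main.py against a pre-populated working directory. A minimal sketch of the intended toggle, using a hypothetical helper rather than the real GitProject class:

import os

def git_ops_skipped() -> bool:
    # same check as the guard added to GitProject._setup_repo(): only the
    # exact value "1" disables cloning/fetching
    return os.environ.get("LLVM_BENCHMARKS_UNIT_TESTING") == "1"

os.environ["LLVM_BENCHMARKS_UNIT_TESTING"] = "1"
assert git_ops_skipped()        # _setup_repo() would log and return False

del os.environ["LLVM_BENCHMARKS_UNIT_TESTING"]
assert not git_ops_skipped()    # normal clone/update path is taken

An unset variable, or any value other than "1", leaves the usual git behaviour untouched.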

devops/scripts/benchmarks/main.py

Lines changed: 1 addition & 0 deletions
@@ -228,6 +228,7 @@ def main(directory, additional_env_vars, compare_names, filter):
             benchmark for benchmark in s.benchmarks() if benchmark.enabled()
         ]
         if filter:
+            log.debug(f"Filtering {len(suite_benchmarks)} benchmarks in {s.name()} suite for {filter.pattern}")
             suite_benchmarks = [
                 benchmark
                 for benchmark in suite_benchmarks
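
For context, filter here is a compiled regular expression (the new debug line reads its .pattern), and the list comprehension that follows keeps only the matching benchmarks. A rough sketch of that narrowing, assuming the comparison uses filter.search() against each benchmark's name; the actual predicate sits outside this hunk:

import re

flt = re.compile("RecordGraph")   # roughly what "--filter RecordGraph" becomes

# illustrative names only; the first mirrors the record-and-replay scheme above
names = [
    "record_and_replay_benchmark_l0 RecordGraph AppendCopy 1, Lvls 4, Rec",
    "some_other_benchmark UnrelatedCase",
]
selected = [n for n in names if flt.search(n)]
print(selected)                   # only the RecordGraph entry remains
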
Lines changed: 110 additions & 0 deletions
@@ -0,0 +1,110 @@
+import os
+import shutil
+import unittest
+import logging
+
+import subprocess
+import json
+from collections import namedtuple
+
+# class TestBasic(unittest.TestCase):
+#     def setUp(self):
+#         # Load test data
+#         self.app = App(database="fixtures/test_basic.json")
+#
+#     def test_customer_count(self):
+#         self.assertEqual(len(self.app.customers), 100)
+#
+#     def test_existence_of_customer(self):
+#         customer = self.app.get_customer(id=10)
+#         self.assertEqual(customer.name, "Org XYZ")
+#         self.assertEqual(customer.address, "10 Red Road, Reading")
+
+
+# https://realpython.com/python-testing/
+
+DataJson = namedtuple("DataJson", ["runs", "metadata", "tags", "names"])
+DataJsonRun = namedtuple("DataJsonRun", ["name", "results"])
+DataJsonResult = namedtuple("DataJsonResult", ["name", "label", "suite", "value", "unit"])
+
+class App:
+    def __init__(self):
+        self.TMP_DIR = os.path.dirname(__file__)
+        self.OUTPUT_DIR = os.path.join(self.TMP_DIR, "tmp-output")
+        self.RESULTS_DIR = os.path.join(self.TMP_DIR, "tmp-results")
+        self.WORKDIR_DIR = os.path.join(self.TMP_DIR, "tmp-workdir")
+
+
+
+    def prepare_dirs(self):
+        for d in [self.RESULTS_DIR, self.OUTPUT_DIR, self.WORKDIR_DIR]:
+            os.makedirs(d)
+
+        # when UT does not want to build compute-benchmarks from scratch, it can provide prebuilt path
+        cb_targetpath = os.environ.get("COMPUTE_BENCHMARKS_BUILD_PATH")
+        if cb_targetpath and os.path.isdir(cb_targetpath):
+            cb_build_dir = os.path.join(self.WORKDIR_DIR, "compute-benchmarks-build")
+            os.symlink(cb_targetpath, cb_build_dir)
+            with open(os.path.join(self.WORKDIR_DIR, "BENCH_WORKDIR_VERSION"), "w") as f:
+                f.write("2.0")  # TODO: take from main.INTERNAL_WORKDIR_VERSION
+
+
+    def remove_dirs(self):
+        for d in [self.RESULTS_DIR, self.OUTPUT_DIR, self.WORKDIR_DIR]:
+            if os.path.exists(d):
+                shutil.rmtree(d)
+    def run_main(self, *args):
+
+        # TODO: not yet tested: "--detect-version", "sycl,compute_runtime"
+
+        return subprocess.run(["./devops/scripts/benchmarks/main.py", self.WORKDIR_DIR,
+                               "--sycl", os.environ.get("ONEAPI_ROOT"),
+                               "--ur", os.environ.get("CMPLR_ROOT"),
+                               "--adapter", "opencl",
+                               "--save", "testplik",
+                               "--output-html", "remote",
+                               "--results-dir", self.RESULTS_DIR,
+                               "--output-dir", self.OUTPUT_DIR,
+                               "--preset", "Minimal",
+                               "--timestamp-override", "20240102_030405",
+                               "--exit-on-failure",
+                               *args])
+
+    def get_output(self):
+        output_file = os.path.join(self.OUTPUT_DIR, "data.json")
+        with open(output_file) as f:
+            out = json.load(f)
+        return DataJson(
+            runs=[DataJsonRun(name=run["name"], results=[DataJsonResult(name=r["name"], label=r["label"], suite=r["suite"], value=r["value"], unit=r["unit"]) for r in run["results"]]) for run in out["benchmarkRuns"]],
+            metadata=out["benchmarkMetadata"],
+            tags=out["benchmarkTags"],
+            names=out["defaultCompareNames"],
+        )
+
+# add "--verbose" for debug logs
+
+class TestE2E(unittest.TestCase):
+    def setUp(self):
+        # Load test data
+        self.app = App()
+        self.app.remove_dirs()
+        self.app.prepare_dirs()
+
+        run_result = self.app.run_main("--filter", "RecordGraph")
+        self.assertEqual(run_result.returncode, 0, "Subprocess did not exit cleanly")
+        # clean directory with input, output
+        out = self.app.get_output()
+        self.assertIn("record_and_replay_benchmark_l0 RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec", [r.name for r in out.runs[0].results])
+
+
+    def tearDown(self):
+        pass
+        # self.app.remove_dirs()
+
+    def test_record_and_reply(self):
+        pass
+
+
+if __name__ == "__main__":
+    unittest.main()
+
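
get_output() above relies on only a few keys of the data.json produced with --output-html remote. A minimal, made-up example of that shape (values are placeholders apart from "testplik", which is the --save name used by run_main; only the key names and the namedtuple fields come from the code):

import json
from collections import namedtuple

DataJsonResult = namedtuple("DataJsonResult", ["name", "label", "suite", "value", "unit"])

sample = json.loads("""
{
  "benchmarkRuns": [
    {
      "name": "testplik",
      "results": [
        {"name": "record_and_replay_benchmark_l0 RecordGraph ...",
         "label": "placeholder", "suite": "placeholder",
         "value": 1.0, "unit": "placeholder"}
      ]
    }
  ],
  "benchmarkMetadata": {},
  "benchmarkTags": {},
  "defaultCompareNames": ["testplik"]
}
""")

results = [DataJsonResult(**r) for r in sample["benchmarkRuns"][0]["results"]]
assert any(r.name.startswith("record_and_replay_benchmark_l0") for r in results)

Each entry of benchmarkRuns becomes a DataJsonRun and each of its results a DataJsonResult, which is what the assertIn in setUp() iterates over.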
