Commit 269c808

Commit message: applied comments
1 parent e23f048

File tree: 3 files changed, +41 -51 lines changed


devops/scripts/benchmarks/benches/compute.py

Lines changed: 19 additions & 30 deletions
@@ -269,53 +269,45 @@ def benchmarks(self) -> list[Benchmark]:
             )
         )
 
-        record_and_replay_params = product(list(PROFILERS), [0, 1], [0, 1])
-        for profiler_type, emulate, instantiate in record_and_replay_params:
-            benches += [
-                RecordAndReplay(
+        record_and_replay_params = product([0, 1], [0, 1])
+        for emulate, instantiate in record_and_replay_params:
+
+            def createRrBench(**kwargs):
+                return RecordAndReplay(
                     self,
                     RUNTIMES.LEVEL_ZERO,
-                    profiler_type,
+                    PROFILERS.TIMER,
+                    mRec=1,
+                    mInst=instantiate,
+                    mDest=0,
+                    emulate=emulate,
+                    **kwargs,
+                )
+
+            benches += [
+                createRrBench(
                     nForksInLvl=2,
                     nLvls=4,
                     nCmdSetsInLvl=10,
                     nInstantiations=10,
                     nAppendKern=10,
                     nAppendCopy=1,
-                    mRec=1,
-                    mInst=instantiate,
-                    mDest=0,
-                    emulate=emulate,
                 ),
-                RecordAndReplay(
-                    self,
-                    RUNTIMES.LEVEL_ZERO,
-                    profiler_type,
+                createRrBench(
                     nForksInLvl=1,
                     nLvls=1,
                     nCmdSetsInLvl=10,
                     nInstantiations=10,
                     nAppendKern=10,
                     nAppendCopy=10,
-                    mRec=1,
-                    mInst=instantiate,
-                    mDest=0,
-                    emulate=emulate,
                 ),
-                RecordAndReplay(
-                    self,
-                    RUNTIMES.LEVEL_ZERO,
-                    profiler_type,
+                createRrBench(
                     nForksInLvl=1,
                     nLvls=4,
                     nCmdSetsInLvl=1,
                     nInstantiations=0,
                     nAppendKern=1,
                     nAppendCopy=0,
-                    mRec=1,
-                    mInst=instantiate,
-                    mDest=0,
-                    emulate=emulate,
                 ),
             ]
 

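The hunk above hoists the keyword arguments shared by all three RecordAndReplay variants into a local createRrBench helper that closes over the current loop values and forwards per-variant overrides through **kwargs. A minimal standalone sketch of the same pattern (the Variant class and the exact parameter set below are illustrative, not part of this patch):

from itertools import product


class Variant:
    """Illustrative stand-in for a benchmark object; records its parameters."""

    def __init__(self, **params):
        self.params = params


variants = []
for emulate, instantiate in product([0, 1], [0, 1]):

    # Local factory: captures the current loop values once and forwards
    # per-variant overrides via **kwargs, so shared arguments are written once.
    def create_variant(**kwargs):
        return Variant(mRec=1, mInst=instantiate, mDest=0, emulate=emulate, **kwargs)

    variants += [
        create_variant(nForksInLvl=2, nLvls=4),
        create_variant(nForksInLvl=1, nLvls=1),
    ]

print(len(variants))  # 4 (emulate, instantiate) combinations x 2 shapes = 8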
@@ -711,7 +703,7 @@ def __init__(self, bench, runtime: RUNTIMES, profiler_type, **kwargs):
         )
 
     def name(self):
-        ret = [self.profiler_type.value]
+        ret = []
         for k, v in self.rr_params.items():
             if k[0] == "n": # numeric parameter
                 ret.append(f"{k[1:]} {v}")
@@ -722,14 +714,11 @@ def name(self):
                 if v != 0:
                     ret.append(k)
         ret.sort()
-        return f"{self.bench_name} {self.test} " + ", ".join(ret)
+        return f"L0 {self.test} " + ", ".join(ret)
 
     def display_name(self) -> str:
         return self.name()
 
-    def description(self) -> str:
-        return f"{self.runtime.value} Graphs record and replay"
-
     def get_tags(self):
         return ["L0"]
 

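With the profiler prefix and the description() override gone, a benchmark's reported name is built only from the fixed "L0" prefix, the test name, and the sorted rr_params entries. A rough sketch of that assembly follows; the handling of mode flags (keys starting with "m", stripped of the prefix when enabled) is inferred from the expected test string, since those lines fall outside the hunks above:

def build_case_name(test: str, rr_params: dict) -> str:
    """Approximation of RecordAndReplay.name() after this change."""
    ret = []
    for k, v in rr_params.items():
        if k[0] == "n":  # numeric parameter: report its value
            ret.append(f"{k[1:]} {v}")
        elif k[0] == "m" and v != 0:  # mode flag: report only when enabled
            ret.append(k[1:])
    ret.sort()
    return f"L0 {test} " + ", ".join(ret)


# Roughly reproduces the filter string used by test_record_and_replay below:
print(build_case_name("RecordGraph", {
    "nForksInLvl": 2, "nLvls": 4, "nCmdSetsInLvl": 10,
    "nInstantiations": 10, "nAppendKern": 10, "nAppendCopy": 1,
    "mRec": 1, "mInst": 0, "mDest": 0,
}))
# -> L0 RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10,
#    ForksInLvl 2, Instantiations 10, Lvls 4, Rec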
devops/scripts/benchmarks/git_project.py

Lines changed: 1 addition & 1 deletion
@@ -2,6 +2,7 @@
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
 import os
 from pathlib import Path
 import shutil
@@ -10,7 +11,6 @@
 from utils.utils import run
 from options import options
 
-
 class GitProject:
     def __init__(
         self,

devops/scripts/benchmarks/tests/test_integration.py

Lines changed: 21 additions & 20 deletions
@@ -1,29 +1,34 @@
+# Copyright (C) 2025 Intel Corporation
+# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
+# See LICENSE.TXT
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
 import os
 import shutil
 import unittest
-import logging
-
+import tempfile
 import subprocess
 import json
 from collections import namedtuple
 
+# oneapi has to be installed and sourced for sycl benchmarks tests
+
 DataJson = namedtuple("DataJson", ["runs", "metadata", "tags", "names"])
 DataJsonRun = namedtuple("DataJsonRun", ["name", "results"])
 DataJsonResult = namedtuple(
     "DataJsonResult", ["name", "label", "suite", "value", "unit"]
 )
 
-
 class App:
     def __init__(self):
-        self.TMP_DIR = os.path.dirname(__file__)
-        self.OUTPUT_DIR = os.path.join(self.TMP_DIR, "tmp-output")
-        self.RESULTS_DIR = os.path.join(self.TMP_DIR, "tmp-results")
-        self.WORKDIR_DIR = os.path.join(self.TMP_DIR, "tmp-workdir")
+        self.OUTPUT_DIR = None
+        self.RESULTS_DIR = None
+        self.WORKDIR_DIR = None
 
     def prepare_dirs(self):
-        for d in [self.RESULTS_DIR, self.OUTPUT_DIR, self.WORKDIR_DIR]:
-            os.makedirs(d)
+        self.OUTPUT_DIR = tempfile.mkdtemp()
+        self.RESULTS_DIR = tempfile.mkdtemp()
+        self.WORKDIR_DIR = tempfile.mkdtemp()
 
         # when UT does not want to build compute-benchmarks from scratch, it can provide prebuilt path
         cb_targetpath = os.environ.get("COMPUTE_BENCHMARKS_BUILD_PATH")
@@ -37,8 +42,8 @@ def prepare_dirs(self):
 
     def remove_dirs(self):
         for d in [self.RESULTS_DIR, self.OUTPUT_DIR, self.WORKDIR_DIR]:
-            if os.path.exists(d):
-                shutil.rmtree(d)
+            if d is not None:
+                shutil.rmtree(d, ignore_errors=True)
 
     def run_main(self, *args):
 
@@ -50,10 +55,8 @@ def run_main(self, *args):
             self.WORKDIR_DIR,
             "--sycl",
             os.environ.get("ONEAPI_ROOT"),
-            "--adapter",
-            "opencl",
             "--save",
-            "testplik",
+            "testfile",
             "--output-html",
             "remote",
             "--results-dir",
@@ -72,7 +75,6 @@
         )
 
     def get_output(self):
-        output_file = os.path.join(self.OUTPUT_DIR, "data.json")
         with open(output_file) as f:
             out = json.load(f)
         return DataJson(
@@ -114,16 +116,15 @@ def tearDown(self):
         self.app.remove_dirs()
 
     def test_record_and_replay(self):
-        caseName = "RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec, timer"
-        run_result = self.app.run_main("--filter", caseName)
+        caseName = "L0 RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec"
+        run_result = self.app.run_main("--filter", caseName + "$")
         self.assertEqual(run_result.returncode, 0, "Subprocess did not exit cleanly")
 
         out = self.app.get_output()
 
-        testName = "record_and_replay_benchmark_l0 " + caseName
-        self.assertIn(testName, [r.name for r in out.runs[0].results])
+        self.assertIn(caseName, [r.name for r in out.runs[0].results])
 
-        metadata = out.metadata[testName]
+        metadata = out.metadata[caseName]
         self.assertEqual(metadata["type"], "benchmark")
         self.assertEqual(set(metadata["tags"]), {"L0"})
 

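The tests now create their working directories with tempfile.mkdtemp() instead of fixed tmp-* paths next to the test file, so concurrent or previously aborted runs cannot collide, and tearDown removes them with ignore_errors=True so cleanup problems do not mask the test verdict. A minimal sketch of the same lifecycle (class and test names are illustrative only, not from the patch):

import os
import shutil
import tempfile
import unittest


class TempDirLifecycleTest(unittest.TestCase):
    def setUp(self):
        # Unique, freshly created directories per test run.
        self.output_dir = tempfile.mkdtemp()
        self.results_dir = tempfile.mkdtemp()

    def tearDown(self):
        for d in [self.output_dir, self.results_dir]:
            if d is not None:
                # ignore_errors keeps cleanup failures from overriding the test result.
                shutil.rmtree(d, ignore_errors=True)

    def test_dirs_are_usable(self):
        self.assertTrue(os.path.isdir(self.output_dir))
        self.assertTrue(os.path.isdir(self.results_dir))


if __name__ == "__main__":
    unittest.main()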