1+ # Copyright (C) 2025 Intel Corporation
2+ # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
3+ # See LICENSE.TXT
4+ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5+
16import os
27import shutil
38import unittest
4- import logging
5-
9+ import tempfile
610import subprocess
711import json
812from collections import namedtuple
913
# NOTE: oneAPI must be installed and sourced for the SYCL benchmark tests.
# Lightweight record types mirroring the shape of the framework's
# data.json output file (see App.get_output).
DataJson = namedtuple("DataJson", "runs metadata tags names")
DataJsonRun = namedtuple("DataJsonRun", "name results")
DataJsonResult = namedtuple("DataJsonResult", "name label suite value unit")
1521
16-
1722class App :
1823 def __init__ (self ):
19- self .TMP_DIR = os .path .dirname (__file__ )
20- self .OUTPUT_DIR = os .path .join (self .TMP_DIR , "tmp-output" )
21- self .RESULTS_DIR = os .path .join (self .TMP_DIR , "tmp-results" )
22- self .WORKDIR_DIR = os .path .join (self .TMP_DIR , "tmp-workdir" )
24+ self .OUTPUT_DIR = None
25+ self .RESULTS_DIR = None
26+ self .WORKDIR_DIR = None
2327
2428 def prepare_dirs (self ):
25- for d in [self .RESULTS_DIR , self .OUTPUT_DIR , self .WORKDIR_DIR ]:
26- os .makedirs (d )
29+ self .OUTPUT_DIR = tempfile .mkdtemp ()
30+ self .RESULTS_DIR = tempfile .mkdtemp ()
31+ self .WORKDIR_DIR = tempfile .mkdtemp ()
2732
2833 # when UT does not want to build compute-benchmarks from scratch, it can provide prebuilt path
2934 cb_targetpath = os .environ .get ("COMPUTE_BENCHMARKS_BUILD_PATH" )
@@ -37,8 +42,8 @@ def prepare_dirs(self):
3742
3843 def remove_dirs (self ):
3944 for d in [self .RESULTS_DIR , self .OUTPUT_DIR , self .WORKDIR_DIR ]:
40- if os . path . exists ( d ) :
41- shutil .rmtree (d )
45+ if d is not None :
46+ shutil .rmtree (d , ignore_errors = True )
4247
4348 def run_main (self , * args ):
4449
@@ -50,10 +55,8 @@ def run_main(self, *args):
5055 self .WORKDIR_DIR ,
5156 "--sycl" ,
5257 os .environ .get ("ONEAPI_ROOT" ),
53- "--adapter" ,
54- "opencl" ,
5558 "--save" ,
56- "testplik " ,
59+ "testfile " ,
5760 "--output-html" ,
5861 "remote" ,
5962 "--results-dir" ,
@@ -72,7 +75,6 @@ def run_main(self, *args):
7275 )
7376
7477 def get_output (self ):
75- output_file = os .path .join (self .OUTPUT_DIR , "data.json" )
7678 with open (output_file ) as f :
7779 out = json .load (f )
7880 return DataJson (
@@ -114,16 +116,15 @@ def tearDown(self):
114116 self .app .remove_dirs ()
115117
116118 def test_record_and_replay (self ):
117- caseName = "RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec, timer "
118- run_result = self .app .run_main ("--filter" , caseName )
119+ caseName = "L0 RecordGraph AppendCopy 1, AppendKern 10, CmdSetsInLvl 10, ForksInLvl 2, Instantiations 10, Lvls 4, Rec"
120+ run_result = self .app .run_main ("--filter" , caseName + "$" )
119121 self .assertEqual (run_result .returncode , 0 , "Subprocess did not exit cleanly" )
120122
121123 out = self .app .get_output ()
122124
123- testName = "record_and_replay_benchmark_l0 " + caseName
124- self .assertIn (testName , [r .name for r in out .runs [0 ].results ])
125+ self .assertIn (caseName , [r .name for r in out .runs [0 ].results ])
125126
126- metadata = out .metadata [testName ]
127+ metadata = out .metadata [caseName ]
127128 self .assertEqual (metadata ["type" ], "benchmark" )
128129 self .assertEqual (set (metadata ["tags" ]), {"L0" })
129130
0 commit comments