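# Plots benchmark results from BENCHMARKS_JSON (a pytest-benchmark style JSON
# export; the fields used are "name", "param", "stats" and "extra_info").
# Produces a per-test timing summary in summary.csv and speedup comparison
# graphs (relative to numpy) in the img/ directory.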
import json

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

BENCHMARKS_JSON = "results.json"

# Hardware details shown in the graph title
HARDWARE = "AMD Ryzen 9 9900X 12-Core Processor 63032 MB (fp64 fp16)\noneAPI 2025.1.3 Intel(R) OpenCL Graphics: Intel(R) Arc(TM) B580 Graphics, 11873 MB (fp64 fp16)"

# Show the speedup numbers on the bars
SHOW_NUMBERS = True

# Round displayed numbers to this many digits after the decimal point
ROUND_NUMBERS = 1

# Package list in graph order; the ArrayFire backends are added one at a time in main()
PKG_NAMES = ["numpy", "dpnp", "cupy"]

# Colors used in the graphs
PKG_COLOR = {
    "numpy": "tab:blue",
    "cupy": "tab:green",
    "dpnp": "tab:red",
    "afcpu": "tab:orange",
    "afopencl": "tab:orange",
    "afcuda": "tab:orange",
    "afoneapi": "tab:orange",
}

# Labels displayed in the graph legend
PKG_LABELS = {
    "numpy": "numpy[cpu]",
    "dpnp": "dpnp[level_zero:gpu]",
    "cupy": "cupy",
    "afcpu": "afcpu",
    "afcuda": "afcuda",
    "afopencl": "afopencl[opencl:gpu]",
    "afoneapi": "afoneapi[opencl:gpu]",
}

AFBACKENDS = ["afcpu", "afcuda", "afopencl", "afoneapi"]

# Tests to be shown in the graphs
TESTS = [
    "qr",
    "neural_network",
    "gemm",
    "mandelbrot",
    "nbody",
    "pi",
    "black_scholes",
    "fft",
    "normal",
    "group_elementwise",
    # Other tests
    # "svd",
    # "cholesky",
    # "det",
    # "norm",
    # "uniform",
    # "inv",
]

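# Parses BENCHMARKS_JSON into two dicts:
#   results[test_name][package] -> throughput ("ops" from the benchmark stats)
#   descriptions[test_name]     -> description string from "extra_info"
# The test name is sliced out between the first "_" and the parameter bracket
# of the benchmark name, e.g. a raw name like "test_gemm[numpy]" becomes "gemm".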
def get_benchmark_data():
    results = {}
    descriptions = {}
    with open(BENCHMARKS_JSON) as f:
        js = json.load(f)
        for bench in js["benchmarks"]:
            test_name = bench["name"]
            test_name = test_name[test_name.find("_") + 1 : test_name.find("[")]

            key = bench["param"]
            val = bench["stats"]["ops"]

            if len(bench["extra_info"]) != 0 and test_name not in descriptions:
                descriptions[test_name] = bench["extra_info"]["description"]

            if test_name not in results:
                results[test_name] = {key: val}
            else:
                results[test_name][key] = val

    return results, descriptions

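# Draws a simple bar chart of one test's per-package values and saves it to
# img/<test_name>.png.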
def create_graph(test_name, test_results):
    names = []
    values = []
    for name in test_results:
        names.append(name)
        values.append(test_results[name])

    plt.bar(names, values)
    plt.title(test_name)

    plt.savefig("img/" + test_name + ".png")
    plt.close()

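# Creates one individual bar chart for every test found in the results file.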
def generate_individual_graphs():
    results, descriptions = get_benchmark_data()

    for test in results:
        create_graph(test, results[test])

# Stores the mean timing (in seconds) of every test/package pair in summary.csv
def store_csv():
    data_dict = {}
    data_dict["Test(seconds)"] = []
    results = {}
    for pkg in PKG_LABELS.keys():
        data_dict[pkg] = []
        results[pkg] = {}

    with open(BENCHMARKS_JSON) as f:
        js = json.load(f)
        for bench in js["benchmarks"]:
            test_name = bench["name"]
            test_name = test_name[test_name.find("_") + 1 : test_name.find("[")]

            pkg = bench["param"]
            time = bench["stats"]["mean"]

            if test_name not in data_dict["Test(seconds)"]:
                data_dict["Test(seconds)"].append(test_name)

            results[pkg][test_name] = time

    for test in data_dict["Test(seconds)"]:
        for pkg in PKG_LABELS.keys():
            if test in results[pkg]:
                data_dict[pkg].append(results[pkg][test])
            else:
                data_dict[pkg].append(np.nan)

    df = pd.DataFrame(data_dict)
    df.to_csv("summary.csv")

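# Draws a grouped horizontal bar chart comparing the packages in PKG_NAMES
# over the given tests. Each bar is the speedup relative to numpy
# (package ops / numpy ops) on a logarithmic axis; the figure is saved to
# img/<filename>.png and shown on screen.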
def generate_group_graph(test_list=None, show_numbers=False, filename="comparison"):
    results, descriptions = get_benchmark_data()

    # One bar per package plus one bar width of spacing between test groups
    width = 1 / (1 + len(PKG_NAMES))
    multiplier = 0

    tests = test_list if test_list else results.keys()

    tests_values = {}
    x = np.arange(len(tests))

    for name in PKG_NAMES:
        tests_values[name] = []

    # Speedup of each package relative to numpy (based on ops, so higher is better)
    max_val = 1
    for test in tests:
        for name in PKG_NAMES:
            base_value = results[test]["numpy"]
            if name in results[test]:
                val = results[test][name] / base_value

                if ROUND_NUMBERS:
                    val = round(val, ROUND_NUMBERS)

                if max_val < val:
                    max_val = val

                tests_values[name].append(val)
            else:
                tests_values[name].append(np.nan)

    fig, ax = plt.subplots(layout="constrained")

    for name in PKG_NAMES:
        offset = width * multiplier
        rects = ax.barh(x + offset, tests_values[name], width, label=PKG_LABELS[name], color=PKG_COLOR[name])

        if show_numbers:
            ax.bar_label(rects, padding=3, rotation=0)
        multiplier += 1

    # y-axis tick labels: test name plus its description from the results file
    ylabels = []
    for test in tests:
        ylabels.append(test + "\n" + descriptions[test])

    ax.set_xlabel("Speedup")
    ax.set_xscale("log")
    ax.set_title(f"Runtime Comparison\n{HARDWARE}")
    ax.set_yticks(x + width, ylabels, rotation=0)
    # Widen the x-limits to leave room for the bar labels
    xmin, xmax = ax.get_xlim()
    ax.set_xlim(xmin, xmax * 2)

    ax.legend(loc="lower right", ncols=len(PKG_NAMES))
    fig.set_figheight(8)
    fig.set_figwidth(13)
    fig.savefig(f"img/{filename}.png")
    plt.show()

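# Writes summary.csv, then renders one comparison graph per ArrayFire backend
# by temporarily inserting that backend into PKG_NAMES.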
def main():
    store_csv()
    for backend in AFBACKENDS:
        try:
            filename = f"comparison_{backend}"
            if backend not in PKG_NAMES:
                PKG_NAMES.insert(1, backend)
            generate_group_graph(TESTS, SHOW_NUMBERS, filename)
        except Exception as e:
            print(e)
            print("No data for", backend)
        finally:
            # Always remove the backend again so a failed run does not leak
            # into the next backend's graph
            if backend in PKG_NAMES:
                PKG_NAMES.remove(backend)


if __name__ == "__main__":
    main()