1+ import json
2+
13import matplotlib .pyplot as plt
24import numpy as np
3- import json
45import pandas as pd
56
# JSON results file read by get_benchmark_data() and store_csv()
BENCHMARKS_JSON = "results.json"
78
# Hardware details shown in title
HARDWARE = "AMD Ryzen 9 9900X 12-Core Processor 63032 MB (fp64 fp16)\n oneAPI 2025.1.3 Intel(R) OpenCL Graphics: Intel(R) Arc(TM) B580 Graphics, 11873 MB (fp64 fp16)"
# Show numeric value labels on the graph bars (TODO confirm: passed as
# show_numbers to generate_group_graph — caller not visible here)
SHOW_NUMBERS = True

# Round to digits after decimal
ROUND_NUMBERS = 1

# package list in graph order; arrayfire packages are added later
PKG_NAMES = ["numpy", "dpnp", "cupy"]
2421# color used in graphs
2522PKG_COLOR = {
2926 "afcpu" : "tab:orange" ,
3027 "afopencl" : "tab:orange" ,
3128 "afcuda" : "tab:orange" ,
32- "afoneapi" : "tab:orange"
29+ "afoneapi" : "tab:orange" ,
3330}
3431
3532# labels displayed in the graph
4037 "afcpu" : "afcpu" ,
4138 "afcuda" : "afcuda" ,
4239 "afopencl" : "afopencl[opencl:gpu]" ,
43- "afoneapi" : "afoneapi[opencl:gpu]"
40+ "afoneapi" : "afoneapi[opencl:gpu]" ,
4441}
4542
# ArrayFire backends tried one at a time in main()
AFBACKENDS = ["afcpu", "afcuda", "afopencl", "afoneapi"]

# Tests to be shown in graphs
TESTS = [
    "qr",
    "neural_network",
    "gemm",
    "mandelbrot",
    "nbody",
    "pi",
    "black_scholes",
    "fft",
    "normal",
    "group_elementwise",
    # Other tests
    # 'svd',
    # 'cholesky',
    # 'det',
    # 'norm',
    # 'uniform',
    # 'inv'
]
65+
7466
7567def get_benchmark_data ():
7668 results = {}
7769 descriptions = {}
7870 with open (BENCHMARKS_JSON ) as f :
7971 js = json .load (f )
80- for bench in js [' benchmarks' ]:
72+ for bench in js [" benchmarks" ]:
8173 test_name = bench ["name" ]
82- test_name = test_name [test_name .find ('_' ) + 1 : test_name .find ('[' )]
74+ test_name = test_name [test_name .find ("_" ) + 1 : test_name .find ("[" )]
8375
8476 key = bench ["param" ]
8577 val = bench ["stats" ]["ops" ]
@@ -88,12 +80,13 @@ def get_benchmark_data():
8880 descriptions [test_name ] = bench ["extra_info" ]["description" ]
8981
9082 if test_name not in results :
91- results [test_name ] = { key : val }
83+ results [test_name ] = {key : val }
9284 else :
9385 results [test_name ][key ] = val
9486
9587 return results , descriptions
9688
89+
9790def create_graph (test_name , test_results ):
9891 names = []
9992 values = []
@@ -107,12 +100,14 @@ def create_graph(test_name, test_results):
107100 plt .savefig ("img/" + test_name + ".png" )
108101 plt .close ()
109102
103+
110104def generate_individual_graphs ():
111105 results , descriptions = get_benchmark_data ()
112106
113107 for test in results :
114108 create_graph (test , results [test ])
115109
110+
116111# Stores the timing results in a csv file
117112def store_csv ():
118113 data_dict = {}
@@ -124,9 +119,9 @@ def store_csv():
124119
125120 with open (BENCHMARKS_JSON ) as f :
126121 js = json .load (f )
127- for bench in js [' benchmarks' ]:
122+ for bench in js [" benchmarks" ]:
128123 test_name = bench ["name" ]
129- test_name = test_name [test_name .find ('_' ) + 1 : test_name .find ('[' )]
124+ test_name = test_name [test_name .find ("_" ) + 1 : test_name .find ("[" )]
130125
131126 pkg = bench ["param" ]
132127 time = bench ["stats" ]["mean" ]
@@ -135,18 +130,19 @@ def store_csv():
135130 data_dict ["Test(seconds)" ].append (test_name )
136131
137132 results [pkg ][test_name ] = time
138-
133+
139134 for test in data_dict ["Test(seconds)" ]:
140135 for pkg in PKG_LABELS .keys ():
141136 if test in results [pkg ]:
142137 data_dict [pkg ].append (results [pkg ][test ])
143138 else :
144139 data_dict [pkg ].append (np .nan )
145-
140+
146141 df = pd .DataFrame (data_dict )
147142 df .to_csv ("summary.csv" )
148143
149- def generate_group_graph (test_list = None , show_numbers = False , filename = "comparison" ):
144+
145+ def generate_group_graph (test_list = None , show_numbers = False , filename = "comparison" ):
150146 results , descriptions = get_benchmark_data ()
151147
152148 width = 1 / (1 + len (PKG_NAMES ))
@@ -181,7 +177,7 @@ def generate_group_graph(test_list = None, show_numbers = False, filename = "com
181177 else :
182178 tests_values [name ].append (np .nan )
183179
184- fig , ax = plt .subplots (layout = ' constrained' )
180+ fig , ax = plt .subplots (layout = " constrained" )
185181
186182 for name in PKG_NAMES :
187183 offset = width * multiplier
@@ -193,21 +189,22 @@ def generate_group_graph(test_list = None, show_numbers = False, filename = "com
193189
194190 xlabels = []
195191 for test in tests :
196- xlabels .append (test + "\n " + descriptions [test ])
192+ xlabels .append (test + "\n " + descriptions [test ])
197193
198- ax .set_xlabel (' Speedup' )
199- ax .set_xscale (' log' )
200- ax .set_title (f' Runtime Comparison\n { HARDWARE } ' )
194+ ax .set_xlabel (" Speedup" )
195+ ax .set_xscale (" log" )
196+ ax .set_title (f" Runtime Comparison\n { HARDWARE } " )
201197 ax .set_yticks (x + width , xlabels , rotation = 0 )
202198 xmin , xmax = ax .get_xlim ()
203199 ax .set_xlim (xmin , xmax * 2 )
204200
205- ax .legend (loc = ' lower right' , ncols = len (PKG_NAMES ))
201+ ax .legend (loc = " lower right" , ncols = len (PKG_NAMES ))
206202 fig .set_figheight (8 )
207203 fig .set_figwidth (13 )
208204 fig .savefig (f"img/{ filename } .png" )
209205 plt .show ()
210-
206+
207+
211208def main ():
212209 store_csv ()
213210 for backend in AFBACKENDS :
@@ -221,5 +218,6 @@ def main():
221218 print (e )
222219 print ("No data for" , backend )
223220
221+
# Script entry point; guard keeps imports of this module side-effect free.
if __name__ == "__main__":
    main()