forked from NethermindEth/gas-benchmarks
-
Notifications
You must be signed in to change notification settings - Fork 0
/
report_tables.py
133 lines (109 loc) · 5.55 KB
/
report_tables.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import argparse
import json
import os
import yaml
import utils
def get_table_report(client_results, clients, results_paths, test_cases, methods, gas_set, metadata, images):
    """Build a fixed-width benchmarking table per client and write it to reports/tables_norm.txt.

    Args:
        client_results: nested mapping client -> test case -> gas -> method -> per-run results.
        clients: list of client names to report on.
        results_paths: unused here; kept for interface compatibility with callers.
        test_cases: mapping of test case name -> gas values, forwarded to utils.get_gas_table.
        methods: list of RPC method names; only methods[0] is reported.
        gas_set: set of all gas values seen across the test cases.
        metadata: mapping of test case name -> metadata record (may be empty).
        images: JSON string mapping client name -> image; 'default' or '' means
            fall back to the entry in images.yaml.
    """
    # Parse the image overrides once -- they are the same for every client.
    image_json = json.loads(images)
    # Fallback mapping from images.yaml, loaded lazily and at most once.
    el_images = None
    results_to_print = ''
    for client in clients:
        image_to_print = ''
        if client in image_json:
            if image_json[client] != 'default' and image_json[client] != '':
                image_to_print = image_json[client]
        if image_to_print == '':
            if el_images is None:
                with open('images.yaml', 'r') as f:
                    el_images = yaml.safe_load(f)["images"]
            # Client names may carry a tag suffix (e.g. 'geth_v1.2'); the YAML
            # fallback is keyed by the bare client name.
            client_without_tag = client.split("_")[0]
            image_to_print = el_images[client_without_tag]
        results_to_print += f'{client.capitalize()} - {image_to_print} - Benchmarking Report' + '\n'
        results_to_print += (center_string('Title',
                                           68) + '| Min (MGas/s) | Max (MGas/s) | p50 (MGas/s) | p95 (MGas/s) | p99 (MGas/s) | N | Description\n')
        gas_table_norm = utils.get_gas_table(client_results, client, test_cases, gas_set, methods[0], metadata)
        # Only the row payload is used; the test-case key is not needed here.
        for data in gas_table_norm.values():
            results_to_print += (f'{align_left_string(data[0], 68)}|'
                                 f'{center_string(data[1], 14)}|'
                                 f'{center_string(data[2], 14)}|'
                                 f'{center_string(data[3], 14)}|'
                                 f'{center_string(data[4], 14)}|'
                                 f'{center_string(data[5], 14)}|'
                                 f'{center_string(data[6], 7)}|'
                                 f' {align_left_string(data[7], 50)}\n')
        results_to_print += '\n'
    print(results_to_print)
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair.
    os.makedirs('reports', exist_ok=True)
    with open('reports/tables_norm.txt', 'w') as file:
        file.write(results_to_print)
def center_string(string, size):
    """Center *string* in a field of *size* characters, padding with spaces.

    A string that is already *size* characters or longer is returned
    unchanged.  When the padding cannot be split evenly, the extra space
    goes on the right-hand side.
    """
    slack = size - len(string)
    if slack <= 0:
        return string
    left = slack // 2
    return " " * left + string + " " * (slack - left)
def align_left_string(string, size):
    """Left-align *string* in a field of *size* characters, padding with spaces.

    A string that is already *size* characters or longer is returned
    unchanged.  `str.ljust` implements exactly the original hand-rolled
    padding (including the size <= len(string) case).
    """
    return string.ljust(size)
def main():
    """Parse CLI arguments, gather per-run benchmark results and emit the report tables."""
    parser = argparse.ArgumentParser(description='Benchmark script')
    parser.add_argument('--resultsPath', type=str, help='Path to gather the results', default='results')
    parser.add_argument('--testsPath', type=str, help='results', default='tests/')
    parser.add_argument('--clients', type=str, help='Client we want to gather the metrics, if you want to compare, '
                                                    'split them by comma, ex: nethermind,geth',
                        default='nethermind,geth,reth')
    # A real int default instead of the string '10' (argparse only coerced it by accident of type=).
    parser.add_argument('--runs', type=int, help='Number of runs the program will process', default=10)
    parser.add_argument('--images', type=str, help='Image values per each client',
                        default='{ "nethermind": "default", "besu": "default", "geth": "default", "reth": "default" , '
                                '"erigon": "default"}')
    # Parse command-line arguments
    args = parser.parse_args()

    results_paths = args.resultsPath
    clients = args.clients
    tests_path = args.testsPath
    runs = args.runs
    images = args.images

    # Echo the machine description captured alongside the results.
    with open(os.path.join(results_paths, 'computer_specs.txt'), 'r') as file:
        computer_spec = file.read()
    print(computer_spec)

    client_results = {}
    failed_tests = {}
    methods = ['engine_newPayloadV3']
    fields = 'max'
    test_cases = utils.get_test_cases(tests_path)
    for client in clients.split(','):
        client_results[client] = {}
        failed_tests[client] = {}
        for test_case_name, test_case_gas in test_cases.items():
            client_results[client][test_case_name] = {}
            failed_tests[client][test_case_name] = {}
            for gas in test_case_gas:
                client_results[client][test_case_name][gas] = {}
                failed_tests[client][test_case_name][gas] = {}
                for method in methods:
                    # Collect the per-run values locally, then store once.
                    run_results = []
                    run_failures = []
                    for run in range(1, runs + 1):
                        responses, results = utils.extract_response_and_result(
                            results_paths, client, test_case_name, gas, run, method, fields)
                        run_results.append(results)
                        # A falsy response marks the run as failed.
                        run_failures.append(not responses)
                    client_results[client][test_case_name][gas][method] = run_results
                    failed_tests[client][test_case_name][gas][method] = run_failures

    # Every distinct gas value across all test cases (set dedupes by itself).
    gas_set = {gas for gases in test_cases.values() for gas in gases}

    # exist_ok avoids the racy exists()/makedirs() pair.
    os.makedirs(f'{results_paths}/reports', exist_ok=True)

    metadata = {}
    metadata_path = f'{tests_path}/metadata.json'
    if os.path.exists(metadata_path):
        # Close the file deterministically instead of json.load(open(...)).
        with open(metadata_path, 'r') as f:
            for item in json.load(f):
                metadata[item['Name']] = item

    get_table_report(client_results, clients.split(','), results_paths, test_cases, methods, gas_set, metadata, images)
    print('Done!')
# Run the CLI entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()