-
Notifications
You must be signed in to change notification settings - Fork 23
/
write_benchresults.py
138 lines (107 loc) · 3.87 KB
/
write_benchresults.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
"""
Updates the doc/benchtable.rst file.
Must be run from project folder.
Set up to work on Rob's machine.
Configure the PYPY and SLP constants for your computer to run locally.
Can generalize it if needed,
or if we figure out a better way to handle benchmarks.
"""
from __future__ import print_function
import collections
import subprocess
import sys
import os
# Interpreter executables used for benchmarking.  Paths are specific to
# the original author's machine (see the module docstring).
PYPY = os.path.expanduser('~/venvs/gopypy/bin/python')
SLP = os.path.expanduser('~/dev/venvs/go27slp/bin/python')
PY3 = os.path.expanduser('~/dev/venvs/go33/bin/python')
# Every [interpreter, goless-backend] pair benchmarked by collect_results().
EXE_BACKEND_MATRIX = [
    [PYPY, 'stackless'],
    [PYPY, 'gevent'],
    [SLP, 'stackless'],
    [SLP, 'gevent'],
    [PY3, 'gevent']
]
# Output path of the generated reStructuredText benchmark table.
RST = os.path.join('doc', 'benchtable.rst')
# Column widths for the table: platform, backend, benchmark, time.
COLUMN_WIDTHS = 9, 9, 16, 7
# One row of the benchmark table; all fields are strings as parsed from
# the benchmark process output.
BenchmarkResult = collections.namedtuple(
    'BenchResult', ['platform', 'backend', 'benchmark', 'time'])


def stdout_to_results(s):
    """Turn the multi-line output of a benchmark process into
    a list of BenchmarkResult instances.

    :param s: Raw stdout text.  Each non-empty line must contain four
        whitespace-separated fields (platform, backend, benchmark, time).
    :return: List of BenchmarkResult; empty if *s* is blank.
    """
    # splitlines + the blank-line filter means empty or whitespace-only
    # process output yields [] instead of raising a TypeError from
    # BenchmarkResult(*[]) (an arity mismatch on the namedtuple).
    lines = s.strip().splitlines()
    return [BenchmarkResult(*line.split()) for line in lines if line.strip()]
def get_benchproc_results(clargs, **kwargs):
    """Run a benchmark subprocess and return its BenchmarkResults.

    :param clargs: Argument list for subprocess.Popen.
    :param kwargs: Passed through to subprocess.Popen (e.g. ``env``).
    :return: List of BenchmarkResult parsed from the process stdout;
        empty list if the process exited non-zero (a diagnostic is
        written to stderr in that case).
    """
    # universal_newlines makes stdout/stderr text (str) on Python 3 as
    # well as Python 2; without it, communicate() returns bytes and both
    # stdout_to_results(stdout) and sys.stderr.write(stderr) break.
    p = subprocess.Popen(
        clargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True, **kwargs)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        sys.stderr.write('Failed to benchmark: %s\n' % ' '.join(clargs))
        sys.stderr.write(stderr)
        return []
    results = stdout_to_results(stdout)
    # Echo each row so progress is visible while the benchmarks run.
    for br in results:
        print(justify_benchresult(br))
    return results
def benchmark_process_and_backend(exe, backend):
    """Benchmark one interpreter/backend combination.

    :param exe: Path of the Python executable to run.
    :param backend: goless backend name, selected via the
        GOLESS_BACKEND environment variable.
    :return: List of BenchmarkResult for this combination.
    """
    # Copy the current environment and select the backend for the child.
    child_env = dict(os.environ)
    child_env['GOLESS_BACKEND'] = backend
    return get_benchproc_results([exe, '-m', 'benchmark'], env=child_env)
def benchmark_go():
    """Run the Go benchmarks, if Go is installed.

    :return: List of BenchmarkResult, or an empty list when the ``go``
        tool is missing or broken.
    """
    try:
        # Probe for a working Go toolchain, discarding its version output.
        subprocess.check_call(['go', 'version'], stdout=subprocess.PIPE)
    except (OSError, subprocess.CalledProcessError):
        # Go isn't installed (OSError) or is unusable (non-zero exit);
        # skip the Go benchmarks instead of aborting the whole run,
        # as the docstring promises.
        return []
    return get_benchproc_results(['go', 'run', 'benchmark.go'])
def collect_results():
    """Run every interpreter/backend combination plus Go, returning all
    BenchmarkResults sorted by benchmark and then elapsed time.
    """
    all_results = []
    for interpreter, backend_name in EXE_BACKEND_MATRIX:
        all_results += benchmark_process_and_backend(interpreter, backend_name)
    all_results += benchmark_go()

    def sort_key(br):
        # Time is stored as a string; compare numerically.
        return br.benchmark, float(br.time), br.platform, br.backend

    return sorted(all_results, key=sort_key)
def insert_seperator_results(results):
    """Yield the given BenchmarkResults, inserting a blank "separator"
    row wherever the benchmark name changes, to visually group the
    table by benchmark."""
    blank_row = BenchmarkResult(*(' ' * width for width in COLUMN_WIDTHS))
    previous_benchmark = None
    for result in results:
        # Emit a separator between groups, but never before the first row.
        if previous_benchmark is not None and previous_benchmark != result.benchmark:
            yield blank_row
        previous_benchmark = result.benchmark
        yield result
def justify_benchresult(br):
    """Format a BenchmarkResult as a pipe-delimited table row, each
    field left-justified to its column width."""
    cells = [field.ljust(width) for field, width in zip(br, COLUMN_WIDTHS)]
    return '|' + '|'.join(cells) + '|'
def make_sepline(char='-'):
    """Return a reST grid-table separator line built from *char*
    (default '-'; pass '=' for the line under the header row)."""
    segments = (char * width for width in COLUMN_WIDTHS)
    return ' +' + '+'.join(segments) + '+'
def main():
    """Run all benchmarks and write the reST results table to RST."""
    print('Running benchmarks.')
    rows = insert_seperator_results(collect_results())
    with open(RST, 'w') as f:
        # Table directive, header row, and separators.
        header_lines = [
            '.. table:: Current goless Benchmarks',
            '',
            make_sepline(),
            ' |Platform |Backend |Benchmark |Time |',
            make_sepline('='),
        ]
        for line in header_lines:
            f.write(line)
            f.write('\n')
            f.flush()
        # One data row followed by a separator line, per result.
        for row in rows:
            f.write(' %s\n%s\n' % (justify_benchresult(row), make_sepline()))
    print('Benchmarks finished. Report written to %s' % RST)
# Script entry point: run the full benchmark suite and write the report.
if __name__ == '__main__':
    main()