# Config file for ReBench
# Config format is YAML (see http://yaml.org/ for detailed spec)
# this run definition will be chosen if no parameters are given to rebench.py
default_experiment: Test
default_data_file: 'test.data'
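# Illustrative usage (editorial, not part of the original config): with the default
# above, running `./rebench.py rebench.conf` is expected to execute the Test
# experiment and append results to test.data, while passing an experiment name,
# e.g. `./rebench.py rebench.conf Test`, selects one explicitly. The exact CLI
# form may differ between ReBench versions.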
# general configuration for runs
runs:
    iterations: 100
    invocations: 1
    # min_iteration_time: 100   # give a warning if average runtime is below this value
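    # Editorial note (not part of the original config): in ReBench, `invocations`
    # is the number of times the executor process is started and `iterations` is
    # the number of in-process measurements per invocation, so the settings above
    # are expected to yield 100 measurements from a single process start.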
# definition of benchmark suites
# settings in the benchmark suite will override similar settings of the executor
benchmark_suites:
    TestSuite1:
        gauge_adapter: Test
        # location: /Users/...
        command: TestBenchMarks %(benchmark)s %(input)s %(variable)s
        input_sizes: [1, 2, 10, 100, 1000]
        benchmarks:
            - Bench1
            - Bench2:
                extra_args: 6
        max_invocation_time: 300
        variable_values: # another dimension over which the runs are varied
            - val1
            - val2
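    # Illustrative expansion (editorial, not part of the original config): ReBench
    # is expected to generate one run per (benchmark, input_size, variable_value)
    # combination and substitute the %(...)s placeholders in `command`, yielding
    # invocations roughly of the form:
    #   <executor> TestBenchMarks Bench1 10 val1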
    TestSuite2:
        gauge_adapter: Test
        command: TestBenchMarks %(benchmark)s %(input)s %(variable)s
        input_sizes: [1, 2, 10, 100, 1000]
        cores: [7, 13, 55]
        benchmarks:
            - Bench1:
                extra_args: 3
            - Bench2
        variable_values: # another dimension over which the runs are varied
            - val1
            - val2
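    # Editorial note (not part of the original config): per the comment at the top
    # of benchmark_suites, the suite-level `cores: [7, 13, 55]` above presumably
    # overrides the `cores` list defined on the executor for runs of TestSuite2.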
# Executors have a name and are specified by a path and the executable to be executed
# optional: the number of cores for which the runs have to be executed
executors:
    TestRunner1:
        path: tests
        executable: test-vm1.py
        cores: [1, 4, 8]
        environment: "ENVVAR1=2 ENVVAR2=1"
    TestRunner2:
        path: tests
        executable: test-vm2.py
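    # Illustrative composition (editorial, not part of the original config): the
    # effective command line is expected to combine executor and suite settings,
    # roughly `tests/test-vm1.py TestBenchMarks <benchmark> <input> <variable>`,
    # executed once per value in the applicable `cores` list.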
# define the benchmarks to be executed for a re-executable benchmark run
# definitions made here override the corresponding benchmark suite and
# executor definitions
experiments:
    Test:
        description: >
            This run definition is used for testing.
            It should try all possible settings, and the generated output
            will be compared to the expected one by the unit test(s).
        suites:
            - TestSuite1
            - TestSuite2
        input_sizes: [1]
        executions:
            # List of executors and benchmarks/benchmark suites to be run on them.
            # Benchmarks defined here will override the ones defined for the whole run.
            # The following example is equivalent to the global run definition,
            # but needs to be tested...
            - TestRunner1:
                suites:
                    - TestSuite1
            - TestRunner1:
                suites:
                    - TestSuite2
            - TestRunner2
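            # Editorial note (not part of the original config): listing TestRunner2
            # without a `suites:` key presumably makes it run all suites of this
            # experiment (TestSuite1 and TestSuite2), which is why the comment above
            # calls this executions list equivalent to the global run definition.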