apiVersion: ripsaw.cloudbulldozer.io/v1alpha1
kind: Benchmark
metadata:
  name: fio-benchmark-example
  namespace: benchmark-operator
spec:
  # where Elasticsearch is running
  elasticsearch:
    url: http://my.elasticsearch.server:80
    verify_cert: false
    parallel: false
  # clustername: myk8scluster
  # test_user: ripsaw
  workload:
    name: "fio_distributed"
    args:
      # if true, do a large sequential write to preallocate the volume before using it
      prefill: true
      # for a compressed volume, uncomment the next line and make cmp_bs the same as bs
      # prefill_bs: 8KiB
      # number of times each test is run
      samples: 3
      # number of fio pods generating the workload
      servers: 3
      # put all fio pods on this node
      pin_server: ''
      # test types, see the fio documentation
      jobs:
        - write
        - read
        - randwrite
        - randread
      # I/O request sizes (also called block sizes)
      bs:
        - 4KiB
        - 64KiB
      # how many fio processes per pod
      numjobs:
        - 1
      # with the libaio ioengine, number of in-flight requests per process
      iodepth: 4
      # how long (in seconds) to run read tests; this duration is TOO SHORT for meaningful results
      read_runtime: 15
      # how long (in seconds) to run write tests; this duration is TOO SHORT for meaningful results
      write_runtime: 15
      # don't start measuring until this many seconds pass, for reads
      read_ramp_time: 5
      # don't start measuring until this many seconds pass, for writes
      write_ramp_time: 5
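      # rough runtime arithmetic for the values above: 4 job types x 2 block
      # sizes x 3 samples x (5s ramp + 15s run) = 480s of I/O per fio pod;
      # actual wall time adds prefill, pod scheduling, and teardown overhead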
      # size of the file to access
      filesize: 2GiB
      # interval between I/O stat samples, in milliseconds
      log_sample_rate: 3000
      #storageclass: rook-ceph-block
      #storagesize: 5Gi
      # use drop_cache_kernel to have a set of labeled nodes drop the kernel buffer cache before each sample
      #drop_cache_kernel: False
      # use drop_cache_rook_ceph to have the Ceph OSDs drop their caches before each sample
      #drop_cache_rook_ceph: False
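      # drop_cache_kernel only acts on nodes carrying the cache-dropper label
      # (in the upstream ripsaw docs this is kernel-cache-dropper=yes; verify
      # against your operator version), applied with e.g.:
      #   kubectl label node <node-name> kernel-cache-dropper=yes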
      # increase this if you want fio to run for more than 1 hour without being terminated by K8S
      #job_timeout: 3600
      #######################################
      # EXPERT AREA - MODIFY WITH CAUTION   #
      #######################################
      # global_overrides:
      #   NOTE: Dropping caches as per this example can only be done if the
      #   fio server is running in a privileged pod
      #   - exec_prerun=bash -c 'sync && echo 3 > /proc/sys/vm/drop_caches'
      job_params:
        - jobname_match: write
          params:
            - fsync_on_close=1
            - create_on_open=1
            - runtime={{ workload_args.write_runtime }}
            - ramp_time={{ workload_args.write_ramp_time }}
        - jobname_match: read
          params:
            - time_based=1
            - runtime={{ workload_args.read_runtime }}
            - ramp_time={{ workload_args.read_ramp_time }}
        - jobname_match: rw
          params:
            - rwmixread=50
            - time_based=1
            - runtime={{ workload_args.read_runtime }}
            - ramp_time={{ workload_args.read_ramp_time }}
        - jobname_match: readwrite
          params:
            - rwmixread=50
            - time_based=1
            - runtime={{ workload_args.read_runtime }}
            - ramp_time={{ workload_args.read_ramp_time }}
        - jobname_match: randread
          params:
            - time_based=1
            - runtime={{ workload_args.read_runtime }}
            - ramp_time={{ workload_args.read_ramp_time }}
        - jobname_match: randwrite
          params:
            - time_based=1
            - runtime={{ workload_args.write_runtime }}
            - ramp_time={{ workload_args.write_ramp_time }}
        - jobname_match: randrw
          params:
            - time_based=1
            - runtime={{ workload_args.write_runtime }}
            - ramp_time={{ workload_args.write_ramp_time }}
        # - jobname_match: <search_string>
        #   params:
        #     - key=value
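# To launch this benchmark, assuming the benchmark-operator and its CRDs are
# already deployed in the benchmark-operator namespace:
#   kubectl apply -f cr.yaml
# and to watch progress:
#   kubectl get benchmark -n benchmark-operator
#   kubectl get pods -n benchmark-operator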