# task4.mlp.yaml
active_set: dcase2017

sets:
  - set_id: dcase2017
    description: Minimal MLP based system to test that everything works
    dataset:
      method: development
      dataset_method_parameters:
        development:
          name: DCASE2017_Task4tagging_DevelopmentSet
          fold_list: [1]
          evaluation_mode: folds
    feature_stacker:
      stacking_recipe: mfcc
    feature_normalizer:
      enable: true
    feature_aggregator:
      enable: true
      aggregation_recipe: flatten
      win_length_seconds: 0.1
      hop_length_seconds: 0.02
    learner:
      method: mlp
      file_hop: 1
      learner_method_parameters:
        mlp:
          seed: 0
          keras:
            backend: theano
            backend_parameters:
              floatX: float64
              device: gpu0
              fastmath: false
              optimizer: None
              openmp: false
              threads: 1
              CNR: true
          validation:
            enable: false
            setup_source: generated_scene_location_event_balanced
            validation_amount: 0.10
          training:
            epochs: 200
            batch_size: 256
            shuffle: true
            callbacks:
              - type: EarlyStopping
                parameters:
                  monitor: categorical_accuracy
                  min_delta: 0.001
                  patience: 10
                  verbose: 0
                  mode: max
          model:
            config:
              - class_name: Dense
                config:
                  units: 50
                  kernel_initializer: uniform
                  activation: relu
              - class_name: Dropout
                config:
                  rate: 0.2
              - class_name: Dense
                config:
                  units: 50
                  kernel_initializer: uniform
                  activation: relu
              - class_name: Dropout
                config:
                  rate: 0.2
              - class_name: Dense
                config:
                  units: CLASS_COUNT
                  kernel_initializer: uniform
                  activation: softmax
            loss: binary_crossentropy
            optimizer:
              type: Adam
            metrics:
              - categorical_accuracy
    recognizer:
      enable: true
      frame_accumulation:
        enable: false
        type: sum
      frame_binarization:
        enable: true
        type: global_threshold
        threshold: 0.5
      event_activity_processing:
        enable: true
        type: median_filtering
        window_length_seconds: 0.54
      event_post_processing:
        enable: true
        minimum_event_length_seconds: 0.1
        minimum_event_gap_second: 0.1
        #type: majority_vote # [maximum, majority_vote]
    evaluator:
      enable: true
      show_details: true
      saving:
        enable: true
        filename: eval_[{parameter_hash}].yaml
defaults:
  # ==========================================================
  # Flow
  # ==========================================================
  flow:
    initialize: true
    extract_features: true
    feature_normalizer: true
    train_system: true
    test_system: true
    evaluate_system: true

  # ==========================================================
  # General
  # ==========================================================
  general:
    overwrite: false # Overwrite previously stored data
    challenge_submission_mode: true # Save results into path->challenge_results for challenge submission
    print_system_progress: true #
    use_ascii_progress_bar: false #
    log_system_parameters: false #
    log_system_progress: false #
    log_learner_status: false #
    scene_handling: scene-dependent # [scene-dependent, scene-independent]

  # ==========================================================
  # Paths
  # ==========================================================
  path:
    data: data/
    system_base: system/task4/
    feature_extractor: feature_extractor/
    feature_normalizer: feature_normalizer/
    learner: learner/
    recognizer: recognizer/
    evaluator: evaluator/
    recognizer_challenge_output: challenge_submission/task4/
    logs: logs/

  # ==========================================================
  # Logging
  # ==========================================================
  logging:
    enable: true #
    colored: true # Colored console logging
    parameters:
      version: 1
      disable_existing_loggers: false
      formatters:
        simple:
          format: "[%(levelname).1s] %(message)s"
        normal:
          format: "%(asctime)s\t[%(name)-20s]\t[%(levelname)-8s]\t%(message)s"
        extended:
          format: "[%(asctime)s] [%(name)s]\t [%(levelname)-8s]\t %(message)s \t(%(filename)s:%(lineno)s)"
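        # Illustrative sample lines for the formatters above (logger name,
        # timestamp and message are made-up examples; fields in 'normal' and
        # 'extended' are tab-separated):
        #   simple:   [I] Feature extraction started
        #   normal:   2017-06-01 12:00:00,000  [task4               ]  [INFO    ]  Feature extraction started
        #   extended: [2017-06-01 12:00:00,000] [task4]  [INFO    ]  Feature extraction started  (task4.py:123)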
      handlers:
        console:
          class: logging.StreamHandler
          level: DEBUG
          formatter: simple
          stream: ext://sys.stdout
        info_file_handler:
          class: logging.handlers.RotatingFileHandler
          level: INFO # Minimum logging level to save
          formatter: normal # [simple, extended]
          filename: custom.info.log
          maxBytes: 10485760 # 10MB
          backupCount: 20
          encoding: utf8
        debug_file_handler:
          class: logging.handlers.RotatingFileHandler
          level: DEBUG # Minimum logging level to save
          formatter: normal # [simple, extended]
          filename: custom.debug.log
          maxBytes: 10485760 # 10MB
          backupCount: 20
          encoding: utf8
        error_file_handler:
          class: logging.handlers.RotatingFileHandler
          level: ERROR # Minimum logging level to save
          formatter: extended # [simple, extended]
          filename: custom.errors.log
          maxBytes: 10485760 # 10MB
          backupCount: 20
          encoding: utf8
      loggers:
        my_module:
          level: ERROR
          handlers: [console]
          propagate: no
      root:
        level: INFO
        handlers: [console, error_file_handler, info_file_handler, debug_file_handler]
  # ==========================================================
  # Dataset
  # ==========================================================
  dataset:
    method: development
    dataset_method_parameters:
      development:
        name: DCASE2017_Task4tagging_DevelopmentSet
        fold_list: [1]
        evaluation_mode: folds
      challenge_train:
        name: DCASE2017_Task4tagging_DevelopmentSet
        evaluation_mode: full
      challenge_test:
        name: DCASE2017_Task4tagging_EvaluationSet
        evaluation_mode: full

  # ==========================================================
  # Feature extractor
  # ==========================================================
  feature_extractor:
    fs: 44100 # Sampling frequency
    win_length_seconds: 0.04 # Window length
    hop_length_seconds: 0.02 # Hop length
    feature_extractor_method_parameters:
      mel: # Mel band energy
        mono: true # [true, false]
        window: hamming_asymmetric # [hann_asymmetric, hamming_asymmetric]
        spectrogram_type: magnitude # [magnitude, power]
        n_mels: 40 # Number of mel bands used
        normalize_mel_bands: false # [true, false]
        n_fft: 2048 # FFT length
        fmin: 0 # Minimum frequency when constructing mel bands
        fmax: 22050 # Maximum frequency when constructing mel bands
        htk: false # Switch for HTK-styled mel-frequency equation
        log: true # Logarithmic
      mfcc: # Mel-frequency cepstral coefficients
        mono: true # [true, false]
        window: hamming_asymmetric # [hann_asymmetric, hamming_asymmetric]
        spectrogram_type: magnitude # [magnitude, power]
        n_mfcc: 20 # Number of MFCC coefficients
        n_mels: 40 # Number of mel bands used
        n_fft: 2048 # FFT length
        fmin: 0 # Minimum frequency when constructing mel bands
        fmax: 22050 # Maximum frequency when constructing mel bands
        htk: false # Switch for HTK-styled mel-frequency equation
      mfcc_delta: # MFCC delta coefficients
        width: 9 #
      mfcc_acceleration: # MFCC acceleration coefficients
        width: 9 #

  # ==========================================================
  # Feature stacker
  # ==========================================================
  feature_stacker:
    # ==========================================================
    # Valid feature vector recipe formats:
    # - [extractor (string)] => full vector
    # - [extractor (string)]=[start index (int)]-[end index (int)] => default channel 0 and vector [start:end]
    # - [extractor (string)]=[channel (int)]:[start index (int)]-[end index (int)] => specified channel and vector [start:end]
    # - [extractor (string)]=1,2,3,4,5 => vector [1,2,3,4,5]
    # - [extractor (string)]=0 => specified channel and full vector
    # ==========================================================
    stacking_recipe: mel
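    # Illustrative alternatives (hypothetical recipes using the formats listed
    # above; extractor names refer to feature_extractor_method_parameters):
    #   stacking_recipe: mfcc=1-19    # MFCC coefficients 1..19 from channel 0 (0th coefficient dropped)
    #   stacking_recipe: mfcc=0:1-19  # same range with the channel given explicitly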
  # ==========================================================
  # Feature normalizer
  # ==========================================================
  feature_normalizer:
    enable: true
    type: global # [global]

  # ==========================================================
  # Feature aggregator
  # ==========================================================
  feature_aggregator:
    enable: false
    aggregation_recipe: flatten # [mean, std, cov, kurtosis, skew, flatten]
    win_length_seconds: 0.1
    hop_length_seconds: 0.02
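    # Sizing note, derived from the values above and the 0.02 s feature hop in
    # feature_extractor: a 0.1 s aggregation window spans 0.1 / 0.02 = 5 feature
    # frames, so the flatten recipe concatenates 5 consecutive frames into a
    # single vector (5x the base feature vector length).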
  # ==========================================================
  # Learner
  # ==========================================================
  learner:
    method: mlp
    learner_method_parameters:
      gmm:
        n_components: 16 # Number of Gaussian components
        covariance_type: diag # [diag|tied|full|spherical]
        tol: 0.001
        reg_covar: 0
        max_iter: 40
        n_init: 1
        init_params: kmeans
        random_state: 0
      gmm_deprecated:
        n_components: 16 # Number of Gaussian components
        covariance_type: diag # [diag|full] Diagonal or full covariance matrix
        random_state: 0
        tol: 0.001
        min_covar: 0.001
        n_iter: 40
        n_init: 1
        params: wmc
        init_params: wmc
      mlp:
        seed: 1
        keras:
          backend: theano
          backend_parameters:
            floatX: float64
            device: cpu
            fastmath: false
            optimizer: None
            openmp: false
            threads: 1
            CNR: true
        validation:
          enable: true
          setup_source: generate_balanced # [dataset, generate_balanced]
          validation_amount: 0.10 # [0.0-1.0]
        training:
          epochs: 100
          batch_size: 256
          shuffle: true
          callbacks:
            - type: EarlyStopping
              parameters:
                monitor: val_categorical_accuracy # quantity to be monitored
                min_delta: 0.001 # minimum change in the monitored quantity to qualify as an improvement; an absolute change of less than min_delta counts as no improvement
                patience: 10 # number of epochs with no improvement after which training will be stopped
                verbose: 0 # verbosity mode
                mode: max # {auto, min, max}. In min mode, training will stop when the quantity monitored has stopped decreasing; in max mode it will stop when the quantity monitored has stopped increasing; in auto mode, the direction is automatically inferred from the name of the monitored quantity.
        model:
          # class_name can be any standard Keras layer, e.g. Dense, Activation, Dropout
          # Magic parameter values: FEATURE_VECTOR_LENGTH, CLASS_COUNT
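          # Hypothetical illustration of the magic values: a layer declared as
          #   - class_name: Dense
          #     config:
          #       units: FEATURE_VECTOR_LENGTH
          # would have its unit count substituted with the stacked feature vector
          # length, in the same way CLASS_COUNT below is substituted with the
          # number of target classes.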
          config:
            - class_name: Dense
              config:
                units: 50
                kernel_initializer: uniform
                activation: relu
            - class_name: Dropout
              config:
                rate: 0.2
            - class_name: Dense
              config:
                units: 50
                kernel_initializer: uniform
                activation: relu
            - class_name: Dropout
              config:
                rate: 0.2
            - class_name: Dense
              config:
                units: CLASS_COUNT
                kernel_initializer: uniform
                activation: sigmoid
          loss: binary_crossentropy
          optimizer:
            type: Adam
          metrics:
            - categorical_accuracy

  # ==========================================================
  # Recognizer
  # ==========================================================
  recognizer:
    enable: true
    frame_accumulation:
      enable: false
      type: sliding_sum # [sliding_sum, sliding_mean, sliding_median]
      window_length_seconds: 1.0 # seconds
    frame_binarization:
      enable: true
      type: global_threshold # [frame_max, global_threshold]
      threshold: 0.5 #
    event_activity_processing:
      enable: true
      type: median_filtering
      window_length_seconds: 0.54 # seconds
    event_post_processing:
      enable: true
      minimum_event_length_seconds: 0.1 # seconds
      minimum_event_gap_second: 0.1 # seconds
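      # Worked example (values above, assuming the usual interpretation of these
      # parameters): a detected event lasting 0.08 s is shorter than
      # minimum_event_length_seconds and would be discarded, while two events
      # separated by a 0.05 s gap fall below minimum_event_gap_second and would
      # be merged into one.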
  # ==========================================================
  # Evaluator
  # ==========================================================
  evaluator:
    enable: true
    saving:
      enable: true # Save evaluation results into a YAML file
      # ==========================================================
      # Filename template, fields:
      # - {parameter_set}
      # - {parameter_hash}
      # - {dataset_name}
      # ==========================================================
      filename: eval_[{parameter_hash}].yaml
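      # Hypothetical resolved example (the hash value here is illustrative only):
      #   eval_[1a2b3c4d5e6f].yaml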