parameter.py (forked from sharathadavanne/seld-dcase2019)
# Parameters used in the feature extraction, the neural network model, and the training of SELDnet can be changed here.
#
# Ideally, do not change the values of the default parameters. Create separate cases with a unique <task-id> as seen in
# the if-else block below and use them. This way you can easily reproduce a configuration at a later time.
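#
# For example, a new case could be appended to that block like this (task-id '6' and the
# settings shown are hypothetical, for illustration only):
#
#     elif argv == '6':
#         params['dataset'] = 'mic'
#         params['dropout_rate'] = 0.1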
def get_params(argv):
    print("SET: {}".format(argv))

    # ########### default parameters ##############
    params = dict(
        quick_test=True,  # Quick test: trains/tests on a small subset of the dataset, and for fewer epochs

        # INPUT PATH
        dataset_dir='/proj/asignal/DCASE2019/dataset/',  # Base folder containing the foa/mic and metadata folders

        # OUTPUT PATHS
        feat_label_dir='/proj/asignal/DCASE2019/dataset/feat_label/',  # Directory to dump extracted features and labels
        model_dir='models/',  # Dumps the trained models and training curves in this folder
        dcase_output=True,  # If True, dumps the results recording-wise in the 'dcase_dir' path.
                            # Set this True after you have finalized your model, save the output, and submit
        dcase_dir='results/',  # Dumps the recording-wise network output in this folder

        # DATASET LOADING PARAMETERS
        mode='dev',  # 'dev' - development or 'eval' - evaluation dataset
        dataset='foa',  # 'foa' - ambisonic or 'mic' - microphone signals

        # DNN MODEL PARAMETERS
        sequence_length=128,  # Feature sequence length
        batch_size=16,  # Batch size
        dropout_rate=0,  # Dropout rate, constant for all layers
        nb_cnn2d_filt=64,  # Number of CNN filters, constant for each layer
        pool_size=[8, 8, 4],  # CNN pooling; length of list = number of CNN layers, list value = pooling per layer
        rnn_size=[128, 128],  # RNN layers; length of list = number of layers, list value = number of nodes
        fnn_size=[128],  # FNN layers; length of list = number of layers, list value = number of nodes
        loss_weights=[1., 50.],  # [sed, doa] weights for scaling the two DNN output losses
        nb_epochs=50,  # Maximum number of training epochs
        epochs_per_fit=5,  # Number of epochs per fit call
    )
    params['patience'] = int(0.1 * params['nb_epochs'])  # Early-stopping patience: stop training if the metric stops improving for this many epochs
    # ########### User defined parameters ##############
    if argv == '1':
        print("USING DEFAULT PARAMETERS\n")

    elif argv == '2':
        params['mode'] = 'dev'
        params['dataset'] = 'mic'

    elif argv == '3':
        params['mode'] = 'eval'
        params['dataset'] = 'mic'

    elif argv == '4':
        params['mode'] = 'dev'
        params['dataset'] = 'foa'

    elif argv == '5':
        params['mode'] = 'eval'
        params['dataset'] = 'foa'

    # Quick test
    elif argv == '999':
        print("QUICK TEST MODE\n")
        params['quick_test'] = True
        params['epochs_per_fit'] = 1

    else:
        print('ERROR: unknown argument {}'.format(argv))
        exit()

    for key, value in params.items():
        print("\t{}: {}".format(key, value))
    return params
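
A minimal usage sketch (not part of parameter.py) of how a driver script might consume get_params(). The file name seld.py and the command-line convention below follow the parent repository's pattern, but they are assumptions here, not verified against this fork:

    # seld.py (hypothetical driver; assumes parameter.py is on the import path)
    import sys
    from parameter import get_params

    if __name__ == '__main__':
        # e.g. `python seld.py 3` selects the 'eval' split of the 'mic' recordings;
        # no argument falls back to task-id '1', i.e. the defaults above.
        task_id = sys.argv[1] if len(sys.argv) > 1 else '1'
        params = get_params(task_id)
        print('{} epochs on the {}/{} data'.format(
            params['nb_epochs'], params['dataset'], params['mode']))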