# cnn_experiment.yaml
description: Parameters used to train a CNN for the BFF problem
# -------------------------------------------------------------------------------------------------
# *** Settings for data preprocessing ***
# IMPORTANT - avoid changing these data_parameters (other than the file paths) unless you really
# understand the consequences in the code. Most parameters come directly from the ray-tracing
# simulations themselves
data_parameters:
  input_file: '../bff_data/final_table'
  preprocessed_file: '../bff_data/preprocessed_dataset.pkl'
  max_time: 6  # in microseconds
  sample_freq: 20  # in MHz
  beamformings: 32
  original_tx_power: 30  # in dBm
  original_rx_gain: 0  # in dBi
  power_offset: 170
  power_scale: 0.01
  pos_grid: [400.0, 400.0]  # in meters
  pos_shift: [183.0, 176.0]  # in meters
  keep_timeslots: [1, 82]
  run_sanity_checks: True
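# NOTE (illustrative only): the sketch below shows one plausible way the normalisation implied by
# power_offset / power_scale could be applied when building model inputs. The function name
# `normalize_power` and the exact formula are assumptions, not the repository's code.
#
#   import numpy as np
#
#   def normalize_power(power_dbm: np.ndarray, offset: float = 170.0, scale: float = 0.01) -> np.ndarray:
#       """Shift received power (in dBm) by `offset` and rescale so typical values land near [0, 1]."""
#       return (power_dbm + offset) * scale
#
#   # e.g. a -100 dBm sample would map to (-100 + 170) * 0.01 = 0.7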
# -------------------------------------------------------------------------------------------------
# *** Settings for overall experiment control ***
experiment_settings:
  detection_threshold: -100  # in dBm [depends on the sample frequency / thermal noise]
  tx_power: 45  # in dBm
  rx_gain: 10  # in dBi
  noise_std: 6  # in dB [Gaussian noise applied over dB values -> "log-normal"]
  scaler_type: "binarizer"
  model_type: "cnn"
  tests_per_position: 30
  predictions_file: "model_predictions.pkl"
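# NOTE (illustrative only): a minimal sketch of what the "binarizer" scaler and the log-normal
# noise model above could look like. The function names and the exact pipeline are assumptions;
# the repository's preprocessing code may differ.
#
#   import numpy as np
#
#   rng = np.random.default_rng(0)
#
#   def add_noise_db(power_dbm: np.ndarray, noise_std: float = 6.0) -> np.ndarray:
#       """Add Gaussian noise (in dB) to received power, i.e. log-normal in linear scale."""
#       return power_dbm + rng.normal(0.0, noise_std, size=power_dbm.shape)
#
#   def binarize(power_dbm: np.ndarray, detection_threshold: float = -100.0) -> np.ndarray:
#       """Map each sample to 1 if it is above the detection threshold, else 0."""
#       return (power_dbm >= detection_threshold).astype(np.float32)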
# -------------------------------------------------------------------------------------------------
# *** Settings for model hyperparameters ***
ml_parameters:
  # IO settings
  input_name: "input"
  input_shape: [32, 81, 1]  # BF, usable timeslots, 1 "data channel"
  output_name: "location"
  output_type: "regression"
  output_shape: [2]
  validation_metric: "euclidean_distance"
  model_folder: "../experiments_results/cnn/"
  # basic model hyperparams
  optimizer_type: "ADAM"
  batch_size: 64
  batch_size_inference: 256
  dropout: 0.01
  max_epochs: 1000
  early_stopping: 50  # Stop training after this many epochs without improvement on the validation set
  fc_layers: 12
  fc_neurons: 256
  learning_rate: 0.0001
  learning_rate_decay: 0.995  # Multiplicative factor applied at the end of each epoch
  target_gpu: "0"
  mc_dropout: 0  # Number of samples for MC-Dropout-based uncertainty estimation; 0 to deactivate
  # architecture-specific hyperparams
  conv_layers: 1
  conv_filters: [8]
  conv_filter_size: [[3, 3]]
  conv_maxpool: [[2, 1]]
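# -------------------------------------------------------------------------------------------------
# NOTE (illustrative only): a minimal Keras sketch of one plausible reading of the hyperparameters
# above (1 conv layer with 8 3x3 filters and (2, 1) max-pooling, followed by 12 fully connected
# layers of 256 units with dropout, and a 2-unit regression head trained with Adam at lr 1e-4).
# The function name `build_cnn` and the mse loss are assumptions; this is not the repository's
# model code.
#
#   import tensorflow as tf
#
#   def build_cnn() -> tf.keras.Model:
#       inputs = tf.keras.Input(shape=(32, 81, 1), name="input")
#       x = tf.keras.layers.Conv2D(8, (3, 3), padding="same", activation="relu")(inputs)
#       x = tf.keras.layers.MaxPooling2D(pool_size=(2, 1))(x)
#       x = tf.keras.layers.Flatten()(x)
#       for _ in range(12):
#           x = tf.keras.layers.Dense(256, activation="relu")(x)
#           x = tf.keras.layers.Dropout(0.01)(x)
#       outputs = tf.keras.layers.Dense(2, name="location")(x)
#       model = tf.keras.Model(inputs, outputs)
#       model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), loss="mse")
#       return model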