# mt-mm.cfg
# Note: Any config values listed here can also be passed directly as CLI args.
# For this convenience to work, all keys should be unique (even across sections).
# If a key is non-unique, specifying it as a CLI arg will override every value
# that shares that key. See parse_config in utils.py for details.
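#
# For example (a hypothetical invocation -- the actual entry-point script and
# flag syntax come from parse_config in utils.py, so check there first):
#   python train.py --config mt-mm.cfg --learning_rate 0.0005
# would override the learning_rate value defined in the [Optimizer] section.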
[General]
test_name: MNIST-MM
output_dir: saved/
n_gpu: 1
[Datasets]
# Note: For src_num/tgt_num, set N > 0 to use N samples per class, or N = -1 to use all samples
src_path: data_loading/data/mnist.h5
tgt_path: data_loading/data/mnist_m.h5
src_num: -1
tgt_num: 10
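# e.g. with the values above, every source (MNIST) sample is used, while only
# 10 target (MNIST-M) samples per class are drawn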
sample_ratio: 3
resize_dim: 32
batch_size: 256
shuffle: True
[Model]
# Model parameters are for LenetPlus. Change if using ResNet, VGG-16, etc.
classes: 10
input_dim: 32
feature_size: 256
dropout: 0.5
use_bn: False
use_inn: False
[Loss]
margin: 1.0
alpha: 0.1
[Optimizer]
learning_rate: 0.001
weight_decay: 0.0001
momentum: 0.9
[Metrics]
funcs: loss accuracy
best_metric: accuracy
best_mode: max
# early_stop: 10
[Training]
# Note on the training config (epochs: 1000, len_epoch: 20 batches/epoch)
#
# d-SNE trains on src/tgt image pairs, so the full training set is very
# large (>1M pairs). Because of this, if an epoch were defined as "one
# iteration through the training dataset", only ~2 epochs would be needed.
#
# For logging/tensorboard purposes, it's more practical to shorten the length
# of an epoch and increase the number of epochs. The training itself is the
# same; the only difference is when tensorboard events are logged.
epochs: 1000
len_epoch: 20
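# e.g. with the values above, training runs 1000 * 20 = 20,000 batches in
# total; how those batches are grouped into "epochs" only affects logging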
# Save checkpoint every N epochs
# (best model will always be saved at each epoch -- this is separate)
save_period: 10
# Uncomment below after checkpoint has been created to resume training, e.g.
# resume: saved/MNIST-MM/2020-04-11_00-14-58/ckpt/model_best.pth
[Testing]
# Uncomment below after checkpoint has been created to test model
# ckpt: <path to checkpoint>