# @package _global_

# to execute this experiment run:
# python train.py experiment=panoptic/dales
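# parameters defined in this file can also be overridden from the command line
# using Hydra's dotted syntax, e.g. (illustrative values only):
# python train.py experiment=panoptic/dales trainer.max_epochs=200 model.optimizer.lr=0.005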

defaults:
  - override /datamodule: panoptic/dales.yaml
  - override /model: panoptic/spt-2.yaml
  - override /trainer: gpu.yaml

# all parameters below will be merged with parameters from default configurations set above
# this allows you to overwrite only specified parameters

trainer:
  max_epochs: 400

model:
  optimizer:
    lr: 0.01
    weight_decay: 1e-4

  partitioner:
    regularization: 20
    x_weight: 5e-2
    cutoff: 100

  edge_affinity_loss_lambda: 10

  partition_every_n_epoch: 10
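  # NB: the panoptic partition (and hence metrics such as val/pq) is presumably
  # only computed every partition_every_n_epoch epochs; the callback tweaks at
  # the bottom of this file account for that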

logger:
  wandb:
    project: "spt_dales"
    name: "SPT-64"

# metric based on which models will be selected
optimized_metric: "val/pq"

# modify checkpointing callbacks to adapt to partition_every_n_epoch
# being potentially different
callbacks:
  model_checkpoint:
    every_n_epochs: ${eval:'max(${trainer.check_val_every_n_epoch}, ${model.partition_every_n_epoch})'}
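    # e.g. assuming the trainer default check_val_every_n_epoch=1, this resolves
    # to max(1, 10) = 10, so checkpoints are only saved on epochs where the
    # partition-based val/pq metric is actually computed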
  early_stopping:
    strict: False
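    # strict: False presumably keeps early stopping from raising on the many
    # epochs where the monitored val/pq metric is not logged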