forked from EGiunchiglia/C-HMCNN
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrun.sh
executable file
·60 lines (47 loc) · 1.35 KB
/
run.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
#!/bin/sh
# Directives to SLURM
#SBATCH -p chaos
#SBATCH -A shared-sml-staff
#SBATCH --signal=B:SIGTERM@120
#SBATCH --gres gpu:1
#SBATCH -t 01:00:00
#SBATCH --mem=10G
#SBATCH -o /nfs/data_chaos/sbortolotti/logs/C-HMCNN/train.out
#SBATCH -e /nfs/data_chaos/sbortolotti/logs/C-HMCNN/train.e
# SCRIPT: run.sh
# AUTHOR: Samuele Bortolotti <[email protected]>
# DATE: 2022-11-14
#
# PURPOSE: Trains and evaluates the network on the gpu cluster
# Print the help text (preceded by an optional error message) and exit 1.
# Arguments: $@ - optional error message shown before the help text
usage() {
  # Diagnostics belong on stderr so they survive stdout redirection;
  # printf avoids echo's non-portable flag/backslash handling.
  test $# = 0 || printf '%s\n' "$*" >&2
  echo
  echo "Trains and evaluates the network on the gpu cluster"
  echo "Options:"
  echo "  -h, --help    Print this help"
  echo
  exit 1
}
# Filter recognised options out of "$@", keeping every remaining argument
# as a positional parameter.  The positional list is rotated in place
# ('set -- "$@" "$1"' appends, shift drops the head), which is POSIX-safe
# and avoids the classic eval/re-quoting approach — that one breaks (and
# is shell-injectable) when an argument contains quotes or backslashes.
n=$#
while [ "$n" -gt 0 ]; do
    case $1 in
        -h|--help) usage ;;
        -?*) usage "Unknown option: $1" ;;
        *) set -- "$@" "$1" ;;
    esac
    shift
    n=$((n - 1))
done
# Enter the code directory; abort immediately if it is missing so we do
# not run the experiment from the wrong working directory.
cd "/nfs/data_chaos/sbortolotti/code/C-HMCNN" || exit 1
# Absolute interpreter paths from the conda env (no activation needed).
python="/nfs/data_chaos/sbortolotti/pkgs/miniconda/envs/chmncc/bin/python"
wandb="/nfs/data_chaos/sbortolotti/pkgs/miniconda/envs/chmncc/bin/wandb"
# Log in to wandb; fail fast with a clear message if the key is absent.
: "${KEY:?wandb API key must be exported as KEY}"
"$wandb" login "$KEY"
# On SIGTERM/SIGINT (SLURM sends SIGTERM 120s before the limit, per
# --signal=B:SIGTERM@120) forward TERM to the whole process group so the
# training run can shut down cleanly, then reap the children.
trap "trap ' ' TERM INT; kill -TERM 0; wait" TERM INT
# Run the experiment in the BACKGROUND: a foreground child would keep the
# shell from acting on the trapped signal until the child exits, which
# defeats the graceful-shutdown trap above.  'wait' is interruptible.
"$python" -m chmncc experiment "chmncc" 200 --learning-rate 0.001 --batch-size 128 --test-batch-size 128 --device cuda --project chmncc &
wait