submit-ray-cluster.sbatch_shifter

#!/bin/bash
#SBATCH -C gpu
#SBATCH --time=00:10:00
#SBATCH -q debug
#SBATCH -A nstaff
#SBATCH --image=nersc/pytorch:ngc-22.02-v0
### This script works for any number of nodes; Ray finds and manages all resources
#SBATCH --nodes=2
### Give all resources on each node to a single Ray task; Ray manages them internally
#SBATCH --ntasks-per-node=1
#SBATCH --gpus-per-task=4
#SBATCH --cpus-per-task=128
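### The #SBATCH --image line above selects the Shifter container; the head,
### workers, and driver are all launched inside it via the "shifter" command.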
# Generate a one-off password that workers and the driver use to join this
# specific Ray cluster
redis_password=$(uuidgen)
export redis_password
nodes=$(scontrol show hostnames "$SLURM_JOB_NODELIST") # get the allocated node names
nodes_array=( $nodes )
node_1=${nodes_array[0]}
ip=$node_1 # use the first node's hostname as the head address
port=6379
ip_head=$ip:$port
export ip_head
echo "IP Head: $ip_head"
echo "STARTING HEAD at $node_1"
srun --nodes=1 --ntasks=1 -w $node_1 shifter bash start-head.sh $ip $redis_password &
sleep 30
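# For reference, start-head.sh is not included in this file. A minimal sketch
# of what it is assumed to do, using Ray 1.x CLI flags (an assumption, since
# the Ray version inside the image is not pinned here):
#
#   #!/bin/bash
#   # $1 = head node address, $2 = cluster password
#   ray start --head --node-ip-address="$1" --port=6379 \
#             --redis-password="$2" --block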
worker_num=$(($SLURM_JOB_NUM_NODES - 1)) # number of nodes other than the head node
for (( i=1; i<=$worker_num; i++ ))
do
  node_i=${nodes_array[$i]}
  echo "STARTING WORKER $i at $node_i"
  srun --nodes=1 --ntasks=1 -w "$node_i" shifter bash start-worker.sh "$ip_head" "$redis_password" &
  sleep 5 # stagger worker startup so they do not all hit the head at once
done
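# For reference, start-worker.sh is likewise not included. A minimal sketch
# under the same Ray 1.x assumption:
#
#   #!/bin/bash
#   # $1 = head address (host:port), $2 = cluster password
#   ray start --address="$1" --redis-password="$2" --block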
##############################################################################################
#### call your code below
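# The driver is assumed to attach to the running cluster through the exported
# variables, roughly as follows (Ray 1.x Python API; a sketch, not taken from
# the example script itself):
#
#   import os, ray
#   ray.init(address=os.environ["ip_head"],
#            _redis_password=os.environ["redis_password"])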
# Run the driver inside the Shifter image so it uses the container's Ray and
# PyTorch installations
shifter python examples/mnist_pytorch_trainable.py --cuda
exit