-
Notifications
You must be signed in to change notification settings - Fork 7
/
RunHPCMD.sh
107 lines (82 loc) · 3.92 KB
/
RunHPCMD.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#!/bin/bash
#; This script performs segmented MD on an HPC cluster within a wall-time limit.
#; __authors = Ropón-Palacios G., Ramirez-Olivos G. and Gervacio-Villareal A.
#; __date__ = 20 May, 2022.
#; __e-mail__ = [email protected]
#; All code below is syntax for an HPC running the SLURM scheduler.
#; ========================================================
#SBATCH --account=def-nike-ab
#SBATCH [email protected]
#SBATCH --mail-type=ALL
# Allocate 10 CPUs and 1000M RAM per CPU for 5 days
#SBATCH --time=5-0:0
#SBATCH -c10 --mem-per-cpu=1000
#SBATCH --gres=gpu:v100:1
#SBATCH --job-name="viro_wt"
# Load namd-multicore module
#module load StdEnv/2020 intel/2020.1.217 namd-multicore
#module load StdEnv/2020 cuda/11.0 namd-multicore/2.14
# Use the CPU count SLURM exports for this allocation; fall back to the 10
# CPUs requested above (-c10) when running outside SLURM. The previous
# hard-coded value of 20 overrode SLURM's variable and oversubscribed the
# 10 allocated cores.
SLURM_CPUS_PER_TASK=${SLURM_CPUS_PER_TASK:-10}
# EQ-GamD: run the GaMD equilibration only when no restart state exists yet,
# then stage its restart files for the production run in 05_gamd/.
cd 04_eq_gamd/ || exit 1
# Count .state files via a glob instead of parsing `ls` output; nullglob
# makes the pattern expand to nothing when there are no matches.
shopt -s nullglob
state_files=(*.state)
shopt -u nullglob
count_state=${#state_files[@]}
if [[ $count_state -eq 0 ]]
then
	namd2 +p"${SLURM_CPUS_PER_TASK}" +devices 0 +idlepoll md_eq_gamd.namd > md_eq_gamd.out
	cp gamd-eq-wrap.restart.coor ../05_gamd/
	cp gamd-eq-wrap.restart.xsc ../05_gamd/
	cp gamd-eq-wrap.restart.gamd ../05_gamd/
	cp gamd-eq-wrap.colvars.state ../05_gamd/
fi
cd ../
# Production-GamD: decide between a fresh production start (continue from the
# equilibration restart) and a continuation (resume from the last completed
# segment), and rewrite prod.namd accordingly.
cd 05_gamd/ || exit 1
# Count completed trajectories and logs via globs instead of parsing `ls`.
shopt -s nullglob
dcd_files=(*.dcd)
out_files=(prod_*.out)
shopt -u nullglob
count_dcd=${#dcd_files[@]}
count_out=${#out_files[@]}
echo "No se asuste por este aviso"
if [[ $count_dcd -eq 0 ]]
then
	# No trajectory yet: first production segment. Point the inputs at the
	# equilibration restart and flag an initial (non-continuing) run.
	# (`mv` overwrites the target, so no separate `rm` is needed.)
	start_md=1
	sed "s/md_${count_out}/omd/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
	sed "s/md_$((count_out-1))/gamd-eq-wrap/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
	sed "s/set restart_inicial 0/set restart_inicial 1/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
	sed "s/set restart_continuar 1/set restart_continuar 0/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
else
	# Trajectories exist: resynchronize the segment indices in prod.namd with
	# the number of completed .dcd files, then run the next segment.
	sed "s/md_${count_out}/md_${count_dcd}/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
	sed "s/md_$((count_out-1))/md_$((count_dcd-1))/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
	namd2 +p"${SLURM_CPUS_PER_TASK}" +devices 0 +idlepoll prod.namd > prod_${count_dcd}.out
	start_md=$((count_dcd+1))
fi
# Work out how many fixed-length segments are needed to cover the total
# simulation target, then patch the per-segment step count into prod.namd.
# Total target in MD steps. NOTE(review): hard-coded; presumably the overall
# production length rather than the `run` value in prod.namd — confirm.
steps_initial=500000000
# Per-run step count, and timestep converted from fs to ps, as currently
# written in prod.namd.
md_steps=$(awk -v var=run '{if( $1 == var) {print $2}}' prod.namd)
dt=$(awk -v var=timestep '{if( $1 == var) {print $2/1000}}' prod.namd)
# Total simulated time in ns = steps * dt(ps) / 1000.
time_ns=$(echo "scale=3; $steps_initial*$dt/1000" | bc)
# (The original piped the already-computed value through bc again — a no-op.)
echo "Time in ns is :" "$time_ns"
save_each=2 # segment length in ns (e.g. 1 => 1 ns per segment)
n_iterations=$(echo "scale=0; $time_ns/$save_each" | bc)
echo "Number of iterations is: $n_iterations"
# Steps per segment = save_each(ns) * 1000(ps/ns) / dt(ps).
namd_steps=$(echo "scale=0; $save_each*1000/$dt " | bc)
# Rewrite the `run` step count in place (mv overwrites; no rm needed).
sed "s/$md_steps/$namd_steps/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
# Run the remaining production segments one at a time, renaming the
# input/output references inside prod.namd between segments so each run
# restarts from the previous one.
while [ "$start_md" -le "$n_iterations" ]; do
	prev_i=$((start_md-1))
	if [[ $start_md -eq 1 ]]
	then
		# First segment: replace the "omd" placeholder with md_1.
		sed "s/omd/md_${start_md}/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		namd2 +p"${SLURM_CPUS_PER_TASK}" +devices 0 +idlepoll prod.namd > prod_${start_md}.out
	elif [[ $start_md -eq 2 ]]
	then
		# Second segment: advance the output index, switch the restart input
		# from the equilibration files to md_1, and flip prod.namd from
		# "initial" to "continuation" mode.
		sed "s/md_${prev_i}/md_${start_md}/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		sed "s/gamd-eq-wrap/md_${prev_i}/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		sed "s/set restart_inicial 1/set restart_inicial 0/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		sed "s/set restart_continuar 0/set restart_continuar 1/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		namd2 +p"${SLURM_CPUS_PER_TASK}" +devices 0 +idlepoll prod.namd > prod_${start_md}.out
	else
		# Later segments: advance both the output index and the restart index.
		sed "s/md_${prev_i}/md_${start_md}/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		sed "s/md_$((start_md-2))/md_$((start_md-1))/g" prod.namd > tmp.namd && mv tmp.namd prod.namd
		namd2 +p"${SLURM_CPUS_PER_TASK}" +devices 0 +idlepoll prod.namd > prod_${start_md}.out
	fi
	start_md=$((start_md+1))
done
cd ../