Showing 2 changed files with 76 additions and 0 deletions.
@@ -0,0 +1,11 @@
# Distributed under the MIT License.
# See LICENSE.txt for details.

Machine:
  Name: Urania
  Description: |
    Supercomputer at the Max Planck Computing and Data Facility.
  DefaultProcsPerNode: 72
  DefaultQueue: "p.urania"
  DefaultTimeLimit: "1-00:00:00"
  LaunchCommandSingleNode: ["srun", "-n", "1"]
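For orientation, a minimal sketch of the single-node launch these defaults describe, assuming a hypothetical executable and input file (neither name is part of this commit):

# Hypothetical single-node launch assembled from LaunchCommandSingleNode above
srun -n 1 MySpectreExecutable --input-file MyInput.yaml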
@@ -0,0 +1,65 @@
{% extends "SubmitTemplateBase.sh" %}

# Distributed under the MIT License.
# See LICENSE.txt for details.

# Supercomputer at the Max Planck Computing and Data Facility.
# More information:
# https://docs.mpcdf.mpg.de/doc/computing/clusters/systems/Gravitational_Physics_ACR.html

{% block head %}
# Standard output and error:
#SBATCH -o ./tjob.out.%j
#SBATCH -e ./tjob.err.%j
# Initial working directory:
#SBATCH -D ./
# Number of nodes (one MPI task per node):
#SBATCH --nodes {{ num_nodes | default(1) }}
#SBATCH --ntasks-per-node=1
#SBATCH --ntasks-per-core=1
#SBATCH --cpus-per-task=72
# Memory [MB] required per node:
#SBATCH --mem=240000
# Wall clock limit:
#SBATCH -t {{ time_limit | default("1-00:00:00") }}
#SBATCH -p {{ queue | default("p.debug") }}
{% endblock %}

{% block charm_ppn %}
# Reserve two cores per node for communication threads
CHARM_PPN=$(expr ${SLURM_CPUS_PER_TASK} - 2)
{% endblock %}

{% block list_modules %}
# Load compiler and MPI modules with explicit version specifications,
# consistent with the versions used to build the executable.
module purge
module load gcc/11
module load impi/2021.7
module load boost/1.79
module load gsl/1.16
module load cmake/3.26
module load hdf5-serial/1.12.2
module load anaconda/3/2021.11

# Load Spack environment
source /u/guilara/repos/spack/share/spack/setup-env.sh
spack env activate env3_spectre_impi

# Define Charm paths
export CHARM_ROOT=/u/guilara/charm_impi_2/mpi-linux-x86_64-smp
export PATH=$PATH:/u/guilara/charm_impi_2/mpi-linux-x86_64-smp/bin

# Spectre directories
export SPECTRE_HOME=/u/guilara/repos/spectre
export SPECTRE_BUILD_DIR=${SPECTRE_HOME}/build_develop
# Load python environment
source $SPECTRE_HOME/env/bin/activate
{% endblock %}

{% block run_command %}
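# Worker threads are pinned to cores 0-34 and 36-70 (70 cores, matching
# CHARM_PPN above); cores 35 and 71 are reserved for the communication
# threads via +commap.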
srun -n ${SLURM_NTASKS} ${SPECTRE_EXECUTABLE} \
  --input-file ${SPECTRE_INPUT_FILE} \
  ++ppn ${CHARM_PPN} +pemap 0-34,36-70 +commap 35,71 \
  ${SPECTRE_CHECKPOINT:+ +restart "${SPECTRE_CHECKPOINT}"} > tjob.out
{% endblock %}
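For illustration, a sketch of the key lines after the template defaults are substituted (num_nodes = 1, queue "p.debug", 72 CPUs per task, hence CHARM_PPN = 72 - 2 = 70); these are just the defaults above filled in, not output from an actual submission:

#SBATCH --nodes 1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=72
#SBATCH -t 1-00:00:00
#SBATCH -p p.debug

CHARM_PPN=70   # 72 cores per task minus 2 communication cores
srun -n ${SLURM_NTASKS} ${SPECTRE_EXECUTABLE} \
  --input-file ${SPECTRE_INPUT_FILE} \
  ++ppn 70 +pemap 0-34,36-70 +commap 35,71 > tjob.out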