Updated share folder to current SWMF version share
To simplify the installation and build processes, updated the share folder to the
current SWMF version. This handles C code and OpenMP by default. Updated Config.pl,
schemeSetup.sh, and the base Makefile to handle the changes (also had to update
ModSceRun since one of its dependencies changed names in the share folder).
Miles A. Engel authored and drsteve committed Feb 6, 2019
1 parent d357642 commit cc339e6
Showing 296 changed files with 38,271 additions and 9,423 deletions.
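
A hedged sketch of the resulting install flow (module names and the Makefile.conf patching come from schemeSetup.sh below; site paths will differ):

  source schemeSetup.sh   # loads the gsl/netcdf/openmpi/gcc modules, runs ./Config.pl -install -scheme, and patches Makefile.conf for C and OpenMP
  make                    # the updated share/ build rules handle C sources and OpenMP by default
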
50 changes: 4 additions & 46 deletions Config.pl
@@ -23,6 +23,7 @@
if(/^-scheme/) {$SchemeUser = 'True';
push(@Arguments,"-compiler=gfortran");
push(@Arguments,"-mpi=openmpi");
push(@Arguments,"-openmp");
#system("bash","-c","source schemeSetup.sh");
next;
}
@@ -53,7 +54,7 @@
# Handle RAM-SCB installation arguments.
foreach(@Arguments){
if(/^-scheme/) {$libs{'gsl'} = "/packages2/.packages2/x86_64-pc-linux-gnu-rhel6/gsl/2.3";
$libs{'netcdf'} = "/packages2/.packages2/x86_64-pc-linux-gnu-rhel6/netcdf/4.4.4";
$libs{'netcdf'} = "/projects/lanl/Carrington/netcdf";
$DoSetLibs = 1;
next;
};
@@ -160,59 +161,16 @@ sub set_libs

}
#=============================================================================
#sub get_netcdf
# # Grab the required NetCDF library from the given location.
# # If ncdf_path is not given as an argument, the proper
# # ENV variable is used.
#{
# # Check for NetCDF installation path.
# $ncdf_path = $ENV{NETCDFDIR} unless $ncdf_path;
# die "ERROR: Installation path for NetCDF not set! See Help."
# unless $ncdf_path;
#
# # Trim trailing '/' from path as necessary.
# if($ncdf_path=~/(.*)\/$/){$ncdf_path=$1};
# print "Using NetCDF install path: $ncdf_path\n";
#
# # Create directory for NetCDF objects.
# unless(-d 'srcNetcdf'){mkdir('srcNetcdf',0751) or die $!};
# `cd srcNetcdf; cp $ncdf_path/lib/libnetcdf.a .; ar -x libnetcdf.a`;
# `echo $ncdf_path > srcNetcdf/netcdf_install_path.txt`;
#}
##=============================================================================
#sub get_pspline
# # Grab the required NetCDF library from the given location.
# # If ncdf_path is not given as an argument, the proper
# # ENV variable is used.
#{
# # Check for NetCDF installation path.
# $pspl_path = $ENV{PSPLINEDIR} unless $pspl_path;
# die "ERROR: Installation path for Pspline not set! See Help."
# unless $pspl_path;
#
# # Trim trailing '/' from path as necessary.
# if($pspl_path=~/(.*)\/$/){$pspl_path=$1};
# print "Using Pspline install path: $pspl_path\n";
#
# # Create directory for Pspline objects.
# unless(-d 'srcPspline'){mkdir('srcPspline',0751) or die $!};
# `cd srcPspline; cp $pspl_path/lib/libpspline.a .; ar -x libpspline.a`;
# `cd srcPspline; cp $pspl_path/lib/libezcdf.a .; ar -x libezcdf.a`;
# `cd srcPspline; rm -f r8pspltsub.o pspltsub.o`; # Unneeded objects.
# `cd srcPspline; cp $pspl_path/mod/*.mod .`;
# `echo $pspl_path > srcPspline/pspline_install_path.txt`;
#}
#=============================================================================
sub print_help
# Print RAM-SCB help.
{
print "
Additional options for RAM-SCB/Config.pl:
-ncdf=(path) Set installation path for NetCDF libraries.
-pspline=(path) Set installation path for Pspline libraries.
-gsl=(path) Set installation path for GSL libraries.
These MUST be set if environment variables PSPLINEDIR and
These MUST be set if environment variables GSLDIR and
NETCDFDIR are not set. If both flag and env variable are
set, Config.pl will use the flag value. This allows for
multiple installations of the libraries.
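
For reference, a hedged example of supplying these library locations at install time (the /opt prefixes are illustrative and not part of this commit):

  ./Config.pl -install -gsl=/opt/gsl -ncdf=/opt/netcdf
  # or equivalently through environment variables, which an explicit flag overrides:
  export GSLDIR=/opt/gsl
  export NETCDFDIR=/opt/netcdf
  ./Config.pl -install
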
16 changes: 0 additions & 16 deletions Makefile
@@ -316,22 +316,6 @@ test3_check:
${TESTDIR3}/output_scb/hI_output_d20130317_t001500.dat \
${IMDIR}/output/test3/hI.ref \
>> test3.diff
ncdump -v "FluxH+","B_xyz" \
${TESTDIR3}/output_ram/sat1_d20130317_t000000.nc \
| sed -e '1,/data:/d' > \
${TESTDIR3}/output_ram/sat1.test
${SCRIPTDIR}/DiffNum.pl -b -a=1e-9 \
${TESTDIR3}/output_ram/sat1.test \
${IMDIR}/output/test3/sat1.ref \
>> test3.diff
ncdump -v "FluxH+","B_xyz" \
${TESTDIR3}/output_ram/sat2_d20130317_t000000.nc \
| sed -e '1,/data:/d' > \
${TESTDIR3}/output_ram/sat2.test
${SCRIPTDIR}/DiffNum.pl -b -a=1e-9 \
${TESTDIR3}/output_ram/sat2.test \
${IMDIR}/output/test3/sat2.ref \
>> test3.diff
@echo "Test Successful!"

#TEST 4----------------------------------
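
For reference, each deleted satellite check followed the same pattern; a roughly equivalent manual comparison (run from the test directory, file names taken from the removed lines) would be:

  # dump the checked variables, strip the ncdump header, and diff numerically against the reference
  ncdump -v "FluxH+","B_xyz" output_ram/sat1_d20130317_t000000.nc | sed -e '1,/data:/d' > sat1.test
  ${SCRIPTDIR}/DiffNum.pl -b -a=1e-9 sat1.test output/test3/sat1.ref >> test3.diff   # SCRIPTDIR is typically share/Scripts
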
4 changes: 4 additions & 0 deletions include/.gitignore
@@ -0,0 +1,4 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore
14 changes: 3 additions & 11 deletions schemeSetup.sh
@@ -1,15 +1,7 @@
#!/bin/bash

module2 load gsl/2.3
module2 load netcdf
module2 load netcdf/fortran-4.4.4
module2 openmpi
module2 load openmpi
module2 load gcc/8.1.0
export LD_LIBRARY_PATH=/projects/lanl/Carrington/netcdf/lib:${LD_LIBRARY_PATH}
./Config.pl -install -scheme
sed -i '66s/.*/.SUFFIXES: .c .f90 .F90 .f .for .ftn .o/' Makefile.conf
sed -i '11iCOMPILE.GCC = ${CUSTOMPATH_F}gcc -std=gnu89' Makefile.conf
sed -i '12s/.*/COMPILE.f77 = ${CUSTOMPATH_F}gfortran -std=legacy/' Makefile.conf
sed -i '13s/.*/COMPILE.f90 = ${CUSTOMPATH_F}gfortran -std=legacy/' Makefile.conf
sed -i '69i.c.o:' Makefile.conf
sed -i '70i\\t${COMPILE.f90} ${Cflag3} $<' Makefile.conf
sed -i '14s/.*/LINK.f90 = ${CUSTOMPATH_MPI}mpif90 -fopenmp/' Makefile.conf
sed -i '35s/.*/OPT3 = -O3 -fopenmp/' Makefile.conf
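
For illustration, assuming a stock SWMF Makefile.conf layout, the sed edits above leave a fragment roughly like this (the exact line positions are an assumption):

  COMPILE.GCC = ${CUSTOMPATH_F}gcc -std=gnu89
  COMPILE.f77 = ${CUSTOMPATH_F}gfortran -std=legacy
  COMPILE.f90 = ${CUSTOMPATH_F}gfortran -std=legacy
  LINK.f90 = ${CUSTOMPATH_MPI}mpif90 -fopenmp
  OPT3 = -O3 -fopenmp
  .SUFFIXES: .c .f90 .F90 .f .for .ftn .o
  .c.o:
  	${COMPILE.f90} ${Cflag3} $<
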
2 changes: 2 additions & 0 deletions share/CVS/Entries
@@ -0,0 +1,2 @@
/Makefile/1.22/Tue Jan 20 16:25:42 2015//D2018.06.15.23.00.00
D
6 changes: 6 additions & 0 deletions share/CVS/Entries.Log
@@ -0,0 +1,6 @@
A D/JobScripts////
A D/Library////
A D/Prologs////
A D/Scripts////
A D/build////
A D/include////
1 change: 1 addition & 0 deletions share/CVS/Repository
@@ -0,0 +1 @@
SWMF_NEW/share
1 change: 1 addition & 0 deletions share/CVS/Root
@@ -0,0 +1 @@
user@computer:/CVS
1 change: 1 addition & 0 deletions share/CVS/Tag
@@ -0,0 +1 @@
D2018.06.15.23.00.00
30 changes: 30 additions & 0 deletions share/JobScripts/CVS/Entries
@@ -0,0 +1,30 @@
/job.bluevista/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.cetuslac/1.1/Tue Jul 7 17:54:34 2015//D2018.06.15.23.00.00
/job.cfe/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.cheyenne/1.2/Fri Feb 9 21:29:36 2018//D2018.06.15.23.00.00
/job.discover/1.1/Mon Jun 7 00:20:40 2010//D2018.06.15.23.00.00
/job.flux-login/1.3/Tue Oct 8 17:34:42 2013//D2018.06.15.23.00.00
/job.grendel/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.h2ologin/1.8/Tue May 23 15:44:34 2017//D2018.06.15.23.00.00
/job.halem/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.hera/1.2/Fri Sep 30 15:54:57 2011//D2018.06.15.23.00.00
/job.jaguarpf-ext/1.1/Tue Jun 19 15:51:38 2012//D2018.06.15.23.00.00
/job.miralac/1.1/Tue Jul 7 17:54:34 2015//D2018.06.15.23.00.00
/job.modi/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.nyx-login-amd/1.6/Tue Oct 8 17:34:42 2013//D2018.06.15.23.00.00
/job.nyx-login-intel/1.6/Tue Oct 8 17:34:42 2013//D2018.06.15.23.00.00
/job.palm/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.pfe/1.22/Fri Dec 30 23:40:21 2016//D2018.06.15.23.00.00
/job.postproc.h2ologin/1.2/Wed Nov 16 21:17:25 2016//D2018.06.15.23.00.00
/job.schirra/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.spbuild/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.sysx/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.turing/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.ubgl/1.6/Thu May 5 21:22:29 2011//D2018.06.15.23.00.00
/job.xena/1.1/Tue Apr 7 16:06:48 2009//D2018.06.15.23.00.00
/job.yslogin/1.5/Thu Nov 12 13:32:22 2015//D2018.06.15.23.00.00
/kill_swmf.pl/1.2/Sat Oct 12 04:01:01 2013//D2018.06.15.23.00.00
/qsub.pfe.pl/1.6/Tue Nov 29 18:44:57 2016//D2018.06.15.23.00.00
/watch.pfe.pl/1.2/Thu Mar 16 20:41:17 2017//D2018.06.15.23.00.00
/work.pl/1.1/Thu Oct 2 14:20:34 2014//D2018.06.15.23.00.00
D
1 change: 1 addition & 0 deletions share/JobScripts/CVS/Repository
@@ -0,0 +1 @@
SWMF_NEW/share/JobScripts
1 change: 1 addition & 0 deletions share/JobScripts/CVS/Root
@@ -0,0 +1 @@
user@computer:/CVS
1 change: 1 addition & 0 deletions share/JobScripts/CVS/Tag
@@ -0,0 +1 @@
D2018.06.15.23.00.00
22 changes: 22 additions & 0 deletions share/JobScripts/job.bluevista
@@ -0,0 +1,22 @@
#!/bin/csh
#
# LSF batch script to run an SPMD/MPI code
#
#BSUB -a 'poe' # select the mpich-gm elim
#BSUB -P 54042108
#BSUB -x # exclusive use of node (not_shared)
#BSUB -n 4 # number of total tasks
#BSUB -R "span[ptile=1]" # run 1 task per node
#BSUB -o mpilsf.out # output filename (%J to add job id)
#BSUB -e mpilsf.err # error filename
#BSUB -J GITM.test # job name
#BSUB -q premium # queue 30 min
#BSUB -W 04:00

# Run this executable in SPMD mode:
# Fortran example
# mpif90 -Mextend -o mpif mpi.f
mpirun.lsf ./GITM.exe
# ./GITM.exe

#rm mpif
6 changes: 6 additions & 0 deletions share/JobScripts/job.cetuslac
@@ -0,0 +1,6 @@
# This is how to run on MIRA/CETUS
# The -n is the number of nodes (one node has 16 cores).
# The --mode c16 is the number of MPI processes per node.
# The -t 10 is the maximum walltime in minutes.
#
# qsub -n 10 --mode c16 -t 10 ./SWMF.exe
57 changes: 57 additions & 0 deletions share/JobScripts/job.cfe
@@ -0,0 +1,57 @@
#!/bin/csh

# Job script for Columbia machine at NASA Ames

# This file must be customized and run from the run directory! For example
#
# cd run
# qsub job.cfe
#
# To avoid having too many output files run
#
# PostProc.pl -g -r=360 >& PostProc.log &
#
# on the head node (post-processes every 10 minutes).
#
#PBS -N SWMF

# ncpus should be a multiple of 4, mpiprocs=ncpus, and ompthreads=1.
# set the number of CPU-s by changing select: nProc = select*ncpus
#PBS -l select=8:ncpus=16:mpiprocs=16:ompthreads=1
#PBS -l walltime=8:00:00
#PBS -j oe
#PBS -q normal

# Specify group if necessary
### PBS -W group_list=...

# cd into the run directory
cd $PBS_O_WORKDIR

# These settings were found useful for large runs
setenv MPI_MSGS_PER_HOST 100000
setenv MPI_MSGS_PER_PROC 100000
setenv MPI_MSGS_MAX 100000

# run SWMF (the number of processors is already specified above)
# the date/time stamp for runlog is only necessary for automated resubmission
mpiexec SWMF.exe > runlog_`date +%y%m%d%H%M`

exit

# To use automated resubmission remove the 'exit' command above
# Use the #CPUTIMEMAX and #CHECKSTOP commands in PARAM.in
# so the code stops before the wall clock time is exceeded.

# Do not continue unless the job finished successfully
if(! -f SWMF.SUCCESS) exit

# Link latest restart files
./Restart.pl

# Provide a PARAM.in.restart file if you wish and uncomment these lines:
# if(! -f PARAM.in.start) cp PARAM.in PARAM.in.start
# if(-f PARAM.in.restart) cp PARAM.in.restart PARAM.in

# Check final time/iteration and resubmit if not done (modify as needed!)
if(! -d RESTART_t008.00h) qsub job.cfe
35 changes: 35 additions & 0 deletions share/JobScripts/job.cheyenne
@@ -0,0 +1,35 @@
#!/bin/tcsh

# Information:
# https://www2.cisl.ucar.edu/resources/computational-systems/cheyenne

# Name of run
#PBS -N SWMF

# Account to be charged
#PBS -A UMIC0006

# Up to 12 hours (6 hours for share queue)
#PBS -l walltime=12:00:00

# Queues:
# premium # 150% charge
# regular # 100% charge (also interactive)
# economy # 70% charge
# share # 100% charge up to 18 cores for debugging/serial jobs (postprocess)
#PBS -q regular
#PBS -j oe
#PBS -m abe

# Send email notifications:
### PBS -M [email protected]

# There are 36 cores / node
#PBS -l select=32:ncpus=36:mpiprocs=36

### Set TMPDIR as recommended
# setenv TMPDIR /glade/scratch/username/temp
# mkdir -p $TMPDIR

### Run the executable
mpiexec_mpt dplace -s 1 ./SWMF.exe > runlog_`date +%y%m%d%H%M`
54 changes: 54 additions & 0 deletions share/JobScripts/job.discover
@@ -0,0 +1,54 @@
#!/usr/local/bin/csh

# Job script for Discover.
#
# This file must be customized and run from the run directory! For example
#
# cd run
# qsub job.discover
#
# To avoid having too many output files run
#
# PostProc.pl -g -r=360 >& PostProc.log &
#
# on the head node (post-processes every 10 minutes).
#
#PBS -S /bin/csh
#PBS -N SWMF

# set the number of CPU-s by changing select: nProc = select*mpiprocs
# ncpus should be 8 because there are 8 cores on each node.
#PBS -l select=16:ncpus=8
#PBS -l walltime=12:00:00
#PBS -j oe
#PBS -m e

# Specify group if necessary
### PBS -W group_list=...

# cd into the run directory
cd $PBS_O_WORKDIR


# run SWMF (the number of processors is already specified above)
# the date/time stamp for runlog is only necessary for automated resubmission
mpirun -np 128 ./SWMF.exe > runlog_`date +%y%m%d%H%M`

exit

# To use automated resubmission remove the 'exit' command above
# Use the #CPUTIMEMAX and #CHECKSTOP commands in PARAM.in
# so the code stops before the wall clock time is exceeded.

# Do not continue unless the job finished successfully
if(! -f SWMF.SUCCESS) exit

# Link latest restart files
./Restart.pl

# Provide a PARAM.in.restart file if you wish and uncomment these lines:
# if(! -f PARAM.in.start) cp PARAM.in PARAM.in.start
# if(-f PARAM.in.restart) cp PARAM.in.restart PARAM.in

# Check final time/iteration and resubmit if not done (modify as needed!)
if(! -d RESTART_t008.00h) qsub job.discover
39 changes: 39 additions & 0 deletions share/JobScripts/job.flux-login
@@ -0,0 +1,39 @@
#!/bin/sh
#PBS -V
#PBS -N SWMF
#PBS -l pmem=500mb
#PBS -l walltime=2:00:00

# Use showq or resinfo.pl to see the availability of resources.
# Use diagnose -a crash_flux to see number of cores, users, limits.
# Note: resinfo.pl does not work for flux queues!
#
# showq -w acct=ridley ; resinfo.pl -n ridley
# showq -w acct=crash_flux ; diagnose -a crash_flux
# showq -w acct=liemohn_flux ; diagnose -a liemohn_flux
# showq -w acct=engin_flux ; diagnose -a engin_flux

# Put #PBS in front of one of the following queues that you have access to
#PBS -A ridley -q cac -l qos=cac # 192 cores, 12 cores/node
# -A liemohn_flux -q flux -l qos=flux # 120 cores, 12-16 cores/node
# -A crash_flux -q flux -l qos=flux # 360 cores, 12-16 cores/node
# -A engin_flux -q flux -l qos=flux # 100 cores, 12-16 cores/node, free

# Adjust the number of procs (multiples of 8 or 12 depending on queue)
# or set number of nodes with correct value of process-per-node (ppn).
#PBS -l procs=96
# -l nodes=8:ppn=12
# -l nodes=12:ppn=8

# cd to job submission directory
cd $PBS_O_WORKDIR

# get queue status
qstat -u $USER -n

# look at what else is running
resinfo.pl -n ridley

# run job
mpirun ./SWMF.exe > runlog_`date +%y%m%d%H%M`
