# Both testing stages depend on the build stage. By using the "needs"
# keyword, the testing stages start as soon as their build job finishes
# (forming a DAG) rather than waiting for the entire preceding stage.
stages:
  - init
  - install-deps
  - build
  - test-unit
  - test-integ
  - clean

##### System Templates #####
# Generic system templates used to construct the final jobs on specific
# systems within their respective <system>.yml file. Currently these are
# LLNL specific, but they can be adjusted or added to as new systems
# become available. A commented sketch of how a system job composes these
# templates follows the template definitions below.
#
# The NNODES, WALL_TIME, QUEUE, and STORAGE_SIZE variables can be altered
# in the Gitlab web interface if/when the defaults need to be changed.
.base-template:
  variables:
    LLNL_SERVICE_USER: unifysrv
  retry:
    max: 1
    when:
      - unknown_failure
      - stuck_or_timeout_failure

.slurm-single-node-template:
  variables:
    LLNL_SLURM_SCHEDULER_PARAMETERS: "-N 1 -p $QUEUE -t $UNIT_WALL_TIME"

.slurm-multi-node-template:
  variables:
    LLNL_SLURM_SCHEDULER_PARAMETERS: "-N $NNODES -p $QUEUE -t $INTEG_WALL_TIME"

.lsf-single-node-template:
  variables:
    LLNL_LSF_SCHEDULER_PARAMETERS: "-nnodes 1 -q $QUEUE -W $UNIT_WALL_TIME"
    SCHEDULER_PARAMETERS: "-nnodes 1 -P $PROJECT_ID -W $UNIT_WALL_TIME"

.lsf-multi-node-template:
  variables:
    LLNL_LSF_SCHEDULER_PARAMETERS: "-nnodes $NNODES $STAGE_STORAGE -q $QUEUE -W $INTEG_WALL_TIME"
    SCHEDULER_PARAMETERS: "-nnodes $NNODES -P $PROJECT_ID -W $INTEG_WALL_TIME"

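# For illustration only: a hypothetical sketch of how a <system>.yml job
# might compose the templates above via `extends`. The job name and all
# variable values here are assumptions, not taken from a real system file.
#
# .example-slurm-integ-job:
#   extends: [.base-template, .slurm-multi-node-template]
#   variables:
#     QUEUE: pbatch          # assumed partition name
#     NNODES: 2              # assumed node count
#     INTEG_WALL_TIME: "30"  # assumed wall-time value
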
##### Job Templates #####
# Only use this template in a pre-build job when subsequent jobs need to be
# cloned and run from a non-default location. The WORKING_DIR environment
# variable needs to be defined in the job variables (a commented sketch
# follows the template).
#
# The before_script section here overrides the default before_script for
# jobs using this template.
.init-template:
  stage: init
  before_script:
    - mkdir -pv $WORKING_DIR
    - cd $WORKING_DIR
  script:
    - git clone -b ${CI_COMMIT_BRANCH} --depth=1 ${CI_REPOSITORY_URL} $WORKING_DIR

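# For illustration only: a hypothetical pre-build job using .init-template.
# The job name, tag, and WORKING_DIR value are assumptions; the template
# only requires that WORKING_DIR be defined in the job variables.
#
# example-init:
#   extends: [.base-template, .init-template]
#   tags: [shell]
#   variables:
#     WORKING_DIR: /some/non-default/path/${CI_PIPELINE_ID}  # assumed path
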
# Check that Spack is available, but do not install it if it is missing;
# installing it here could interfere with the user's/service user's
# existing Spack installation.
# Check whether the SPACK_ENV_NAME set in the job already exists. If so,
# this does nothing. If not, create and install the Spack environment for
# subsequent jobs to use. (A commented sketch follows the template.)
.install-deps-template:
  stage: install-deps
  before_script:
    - which spack || ((cd $HOME/spack && git describe) && . $HOME/spack/share/spack/setup-env.sh)
    - module load $COMPILER
  script:
    - spack env list | grep $SPACK_ENV_NAME || (spack env create $SPACK_ENV_NAME .spack-env/${SPACK_ENV_NAME}/spack.yaml && spack env activate $SPACK_ENV_NAME && spack install --fail-fast)
  needs: []

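# For illustration only: a hypothetical per-system install-deps job. The
# COMPILER module and SPACK_ENV_NAME values are assumptions; the template
# only expects both variables to be set.
#
# example-install-deps:
#   extends: [.base-template, .install-deps-template]
#   variables:
#     COMPILER: gcc/8.3.1         # assumed module name
#     SPACK_ENV_NAME: unifyfs-gcc # assumed environment name
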
# Build script used by each system to build UnifyFS. The CC_COMMAND and
# FC_COMMAND variables are set in the specific job scripts and evaluated
# here in order to customize which compiler is used for each job (a
# commented sketch follows the template).
# An artifact is created to pass on to the testing stages: the test-unit
# stage requires the unifyfs-build/ files and the test-integ stage
# requires the unifyfs-install/ files.
.build-template:
  stage: build
  script:
    - CC_PATH=$($CC_COMMAND)
    - FC_PATH=$($FC_COMMAND)
    - GOTCHA_INSTALL=$(spack location -i gotcha)
    - SPATH_INSTALL=$(spack location -i spath)
    - ./autogen.sh
    - mkdir -p unifyfs-build unifyfs-install && cd unifyfs-build
    - ../configure CC=$CC_PATH FC=$FC_PATH --prefix=${WORKING_DIR}/unifyfs-install --with-gotcha=$GOTCHA_INSTALL --with-spath=$SPATH_INSTALL --enable-fortran --disable-silent-rules
    - make V=1
    - make V=1 install
  artifacts:
    name: "${CI_JOB_NAME}-${CI_PIPELINE_ID}"
    untracked: true
    expire_in: 6 hours
    paths:
      - unifyfs-build/
      - unifyfs-install/

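# For illustration only: because the build script runs CC_PATH=$($CC_COMMAND)
# and FC_PATH=$($FC_COMMAND), a system job can set CC_COMMAND/FC_COMMAND to
# any command that prints a compiler path. All values below are assumptions.
#
# example-build:
#   extends: [.base-template, .slurm-single-node-template, .build-template]
#   variables:
#     COMPILER: gcc/8.3.1      # assumed module name
#     CC_COMMAND: which gcc
#     FC_COMMAND: which gfortran
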
.unit-test-template:
  stage: test-unit
  script:
    - cd unifyfs-build/t && $JOB_LAUNCH_COMMAND make check
  after_script:
    - rm -rf /tmp/unify* /tmp/tmp.* /tmp/na_sm || true

# Run the integration test suite with the options provided by the specific
# job. Variables used in the integration test suite can be adjusted in the
# Gitlab web interface; see our testing documentation for full details.
# If the job was successful, clean up the build directory in the
# after_script. (A commented sketch follows the template.)
.integ-test-template:
  stage: test-integ
  script:
    - cd t/ci && unbuffer prove -v RUN_CI_TESTS.sh $CI_TEST_OPTIONS
  after_script:
    - >
      echo "CI_JOB_STATUS: $CI_JOB_STATUS";
      if [[ $CI_JOB_STATUS == 'success' ]]; then
        echo "Cleaning CI_BUILDS_DIR: $CI_BUILDS_DIR"
        rm -rf $CI_BUILDS_DIR
      elif [[ $CI_JOB_STATUS == 'failed' ]]; then
        echo "Logs for debugging available in:"
        echo "CI_BUILDS_DIR: $CI_BUILDS_DIR"
      else
        echo "Job status unknown"
      fi

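# For illustration only: a hypothetical multi-node integration-test job.
# CI_TEST_OPTIONS is passed straight through to `prove` above; all names
# and values here are assumptions.
#
# example-test-integ:
#   extends: [.base-template, .slurm-multi-node-template, .integ-test-template]
#   variables:
#     NNODES: 4      # assumed node count
#     QUEUE: pbatch  # assumed partition name
#   needs: ["example-build"]
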
##### Jobs #####
# Default before_script for each job. Use an alternative working directory
# (if necessary), module load the compiler associated with this job, and
# activate the Spack environment associated with this job.
#
# For jobs running in a non-default location, change directories to the
# WORKING_DIR directory. Otherwise, set WORKING_DIR to CI_PROJECT_DIR for
# the build step.
# TODO: Look into CUSTOM_CI_BUILDS_DIR as an alternative (must work on ascent).
#
# The COMPILER variable (evaluated here) is set in the specific job scripts.
#
# Activate the Spack environment created in the install-deps job.
before_script:
  - if [[ -d $WORKING_DIR ]]; then cd ${WORKING_DIR}; else export WORKING_DIR=${CI_PROJECT_DIR}; fi
  - module load $COMPILER
  - spack env activate $SPACK_ENV_NAME && spack env status && spack find

# Scheduled job to fully clean the runner directory to avoid space issues
# that may accumulate over time.
#
# Running with the shell tag here, but this may be too vague, as the job
# may attempt to use a runner on a system the service user doesn't have
# permission to access. If so, move this job to a specific system.
# TODO: this fails when the rm tries to run on the path this job is running
# from. Rewrite to exclude the current runner (see the commented sketch
# after this job).
.full_clean:
  stage: clean
  extends: .base-template
  variables:
    GIT_STRATEGY: none
  rules:
    - if: $FULL_CLEAN == "YES" && $CI_PIPELINE_SOURCE == "schedule"
  tags:
    - shell
  before_script: []
  script: rm -rf ${WORKSPACE}/.jacamar-ci/*
  needs: []

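# For illustration only: one possible (untested) approach to the TODO above
# is to delete everything under the runner directory except the path this
# job is running from. This sketch assumes CI_BUILDS_DIR lives directly
# under ${WORKSPACE}/.jacamar-ci:
#
#   script: >
#     find ${WORKSPACE}/.jacamar-ci -mindepth 1 -maxdepth 1
#     ! -path "${CI_BUILDS_DIR}*" -exec rm -rf {} +
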
# System-specific jobs
include:
  - local: .gitlab/ascent.yml
  - local: .gitlab/lassen.yml
  - local: .gitlab/quartz.yml