# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This config file is used by Cloud Build and the Cloud Build local runner to
# run a CI pipeline that checks whether proposed changes pass the tests.
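#
# A minimal local invocation might look like the following (a sketch assuming
# the cloud-build-local tool is installed; flags may vary by version):
#   cloud-build-local --config=cloudbuild.yaml --dryrun=false .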
steps:
- name: 'docker/compose'
id: 'Launch HAPI Source Server'
  args: ['-f', './docker/hapi-compose.yml', '-p', 'hapi-compose', 'up', '--force-recreate', '--remove-orphans', '-d']
- name: 'docker/compose'
id: 'Launch Sink Server Search'
  args: ['-f', './docker/sink-compose.yml', '-p', 'sink-server-search', 'up', '--force-recreate', '--remove-orphans', '-d' ]
env:
- SINK_SERVER_NAME=sink-server-search
- SINK_SERVER_PORT=9001
waitFor: ['-']
- name: 'docker/compose'
id: 'Launch Sink Server JDBC'
  args: ['-f', './docker/sink-compose.yml', '-p', 'sink-server-jdbc', 'up', '--force-recreate', '--remove-orphans', '-d' ]
env:
- SINK_SERVER_NAME=sink-server-jdbc
- SINK_SERVER_PORT=9002
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
id: 'Wait for the initial Servers Start'
entrypoint: /bin/bash
args:
- -c
- e2e-tests/wait_for_start.sh --HAPI_SERVER_URLS=http://hapi-server:8080,http://sink-server-search:8080,http://sink-server-jdbc:8080
waitFor: ['Launch HAPI Source Server', 'Launch Sink Server Search', 'Launch Sink Server JDBC']
# Note: license checking will fail on Cloud Build because of .git dependencies,
# and there is not much point in updating license headers at this stage anyway.
# Ditto for Spotless (it fails because of an NPM dependency).
- name: 'maven:3.8.5-openjdk-17'
id: 'Compile Bunsen and Pipeline'
entrypoint: /bin/bash
args:
- -c
- mvn --no-transfer-progress -e -T 1C install -Dlicense.skip=true -Dspotless.apply.skip=true
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
id: 'Build Uploader Image'
entrypoint: /bin/bash
args:
- -c
- cd synthea-hiv/uploader; docker build -t ${_REPOSITORY}/synthea-uploader:${_TAG} .
waitFor: ['-']
- name: '${_REPOSITORY}/synthea-uploader:${_TAG}'
id: 'Run Uploader Unit Tests'
entrypoint: /bin/bash
args:
- -c
- cd /uploader; python -m unittest discover -p '*_test.py'
waitFor: ['Build Uploader Image']
- name: 'gcr.io/cloud-builders/docker'
id: 'Build E2E Image'
entrypoint: /bin/bash
args:
- -c
- cd e2e-tests;
docker build -t ${_REPOSITORY}/e2e-tests:${_TAG} .;
docker build -t ${_REPOSITORY}/e2e-tests/controller-spark:${_TAG} ./controller-spark/.;
waitFor: ['-']
- name: '${_REPOSITORY}/synthea-uploader:${_TAG}'
id: 'Upload to HAPI'
env:
- INPUT_DIR=/workspace/synthea-hiv/sample_data
- SINK_TYPE=HAPI
- FHIR_ENDPOINT=http://hapi-server:8080/fhir
- CORES=--cores 8
waitFor: ['Run Uploader Unit Tests', 'Wait for the initial Servers Start']
- name: 'gcr.io/cloud-builders/docker'
id: 'Build Pipeline Images'
entrypoint: /bin/bash
args:
- -c
- cd pipelines/batch;
docker build -t ${_REPOSITORY}/batch-pipeline:${_TAG} .;
cd ../streaming-binlog;
docker build -t ${_REPOSITORY}/streaming-pipeline:${_TAG} .;
waitFor: ['Compile Bunsen and Pipeline']
- name: '${_REPOSITORY}/batch-pipeline:${_TAG}'
id: 'Run Batch Pipeline in FHIR-search mode with HAPI source'
env:
- FHIR_SERVER_URL=http://hapi-server:8080/fhir
- PARQUET_PATH=/workspace/e2e-tests/FHIR_SEARCH_HAPI
- SINK_PATH=http://sink-server-search:8080/fhir
- SINK_USERNAME=hapi
- SINK_PASSWORD=hapi
waitFor: ['Build Pipeline Images', 'Upload to HAPI']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for FHIR-search mode with HAPI source'
env:
- PARQUET_SUBDIR=FHIR_SEARCH_HAPI
- FHIR_JSON_SUBDIR=FHIR_SEARCH_HAPI_JSON
- DOCKER_NETWORK=--use_docker_network
- SINK_SERVER=http://sink-server-search:8080
waitFor: ['Run Batch Pipeline in FHIR-search mode with HAPI source', 'Build E2E Image']
- name: '${_REPOSITORY}/batch-pipeline:${_TAG}'
id: 'Run Batch Pipeline for JDBC mode with HAPI source'
env:
- JDBC_MODE_ENABLED=true
- JDBC_MODE_HAPI=true
- FHIR_SERVER_URL=http://hapi-server:8080/fhir
- SINK_PATH=http://sink-server-jdbc:8080/fhir
- SINK_USERNAME=hapi
- SINK_PASSWORD=hapi
- FHIR_DATABASE_CONFIG_PATH=/workspace/utils/hapi-postgres-config.json
- PARQUET_PATH=/workspace/e2e-tests/JDBC_HAPI
- JDBC_FETCH_SIZE=1000
waitFor: ['Build Pipeline Images', 'Upload to HAPI']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for JDBC mode with HAPI source'
env:
- PARQUET_SUBDIR=JDBC_HAPI
- FHIR_JSON_SUBDIR=JDBC_HAPI_FHIR_JSON
- DOCKER_NETWORK=--use_docker_network
- SINK_SERVER=http://sink-server-jdbc:8080
waitFor: ['Run Batch Pipeline for JDBC mode with HAPI source', 'Build E2E Image']
- name: '${_REPOSITORY}/batch-pipeline:${_TAG}'
id: 'Run Batch Pipeline for BULK_EXPORT mode with HAPI source'
env:
- FHIR_FETCH_MODE=BULK_EXPORT
- FHIR_SERVER_URL=http://hapi-server:8080/fhir
- PARQUET_PATH=/workspace/e2e-tests/BULK_EXPORT
waitFor: ['Run E2E Test for JDBC mode with HAPI source']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for BULK_EXPORT mode with HAPI source'
env:
- PARQUET_SUBDIR=BULK_EXPORT
- FHIR_JSON_SUBDIR=BULK_EXPORT_FHIR_JSON
- DOCKER_NETWORK=--use_docker_network
waitFor: ['Run Batch Pipeline for BULK_EXPORT mode with HAPI source']
# Reset the FHIR sink servers.
- name: 'docker/compose'
id: 'Turn down FHIR Sink Server Search'
  args: [ '-f', './docker/sink-compose.yml', '-p', 'sink-server-search', 'down', '-v' ]
env:
- SINK_SERVER_NAME=sink-server-search
- SINK_SERVER_PORT=9001
waitFor: ['Run E2E Test for FHIR-search mode with HAPI source']
- name: 'docker/compose'
id: 'Turn down FHIR Sink Server JDBC'
  args: [ '-f', './docker/sink-compose.yml', '-p', 'sink-server-jdbc', 'down', '-v' ]
env:
- SINK_SERVER_NAME=sink-server-jdbc
- SINK_SERVER_PORT=9002
waitFor: ['Run E2E Test for JDBC mode with HAPI source']
# The `views` database is used for creating flat views from ViewDefinitions.
- name: 'postgres'
id: 'Create views database'
entrypoint: psql
env:
- PGPASSWORD=admin
args: [ '-U', 'admin', '-d', 'postgres', '-h', 'hapi-fhir-db', '-p', '5432',
'-c', 'CREATE DATABASE views;']
waitFor: ['Turn down FHIR Sink Server Search']
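# To inspect the newly created database, one could run something like the
# following (a hypothetical check, assuming the hapi-fhir-db port 5432 is
# published on localhost):
#   PGPASSWORD=admin psql -U admin -h localhost -p 5432 -d views -c '\l'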
- name: 'docker/compose'
id: 'Launch HAPI FHIR Sink Server Controller'
  args: [ '-f', './docker/sink-compose.yml', '-p', 'sink-server-controller', 'up', '--force-recreate', '-d' ]
env:
- SINK_SERVER_NAME=sink-server-controller
- SINK_SERVER_PORT=9001
waitFor: ['Create views database']
- name: 'docker/compose'
id: 'Bring up controller and Spark containers'
env:
- PIPELINE_CONFIG=/workspace/docker/config
- DWH_ROOT=/workspace/e2e-tests/controller-spark/dwh
- FHIRDATA_SINKFHIRSERVERURL=http://sink-server-controller:8080/fhir
args: [ '-f', './docker/compose-controller-spark-sql-single.yaml', 'up',
'--force-recreate', '-d' ]
waitFor: ['Launch HAPI FHIR Sink Server Controller']
- name: '${_REPOSITORY}/e2e-tests/controller-spark:${_TAG}'
id: 'Run E2E Test for Dockerized Controller and Spark Thriftserver'
waitFor: ['Bring up controller and Spark containers']
# The controller logs don't appear in the Cloud Build output because we run the
# controller in detached mode. To debug controller failures, we can use something
# like the following (and force the previous step to exit with code 0).
# - name: 'gcr.io/cloud-builders/docker'
# id: 'PRINT CONTROLLER LOGS'
# entrypoint: /bin/bash
# args:
# - -c
# - docker logs pipeline-controller
- name: 'docker/compose'
id: 'Bring down controller and Spark containers'
args: [ '-f', './docker/compose-controller-spark-sql-single.yaml', 'down' ]
waitFor: ['Run E2E Test for Dockerized Controller and Spark Thriftserver']
- name: 'docker/compose'
id: 'Turn down HAPI Source Server'
args: [ '-f', './docker/hapi-compose.yml', 'down' ]
waitFor: ['Bring down controller and Spark containers']
- name: 'docker/compose'
id: 'Turn down FHIR Sink Server Controller for e2e tests'
  args: [ '-f', './docker/sink-compose.yml', '-p', 'sink-server-controller', 'down', '-v' ]
env:
- SINK_SERVER_NAME=sink-server-controller
- SINK_SERVER_PORT=9001
waitFor: ['Turn down HAPI Source Server']
- name: 'docker/compose'
id: 'Launch OpenMRS Server and HAPI FHIR Sink Server for OpenMRS'
  args: [ '-f', './docker/openmrs-compose.yaml', '-f', './docker/sink-compose.yml', '-p', 'openmrs-project', 'up',
          '--force-recreate', '--remove-orphans', '-d' ]
env:
- SINK_SERVER_NAME=sink-server-for-openmrs
- SINK_SERVER_PORT=9002
waitFor: ['Turn down FHIR Sink Server JDBC','Run E2E Test for BULK_EXPORT mode with HAPI source']
- name: 'gcr.io/cloud-builders/docker'
id: 'Wait for Servers Start'
entrypoint: /bin/bash
args:
- -c
- e2e-tests/wait_for_start.sh --HAPI_SERVER_URLS=http://sink-server-for-openmrs:8080 --OPENMRS_SERVER_URLS=http://openmrs:8080
waitFor: ['Launch OpenMRS Server and HAPI FHIR Sink Server for OpenMRS']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Launch Streaming Pipeline'
entrypoint: /bin/bash
args:
- -c
- e2e-tests/wait_for_streaming.sh --FHIR_SERVER_URL=http://openmrs:8080 --SINK_SERVER=http://sink-server-for-openmrs:8080
waitFor: ['Wait for Servers Start']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for STREAMING, using OpenMRS Source'
env:
- PARQUET_SUBDIR=STREAMING
- FHIR_JSON_SUBDIR=STREAMING_FHIR_JSON
- DOCKER_NETWORK=--use_docker_network
- SINK_SERVER=http://sink-server-for-openmrs:8080
- STREAMING_TEST=--streaming
- OPENMRS_TEST=--openmrs
waitFor: ['Launch Streaming Pipeline']
- name: '${_REPOSITORY}/synthea-uploader:${_TAG}'
id: 'Upload to OpenMRS'
env:
- CONVERT=--convert_to_openmrs
- INPUT_DIR=/workspace/synthea-hiv/sample_data
- SINK_TYPE=OpenMRS
- FHIR_ENDPOINT=http://openmrs:8080/openmrs/ws/fhir2/R4
waitFor: ['Run E2E Test for STREAMING, using OpenMRS Source']
- name: '${_REPOSITORY}/batch-pipeline:${_TAG}'
id: 'Run Batch Pipeline FHIR-search mode with OpenMRS source'
env:
- PARQUET_PATH=/workspace/e2e-tests/FHIR_SEARCH_OPENMRS
- SINK_PATH=http://sink-server-for-openmrs:8080/fhir
- SINK_USERNAME=hapi
- SINK_PASSWORD=hapi
waitFor: ['Upload to OpenMRS']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for FHIR-search mode with OpenMRS source'
env:
- PARQUET_SUBDIR=FHIR_SEARCH_OPENMRS
- FHIR_JSON_SUBDIR=FHIR_SEARCH_OPENMRS_JSON
- DOCKER_NETWORK=--use_docker_network
- SINK_SERVER=http://sink-server-for-openmrs:8080
- OPENMRS_TEST=--openmrs
waitFor: ['Run Batch Pipeline FHIR-search mode with OpenMRS source']
- name: '${_REPOSITORY}/batch-pipeline:${_TAG}'
id: 'Run Batch Pipeline for JDBC mode with OpenMRS source'
env:
- JDBC_MODE_ENABLED=true
- PARQUET_PATH=/workspace/e2e-tests/JDBC_OPENMRS
- SINK_PATH=http://sink-server-for-openmrs:8080/fhir
- SINK_USERNAME=hapi
- SINK_PASSWORD=hapi
- FHIR_DATABASE_CONFIG_PATH=/workspace/utils/dbz_event_to_fhir_config.json
waitFor: ['Run E2E Test for FHIR-search mode with OpenMRS source']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Run E2E Test for JDBC mode with OpenMRS source'
env:
- PARQUET_SUBDIR=JDBC_OPENMRS
- FHIR_JSON_SUBDIR=JDBC_OPENMRS_FHIR_JSON
- DOCKER_NETWORK=--use_docker_network
- SINK_SERVER=http://sink-server-for-openmrs:8080
- OPENMRS_TEST=--openmrs
waitFor: ['Run Batch Pipeline for JDBC mode with OpenMRS source']
- name: '${_REPOSITORY}/e2e-tests:${_TAG}'
id: 'Test Indicators'
entrypoint: /bin/bash
args:
- -c
- 'cd dwh; ./validate_indicators.sh'
waitFor: ['Run E2E Test for JDBC mode with OpenMRS source', 'Turn down FHIR Sink Server Controller for e2e tests']
- name: 'docker/compose'
id: 'Turn down Webserver and HAPI Server'
args: ['-f', './docker/openmrs-compose.yaml', '-f', './docker/sink-compose.yml', '-p', 'openmrs-project', 'down']
env:
- SINK_SERVER_NAME=sink-server-for-openmrs
- SINK_SERVER_PORT=9002
waitFor: ['Test Indicators']
substitutions:
  # To use substitutions in your local build, pass the --substitutions flag
  # along with the key=value pairs that you want to substitute.
  # More details: https://cloud.google.com/build/docs/build-debug-locally
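  # For example, a hypothetical local run overriding both values:
  #   cloud-build-local --config=cloudbuild.yaml --dryrun=false \
  #     --substitutions=_TAG=my-tag,_REPOSITORY=my-repo .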
_TAG: local # Cloud Build replaces this with the Commit SHA
_REPOSITORY: fhir-analytics # Cloud Build replaces this with
# us-docker.pkg.dev/${PROJECT_ID}/fhir-analytics
images:
# If run locally, the images are available on your local machine through Docker.
# You can then re-tag the images and push them to your own Docker repository.
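# For example, to re-tag and push one of these images (example.com/my-repo is a
# hypothetical target repository):
#   docker tag fhir-analytics/batch-pipeline:local example.com/my-repo/batch-pipeline:v1
#   docker push example.com/my-repo/batch-pipeline:v1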
- '${_REPOSITORY}/streaming-pipeline:${_TAG}'
- '${_REPOSITORY}/batch-pipeline:${_TAG}'
- '${_REPOSITORY}/e2e-tests:${_TAG}'
- '${_REPOSITORY}/synthea-uploader:${_TAG}'
logsBucket: "gs://cloud-build-gh-logs"
timeout: '2h'
options:
machineType: 'N1_HIGHCPU_32'