forked from infrawatch/service-telemetry-operator
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Jenkinsfile
149 lines (143 loc) · 5.04 KB
/
Jenkinsfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
#!/usr/bin/env groovy
// CI pipeline setup for service-telemetry-operator: computes a unique, short,
// lowercase namespace per build and prepares the ServiceTelemetry custom
// resource that the deploy stage applies.

// can't just use BUILD_TAG because qdr operator limits name of resources to 60 chars
def namespace = env.JOB_BASE_NAME + '-' + env.BUILD_NUMBER
// Kubernetes namespace names must be lowercase RFC 1123 labels.
namespace = namespace.toLowerCase()

// Set to true by any stage that fails so the Cleanup stage can still run and
// then mark the overall build as FAILURE.
def stages_failed = false

// ServiceTelemetry CR deployed in the 'Deploy STF Object' stage. All storage
// strategies are ephemeral because the namespace is deleted after the run.
def stf_resource = """
apiVersion: infra.watch/v1beta1
kind: ServiceTelemetry
metadata:
  name: default
  namespace: ${namespace}
spec:
  alerting:
    alertmanager:
      storage:
        strategy: ephemeral
      receivers:
        snmpTraps:
          enabled: true
  backends:
    events:
      elasticsearch:
        enabled: true
        storage:
          strategy: ephemeral
    metrics:
      prometheus:
        enabled: true
        storage:
          strategy: ephemeral
  transports:
    qdr:
      enabled: true
      deploymentSize: 1
      web:
        enabled: false
  elasticsearch_manifest: |
    apiVersion: elasticsearch.k8s.elastic.co/v1beta1
    kind: Elasticsearch
    metadata:
      name: elasticsearch
      namespace: ${namespace}
    spec:
      version: 7.10.2
      http:
        tls:
          certificate:
            secretName: 'elasticsearch-es-cert'
      nodeSets:
      - config:
          node.data: true
          node.ingest: true
          node.master: true
          node.store.allow_mmap: true
        count: 1
        name: default
        podTemplate:
          metadata:
            labels:
              tuned.openshift.io/elasticsearch: elasticsearch
          spec:
            containers:
            - name: elasticsearch
              resources:
                limits:
                  cpu: '2'
                  memory: 4Gi
                requests:
                  cpu: '1'
                  memory: 2Gi
            volumes:
            - emptyDir: {}
              name: elasticsearch-data
"""
// Branch the ansible tooling operates on; resolved from the checked-out
// revision in the 'Clone Upstream' stage below.
def working_branch = "master"

node('ocp-agent') {
    container('exec') {
        dir('service-telemetry-operator') {
            stage('Clone Upstream') {
                catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                    checkout scm
                    // Map the detached HEAD back to its remote branch name.
                    working_branch = sh(script: 'git ls-remote --heads origin | grep $(git rev-parse HEAD) | cut -d / -f 3', returnStdout: true).toString().trim()
                    // ansible script needs local branch to exist, not detached HEAD
                    sh "git checkout -b ${working_branch}"
                }
            }
            stage('Create project') {
                // Skip if a previous stage already failed, but remember so the
                // Cleanup stage can mark the build FAILURE at the end.
                if ( currentBuild.result != null ) { stages_failed = true; return; }
                catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                    openshift.withCluster() {
                        openshift.newProject(namespace)
                    }
                }
            }
            stage('Build STF Containers') {
                if ( currentBuild.result != null ) { stages_failed = true; return; }
                catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                    ansiColor('xterm') {
                        ansiblePlaybook(
                            // use the playbook to build the containers but don't run CI
                            playbook: 'build/run-ci.yaml',
                            colorized: true,
                            extraVars: [
                                "namespace": namespace,
                                "__deploy_stf": "false",
                                "__local_build_enabled": "true",
                                "__service_telemetry_snmptraps_enabled": "true",
                                "__service_telemetry_storage_ephemeral_enabled": "true",
                                "working_branch": "${working_branch}"
                            ]
                        )
                    }
                }
            }
            stage('Deploy STF Object') {
                if ( currentBuild.result != null ) { stages_failed = true; return; }
                catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                    openshift.withCluster() {
                        openshift.withProject(namespace) {
                            // Bound the deployment validation so a wedged
                            // rollout can't hang the build indefinitely.
                            timeout(time: 300, unit: 'SECONDS') {
                                openshift.create(stf_resource)
                                sh "OCP_PROJECT=${namespace} ./build/validate_deployment.sh"
                            }
                        }
                    }
                }
            }
            stage('Run Smoketest') {
                if ( currentBuild.result != null ) { stages_failed = true; return; }
                catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
                    sh "OCP_PROJECT=${namespace} ./tests/smoketest/smoketest.sh"
                }
            }
            stage('Cleanup') {
                // Always delete the per-build project, pass or fail.
                openshift.withCluster() {
                    openshift.selector("project/${namespace}").delete()
                }
                // Single failure check (the original duplicated this statement
                // both inside and outside the withCluster closure).
                if ( stages_failed ) { currentBuild.result = 'FAILURE' }
            }
        }
    }
}