# This file describes the config settings available in the workflow controller configmap
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
data:
# instanceID is a label selector to limit the controller's watch to a specific instance. It
# contains an arbitrary value that is carried forward into its pod labels, under the key
# workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This
# enables a controller to receive only the workflow and pod events that it is interested in,
# in order to support multiple controllers in a single cluster, and ultimately allows the
# controller itself to be bundled as part of a higher level application. If omitted, the
# controller watches workflows and pods that *are not* labeled with an instance id.
instanceID: my-ci-controller
# namespace limits the controller's watch to a specific namespace
namespace: my-namespace
# Parallelism limits the max total parallel workflows that can execute at the same time
# (available since Argo v2.3). Controller must be restarted to take effect.
parallelism: "10"
# Limit the maximum number of incomplete workflows in a namespace.
# Intended for multi-tenant cluster installs, to prevent too many workflows in one
# namespace from impacting others.
# >= v3.2
namespaceParallelism: "10"
# Globally limits the rate at which pods are created.
# This is intended to mitigate flooding of the Kubernetes API server by workflows with a large number of
# parallel nodes.
resourceRateLimit: |
limit: 10
burst: 1
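# As a rough sketch of the intended behaviour (assuming the usual token-bucket semantics):
# with the values above the controller creates at most ~10 pods per second cluster-wide, with a
# burst capacity of 1; e.g. "limit: 50, burst: 5" would also allow short spikes of 5 creations.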
# Whether or not to emit events on node completion. These can take up a lot of space in
# k8s (typically etcd), resulting in errors when trying to create new events:
# "Unable to create audit event: etcdserver: mvcc: database space exceeded"
# This config item allows you to disable this.
# (since v2.9)
nodeEvents: |
enabled: true
# Whether or not to emit events on workflow status changes. These can take up a lot of space in
# k8s (typically etcd); see nodeEvents above.
# This config item allows you to disable this.
# (since v3.6)
workflowEvents: |
enabled: true
# uncomment the following lines if the workflow controller runs in a different k8s cluster from the
# workflow workloads, or needs to communicate with the k8s apiserver using an out-of-cluster
# kubeconfig secret
# kubeConfig:
# # name of the kubeconfig secret; must not be empty when kubeConfig is specified
# secretName: kubeconfig-secret
# # key of the kubeconfig secret; must not be empty when kubeConfig is specified
# secretKey: kubeconfig
# # mounting path of the kubeconfig secret, defaults to /kube/config
# mountPath: /kubeconfig/mount/path
# # volume name when mounting the secret, defaults to kubeconfig
# volumeName: kube-config-volume
links: |
# Adds a button to the workflow page, e.g. linking to your logging facility.
- name: Example Workflow Link
scope: workflow
url: http://logging-facility?namespace=${metadata.namespace}&workflowName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
# Adds a button next to the pod, e.g. linking to your logging facility but for the pod only.
- name: Example Pod Link
scope: pod
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
- name: Pod Logs
scope: pod-logs
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
- name: Event Source Logs
scope: event-source-logs
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
- name: Sensor Logs
scope: sensor-logs
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
# Adds a button to the bottom right of every page to link to your organisation's help or chat.
- name: Get help
scope: chat
url: http://my-chat
# Adds a button to the top of the workflow view to navigate to customized views.
- name: Completed Workflows
scope: workflow-list
url: http://workflows?label=workflows.argoproj.io/completed=true
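# For illustration, the ${...} placeholders are substituted from the linked object's fields;
# for a workflow named "my-wf" in namespace "dev" the workflow-scoped link above would resolve
# to something like (illustrative values only):
# http://logging-facility?namespace=dev&workflowName=my-wf&startedAt=2024-01-01T00:00:00Z&finishedAt=2024-01-01T01:00:00Z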
# Columns are custom columns that will be exposed in the Workflow List View.
# (available since Argo v3.5)
columns: |
# Adds a column to the Workflow List View
- # The name of this column, e.g., "Workflow Completed".
name: Workflow Completed
# The type of this column, "label" or "annotation".
type: label
# The key of the label or annotation, e.g., "workflows.argoproj.io/completed".
key: workflows.argoproj.io/completed
# uncomment the following lines if you want to change the navigation bar background color
# navColor: red
# artifactRepository defines the default location to be used as the artifact repository for
# container artifacts.
artifactRepository: |
# archiveLogs will archive the main container logs as an artifact
archiveLogs: true
s3:
# Use the corresponding endpoint depending on your S3 provider:
# AWS: s3.amazonaws.com
# GCS: storage.googleapis.com
# Minio: my-minio-endpoint.default:9000
endpoint: s3.amazonaws.com
bucket: my-bucket
region: us-west-2
# insecure will disable TLS. Primarily used for minio installs not configured with TLS
insecure: false
# keyFormat is a format pattern to define how artifacts will be organized in a bucket.
# It can reference workflow metadata variables such as workflow.namespace, workflow.name,
# pod.name. Can also use strftime formatting of workflow.creationTimestamp so that workflow
# artifacts can be organized by date. If omitted, will use `{{workflow.name}}/{{pod.name}}`,
# which has the potential for collisions.
# The following example pattern organizes workflow artifacts under a "my-artifacts" sub dir,
# then sub dirs for year, month, date and finally workflow name and pod.
# e.g.: my-artifacts/2018/08/23/my-workflow-abc123/my-workflow-abc123-1234567890
keyFormat: "my-artifacts\
/{{workflow.creationTimestamp.Y}}\
/{{workflow.creationTimestamp.m}}\
/{{workflow.creationTimestamp.d}}\
/{{workflow.name}}\
/{{pod.name}}"
# The actual secret object (in this example my-s3-credentials) should be created in every
# namespace where a workflow needs to store its artifacts in S3. If omitted,
# the controller attempts to use an IAM role to access the bucket (instead of accessKey/secretKey).
accessKeySecret:
name: my-s3-credentials
key: accessKey
secretKeySecret:
name: my-s3-credentials
key: secretKey
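# For reference, a minimal sketch of the referenced secret (values are illustrative), to be
# created in each namespace that stores artifacts:
# apiVersion: v1
# kind: Secret
# metadata:
#   name: my-s3-credentials
# stringData:
#   accessKey: AKIAEXAMPLE        # hypothetical access key id
#   secretKey: example-secret-key # hypothetical secret access key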
# If this is set to true, Argo Workflows will use the AWS SDK default credentials provider chain. This allows things like
# IRSA and any of the authentication methods that the golang SDK uses in its default chain.
# If you are using IRSA on AWS and set this option to true, you will also need to modify the Argo Server Deployment with
# the `spec.template.spec.securityContext.fsGroup: 65534` configuration. This is required for IRSA to be able to access
# the `/var/run/secrets/eks.amazonaws.com/serviceaccount/token` file and authenticate with AWS.
useSDKCreds: false
encryptionOptions:
# If this is set to true, SSE-S3 encryption will be used to store objects
# unless kmsKeyId or serverSideCustomerKeySecret is set
enableEncryption: false
# A valid kms key id. If this value is set, the object stored in s3 will be encrypted with SSE-KMS
# Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
# kmsKeyId: ''
# Allows you to set a JSON blob of simple key-value pairs. See
# https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context
# for more information
# kmsEncryptionContext: ''
# The actual secret object (in this example my-s3-credentials)
# should be created when using a custom secret to encrypt objects with SSE-C.
# Note: You cannot set both kmsKeyId and serverSideCustomerKeySecret
# serverSideCustomerKeySecret:
# name: my-s3-credentials
# key: secretKey
# The command/args for each image, needed when the command is not specified and the emissary executor is used.
# https://argo-workflows.readthedocs.io/en/latest/workflow-executors/#emissary-emissary
images: |
argoproj/argosay:v2:
cmd: [/argosay]
docker/whalesay:latest:
cmd: [/bin/bash]
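# With the mapping above, a template that omits `command` can still run under the emissary
# executor; a minimal sketch (template name and args are illustrative):
# - name: say
#   container:
#     image: argoproj/argosay:v2
#     args: ["echo", "hello"]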
# Defaults for main containers. These can be overridden by the template.
# <= v3.3 only `resources` are supported.
# >= v3.4 all fields are supported, including security context.
mainContainer: |
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 0.1
memory: 64Mi
limits:
cpu: 0.5
memory: 512Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
# executor controls how the init and wait containers should be customized
# (available since Argo v2.3)
executor: |
imagePullPolicy: IfNotPresent
resources:
requests:
cpu: 0.1
memory: 64Mi
limits:
cpu: 0.5
memory: 512Mi
# args & env allow command-line arguments and environment variables to be appended to the
# executor container; they are mainly used for development/debugging purposes.
args:
- --loglevel
- debug
- --gloglevel
- "6"
env:
# ARGO_TRACE enables some tracing information for debugging purposes. Currently it enables
# logging of S3 request/response payloads (including auth headers)
- name: ARGO_TRACE
value: "1"
# metricsConfig controls the path and port for prometheus metrics. Metrics are enabled and emitted on localhost:9090/metrics
# by default.
metricsConfig: |
# Enabled controls metric emission. Default is true, set "enabled: false" to turn off
enabled: true
# Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics"
path: /metrics
# Port is the port where metrics are emitted. Default is "9090"
port: 8080
# MetricsTTL sets how often custom metrics are cleared from memory. Default is "0", metrics are never cleared. Histogram metrics are never cleared.
metricsTTL: "10m"
# IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors. Default is "false"
ignoreErrors: false
# Use a self-signed cert for TLS
# >= 3.6: default true
secure: true
# Options for configuring individual metrics
options:
pod_missing:
disable: true
cronworkflows_triggered_total:
disabledAttributes:
- name
k8s_request_duration:
histogramBuckets: [ 1.0, 2.0, 10.0 ]
# >= 3.6. Which temporality to use for OpenTelemetry. Default is "Cumulative"
temporality: Delta
# DEPRECATED: Legacy metrics are now removed, this field is ignored
disableLegacy: false
# telemetryConfig controls the path and port for prometheus telemetry. Telemetry is enabled and emitted in the same endpoint
# as metrics by default, but can be overridden using this config.
telemetryConfig: |
enabled: true
path: /telemetry
port: 8080
secure: true # Use a self-signed cert for TLS, default false
# enable persistence using postgres
persistence: |
connectionPool:
maxIdleConns: 100
maxOpenConns: 0
connMaxLifetime: 0s # 0 means connections don't have a max lifetime
# if true, node status is only saved to the persistence DB to avoid the 1MB limit in etcd
nodeStatusOffLoad: false
# save completed workflows to the workflow archive
archive: false
# the number of days to keep archived workflows (the default is forever)
archiveTTL: 180d
# skip database migration if needed.
# skipMigration: true
# archiveLabelSelector determines which workflows (those matching the label selector below) will be archived.
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
archiveLabelSelector:
matchLabels:
workflows.argoproj.io/archive-strategy: "always"
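# With this selector, only workflows that carry the label below in their own metadata are
# archived (a sketch; any label key/value of your choosing works the same way):
# metadata:
#   labels:
#     workflows.argoproj.io/archive-strategy: "always"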
# Optional name of the cluster the controller is running in. This must be unique for your cluster.
clusterName: default
postgresql:
host: localhost
port: 5432
database: postgres
tableName: argo_workflows
# the database secrets must be in the same namespace as the controller
userNameSecret:
name: argo-postgres-config
key: username
passwordSecret:
name: argo-postgres-config
key: password
ssl: true
# sslMode must be one of: disable, require, verify-ca, verify-full
# you can find more information about those ssl options here: https://godoc.org/github.com/lib/pq
sslMode: require
# Optional config for mysql:
# mysql:
# host: localhost
# port: 3306
# database: argo
# tableName: argo_workflows
# userNameSecret:
# name: argo-mysql-config
# key: username
# passwordSecret:
# name: argo-mysql-config
# key: password
# PodSpecLogStrategy enables the logging of pod specs in the controller log.
# podSpecLogStrategy: |
# failedPod: true
# allPods: false
# PodGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed.
# Value must be a non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately.
# Defaults to the Kubernetes default of 30 seconds.
podGCGracePeriodSeconds: "60"
# PodGCDeleteDelayDuration specifies the duration before pods in the GC queue get deleted.
# Value must be non-negative. A zero value indicates that the pods will be deleted immediately.
# Defaults to 5 seconds.
podGCDeleteDelayDuration: 30s
# adds an initial delay (for K8S clusters with mutating webhooks) to prevent workflows from being modified by the MWC.
# initialDelay: 5s
# Workflow retention by number of workflows
# retentionPolicy: |
# completed: 10
# failed: 3
# errored: 3
# Default values that will apply to all Workflows from this controller, unless overridden at the Workflow level
# See more: docs/default-workflow-specs.md
workflowDefaults: |
metadata:
annotations:
argo: workflows
labels:
foo: bar
spec:
ttlStrategy:
secondsAfterSuccess: 5
parallelism: 3
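# Sketch of the merge behaviour: a Workflow that sets spec.parallelism itself overrides the
# default of 3 above, while fields it leaves unset (e.g. ttlStrategy) inherit these defaults.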
# SSO Configuration for the Argo server.
# You must also start argo server with `--auth-mode sso`.
# https://argo-workflows.readthedocs.io/en/latest/argo-server-auth-mode/
sso: |
# This is the root URL of the OIDC provider (required).
issuer: https://issuer.root.url/
# Some OIDC providers have alternate root URLs that can be included. These should be reviewed carefully. (optional)
issuerAlias: https://altissuer.root.url
# This defines how long your login is valid for (in hours). (optional)
# If omitted, defaults to 10h. Example below is 10 days.
sessionExpiry: 240h
# This is the name of the secret and the key in it that contain the OIDC client
# ID issued to the application by the provider (required).
clientId:
name: client-id-secret
key: client-id-key
# This is the name of the secret and the key in it that contain the OIDC client
# secret issued to the application by the provider (required).
clientSecret:
name: client-secret-secret
key: client-secret-key
# This is the redirect URL supplied to the provider (optional). It must
# be in the form <argo-server-root-url>/oauth2/callback. It must be
# browser-accessible. If omitted, it will be automatically generated.
redirectUrl: https://argo-server/oauth2/callback
# Additional scopes to request. Typically needed for SSO RBAC. >= v2.12
scopes:
- groups
- email
- profile
# RBAC Config. >= v2.12
rbac:
enabled: false
# Skip TLS verification; not recommended in production environments. Useful for testing purposes. >= v3.2.4
insecureSkipVerify: false
# workflowRestrictions restricts the Workflows that the controller will process.
# Current options:
# Strict: Only Workflows using "workflowTemplateRef" will be processed. This allows the administrator of the controller
# to set a "library" of templates that may be run by its operator, limiting arbitrary Workflow execution.
# Secure: Only Workflows using "workflowTemplateRef" will be processed and the controller will enforce
# that the WorkflowTemplate that is referenced hasn't changed between operations. If you want to make sure the operator of the
# Workflow cannot run an arbitrary Workflow, use this option.
workflowRestrictions: |
templateReferencing: Strict
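# Under "Strict", only Workflows that reference a WorkflowTemplate are processed; a minimal
# sketch of a compliant Workflow (the template name is illustrative):
# apiVersion: argoproj.io/v1alpha1
# kind: Workflow
# metadata:
#   generateName: from-template-
# spec:
#   workflowTemplateRef:
#     name: my-approved-template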