# Create NDM configmap
# Define the Service Account
# Define the RBAC rules for the Service Account
# Launch the node-disk-manager (daemon set)
# Launch the node-disk-operator and ndm-cluster-exporter (deployments)
# Create NDM configmap
apiVersion: v1
kind: ConfigMap
metadata:
  name: node-disk-manager-config
  namespace: default
data:
  # node-disk-manager-config contains the config for the available probes and
  # filters. Probes and filters are initialized with default values if the
  # config for that probe or filter is not present in the configmap.
  # udev-probe is the default/primary probe; it must be enabled for NDM to run.
  # filterconfigs contains the configs of the filters. To provide a group of
  # include and exclude values, add them as a comma-separated string.
  node-disk-manager.config: |
    probeconfigs:
      - key: udev-probe
        name: udev probe
        state: true
      - key: seachest-probe
        name: seachest probe
        state: true
      - key: smart-probe
        name: smart probe
        state: true
    filterconfigs:
      - key: os-disk-exclude-filter
        name: os disk exclude filter
        state: true
        exclude: "/,/etc/hosts,/boot"
      - key: vendor-filter
        name: vendor filter
        state: true
        include: ""
        exclude: "CLOUDBYT,OpenEBS"
      - key: path-filter
        name: path filter
        state: true
        include: ""
        exclude: loop
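# For example (hypothetical values), to also ignore ram devices, extend the
# path filter's comma-separated exclude list:
#   exclude: "loop,/dev/ram"
# Likewise, a probe can be switched off by setting its state to false:
#   - key: seachest-probe
#     name: seachest probe
#     state: false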
---
# Create NDM Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: openebs-ndm-operator
  namespace: default
---
# Define the ClusterRole that allows NDM to operate on nodes, pods,
# CRDs and the openebs.io custom resources.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: openebs-ndm-operator
rules:
  - apiGroups: ["*"]
    resources: ["nodes", "pods", "services", "endpoints", "events", "configmaps", "secrets", "jobs"]
    verbs:
      - '*'
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs:
      - '*'
  - apiGroups:
      - openebs.io
    resources:
      - disks
      - blockdevices
      - blockdeviceclaims
    verbs:
      - '*'
---
# Bind the Service Account with the Role privileges.
# TODO: Check if default account also needs to be there
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: openebs-ndm-operator
subjects:
  - kind: ServiceAccount
    name: openebs-ndm-operator
    namespace: default
  - kind: User
    name: system:serviceaccount:default:default
    apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: ClusterRole
  name: openebs-ndm-operator
  apiGroup: rbac.authorization.k8s.io
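# A quick way to verify the binding took effect (assuming the default
# namespace used above):
#   kubectl auth can-i list blockdevices \
#     --as=system:serviceaccount:default:openebs-ndm-operator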
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-disk-manager
spec:
  updateStrategy:
    type: RollingUpdate
  selector:
    matchLabels:
      name: node-disk-manager
  template:
    metadata:
      labels:
        name: node-disk-manager
    spec:
      # By default the node-disk-manager runs on all Kubernetes nodes.
      # If you would like to limit this to only some nodes, say the nodes
      # that have storage attached, you could label those nodes and use a
      # nodeSelector.
      # Example: label the storage nodes with "openebs.io/nodegroup"="storage-node":
      #   kubectl label node <node-name> "openebs.io/nodegroup"="storage-node"
      # nodeSelector:
      #   "openebs.io/nodegroup": "storage-node"
      #
      # Use the host network as the container network so NDM can monitor the
      # udev source over a netlink socket and detect disk attach/detach events.
      hostNetwork: true
      serviceAccountName: openebs-ndm-operator
      containers:
        - name: node-disk-manager
          image: openebs/node-disk-manager-amd64:ci
          imagePullPolicy: Always
          securityContext:
            privileged: true
          volumeMounts:
            - name: config
              mountPath: /host/node-disk-manager.config
              subPath: node-disk-manager.config
              readOnly: true
            # make the udev database available inside the container
            - name: udev
              mountPath: /run/udev
            - name: procmount
              mountPath: /host/proc
              readOnly: true
            - name: sparsepath
              mountPath: /var/openebs/sparse
          env:
            # The namespace in which NDM is installed is passed to the
            # DaemonSet as an environment variable.
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # Pass the host name to the NDM container via the downward API.
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Directory where the sparse files are created.
            # If not specified, sparse files will not be created.
            - name: SPARSE_FILE_DIR
              value: "/var/openebs/sparse"
            # Size of each sparse file, in bytes (1073741824 = 1 GiB).
            - name: SPARSE_FILE_SIZE
              value: "1073741824"
            # Number of sparse files to be created.
            - name: SPARSE_FILE_COUNT
              value: "1"
      volumes:
        - name: config
          configMap:
            name: node-disk-manager-config
        - name: udev
          hostPath:
            path: /run/udev
            type: Directory
        # Mount the host's /proc inside the container so NDM can read
        # /proc/1/mounts (the mount file of the host's PID 1) and find out
        # which partition is mounted at /.
        - name: procmount
          hostPath:
            path: /proc
            type: Directory
        - name: sparsepath
          hostPath:
            path: /var/openebs/sparse
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: node-disk-operator
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      name: node-disk-operator
  template:
    metadata:
      labels:
        name: node-disk-operator
    spec:
      serviceAccountName: openebs-ndm-operator
      containers:
        - name: node-disk-operator
          image: openebs/node-disk-operator-amd64:ci
          ports:
            - containerPort: 8080
              name: liveness
          imagePullPolicy: Always
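          # The readiness check below stats a marker file; the operator-sdk
          # file-based readiness helper is assumed to create
          # /tmp/operator-sdk-ready once the operator is up.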
          readinessProbe:
            exec:
              command:
                - stat
                - /tmp/operator-sdk-ready
            initialDelaySeconds: 4
            periodSeconds: 10
            failureThreshold: 1
          env:
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # the service account of this pod
            - name: SERVICE_ACCOUNT
              valueFrom:
                fieldRef:
                  fieldPath: spec.serviceAccountName
            - name: OPERATOR_NAME
              value: "node-disk-operator"
            - name: CLEANUP_JOB_IMAGE
              value: "quay.io/openebs/linux-utils:3.9"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ndm-cluster-exporter
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      name: ndm-cluster-exporter
  template:
    metadata:
      labels:
        name: ndm-cluster-exporter
    spec:
      serviceAccountName: openebs-ndm-operator
      containers:
        - name: ndm-cluster-exporter
          image: openebs/node-disk-exporter-amd64:ci
          command:
            - /usr/local/bin/exporter
          args:
            - "start"
            - "--mode=cluster"
            - "--port=:9100"
            - "--metrics=/metrics"
          ports:
            - containerPort: 9100
              protocol: TCP
              name: metrics
          imagePullPolicy: Always
          env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
---
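# Smoke test for the exporter (assuming the deployment above is running in
# the current namespace; the port and path follow from the exporter args):
#   kubectl port-forward deploy/ndm-cluster-exporter 9100:9100
#   curl http://localhost:9100/metrics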