-
Notifications
You must be signed in to change notification settings - Fork 27
/
zk-persistent.yaml
242 lines (241 loc) · 6.68 KB
/
zk-persistent.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
---
# OpenShift template: replicated Zookeeper ensemble backed by persistent volumes.
# NOTE(review): indentation was reconstructed from a whitespace-mangled copy; the
# structure below follows the standard Template / StatefulSet schemas — verify
# against the original file before applying.
kind: Template
apiVersion: v1
metadata:
  name: zk-persistent
  annotations:
    openshift.io/display-name: Zookeeper (Persistent)
    description: Create a replicated Zookeeper server with persistent storage
    iconClass: icon-database
    tags: database,zookeeper
# These labels are stamped onto every object the template produces.
labels:
  template: zk-persistent
  component: zk
parameters:
  - name: NAME
    value: zk-persistent
    required: true
  - name: SOURCE_IMAGE
    description: Container image
    value: zookeeper
    required: true
  - name: ZOO_VERSION
    description: Version
    value: "3.7.0"
    required: true
  - name: ZOO_REPLICAS
    description: Number of nodes
    value: "3"
    required: true
  - name: VOLUME_DATA_CAPACITY
    description: Persistent volume capacity for zookeeper dataDir directory (e.g. 512Mi, 2Gi)
    value: 1Gi
    required: true
  - name: VOLUME_DATALOG_CAPACITY
    description: Persistent volume capacity for zookeeper dataLogDir directory (e.g. 512Mi, 2Gi)
    value: 1Gi
    required: true
  - name: ZOO_TICK_TIME
    description: The number of milliseconds of each tick
    value: "2000"
    required: true
  - name: ZOO_INIT_LIMIT
    description: The number of ticks that the initial synchronization phase can take
    value: "5"
    required: true
  - name: ZOO_SYNC_LIMIT
    description: The number of ticks that can pass between sending a request and getting an acknowledgement
    value: "2"
    required: true
  - name: ZOO_CLIENT_PORT
    description: The port at which the clients will connect
    value: "2181"
    required: true
  - name: ZOO_SERVER_PORT
    description: Server port
    value: "2888"
    required: true
  - name: ZOO_ELECTION_PORT
    description: Election port
    value: "3888"
    required: true
  - name: ZOO_MAX_CLIENT_CNXNS
    description: The maximum number of client connections
    value: "60"
    required: true
  - name: ZOO_SNAP_RETAIN_COUNT
    description: The number of snapshots to retain in dataDir
    value: "3"
    required: true
  - name: ZOO_PURGE_INTERVAL
    description: Purge task interval in hours. Set to 0 to disable auto purge feature
    value: "1"
    required: true
  # FIX: the ZK_HEAP_SIZE env var below referenced ${ZOO_HEAP_SIZE}, which was
  # never declared, so the literal text "${ZOO_HEAP_SIZE}" leaked into the pod.
  # Declaring it with a default matching the memory settings is backward-compatible.
  - name: ZOO_HEAP_SIZE
    description: JVM heap size exported through the ZK_HEAP_SIZE env var
    value: "512M"
    required: true
  - name: ZOO_MEMORY
    description: JVM heap size
    value: "-Xmx512M -Xms512M"
    required: true
  - name: RESOURCE_MEMORY_REQ
    description: The memory resource request.
    value: "512M"
    required: true
  - name: RESOURCE_MEMORY_LIMIT
    description: The limits for memory resource.
    value: "512M"
    required: true
  - name: RESOURCE_CPU_REQ
    description: The CPU resource request.
    value: "300m"
    required: true
  - name: RESOURCE_CPU_LIMIT
    description: The limits for CPU resource.
    value: "300m"
    required: true
objects:
  # Headless service that gives each Zookeeper pod a stable DNS identity.
  - apiVersion: v1
    kind: Service
    metadata:
      name: ${NAME}
      labels:
        zk-name: ${NAME}
        component: zk
      annotations:
        # Deprecated alpha annotation kept for the template's original target
        # platform; newer clusters use `publishNotReadyAddresses: true` instead.
        service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    spec:
      ports:
        # FIX: port is an integer field, so the non-string form ${{...}} is
        # required; plain ${...} substitutes a quoted string and fails
        # API validation.
        - port: ${{ZOO_CLIENT_PORT}}
          name: client
        - port: ${{ZOO_SERVER_PORT}}
          name: server
        - port: ${{ZOO_ELECTION_PORT}}
          name: election
      clusterIP: None
      selector:
        zk-name: ${NAME}
  - apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: ${NAME}
      labels:
        zk-name: ${NAME}
        component: zk
    spec:
      # Start all members at once; Zookeeper forms its quorum regardless of
      # the order in which the members come up.
      podManagementPolicy: "Parallel"
      serviceName: ${NAME}
      selector:
        matchLabels:
          zk-name: ${NAME}
          component: zk
      # FIX: replicas is an integer field — non-string substitution required.
      replicas: ${{ZOO_REPLICAS}}
      template:
        metadata:
          labels:
            zk-name: ${NAME}
            template: zk-persistent
            component: zk
          # annotations:
          ## Use this annotation if you want to allocate each pod on a different node.
          ## Note the number of nodes must be greater than the ZOO_REPLICAS parameter.
          # scheduler.alpha.kubernetes.io/affinity: >
          #   {
          #     "podAntiAffinity": {
          #       "requiredDuringSchedulingIgnoredDuringExecution": [{
          #         "labelSelector": {
          #           "matchExpressions": [{
          #             "key": "zk-name",
          #             "operator": "In",
          #             "values": ["zk"]
          #           }]
          #         },
          #         "topologyKey": "kubernetes.io/hostname"
          #       }]
          #     }
          #   }
        spec:
          # FIX: fsGroup is only valid in the pod-level securityContext (it
          # controls volume group ownership), so the block lives here rather
          # than inside the container.
          securityContext:
            runAsUser: 1001
            fsGroup: 1001
          containers:
            - name: ${NAME}
              imagePullPolicy: IfNotPresent
              image: ${SOURCE_IMAGE}:${ZOO_VERSION}
              resources:
                requests:
                  memory: ${RESOURCE_MEMORY_REQ}
                  cpu: ${RESOURCE_CPU_REQ}
                limits:
                  memory: ${RESOURCE_MEMORY_LIMIT}
                  cpu: ${RESOURCE_CPU_LIMIT}
              ports:
                # FIX: containerPort is an integer field — non-string form required.
                - containerPort: ${{ZOO_CLIENT_PORT}}
                  name: client
                - containerPort: ${{ZOO_SERVER_PORT}}
                  name: server
                - containerPort: ${{ZOO_ELECTION_PORT}}
                  name: election
              env:
                # style fix: removed the stray space before ':' in every env name.
                - name: SETUP_DEBUG
                  value: "true"
                - name: ZOO_REPLICAS
                  value: ${ZOO_REPLICAS}
                # FIX: now backed by the ZOO_HEAP_SIZE parameter declared above.
                - name: ZK_HEAP_SIZE
                  value: ${ZOO_HEAP_SIZE}
                - name: ZK_tickTime
                  value: ${ZOO_TICK_TIME}
                - name: ZK_initLimit
                  value: ${ZOO_INIT_LIMIT}
                - name: ZK_syncLimit
                  value: ${ZOO_SYNC_LIMIT}
                - name: ZK_maxClientCnxns
                  value: ${ZOO_MAX_CLIENT_CNXNS}
                - name: ZK_autopurge_snapRetainCount
                  value: ${ZOO_SNAP_RETAIN_COUNT}
                - name: ZK_autopurge_purgeInterval
                  value: ${ZOO_PURGE_INTERVAL}
                - name: ZK_clientPort
                  value: ${ZOO_CLIENT_PORT}
                - name: ZOO_SERVER_PORT
                  value: ${ZOO_SERVER_PORT}
                - name: ZOO_ELECTION_PORT
                  value: ${ZOO_ELECTION_PORT}
                - name: JAVA_ZK_JVMFLAGS
                  # The embedded escaped quotes are intentional — presumably the
                  # image's startup script re-evaluates this value in a shell
                  # context; confirm against the image before changing.
                  value: "\"${ZOO_MEMORY}\""
              readinessProbe:
                exec:
                  command:
                    - zkServer.sh
                    - status
                initialDelaySeconds: 20
                timeoutSeconds: 10
              livenessProbe:
                exec:
                  command:
                    - zkServer.sh
                    - status
                initialDelaySeconds: 20
                timeoutSeconds: 10
              volumeMounts:
                - name: datadir
                  mountPath: /opt/zookeeper/data
                - name: datalogdir
                  mountPath: /opt/zookeeper/data-log
      volumeClaimTemplates:
        - metadata:
            name: datadir
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: ${VOLUME_DATA_CAPACITY}
            # FIX: a PVC label selector must wrap its labels in matchLabels /
            # matchExpressions; bare keys are rejected by the API server.
            selector:
              matchLabels:
                component: zk
                contents: data
        - metadata:
            name: datalogdir
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: ${VOLUME_DATALOG_CAPACITY}
            selector:
              matchLabels:
                component: zk
                contents: datalog