# values.yaml for the forkmon chart (forked from taikoxyz/taiko-helm-charts)
# -- Overrides the chart's name
nameOverride: ""
# -- Overrides the chart's computed fullname
fullnameOverride: ""
# -- Number of replicas
replicas: 1
image:
  # -- forkmon container image repository
  repository: holiman/nodemonitor
  # -- forkmon container image tag
  tag: latest
  # -- forkmon container pull policy
  pullPolicy: IfNotPresent
# -- Extra args for the forkmon container
extraArgs: []
# -- Command replacement for the forkmon container
customCommand: [] # Only change this if you need to change the default command
# -- Set a network name such as mainnet, holesky, sepolia
network: ""
# -- Config file
# @default -- See `values.yaml`
config: |
  # How often to reload data from the nodes
  reload_interval = "10s"
  # If specified, an HTTP server will serve static content here
  server_address = "0.0.0.0:{{ .Values.httpPort }}"
  # Shown in the document title, if specified
  chain_name = "{{ .Values.network }}"
  # Third-party providers
  infura_key = "<INFURA_API_KEY>"
  infura_endpoint = "https://mainnet.infura.io/v3/"
  alchemy_key = "<ALCHEMY_API_KEY>"
  alchemy_endpoint = "https://eth-mainnet.g.alchemy.com/v2/"
  etherscan_key = "<ETHERSCAN_API_KEY>"
  etherscan_endpoint = "https://api.etherscan.io/api"
  [Metrics]
  enabled = true
  endpoint = "<INFLUX_ENDPOINT>"
  username = "<INFLUX_USERNAME>"
  database = "<INFLUX_DB_NAME>"
  password = "<INFLUX_PASSWORD>"
  # Local and other non-third-party connections require the rpc kind
  {{- range .Values.endpoints }}
  [[clients]]
  url = {{ .addr | quote }}
  name = {{ .name | quote }}
  kind = {{ .kind | quote }}
  {{- end }}
  # Third-party client configuration doesn't require a url.
  # Kind can be infura, alchemy or etherscan.
  # Enabling a rate limit is recommended for third-party services.
  {{- range .Values.thirdparty_endpoints }}
  [[clients]]
  name = {{ .name | quote }}
  kind = {{ .kind | quote }}
  rate_limit = {{ .rate_limit }}
  {{- end }}
# -- Third party endpoints that you would like to monitor
thirdparty_endpoints:
  - name: "mainnet-infura"
    kind: "infura"
    rate_limit: 5
  - name: "mainnet-alchemy"
    kind: "alchemy"
    rate_limit: 5
  - name: "mainnet-etherscan"
    kind: "etherscan"
    rate_limit: 5
# -- Endpoints that you would like to monitor
endpoints:
  - addr: http://execution-client:8545
    name: "execution-client-name"
    kind: "rpc"
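  # Example (commented out): monitor several execution clients by adding more
  # entries; the URLs and names below are placeholders.
  # - addr: http://geth:8545
  #   name: "geth"
  #   kind: "rpc"
  # - addr: http://nethermind:8545
  #   name: "nethermind"
  #   kind: "rpc"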
ingress:
  # -- Ingress resource for the HTTP API
  enabled: false
  # -- Annotations for Ingress
  annotations: {}
  # kubernetes.io/ingress.class: nginx
  # kubernetes.io/tls-acme: "true"
  # -- Ingress host
  hosts:
    - host: chart-example.local
      paths: []
  # -- Ingress TLS
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local
# -- HTTP port for the forkmon interface
httpPort: 8080
service:
  # -- Service type
  type: ClusterIP
# -- Affinity configuration for pods
affinity: {}
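# Example (commented out): prefer spreading replicas across nodes; the
# app.kubernetes.io/name value is a placeholder for this chart's pod labels.
# podAntiAffinity:
#   preferredDuringSchedulingIgnoredDuringExecution:
#     - weight: 100
#       podAffinityTerm:
#         topologyKey: kubernetes.io/hostname
#         labelSelector:
#           matchLabels:
#             app.kubernetes.io/name: forkmon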
# -- Image pull secrets for Docker images
imagePullSecrets: []
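# Example (commented out): reference an existing registry credentials Secret;
# the name is a placeholder.
# - name: my-registry-credentials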
# -- Annotations for the StatefulSet
annotations: {}
# -- Liveness probe
# @default -- See `values.yaml`
livenessProbe:
  tcpSocket:
    port: http
  initialDelaySeconds: 60
  periodSeconds: 120
# -- Readiness probe
# @default -- See `values.yaml`
readinessProbe:
  tcpSocket:
    port: http
  initialDelaySeconds: 10
  periodSeconds: 10
# -- Node selector for pods
nodeSelector: {}
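# Example (commented out): schedule pods on Linux nodes only.
# kubernetes.io/os: linux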
# -- Pod labels
podLabels: {}
# -- Pod annotations
podAnnotations: {}
# -- Pod management policy
podManagementPolicy: OrderedReady
# -- Pod priority class
priorityClassName: null
# -- Resource requests and limits
resources: {}
# limits:
# cpu: 500m
# memory: 2Gi
# requests:
# cpu: 300m
# memory: 1Gi
# -- The security context for pods
# @default -- See `values.yaml`
securityContext:
  fsGroup: 10001
  runAsGroup: 10001
  runAsNonRoot: true
  runAsUser: 10001
# -- The security context for containers
# @default -- See `values.yaml`
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
serviceAccount:
  # -- Specifies whether a service account should be created
  create: true
  # -- Annotations to add to the service account
  annotations: {}
  # -- The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
# -- How long to wait until the pod is forcefully terminated
terminationGracePeriodSeconds: 30
# -- Tolerations for pods
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
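# Example (commented out): tolerate a dedicated-node taint; the key and value
# are placeholders.
# - key: "dedicated"
#   operator: "Equal"
#   value: "monitoring"
#   effect: "NoSchedule"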
# -- Topology Spread Constraints for pods
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
topologySpreadConstraints: []
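# Example (commented out): spread replicas evenly across nodes; the label
# value is a placeholder for this chart's pod labels.
# - maxSkew: 1
#   topologyKey: kubernetes.io/hostname
#   whenUnsatisfiable: ScheduleAnyway
#   labelSelector:
#     matchLabels:
#       app.kubernetes.io/name: forkmon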
# -- Define the PodDisruptionBudget spec
# If not set then a PodDisruptionBudget will not be created
podDisruptionBudget: {}
# minAvailable: 1
# maxUnavailable: 1
# -- Update strategy for the StatefulSet
updateStrategy:
  # -- Update strategy type
  type: RollingUpdate
# -- Additional init containers
initContainers: []
# - name: my-init-container
# image: busybox:latest
# command: ['sh', '-c', 'echo hello']
# -- Additional containers
extraContainers: []
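# Example (commented out): run an extra sidecar container next to forkmon;
# the name, image and command are placeholders.
# - name: sidecar
#   image: busybox:latest
#   command: ['sh', '-c', 'while true; do sleep 3600; done']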
# -- Additional volumes
extraVolumes: []
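# Example (commented out): an additional emptyDir volume; the name is a
# placeholder and should match an entry in extraVolumeMounts.
# - name: scratch
#   emptyDir: {}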
# -- Additional volume mounts
extraVolumeMounts: []
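# Example (commented out): mount the extra volume from the example above.
# - name: scratch
#   mountPath: /scratch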
# -- Additional ports. Useful when using extraContainers
extraPorts: []
# -- Additional env variables
extraEnv: []
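# Example (commented out): plain environment variables for the container;
# the name and value are placeholders.
# - name: EXTRA_FLAG
#   value: "some-value"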
# -- Additional env variables injected via a created secret
secretEnv: {}
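# Example (commented out): assuming the chart renders this map into a Secret
# and injects the keys as environment variables; the key and value are
# placeholders.
# INFURA_API_KEY: "changeme"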
persistence:
  # -- Uses an emptyDir when not enabled
  enabled: false
  # -- Use an existing PVC when persistence.enabled is true
  existingClaim: null
  # -- Access mode for the volume claim template
  accessModes:
    - ReadWriteOnce
  # -- Requested size for the volume claim template
  size: 1Gi
  # -- Use a specific storage class
  # E.g. 'local-path' for local storage to achieve the best performance
  # Read more (https://github.com/rancher/local-path-provisioner)
  storageClassName: null
  # -- Annotations for the volume claim template
  annotations: {}
  # -- Selector for the volume claim template
  selector: {}
  # matchLabels:
  #   app.kubernetes.io/name: something