diff --git a/charts/clash/Chart.yaml b/charts/clash/Chart.yaml
index 853f90b..d4b0fc0 100644
--- a/charts/clash/Chart.yaml
+++ b/charts/clash/Chart.yaml
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.0.1
+version: 0.0.2
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/charts/clash/templates/clash.yaml b/charts/clash/templates/clash.yaml
index b9cde04..0e6f748 100644
--- a/charts/clash/templates/clash.yaml
+++ b/charts/clash/templates/clash.yaml
@@ -17,7 +17,7 @@ spec:
       volumes:
         - name: clash-conf-volume
           hostPath:
-            path: /root/config.yaml
+            path: /root/clash/
       containers:
         - name: clash
           image: dreamacro/clash:v1.12.0
@@ -28,6 +28,8 @@ spec:
           volumeMounts:
             - name: clash-conf-volume
               mountPath: /root/.config/clash
+      nodeSelector:
+        kubernetes.io/hostname: ali-node1
 ---
 apiVersion: v1
 kind: Service
diff --git a/charts/clash/templates/yacd.yaml b/charts/clash/templates/yacd.yaml
new file mode 100644
index 0000000..4c4598d
--- /dev/null
+++ b/charts/clash/templates/yacd.yaml
@@ -0,0 +1,61 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: yacd
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      # manage pods with the label app: yacd
+      app: yacd
+  template:
+    metadata:
+      labels:
+        app: yacd
+    spec:
+      containers:
+        - name: yacd
+          image: haishanh/yacd
+          ports:
+            - containerPort: 80
+      nodeSelector:
+        kubernetes.io/hostname: ali-node1
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: yacd-svc
+spec:
+  ports:
+    - name: http1
+      port: 80
+  selector:
+    # apply service to any pod with label app: yacd
+    app: yacd
+---
+{{- if .Values.ingress.enabled -}}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: yacd-ingress
+  annotations:
+    kubernetes.io/ingress.class: "nginx"
+    cert-manager.io/cluster-issuer: "letsencrypt-prod"
+spec:
+  tls:
+    - hosts:
+        - yacd.ihave.cool
+      secretName: yacd-tls
+  rules:
+    - host: yacd.ihave.cool
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: yacd-svc
+                port:
+                  number: 80
+{{- end }}
diff --git a/headscale/etc/headscale/config.yaml b/headscale/etc/headscale/config.yaml
index c128f55..460051f 100644
--- a/headscale/etc/headscale/config.yaml
+++ b/headscale/etc/headscale/config.yaml
@@ -123,8 +123,8 @@ tls_letsencrypt_challenge_type: HTTP-01
 tls_letsencrypt_listen: ":http"
 
 ## Use already defined certificates:
-tls_cert_path: ""
-tls_key_path: ""
+tls_cert_path: "/etc/headscale/certs/headscale.ihave.cool.crt"
+tls_key_path: "/etc/headscale/certs/headscale.ihave.cool.key"
 
 log:
   # Output formatting for logs: text or json
diff --git a/k3s/registries.yaml b/k3s/registries.yaml
index cb209cd..c142522 100644
--- a/k3s/registries.yaml
+++ b/k3s/registries.yaml
@@ -1,43 +1,31 @@
 mirrors:
   k8s.gcr.io:
     endpoint:
+      - "https://registry.cn-shenzhen.aliyuncs.com"
       - "https://registry.cn-hangzhou.aliyuncs.com"
     rewrite:
       "(.*)": "icool/$1"
   docker.io:
     endpoint:
-      # - "https://x8pwdtpq.mirror.aliyuncs.com"
-      - "https://registry.cn-hangzhou.aliyuncs.com"
+      - "https://x8pwdtpq.mirror.aliyuncs.com"
     rewrite:
       # "^rancher/(.*)": "mirrorproject/rancher-images/$1"
       "^rancher/mirrored-pause:3.6": "icool/rancher.mirrored-pause:3.6"
"^k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0": "icool/gcr.csi-node-driver-registrar:v2.3.0" "^k8s.gcr.io/sig-storage/csi-resizer:v1.2.0": "icool/gcr.csi-resizer:v1.2.0" "^k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3": "icool/gcr.csi-snapshotter:v3.0.3" - "^k8s.gcr.io/sig-storage/csi-attacher:v3.1.0": "icool/gcr.csi-snapshotter:v3.0.3" + "^k8s.gcr.io/sig-storage/csi-attacher:v3.1.0": "icool/gcr.csi-attacher:v3.1.0" "^k8s.gcr.io/sig-storage/snapshot-controller:v3.0.3": "icool/gcr.snapshot-controller:v3.0.3" "^k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0": "icool/gcr.csi-provisioner:v3.0.0" configs: - # "x8pwdtpq.mirror.aliyuncs.com": - "*.gcr.io": - auth: - username: xxx - password: xxx - "registry.cn-hangzhou.aliyuncs.com": + "x8pwdtpq.mirror.aliyuncs.com": auth: - username: ${ALIYUN_REGISTER_USER} - password: ${ALIYUN_REGISTER_PASS} - -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-node-driver-registrar:v2.3.0" -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.snapshot-controller:v3.0.3" -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-resizer:v1.2.0" -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-snapshotter:v3.0.3" -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-attacher:v3.1.0" -# docker push "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-provisioner:v3.0.0" + username: 18780992520 + password: zc1998zc -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-node-driver-registrar:v2.3.0" "k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0" -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.snapshot-controller:v3.0.3" "k8s.gcr.io/sig-storage/snapshot-controller:v3.0.3" -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-resizer:v1.2.0" "k8s.gcr.io/sig-storage/csi-resizer:v1.2.0" -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-snapshotter:v3.0.3" "k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3" -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-attacher:v3.1.0" "k8s.gcr.io/sig-storage/csi-attacher:v3.1.0" -# docker tag "registry.cn-hangzhou.aliyuncs.com/icool/gcr.csi-provisioner:v3.0.0" "k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0" +# configs: +# # "x8pwdtpq.mirror.aliyuncs.com": +# "registry.cn-hangzhou.aliyuncs.com": +# auth: +# username: ${ALIYUN_REGISTER_USER} +# password: ${ALIYUN_REGISTER_PASS} diff --git a/openebs/install.sh b/openebs/install.sh new file mode 100644 index 0000000..b7d2f6b --- /dev/null +++ b/openebs/install.sh @@ -0,0 +1,7 @@ +helm uninstall openebs -n openebs + +helm install openebs -n openebs --create-namespace --set cstor.enabled=true --debug . + + +k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0 +k8s.gcr.io/sig-storage/csi-node-driver-registrar diff --git a/openebs/values.yaml b/openebs/values.yaml index c84e04b..e69de29 100644 --- a/openebs/values.yaml +++ b/openebs/values.yaml @@ -1,761 +0,0 @@ -# Default values for openebs. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -rbac: - # Specifies whether RBAC resources should be created - create: true - pspEnabled: false - # rbac.kyvernoEnabled: `true` if Kyverno Policy resources should be created - kyvernoEnabled: false - -serviceAccount: - create: true - name: - -imagePullSecrets: [] - # - name: image-pull-secret - -release: - # "openebs.io/version" label for control plane components - version: "3.3.0" - -# Legacy components will be installed if it is enabled. 
-# Legacy components are - admission-server, maya api-server, snapshot-operator
-# and k8s-provisioner
-legacy:
-  enabled: false
-
-image:
-  pullPolicy: IfNotPresent
-  repository: ""
-
-apiserver:
-  enabled: true
-  image: "openebs/m-apiserver"
-  imageTag: "2.12.2"
-  replicas: 1
-  ports:
-    externalPort: 5656
-    internalPort: 5656
-  sparse:
-    enabled: "false"
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  healthCheck:
-    initialDelaySeconds: 30
-    periodSeconds: 60
-  ## apiserver resource requests and limits
-  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
-  resources: {}
-  # limits:
-  #   cpu: 1000m
-  #   memory: 2Gi
-  # requests:
-  #   cpu: 500m
-  #   memory: 1Gi
-
-
-defaultStorageConfig:
-  enabled: "true"
-
-# Directory used by the OpenEBS to store debug information and so forth
-# that are generated in the course of running OpenEBS containers.
-varDirectoryPath:
-  baseDir: "/var/openebs"
-
-provisioner:
-  enabled: true
-  image: "openebs/openebs-k8s-provisioner"
-  imageTag: "2.12.2"
-  replicas: 1
-  enableLeaderElection: true
-  patchJivaNodeAffinity: enabled
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  healthCheck:
-    initialDelaySeconds: 30
-    periodSeconds: 60
-  ## provisioner resource requests and limits
-  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
-  resources: {}
-  # limits:
-  #   cpu: 1000m
-  #   memory: 2Gi
-  # requests:
-  #   cpu: 500m
-  #   memory: 1Gi
-
-# If you want to enable local pv as a dependency chart then set
-# `localprovisioner.enabled: false` and enable it as dependency chart.
-# If you are using custom configuration then update those configuration
-# under `localpv-provisioner` key.
-localprovisioner:
-  enabled: true
-  image: "openebs/provisioner-localpv"
-  imageTag: "3.3.0"
-  replicas: 1
-  enableLeaderElection: true
-  # These fields are deprecated. Please use the fields (see below)
-  # - deviceClass.enabled
-  # - hostpathClass.enabled
-  enableDeviceClass: true
-  enableHostpathClass: true
-  # This sets default directory used by the provisioner to provision
-  # hostpath volumes.
-  basePath: "/var/openebs/local"
-  # This sets the number of times the provisioner should try
-  # with a polling interval of 5 seconds, to get the Blockdevice
-  # Name from a BlockDeviceClaim, before the BlockDeviceClaim
-  # is deleted. E.g. 12 * 5 seconds = 60 seconds timeout
-  waitForBDBindTimeoutRetryCount: "12"
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  healthCheck:
-    initialDelaySeconds: 30
-    periodSeconds: 60
-  ## localprovisioner resource requests and limits
-  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
-  resources: {}
-  # limits:
-  #   cpu: 1000m
-  #   memory: 2Gi
-  # requests:
-  #   cpu: 500m
-  #   memory: 1Gi
-
-  deviceClass:
-    # Name of default device StorageClass.
-    name: openebs-device
-    # If true, enables creation of the openebs-device StorageClass
-    enabled: true
-    # Available reclaim policies: Delete/Retain, defaults: Delete.
-    reclaimPolicy: Delete
-    # If true, sets the openebs-device StorageClass as the default StorageClass
-    isDefaultClass: false
-    # Custom node affinity label(s) for example "openebs.io/node-affinity-value"
-    # that will be used instead of hostnames
-    # This helps in cases where the hostname changes when the node is removed and
-    # added back with the disks still intact.
-    # Example:
-    # nodeAffinityLabels:
-    #   - "openebs.io/node-affinity-key-1"
-    #   - "openebs.io/node-affinity-key-2"
-    nodeAffinityLabels: []
-    # Sets the filesystem to be written to the blockdevice before
-    # mounting (filesystem volumes)
-    # This is only usable if the selected BlockDevice does not already
-    # have a filesystem
-    # Valid values: "ext4", "xfs"
-    fsType: "ext4"
-    # Label block devices in the cluster that you would like the openEBS localPV
-    # Provisioner to pick up those specific block devices available on the node.
-    # Set the label key and value as shown in the example below.
-    #
-    # To read more: https://github.com/openebs/dynamic-localpv-provisioner/blob/develop/docs/tutorials/device/blockdevicetag.md
-    #
-    # Example:
-    # blockDeviceSelectors:
-    #   ndm.io/driveType: "SSD"
-    #   ndm.io/fsType: "none"
-    blockDeviceSelectors: {}
-
-  hostpathClass:
-    # Name of the default hostpath StorageClass
-    name: openebs-hostpath
-    # If true, enables creation of the openebs-hostpath StorageClass
-    enabled: true
-    # Available reclaim policies: Delete/Retain, defaults: Delete.
-    reclaimPolicy: Delete
-    # If true, sets the openebs-hostpath StorageClass as the default StorageClass
-    isDefaultClass: false
-    # Path on the host where local volumes of this storage class are mounted under.
-    # NOTE: If not specified, this defaults to the value of localprovisioner.basePath.
-    basePath: ""
-    # Custom node affinity label(s) for example "openebs.io/node-affinity-value"
-    # that will be used instead of hostnames
-    # This helps in cases where the hostname changes when the node is removed and
-    # added back with the disks still intact.
-    # Example:
-    # nodeAffinityLabels:
-    #   - "openebs.io/node-affinity-key-1"
-    #   - "openebs.io/node-affinity-key-2"
-    nodeAffinityLabels: []
-    # Prerequisite: XFS Quota requires an XFS filesystem mounted with
-    # the 'pquota' or 'prjquota' mount option.
-    xfsQuota:
-      # If true, enables XFS project quota
-      enabled: false
-      # Detailed configuration options for XFS project quota.
-      # If XFS Quota is enabled with the default values, the usage limit
-      # is set at the storage capacity specified in the PVC.
-      softLimitGrace: "0%"
-      hardLimitGrace: "0%"
-    # Prerequisite: EXT4 Quota requires an EXT4 filesystem mounted with
-    # the 'prjquota' mount option.
-    ext4Quota:
-      # If true, enables XFS project quota
-      enabled: false
-      # Detailed configuration options for EXT4 project quota.
-      # If EXT4 Quota is enabled with the default values, the usage limit
-      # is set at the storage capacity specified in the PVC.
- softLimitGrace: "0%" - hardLimitGrace: "0%" - -snapshotOperator: - enabled: true - controller: - image: "openebs/snapshot-controller" - imageTag: "2.12.2" - ## snapshot controller resource requests and limits - ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: {} - # limits: - # cpu: 1000m - # memory: 2Gi - # requests: - # cpu: 500m - # memory: 1Gi - provisioner: - image: "openebs/snapshot-provisioner" - imageTag: "2.12.2" - ## snapshot provisioner resource requests and limits - ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: {} - # limits: - # cpu: 1000m - # memory: 2Gi - # requests: - # cpu: 500m - # memory: 1Gi - replicas: 1 - enableLeaderElection: true - upgradeStrategy: "Recreate" - nodeSelector: {} - tolerations: [] - affinity: {} - healthCheck: - initialDelaySeconds: 30 - periodSeconds: 60 - -# If you want to enable openebs as a dependency chart then set `ndm.enabled: false`, -# `ndmOperator.enabled: false` and enable it as dependency chart. If you are using -# custom configuration then update those configuration under `openebs-ndm` key. -ndm: - enabled: true - image: "openebs/node-disk-manager" - imageTag: "2.0.0" - sparse: - path: "/var/openebs/sparse" - size: "10737418240" - count: "0" - filters: - enableOsDiskExcludeFilter: true - osDiskExcludePaths: "/,/etc/hosts,/boot" - enableVendorFilter: true - excludeVendors: "CLOUDBYT,OpenEBS" - enablePathFilter: true - includePaths: "" - excludePaths: "/dev/loop,/dev/fd0,/dev/sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd,/dev/zd" - probes: - enableSeachest: false - nodeSelector: {} - tolerations: [] - healthCheck: - initialDelaySeconds: 30 - periodSeconds: 60 - ## ndm resource requests and limits - ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: {} - # limits: - # cpu: 1000m - # memory: 2Gi - # requests: - # cpu: 500m - # memory: 1Gi - -# If you want to enable openebs as a dependency chart then set `ndm.enabled: false`, -# `ndmOperator.enabled: false` and enable it as dependency chart. If you are using -# custom configuration then update those configuration under `openebs-ndm` key. -ndmOperator: - enabled: true - image: "openebs/node-disk-operator" - imageTag: "2.0.0" - replicas: 1 - upgradeStrategy: Recreate - nodeSelector: {} - tolerations: [] - healthCheck: - initialDelaySeconds: 15 - periodSeconds: 20 - readinessCheck: - initialDelaySeconds: 5 - periodSeconds: 10 - ## ndmOperator resource requests and limits - ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: {} - # limits: - # cpu: 1000m - # memory: 2Gi - # requests: - # cpu: 500m - # memory: 1Gi - -ndmExporter: - enabled: false - image: - registry: - repository: openebs/node-disk-exporter - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: 2.0.0 - nodeExporter: - name: ndm-node-exporter - podLabels: - name: openebs-ndm-node-exporter - # The TCP port number used for exposing ndm-node-exporter metrics. - # If not set, service will not be created to expose metrics endpoint to serviceMonitor - # and listen-port flag will not be set and container port will be empty. - metricsPort: 9101 - clusterExporter: - name: ndm-cluster-exporter - podLabels: - name: openebs-ndm-cluster-exporter - # The TCP port number used for exposing ndm-cluster-exporter metrics. 
-    # If not set, service will not be created to expose metrics endpoint to serviceMonitor
-    # and listen-port flag will not be set and container port will be empty.
-    metricsPort: 9100
-
-webhook:
-  enabled: true
-  image: "openebs/admission-server"
-  imageTag: "2.12.2"
-  failurePolicy: "Fail"
-  replicas: 1
-  healthCheck:
-    initialDelaySeconds: 30
-    periodSeconds: 60
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  hostNetwork: false
-  ## admission-server resource requests and limits
-  ## Reference: http://kubernetes.io/docs/user-guide/compute-resources/
-  resources: {}
-  # limits:
-  #   cpu: 500m
-  #   memory: 1Gi
-  # requests:
-  #   cpu: 250m
-  #   memory: 500Mi
-
-# If you are migrating from 2.x to 3.x and if you are using custom values
-# then put this configuration under `localpv-provisioner` and `openebs-ndm` key.
-helper:
-  image: "openebs/linux-utils"
-  imageTag: "3.3.0"
-
-# These are ndm related configuration. If you want to enable openebs as a dependency
-# chart then set `ndm.enabled: false`, `ndmOperator.enabled: false` and enable it as
-# dependency chart. If you are using custom configuration then update those configuration
-# under `openebs-ndm` key.
-featureGates:
-  enabled: true
-  GPTBasedUUID:
-    enabled: true
-    featureGateFlag: "GPTBasedUUID"
-  APIService:
-    enabled: false
-    featureGateFlag: "APIService"
-    address: "0.0.0.0:9115"
-  UseOSDisk:
-    enabled: false
-    featureGateFlag: "UseOSDisk"
-  ChangeDetection:
-    enabled: false
-    featureGateFlag: "ChangeDetection"
-  PartitionTableUUID:
-    enabled: false
-    featureGateFlag: "PartitionTableUUID"
-
-crd:
-  enableInstall: true
-
-# If you are migrating from 2.x to 3.x and if you are using custom values
-# then put these configuration under `cstor` key.
-policies:
-  monitoring:
-    enabled: true
-    image: "openebs/m-exporter"
-    imageTag: "2.12.2"
-
-analytics:
-  enabled: true
-  # Specify in hours the duration after which a ping event needs to be sent.
-  pingInterval: "24h"
-
-jiva:
-
-  # non csi configuration
-  image: "openebs/jiva"
-  imageTag: "2.12.2"
-  replicas: 3
-  defaultStoragePath: "/var/openebs"
-
-  # jiva csi driver configuration
-  # do not enable or configure any sub dependency here
-  # only jiva csi related settings can be added here
-  # ref - https://openebs.github.io/jiva-operator
-
-  # jiva chart dependency tree is here
-
-  # jiva
-  # | - localpv-provisioner
-  # | | - openebs-ndm
-
-  # Enable localpv-provisioner and openebs-ndm as root dependency not as
-  # sub dependency.
-  # openebs
-  # | - jiva(enable)
-  # | | - localpv-provisioner(disable)
-  # | | | - openebs-ndm(disable)
-  # | - localpv-provisioner(enable)
-  # | - openebs-ndm(enable)
-
-  enabled: false
-  openebsLocalpv:
-    enabled: false
-  localpv-provisioner:
-    openebsNDM:
-      enabled: false
-
-  # Sample configuration if you want to configure jiva csi driver with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/jiva-operator
-
-# rbac:
-#   create: true
-#   pspEnabled: false
-#
-# jivaOperator:
-#   controller:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/jiva
-#       tag: 3.3.0
-#   replica:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/jiva
-#       tag: 3.3.0
-#   image:
-#     registry: quay.io/
-#     repository: openebs/jiva-operator
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-#
-# jivaCSIPlugin:
-#   remount: "true"
-#   image:
-#     registry: quay.io/
-#     repository: openebs/jiva-csi
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-
-cstor:
-
-  # non csi configuration
-  pool:
-    image: "openebs/cstor-pool"
-    imageTag: "2.12.2"
-  poolMgmt:
-    image: "openebs/cstor-pool-mgmt"
-    imageTag: "2.12.2"
-  target:
-    image: "openebs/cstor-istgt"
-    imageTag: "2.12.2"
-  volumeMgmt:
-    image: "openebs/cstor-volume-mgmt"
-    imageTag: "2.12.2"
-
-  # cstor csi driver configuration
-  # do not enable or configure any sub dependency here
-  # only cstor csi related settings can be added here
-  # ref - https://openebs.github.io/cstor-operators
-
-  # cstor chart dependency tree is here
-
-  # cstor
-  # | - openebs-ndm
-
-  # Enable openebs-ndm as root dependency not as sub dependency.
-  # openebs
-  # | - cstor(enable)
-  # | | - openebs-ndm(disable)
-  # | - openebs-ndm(enable)
-  enabled: false
-  openebsNDM:
-    enabled: false
-
-  # Sample configuration if you want to configure cstor csi driver with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/cstor-operators
-
-# imagePullSecrets: []
-#
-# rbac:
-#   create: true
-#   pspEnabled: false
-#
-# cspcOperator:
-#   poolManager:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/cstor-pool-manager
-#       tag: 3.3.0
-#   cstorPool:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/cstor-pool
-#       tag: 3.3.0
-#   cstorPoolExporter:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/m-exporter
-#       tag: 3.3.0
-#   image:
-#     registry: quay.io/
-#     repository: openebs/cspc-operator
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-#
-# cvcOperator:
-#   target:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/cstor-istgt
-#       tag: 3.3.0
-#   volumeMgmt:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/cstor-volume-manager
-#       tag: 3.3.0
-#   volumeExporter:
-#     image:
-#       registry: quay.io/
-#       repository: openebs/m-exporter
-#       tag: 3.3.0
-#   image:
-#     registry: quay.io/
-#     repository: openebs/cvc-operator
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-#
-# cstorCSIPlugin:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/cstor-csi-driver
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-#
-# admissionServer:
-#   componentName: cstor-admission-webhook
-#   image:
-#     registry: quay.io/
-#     repository: openebs/cstor-webhook
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-
-# ndm configuration goes here
-# https://openebs.github.io/node-disk-manager
-openebs-ndm:
-  enabled: false
-
-  # Sample configuration if you want to configure openebs ndm with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/node-disk-manager
-
-# imagePullSecrets: []
-#
-# ndm:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/node-disk-manager
-#     pullPolicy: IfNotPresent
-#     tag: 2.0.0
-#   sparse:
-#     path: "/var/openebs/sparse"
-#     size: "10737418240"
-#     count: "0"
-#   filters:
-#     enableOsDiskExcludeFilter: true
-#     osDiskExcludePaths: "/,/etc/hosts,/boot"
-#     enableVendorFilter: true
-#     excludeVendors: "CLOUDBYT,OpenEBS"
-#     enablePathFilter: true
-#     includePaths: ""
-#     excludePaths: "loop,fd0,sr0,/dev/ram,/dev/dm-,/dev/md,/dev/rbd,/dev/zd"
-#   probes:
-#     enableSeachest: false
-#     enableUdevProbe: true
-#     enableSmartProbe: true
-#
-# ndmOperator:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/node-disk-operator
-#     pullPolicy: IfNotPresent
-#     tag: 2.0.0
-#
-# helperPod:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/linux-utils
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-#
-# featureGates:
-#   enabled: true
-#   GPTBasedUUID:
-#     enabled: true
-#     featureGateFlag: "GPTBasedUUID"
-#   APIService:
-#     enabled: false
-#     featureGateFlag: "APIService"
-#     address: "0.0.0.0:9115"
-#   UseOSDisk:
-#     enabled: false
-#     featureGateFlag: "UseOSDisk"
-#   ChangeDetection:
-#     enabled: false
-#     featureGateFlag: "ChangeDetection"
-#
-# varDirectoryPath:
-#   baseDir: "/var/openebs"
-
-  # local pv provisioner configuration goes here
-  # do not enable or configure any sub dependency here
-  # ref - https://openebs.github.io/dynamic-localpv-provisioner
-
-  # local pv chart dependency tree is here
-
-  # localpv-provisioner
-  # | - openebs-ndm
-
-  # Enable openebs-ndm as root dependency not as sub dependency.
-  # openebs
-  # | - localpv-provisioner(enable)
-  # | | - openebs-ndm(disable)
-  # | - openebs-ndm(enable)
-localpv-provisioner:
-  enabled: false
-  openebsNDM:
-    enabled: false
-
-  # Sample configuration if you want to configure openebs locapv with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/dynamic-localpv-provisioner
-
-# imagePullSecrets: []
-#
-# rbac:
-#   create: true
-#   pspEnabled: false
-#
-# localpv:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/provisioner-localpv
-#     tag: 3.3.0
-#     pullPolicy: IfNotPresent
-#   healthCheck:
-#     initialDelaySeconds: 30
-#     periodSeconds: 60
-#   replicas: 1
-#   enableLeaderElection: true
-#   basePath: "/var/openebs/local"
-#
-# helperPod:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/linux-utils
-#     pullPolicy: IfNotPresent
-#     tag: 3.3.0
-
-# zfs local pv configuration goes here
-# ref - https://openebs.github.io/zfs-localpv
-zfs-localpv:
-  enabled: false
-
-  # Sample configuration if you want to configure zfs locapv with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/zfs-localpv
-
-# imagePullSecrets: []
-#
-# rbac:
-#   pspEnabled: false
-#
-# zfsPlugin:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/zfs-driver
-#     pullPolicy: IfNotPresent
-#     tag: 2.1.0
-
-# lvm local pv configuration goes here
-# ref - https://openebs.github.io/lvm-localpv
-lvm-localpv:
-  enabled: false
-
-  # Sample configuration if you want to configure lvm localpv with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/lvm-localpv
-
-# imagePullSecrets: []
-#
-# rbac:
-#   pspEnabled: false
-#
-# lvmPlugin:
-#   image:
-#     registry: quay.io/
-#     repository: openebs/lvm-driver
-#     pullPolicy: IfNotPresent
-#     tag: 1.0.0
-
-# openebs nfs provisioner configuration goes here
-# ref - https://openebs.github.io/dynamic-nfs-provisioner
-nfs-provisioner:
-  enabled: false
-
-  # Sample configuration if you want to configure lvm localpv with custom values.
-  # This is a small part of the full configuration. Full configuration available
-  # here - https://openebs.github.io/dynamic-nfs-provisioner
-
-# imagePullSecrets: []
-#
-# rbac:
-#   pspEnabled: false
-#
-# nfsProvisioner:
-#   image:
-#     registry:
-#     repository: openebs/provisioner-nfs
-#     tag: 0.9.0
-#     pullPolicy: IfNotPresent
-#   enableLeaderElection: "true"
-#   nfsServerAlpineImage:
-#     registry:
-#     repository: openebs/nfs-server-alpine
-#     tag: 0.9.0
-
-cleanup:
-  image:
-    # Make sure that registry name end with a '/'.
-    # For example : quay.io/ is a correct value here and quay.io is incorrect
-    registry:
-    repository: bitnami/kubectl
-    tag:
-  imagePullSecrets: []
-  # - name: image-pull-secret
diff --git a/zipkin/zipkin.yaml b/zipkin/zipkin.yaml
index a1826da..511e81a 100644
--- a/zipkin/zipkin.yaml
+++ b/zipkin/zipkin.yaml
@@ -45,7 +45,7 @@ spec:
       # - name: ES_HOSTS
       #   value: 'elasticsearch.be-base:9200'
       nodeSelector:
-        app.kubernetes.io/name: tx-master01
+        kubernetes.io/hostname: tx-master01
       serviceAccount: default
      affinity: {}
      initContainers: []
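
Post-diff note: two quick smoke tests for the changes above, sketched under the assumptions that the chart lives at charts/clash, that its values.yaml defines the ingress.enabled flag the new yacd.yaml template references, and that k3s runs as a systemd service with its default paths.

# Render the clash chart locally and exercise the new template's ingress branch:
helm lint charts/clash
helm template charts/clash --set ingress.enabled=true | kubectl apply --dry-run=client -f -

# Apply the updated registry mirrors on the k3s node; k3s only reads
# /etc/rancher/k3s/registries.yaml at startup, so restart the service:
sudo cp k3s/registries.yaml /etc/rancher/k3s/registries.yaml
sudo systemctl restart k3s

# The effective mirror/rewrite rules land in containerd's generated config:
grep -A 4 mirrors /var/lib/rancher/k3s/agent/etc/containerd/config.toml

# Pulling one of the rewritten images should now resolve via the Aliyun mirror:
sudo k3s crictl pull k8s.gcr.io/sig-storage/csi-attacher:v3.1.0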