From dc9f5002253ca0a4fd72ed658a770d8375e8398f Mon Sep 17 00:00:00 2001
From: Ram <1331672+chukka@users.noreply.github.com>
Date: Tue, 17 Oct 2023 17:49:20 +0530
Subject: [PATCH 1/4] [artifactory] 7.68.14 release

---
 stable/artifactory-cpp-ce/CHANGELOG.md | 2 +-
 stable/artifactory-cpp-ce/Chart.yaml   | 6 +++---
 stable/artifactory-ha/CHANGELOG.md     | 2 +-
 stable/artifactory-ha/Chart.yaml       | 4 ++--
 stable/artifactory-jcr/CHANGELOG.md    | 2 +-
 stable/artifactory-jcr/Chart.yaml      | 6 +++---
 stable/artifactory-oss/CHANGELOG.md    | 2 +-
 stable/artifactory-oss/Chart.yaml      | 6 +++---
 stable/artifactory/CHANGELOG.md        | 2 +-
 stable/artifactory/Chart.yaml          | 4 ++--
 10 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/stable/artifactory-cpp-ce/CHANGELOG.md b/stable/artifactory-cpp-ce/CHANGELOG.md
index b14f80080..e4fc03e92 100644
--- a/stable/artifactory-cpp-ce/CHANGELOG.md
+++ b/stable/artifactory-cpp-ce/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Artifactory CE for C++ Chart Changelog
 All changes to this chart will be documented in this file

-## [107.68.11] - Jul 20, 2023
+## [107.68.14] - Jul 20, 2023
 * Disabled federation services when splitServicesToContainers=true

 ## [107.45.0] - Aug 25, 2022
diff --git a/stable/artifactory-cpp-ce/Chart.yaml b/stable/artifactory-cpp-ce/Chart.yaml
index f3bf78a48..4126f909c 100644
--- a/stable/artifactory-cpp-ce/Chart.yaml
+++ b/stable/artifactory-cpp-ce/Chart.yaml
@@ -1,9 +1,9 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - name: artifactory
   repository: file://charts/artifactory
-  version: 107.68.11
+  version: 107.68.14
 description: JFrog Artifactory CE for C++
 home: https://www.jfrog.com/artifactory/
 icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-cpp-ce/logo/conan.png
@@ -21,4 +21,4 @@ name: artifactory-cpp-ce
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 107.68.11
+version: 107.68.14
diff --git a/stable/artifactory-ha/CHANGELOG.md b/stable/artifactory-ha/CHANGELOG.md
index 7123f652a..8302a8fef 100644
--- a/stable/artifactory-ha/CHANGELOG.md
+++ b/stable/artifactory-ha/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Artifactory-ha Chart Changelog
 All changes to this chart will be documented in this file

-## [107.68.11] - Sep 20, 2023
+## [107.68.14] - Sep 20, 2023
 * Fixed rtfs context
 * Fixed - Metadata service does not respect customVolumeMounts for DB CAs [GH-1815](https://github.com/jfrog/charts/issues/1815)

diff --git a/stable/artifactory-ha/Chart.yaml b/stable/artifactory-ha/Chart.yaml
index d41409541..adaf5e9d6 100644
--- a/stable/artifactory-ha/Chart.yaml
+++ b/stable/artifactory-ha/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - condition: postgresql.enabled
   name: postgresql
@@ -21,4 +21,4 @@ name: artifactory-ha
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 107.68.11
+version: 107.68.14
diff --git a/stable/artifactory-jcr/CHANGELOG.md b/stable/artifactory-jcr/CHANGELOG.md
index a63810854..a57b8d595 100644
--- a/stable/artifactory-jcr/CHANGELOG.md
+++ b/stable/artifactory-jcr/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Container Registry Chart Changelog
 All changes to this chart will be documented in this file.
-## [107.68.11] - Jul 20, 2023
+## [107.68.14] - Jul 20, 2023
 * Disabled federation services when splitServicesToContainers=true

 ## [107.45.0] - Aug 25, 2022
diff --git a/stable/artifactory-jcr/Chart.yaml b/stable/artifactory-jcr/Chart.yaml
index 89ca6bbee..b3825fb36 100644
--- a/stable/artifactory-jcr/Chart.yaml
+++ b/stable/artifactory-jcr/Chart.yaml
@@ -1,9 +1,9 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - name: artifactory
   repository: file://charts/artifactory
-  version: 107.68.11
+  version: 107.68.14
 description: JFrog Container Registry
 home: https://jfrog.com/container-registry/
 icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory-jcr/logo/jcr-logo.png
@@ -22,4 +22,4 @@ name: artifactory-jcr
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 107.68.11
+version: 107.68.14
diff --git a/stable/artifactory-oss/CHANGELOG.md b/stable/artifactory-oss/CHANGELOG.md
index 9d1dcf427..032b4079d 100644
--- a/stable/artifactory-oss/CHANGELOG.md
+++ b/stable/artifactory-oss/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Artifactory OSS Chart Changelog
 All changes to this chart will be documented in this file

-## [107.68.11] - Jul 20, 2023
+## [107.68.14] - Jul 20, 2023
 * Disabled federation services when splitServicesToContainers=true

 ## [107.45.0] - Aug 25, 2022
diff --git a/stable/artifactory-oss/Chart.yaml b/stable/artifactory-oss/Chart.yaml
index bbc15ea3f..5e33d8927 100644
--- a/stable/artifactory-oss/Chart.yaml
+++ b/stable/artifactory-oss/Chart.yaml
@@ -1,9 +1,9 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - name: artifactory
   repository: file://charts/artifactory
-  version: 107.68.11
+  version: 107.68.14
 description: JFrog Artifactory OSS
 home: https://www.jfrog.com/artifactory/
 icon: https://raw.githubusercontent.com/jfrog/charts/master/stable/artifactory/logo/artifactory-logo.png
@@ -20,4 +20,4 @@ name: artifactory-oss
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 107.68.11
+version: 107.68.14
diff --git a/stable/artifactory/CHANGELOG.md b/stable/artifactory/CHANGELOG.md
index e78c9dfad..fde64bcd9 100644
--- a/stable/artifactory/CHANGELOG.md
+++ b/stable/artifactory/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Artifactory Chart Changelog
 All changes to this chart will be documented in this file.
-## [107.68.11] - Sep 20, 2023
+## [107.68.14] - Sep 20, 2023
 * Fixed rtfs context
 * Fixed - Metadata service does not respect customVolumeMounts for DB CAs [GH-1815](https://github.com/jfrog/charts/issues/1815)

diff --git a/stable/artifactory/Chart.yaml b/stable/artifactory/Chart.yaml
index 62d16e900..db6195f4c 100644
--- a/stable/artifactory/Chart.yaml
+++ b/stable/artifactory/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - condition: postgresql.enabled
   name: postgresql
@@ -21,4 +21,4 @@ name: artifactory
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 107.68.11
+version: 107.68.14

From f3db985189489e10d1501d94099fdbb5c78351d6 Mon Sep 17 00:00:00 2001
From: Ram <1331672+chukka@users.noreply.github.com>
Date: Tue, 17 Oct 2023 17:49:50 +0530
Subject: [PATCH 2/4] [distribution] 2.20.3 release

---
 stable/distribution/CHANGELOG.md | 2 +-
 stable/distribution/Chart.yaml   | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/stable/distribution/CHANGELOG.md b/stable/distribution/CHANGELOG.md
index c92103371..5daf66593 100644
--- a/stable/distribution/CHANGELOG.md
+++ b/stable/distribution/CHANGELOG.md
@@ -1,7 +1,7 @@
 # JFrog Distribution Chart Changelog
 All changes to this project chart be documented in this file.

-## [102.20.2] - Aug 29, 2023
+## [102.20.3] - Aug 29, 2023
 * Updated redis version tag to `7.2.0-debian-11-r2`
 * Enabled `unifiedSecretInstallation` by default
diff --git a/stable/distribution/Chart.yaml b/stable/distribution/Chart.yaml
index 0961e369e..7a425b364 100644
--- a/stable/distribution/Chart.yaml
+++ b/stable/distribution/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 2.20.2
+appVersion: 2.20.3
 dependencies:
 - condition: postgresql.enabled
   name: postgresql
@@ -19,4 +19,4 @@ name: distribution
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 102.20.2
+version: 102.20.3

From 5264718150119775b2a388cde0800beb02deee14 Mon Sep 17 00:00:00 2001
From: Ram <1331672+chukka@users.noreply.github.com>
Date: Tue, 17 Oct 2023 17:50:23 +0530
Subject: [PATCH 3/4] [xray] 3.83.9 release

---
 stable/xray/CHANGELOG.md                  |   5 +-
 stable/xray/Chart.yaml                    |   4 +-
 stable/xray/logo/xray-logo.png            | Bin 11734 -> 11735 bytes
 stable/xray/templates/_helpers.tpl        |   6 +-
 stable/xray/templates/migration-hook.yaml |   5 +-
 stable/xray/values.yaml                   | 337 ++++++++++++++--------
 6 files changed, 230 insertions(+), 127 deletions(-)

diff --git a/stable/xray/CHANGELOG.md b/stable/xray/CHANGELOG.md
index 51c0de336..3ecc316c0 100644
--- a/stable/xray/CHANGELOG.md
+++ b/stable/xray/CHANGELOG.md
@@ -1,7 +1,10 @@
 # JFrog Xray Chart Changelog
 All changes to this chart will be documented in this file.
-## [103.82.11] - Jul 16, 2023 +## [103.83.9] - Sep 15,2023 +* Fixed - Support to configure privateRegistry for pre-upgrade-hook + +## [103.80.0] - Jul 16, 2023 * Added `podSecurityContext.enabled` and `containerSecurityContext.enabled` to support openshift ## [103.79.0] - Jul 3, 2023 diff --git a/stable/xray/Chart.yaml b/stable/xray/Chart.yaml index 278b42de8..7f4896f86 100644 --- a/stable/xray/Chart.yaml +++ b/stable/xray/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 3.82.11 +appVersion: 3.83.9 dependencies: - condition: postgresql.enabled name: postgresql @@ -24,4 +24,4 @@ name: xray sources: - https://github.com/jfrog/charts type: application -version: 103.82.11 +version: 103.83.9 diff --git a/stable/xray/logo/xray-logo.png b/stable/xray/logo/xray-logo.png index 200a562d99631b02e4afdd06baebaffd063bf9dc..23ed35bf5286f568c3e8b671a6e9c32fb30272d8 100644 GIT binary patch delta 277 zcmcZ>eLb3|Gr-TCmrII^fq{W{BR3->BkyEh#>b4jn?EuNgE*|re}Eh&mRu00fE6SY z!gdVE*(}YG1`=!Hj0BQ*I6HwtXDt42}os0nt=>iD0Kx$mPxM#k}5KdK#65C9Ux8m3Va}WJH-<~4!e>yh~up6 z3nV8h-vpA`DjEX3D{s}6H@jXtqp(|R>9%Ix$%|E7fl@D3&I0vpSLFff*?dIJ0i=~x fqZvqcYG?z6wrXgw06F(`(t)I#t}>9!(#-(?{_9H@ delta 276 zcmcZ}eJz^1Gr-TCONxtufq`oy4NIZfGNM%c!fecwBbp=S4ORoizsxpm0iRCgKAWa4ed?0yy#S=gdhmtjjf zBqu4~1d=%_8UkD^Z`GAIyIwn^uv=^Cwq~x$OH^EeQm<6b0`=@r1$v5W^HDVikXAO0 eW+2(6p$!z;rlG+C:8082" username: "" password: "" + systemYaml: | configVersion: 1 router: @@ -219,6 +238,7 @@ xray: container: registry: {{ include "xray.getRegistryByService" (list . "exposures") }} image: {{ .Values.exposures.image.repository }} + # Sidecar containers for tailing Xray logs loggers: [] # - router-request.log @@ -248,6 +268,7 @@ xray: # limits: # memory: "128Mi" # cpu: "50m" + ## Role Based Access ## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ rbac: @@ -255,63 +276,65 @@ rbac: role: ## Rules to create. 
It follows the role specification rules: - - apiGroups: - - '' - resources: - - services - - endpoints - - pods - - pods/log - - events - verbs: - - get - - watch - - list - - apiGroups: - - 'batch' - resources: - - jobs - verbs: - - get - - watch - - list - - create - - delete + - apiGroups: + - '' + resources: + - services + - endpoints + - pods + - pods/log + - events + verbs: + - get + - watch + - list + - apiGroups: + - 'batch' + resources: + - jobs + verbs: + - get + - watch + - list + - create + - delete + networkpolicy: [] -# Allows all ingress and egress -# - name: xray -# podSelector: -# matchLabels: -# app: xray -# egress: -# - {} -# ingress: -# - {} -# Uncomment to allow only xray pods to communicate with postgresql (if postgresql.enabled is true) -# - name: postgres -# podSelector: -# matchLabels: -# app.kubernetes.io/name: postgresql -# ingress: -# - from: -# - podSelector: -# matchLabels: -# app: xray -# Uncomment to allow only xray pods to communicate with rabbitmq (if rabbitmq.enabled is true) -# - name: rabbitmq -# podSelector: -# matchLabels: -# app.kubernetes.io/name: rabbitmq -# ingress: -# - from: -# - podSelector: -# matchLabels: -# app: xray + # Allows all ingress and egress + # - name: xray + # podSelector: + # matchLabels: + # app: xray + # egress: + # - {} + # ingress: + # - {} + # Uncomment to allow only xray pods to communicate with postgresql (if postgresql.enabled is true) + # - name: postgres + # podSelector: + # matchLabels: + # app.kubernetes.io/name: postgresql + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: xray + # Uncomment to allow only xray pods to communicate with rabbitmq (if rabbitmq.enabled is true) + # - name: rabbitmq + # podSelector: + # matchLabels: + # app.kubernetes.io/name: rabbitmq + # ingress: + # - from: + # - podSelector: + # matchLabels: + # app: xray ## Affinity rules nodeSelector: {} affinity: {} tolerations: [] + ## Apply horizontal pod auto scaling on Xray pods ## Ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ autoscaling: @@ -350,11 +373,14 @@ autoscaling: value: "100" - name: impactAnalysis value: "100" + + logger: image: registry: releases-docker.jfrog.io repository: ubi9/ubi-minimal - tag: 9.1.0.1793 + tag: 9.2.717 + ## Service Account ## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/ ## @@ -365,10 +391,12 @@ serviceAccount: name: ## Explicitly mounts the API credentials for the Service Account automountServiceAccountToken: true + ## By default, the Xray StatefulSet is created with a securityContext that sets the `runAsUser` and the `fsGroup` to the `common.xrayUserId` value. 
## If you want to disable the pod securityContext for the Xray StatefulSet, set this tag to false podSecurityContext: enabled: true + ## @param containerSecurityContext.enabled enable the container's Security Context containerSecurityContext: enabled: true @@ -377,6 +405,8 @@ containerSecurityContext: capabilities: drop: - NET_RAW + + # PostgreSQL ## Configuration values for the postgresql dependency ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md @@ -414,6 +444,7 @@ postgresql: # limits: # memory: "2Gi" # cpu: "1" + ## If NOT using the PostgreSQL in this chart (postgresql.enabled=false), database: type: "postgresql" @@ -439,6 +470,7 @@ database: # actualUsername: # name: "xray-database-creds" # key: "db-actualUsername" + # RabbitMQ ## Configuration values for the rabbitmq dependency ## ref: https://github.com/bitnami/charts/blob/master/bitnami/rabbitmq/README.md @@ -455,6 +487,7 @@ rabbitmq: repository: bitnami/rabbitmq tag: 3.11.10-debian-11-r5 extraPlugins: "rabbitmq_management" + auth: ## Enable encryption to rabbitmq ## ref: https://www.rabbitmq.com/ssl.html @@ -485,9 +518,11 @@ rabbitmq: existingSecret: "" key: "" password: "" + caCertificate: serverCertificate: serverKey: + # Rabbitmq tls-certs secret name, as by default it will have {{ .Release.Name }}-rabbitmq-certs. existingSecret: existingSecretFullChain: false @@ -610,6 +645,10 @@ rabbitmq: migration: ## Migration is required to be performed only once hence this option can be disabled once the feature flags are enabled in rabbitmq. enabled: true + image: + registry: releases-docker.jfrog.io + repository: bitnami/kubectl + tag: 1.24.12 ## Service account for the pre-upgrade hook to perform rabbitmq migration serviceAccount: create: true @@ -632,9 +671,12 @@ rabbitmq: - create - get - list + + # This is automatically set based on rabbitmqTLS enabled flag. extraConfiguration: |- management.listener.ssl = {{ template "xray.rabbitmq.isTlsEnabled" . }} + # Common Xray settings common: ## Note that by default we use appVersion to get image tag @@ -645,49 +687,52 @@ common: # Spread Xray pods evenly across your nodes or some other topology topologySpreadConstraints: [] - # - maxSkew: 1 - # topologyKey: kubernetes.io/hostname - # whenUnsatisfiable: DoNotSchedule - # labelSelector: - # matchLabels: - # app: '{{ template "xray.name" . }}' - # role: '{{ template "xray.name" . }}' - # release: "{{ .Release.Name }}" + # - maxSkew: 1 + # topologyKey: kubernetes.io/hostname + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: '{{ template "xray.name" . }}' + # role: '{{ template "xray.name" . }}' + # release: "{{ .Release.Name }}" # Xray configuration to be written to xray_config.yaml xrayConfig: stdOutEnabled: true indexAllBuilds: false support-router: true + # Use rabbitmq connection config from environment variables. # If false, then connection details should be set directly in system.yaml (systemYaml section). # When using external rabbitmq, set this to false rabbitmq: connectionConfigFromEnvironment: true + ## Custom command to run before Xray startup. Runs BEFORE any microservice-specific preStartCommand preStartCommand: + ## Add custom volumes # If .Values.xray.unifiedSecretInstallation is true then secret name should be '{{ template "xray.name" . }}-unified-secret'. 
- customVolumes: "" + customVolumes: | # - name: custom-script # configMap: # name: custom-script ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh # Add any list of configmaps to Xray - configMaps: "" + configMaps: | # posthook-start.sh: |- # echo "This is a post start script" # posthook-end.sh: |- # echo "This is a post end script" ## Add custom init containers execution before predefined init containers - customInitContainersBegin: "" + customInitContainersBegin: | # - name: "custom-setup" # image: "{{ .Values.initContainerImage }}" # imagePullPolicy: "{{ .Values.imagePullPolicy }}" @@ -703,7 +748,7 @@ common: # name: data-volume ## Add custom init containers execution after predefined init containers - customInitContainers: "" + customInitContainers: | # - name: "custom-systemyaml-setup" # image: "{{ .Values.initContainerImage }}" # imagePullPolicy: "{{ .Values.imagePullPolicy }}" @@ -721,7 +766,7 @@ common: ## Add custom sidecar containers # - The provided example uses a custom volume (customVolumes) # - The provided example shows running container as root (id 0) - customSidecarContainers: "" + customSidecarContainers: | # - name: "sidecar-list-etc" # image: "{{ .Values.initContainerImage }}" # imagePullPolicy: "{{ .Values.imagePullPolicy }}" @@ -776,6 +821,7 @@ common: ## GKE, AWS & OpenStack) ## # storageClass: "-" + ## @param extraEnvVars Extra environment variables to add to xray containers ## E.g: ## extraEnvVars: @@ -783,6 +829,7 @@ common: ## value: BAR ## extraEnvVars: + analysis: name: xray-analysis ## Note that by default we use appVersion to get image tag/version @@ -794,17 +841,18 @@ analysis: externalPort: 7000 annotations: {} extraEnvVars: + # Add lifecycle hooks for the analysis pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -822,6 +870,7 @@ analysis: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 3 successThreshold: 1 + startupProbe: enabled: true config: | @@ -834,8 +883,10 @@ analysis: failureThreshold: 30 periodSeconds: {{ .Values.probes.timeoutSeconds }} timeoutSeconds: 1 + ## Custom command to run before Xray Analysis startup. 
Runs AFTER the common.preStartCommand preStartCommand: + resources: {} # requests: # memory: "1Gi" @@ -843,6 +894,7 @@ analysis: # limits: # memory: "2Gi" # cpu: "1" + sbom: enabled: false name: xray-sbom @@ -854,17 +906,18 @@ sbom: externalPort: 7006 annotations: {} extraEnvVars: + # Add lifecycle hooks for the indexer pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -882,6 +935,7 @@ sbom: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 3 successThreshold: 1 + startupProbe: enabled: true config: | @@ -894,8 +948,10 @@ sbom: failureThreshold: 30 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + ## Custom command to run before Xray Indexer startup. Runs AFTER the common.preStartCommand preStartCommand: + resources: {} # requests: # memory: "1Gi" @@ -903,6 +959,7 @@ sbom: # limits: # memory: "2Gi" # cpu: "1" + indexer: name: xray-indexer ## Note that by default we use appVersion to get image tag/version @@ -914,17 +971,18 @@ indexer: externalPort: 7002 annotations: {} extraEnvVars: + # Add lifecycle hooks for the indexer pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -942,6 +1000,7 @@ indexer: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 3 successThreshold: 1 + startupProbe: enabled: true config: | @@ -954,8 +1013,10 @@ indexer: failureThreshold: 30 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + ## Custom command to run before Xray Indexer startup. 
Runs AFTER the common.preStartCommand preStartCommand: + resources: {} # requests: # memory: "1Gi" @@ -963,6 +1024,7 @@ indexer: # limits: # memory: "2Gi" # cpu: "1" + persist: name: xray-persist ## Note that by default we use appVersion to get image tag/version @@ -974,17 +1036,18 @@ persist: externalPort: 7003 annotations: {} extraEnvVars: + # Add lifecycle hooks for the persist pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -1002,6 +1065,7 @@ persist: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 3 successThreshold: 1 + startupProbe: enabled: true config: | @@ -1014,8 +1078,10 @@ persist: failureThreshold: 30 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + ## Custom command to run before Xray Persist startup. Runs AFTER the common.preStartCommand preStartCommand: + resources: {} # requests: # memory: "1Gi" @@ -1023,6 +1089,7 @@ persist: # limits: # memory: "2Gi" # cpu: "1" + server: name: xray-server ## Note that by default we use appVersion to get image tag/version @@ -1036,18 +1103,18 @@ server: extraEnvVars: # Add lifecycle hooks for the insight pods lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] # mailServer: "" # indexAllBuilds: false ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -1061,9 +1128,11 @@ server: ## additionalSpec: | ## customKey: customVal ## - additionalSpec: "" + additionalSpec: | + statefulset: annotations: {} + livenessProbe: enabled: true config: | @@ -1077,6 +1146,7 @@ server: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 3 successThreshold: 1 + startupProbe: enabled: true config: | @@ -1089,8 +1159,10 @@ server: failureThreshold: 30 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + ## Custom command to run before Xray Server startup. 
Runs AFTER the common.preStartCommand preStartCommand: + resources: {} # requests: # memory: "1Gi" @@ -1098,14 +1170,17 @@ server: # limits: # memory: "2Gi" # cpu: "1" + contextualAnalysis: image: registry: releases-docker.jfrog.io repository: jfrog/xray-jas-contextual-analysis + exposures: image: registry: releases-docker.jfrog.io repository: jfrog/xray-jas-exposures + router: name: router image: @@ -1130,16 +1205,17 @@ router: extraEnvVars: # Add lifecycle hooks for the router pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] annotations: {} + ## Add custom volumesMounts - customVolumeMounts: "" + customVolumeMounts: | # - name: custom-script # mountPath: /scripts/script.sh # subPath: script.sh @@ -1157,6 +1233,7 @@ router: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 5 successThreshold: 1 + readinessProbe: enabled: true config: | @@ -1170,6 +1247,7 @@ router: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} failureThreshold: 5 successThreshold: 1 + startupProbe: enabled: true config: | @@ -1182,10 +1260,13 @@ router: failureThreshold: 30 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + persistence: mountPath: "/var/opt/jfrog/router" + # Add any of the loggers to a sidecar if you want to be able to see them with kubectl logs or a log collector in your k8s cluster loggers: [] + observability: name: observability image: @@ -1204,13 +1285,14 @@ observability: # Add lifecycle hooks for the observability pod lifecycle: {} - # postStart: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] - # preStop: - # exec: - # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] + # postStart: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the preStart handler > /usr/share/message"] extraEnvVars: + livenessProbe: enabled: true config: | @@ -1224,6 +1306,7 @@ observability: timeoutSeconds: {{ .Values.probes.timeoutSeconds }} periodSeconds: 10 successThreshold: 1 + startupProbe: enabled: true config: | @@ -1236,8 +1319,10 @@ observability: failureThreshold: 90 periodSeconds: 5 timeoutSeconds: {{ .Values.probes.timeoutSeconds }} + persistence: mountPath: "/var/opt/jfrog/observability" + # Filebeat Sidecar container ## The provided filebeat configuration is for Xray logs. It assumes you have a logstash installed and configured properly. 
filebeat: @@ -1247,8 +1332,11 @@ filebeat: repository: "docker.elastic.co/beats/filebeat" version: 7.16.2 logstashUrl: "logstash:5044" + annotations: {} + terminationGracePeriod: 10 + livenessProbe: exec: command: @@ -1261,6 +1349,7 @@ filebeat: initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 5 + readinessProbe: exec: command: @@ -1273,6 +1362,7 @@ filebeat: initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 5 + resources: {} # requests: # memory: "100Mi" @@ -1300,10 +1390,12 @@ filebeat: output: logstash: hosts: ["{{ .Values.filebeat.logstashUrl }}"] + ## Allows to add additional kubernetes resources ## Use --- as a separator between multiple resources ## For an example, refer - https://github.com/jfrog/log-analytics-prometheus/blob/master/xray-values.yaml -additionalResources: "" +additionalResources: | + # Adding entries to a Pod's /etc/hosts file # For an example, refer - https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases hostAliases: [] @@ -1319,6 +1411,7 @@ hostAliases: [] ## Specify common probes parameters probes: timeoutSeconds: 5 + ## To limit the amount of jobs created by xray execution service quota: enabled: true From b3c16ef22d04d289a5973dde75a7f95c4ca6baa1 Mon Sep 17 00:00:00 2001 From: Ram <1331672+chukka@users.noreply.github.com> Date: Tue, 17 Oct 2023 17:51:26 +0530 Subject: [PATCH 4/4] [jfrog-platform] 10.15.3 release --- stable/jfrog-platform/CHANGELOG.md | 6 ++++++ stable/jfrog-platform/Chart.lock | 12 ++++++------ stable/jfrog-platform/Chart.yaml | 12 ++++++------ stable/jfrog-platform/values.yaml | 2 +- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/stable/jfrog-platform/CHANGELOG.md b/stable/jfrog-platform/CHANGELOG.md index d4f4203eb..ee4cf42db 100644 --- a/stable/jfrog-platform/CHANGELOG.md +++ b/stable/jfrog-platform/CHANGELOG.md @@ -1,6 +1,12 @@ # JFrog Platform Chart Changelog (GA releases only) All changes to this chart will be documented in this file. 
+## [10.15.3] - Oct 16, 2023
+* Update dependency artifactory chart version to 107.68.14
+* Update dependency xray chart version to 103.83.9
+* Update dependency distribution chart version to 102.20.3
+* Update dependency pipelines chart version to 101.44.5
+
 ## [10.15.2] - Sep 28, 2023
 * Update dependency artifactory chart version to 107.68.11
 * Update dependency xray chart version to 103.82.11
diff --git a/stable/jfrog-platform/Chart.lock b/stable/jfrog-platform/Chart.lock
index 2f834ef0c..956cadb92 100644
--- a/stable/jfrog-platform/Chart.lock
+++ b/stable/jfrog-platform/Chart.lock
@@ -10,18 +10,18 @@ dependencies:
   version: 12.10.1
 - name: artifactory
   repository: https://charts.jfrog.io/
-  version: 107.68.11
+  version: 107.68.14
 - name: xray
   repository: https://charts.jfrog.io/
-  version: 103.82.11
+  version: 103.83.9
 - name: distribution
   repository: https://charts.jfrog.io/
-  version: 102.20.2
+  version: 102.20.3
 - name: insight
   repository: https://charts.jfrog.io/
   version: 101.15.4
 - name: pipelines
   repository: https://charts.jfrog.io/
-  version: 101.43.2
-digest: sha256:44905adc601fc4e279ccefd75e57f7f57475c810da9114b339827a7cb0cc2d7b
-generated: "2023-09-28T08:26:41.419588+05:30"
+  version: 101.44.5
+digest: sha256:5a7e847fd17da2d2b9eaf0bfa4ff6fe43b00ccf51a7d57b163316520117c7cd7
+generated: "2023-10-16T09:20:21.037251+05:30"
diff --git a/stable/jfrog-platform/Chart.yaml b/stable/jfrog-platform/Chart.yaml
index 18955310a..0413ff3e7 100644
--- a/stable/jfrog-platform/Chart.yaml
+++ b/stable/jfrog-platform/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: 7.68.11
+appVersion: 7.68.14
 dependencies:
 - condition: postgresql.enabled
   name: postgresql
@@ -16,15 +16,15 @@ dependencies:
 - condition: artifactory.enabled
   name: artifactory
   repository: https://charts.jfrog.io/
-  version: 107.68.11
+  version: 107.68.14
 - condition: xray.enabled
   name: xray
   repository: https://charts.jfrog.io/
-  version: 103.82.11
+  version: 103.83.9
 - condition: distribution.enabled
   name: distribution
   repository: https://charts.jfrog.io/
-  version: 102.20.2
+  version: 102.20.3
 - condition: insight.enabled
   name: insight
   repository: https://charts.jfrog.io/
@@ -32,7 +32,7 @@ dependencies:
 - condition: pipelines.enabled
   name: pipelines
   repository: https://charts.jfrog.io/
-  version: 101.43.2
+  version: 101.44.5
 description: The Helm chart for JFrog Platform (Universal, hybrid, end-to-end
   DevOps automation)
 home: https://jfrog.com/platform/
@@ -55,4 +55,4 @@ name: jfrog-platform
 sources:
 - https://github.com/jfrog/charts
 type: application
-version: 10.15.2
+version: 10.15.3
diff --git a/stable/jfrog-platform/values.yaml b/stable/jfrog-platform/values.yaml
index 2805b6016..55b236875 100644
--- a/stable/jfrog-platform/values.yaml
+++ b/stable/jfrog-platform/values.yaml
@@ -224,7 +224,7 @@ redis:
 artifactory:
   enabled: true
   unifiedUpgradeAllowed: true
-  installerInfo: '{"productId": "Helm_JFrogPlatform/{{ printf "10.15.2-%s" .Chart.AppVersion }}", "features": [ { "featureId": "Platform/{{ printf "%s-%s" "kubernetes" .Capabilities.KubeVersion.Version }}"}]}'
+  installerInfo: '{"productId": "Helm_JFrogPlatform/{{ printf "10.15.3-%s" .Chart.AppVersion }}", "features": [ { "featureId": "Platform/{{ printf "%s-%s" "kubernetes" .Capabilities.KubeVersion.Version }}"}]}'
 postgresql:
   enabled: false
   waitForDatabase: false
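
The Xray 103.83.9 changelog entry in this series ("Support to configure privateRegistry for pre-upgrade-hook") corresponds to the new `rabbitmq.migration.image` keys added to `stable/xray/values.yaml` above. Below is a minimal sketch of a values override that points that hook image at an internal registry when installing the charts released here; the file name `custom-values.yaml` and the registry host are illustrative assumptions, not part of the patch.

```yaml
# custom-values.yaml -- hypothetical override, not part of this patch.
# Assumes releases-docker.jfrog.io/bitnami/kubectl:1.24.12 has been mirrored
# into the private registry referenced below.
rabbitmq:
  migration:
    enabled: true                                 # default in the chart, shown for clarity
    image:
      registry: my-private-registry.example.com   # assumed internal registry host
      repository: bitnami/kubectl                 # repository/tag as shipped in values.yaml
      tag: 1.24.12
```

Such an override would typically be applied with something like `helm upgrade --install xray jfrog/xray --version 103.83.9 -f custom-values.yaml`, assuming the `jfrog` repo alias points at https://charts.jfrog.io/ as referenced in the Chart.lock above.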