From 9150291c70c1f536828f627bc8fd88fc17b65215 Mon Sep 17 00:00:00 2001 From: sys_softwarerecipes Date: Fri, 18 Oct 2024 13:14:06 +0530 Subject: [PATCH] adding chart torchserve-0.1.0 --- charts/torchserve-0.1.0/.helmignore | 23 +++++ charts/torchserve-0.1.0/Chart.yaml | 11 +++ charts/torchserve-0.1.0/README.md | 41 ++++++++ charts/torchserve-0.1.0/README.md.gotmpl | 26 +++++ charts/torchserve-0.1.0/templates/NOTES.txt | 21 ++++ .../torchserve-0.1.0/templates/_helpers.tpl | 62 ++++++++++++ charts/torchserve-0.1.0/templates/deploy.yaml | 95 +++++++++++++++++++ charts/torchserve-0.1.0/templates/pvc.yaml | 29 ++++++ .../torchserve-0.1.0/templates/service.yaml | 61 ++++++++++++ .../templates/tests/test-connection.yaml | 63 ++++++++++++ charts/torchserve-0.1.0/values.yaml | 56 +++++++++++ 11 files changed, 488 insertions(+) create mode 100644 charts/torchserve-0.1.0/.helmignore create mode 100644 charts/torchserve-0.1.0/Chart.yaml create mode 100644 charts/torchserve-0.1.0/README.md create mode 100644 charts/torchserve-0.1.0/README.md.gotmpl create mode 100644 charts/torchserve-0.1.0/templates/NOTES.txt create mode 100644 charts/torchserve-0.1.0/templates/_helpers.tpl create mode 100644 charts/torchserve-0.1.0/templates/deploy.yaml create mode 100644 charts/torchserve-0.1.0/templates/pvc.yaml create mode 100644 charts/torchserve-0.1.0/templates/service.yaml create mode 100644 charts/torchserve-0.1.0/templates/tests/test-connection.yaml create mode 100644 charts/torchserve-0.1.0/values.yaml diff --git a/charts/torchserve-0.1.0/.helmignore b/charts/torchserve-0.1.0/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/charts/torchserve-0.1.0/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/torchserve-0.1.0/Chart.yaml b/charts/torchserve-0.1.0/Chart.yaml new file mode 100644 index 0000000..a2b9490 --- /dev/null +++ b/charts/torchserve-0.1.0/Chart.yaml @@ -0,0 +1,11 @@ +apiVersion: v2 +appVersion: 1.16.0 +description: TorchServe is a performant, flexible and easy to use tool for serving + PyTorch models in production on Intel GPUs. +maintainers: +- email: tyler.titsworth@intel.com + name: tylertitsworth + url: https://github.com/tylertitsworth +name: torchserve +type: application +version: 0.1.0 diff --git a/charts/torchserve-0.1.0/README.md b/charts/torchserve-0.1.0/README.md new file mode 100644 index 0000000..956c72d --- /dev/null +++ b/charts/torchserve-0.1.0/README.md @@ -0,0 +1,41 @@ +# TorchServe with Intel Optimizations + +TorchServe is a performant, flexible and easy to use tool for serving PyTorch models in production on Intel GPUs. + +For more information about how to use TorchServe with Intel Optimizations, check out the [container documentation](../../../pytorch/serving/README.md). 
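+
+## Installation
+
+As a minimal example (the release name, the namespace, and the `--set` override below are only placeholders; keep `deploy.env.enabled=true` if an `intel-proxy-config` ConfigMap exists in your cluster), the chart can be installed from a checkout of this repository:
+
+```bash
+helm install torchserve charts/torchserve-0.1.0 \
+  --namespace torchserve --create-namespace \
+  --set deploy.env.enabled=false
+```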
+
+![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square)
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| deploy.env | object | `{"configMapName":"intel-proxy-config","enabled":true}` | Add Environment mapping |
+| deploy.image | string | `"intel/intel-optimized-pytorch:2.3.0-serving-cpu"` | Intel-optimized TorchServe image |
+| deploy.modelConfig | string | `"/home/model-server/config.properties"` | Model Server Configuration file location |
+| deploy.models | string | `"all"` | Models to be loaded |
+| deploy.replicas | int | `1` | Number of pods |
+| deploy.resources.limits | object | `{"cpu":"4000m","memory":"1Gi"}` | Maximum resources per pod |
+| deploy.resources.requests | object | `{"cpu":"1000m","memory":"512Mi"}` | Minimum resources per pod |
+| deploy.storage.nfs | object | `{"enabled":false,"path":"nil","readOnly":true,"server":"nil","subPath":"nil"}` | Network File System (NFS) storage for models |
+| deploy.tokens_disabled | bool | `true` | Set token authentication on or off. Check out the latest [torchserve docs](https://github.com/pytorch/serve/blob/master/docs/token_authorization_api.md) for more details. |
+| fullnameOverride | string | `""` | Fully qualified domain name |
+| nameOverride | string | `""` | Name of the serving service |
+| pvc.size | string | `"1Gi"` | Size of the storage |
+| service.type | string | `"NodePort"` | Type of service |
+
+## Next Steps
+
+There are some additional steps that can be taken to prepare your service for your users:
+
+- Enable [Autoscaling](https://github.com/pytorch/serve/blob/master/kubernetes/autoscale.md#autoscaler) via Prometheus
+- Enable [Intel GPU](https://github.com/intel/intel-device-plugins-for-kubernetes/blob/main/cmd/gpu_plugin/README.md#install-to-nodes-with-intel-gpus-with-fractional-resources)
+- Enable [Metrics](https://pytorch.org/serve/metrics.html) and [Metrics API](https://pytorch.org/serve/metrics_api.html).
+- Enable [Profiling](https://github.com/pytorch/serve/blob/master/docs/performance_guide.md#profiling).
+- Export an [INT8 Model for IPEX](https://github.com/pytorch/serve/blob/f7ae6f8281ac6e26404a6ae4d210535c9dc96d9a/examples/intel_extension_for_pytorch/README.md#creating-and-exporting-int8-model-for-intel-extension-for-pytorch)
+- Integrate an [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to your service to serve to a hostname rather than an ip address.
+- Integrate [MLFlow](https://github.com/mlflow/mlflow-torchserve).
+- Integrate an [SSL Certificate](https://pytorch.org/serve/configuration.html#enable-ssl) in your model config file to serve models securely.
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)
diff --git a/charts/torchserve-0.1.0/README.md.gotmpl b/charts/torchserve-0.1.0/README.md.gotmpl
new file mode 100644
index 0000000..465c03a
--- /dev/null
+++ b/charts/torchserve-0.1.0/README.md.gotmpl
@@ -0,0 +1,26 @@
+# TorchServe with Intel Optimizations
+
+{{ template "chart.description" . }}
+
+For more information about how to use TorchServe with Intel Optimizations, check out the [container documentation](../../../pytorch/serving/README.md).
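+
+## Installation
+
+As a minimal example (the release name, the namespace, and the `--set` override below are only placeholders; keep `deploy.env.enabled=true` if an `intel-proxy-config` ConfigMap exists in your cluster), the chart can be installed from a checkout of this repository:
+
+```bash
+helm install torchserve charts/torchserve-0.1.0 \
+  --namespace torchserve --create-namespace \
+  --set deploy.env.enabled=false
+```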
+ +{{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }} + +{{ template "chart.requirementsSection" . }} + +{{ template "chart.valuesSection" . }} + +## Next Steps + +There are some additional steps that can be taken to prepare your service for your users: + +- Enable [Autoscaling](https://github.com/pytorch/serve/blob/master/kubernetes/autoscale.md#autoscaler) via Prometheus +- Enable [Intel GPU](https://github.com/intel/intel-device-plugins-for-kubernetes/blob/main/cmd/gpu_plugin/README.md#install-to-nodes-with-intel-gpus-with-fractional-resources) +- Enable [Metrics](https://pytorch.org/serve/metrics.html) and [Metrics API](https://pytorch.org/serve/metrics_api.html). +- Enable [Profiling](https://github.com/pytorch/serve/blob/master/docs/performance_guide.md#profiling). +- Export an [INT8 Model for IPEX](https://github.com/pytorch/serve/blob/f7ae6f8281ac6e26404a6ae4d210535c9dc96d9a/examples/intel_extension_for_pytorch/README.md#creating-and-exporting-int8-model-for-intel-extension-for-pytorch) +- Integrate an [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) to your service to serve to a hostname rather than an ip address. +- Integrate [MLFlow](https://github.com/mlflow/mlflow-torchserve). +- Integrate an [SSL Certificate](https://pytorch.org/serve/configuration.html#enable-ssl) in your model config file to serve models securely. + +{{ template "helm-docs.versionFooter" . }} diff --git a/charts/torchserve-0.1.0/templates/NOTES.txt b/charts/torchserve-0.1.0/templates/NOTES.txt new file mode 100644 index 0000000..7cf61fc --- /dev/null +++ b/charts/torchserve-0.1.0/templates/NOTES.txt @@ -0,0 +1,21 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "torchserve.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "torchserve.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "torchserve.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:30000/ping +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "torchserve.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} +{{- if eq false .Values.deploy.tokens_disabled }} +2. Display the tokens for accessing the APIs. 
For more details about token authentication checkout: https://github.com/pytorch/serve/blob/master/docs/token_authorization_api.md + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "torchserve.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl exec --namespace {{ .Release.Namespace }} $POD_NAME -- cat /home/model-server/key_file.json +{{- end }} diff --git a/charts/torchserve-0.1.0/templates/_helpers.tpl b/charts/torchserve-0.1.0/templates/_helpers.tpl new file mode 100644 index 0000000..f9624f5 --- /dev/null +++ b/charts/torchserve-0.1.0/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "torchserve.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "torchserve.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "torchserve.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "torchserve.labels" -}} +helm.sh/chart: {{ include "torchserve.chart" . }} +{{ include "torchserve.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "torchserve.selectorLabels" -}} +app.kubernetes.io/name: {{ include "torchserve.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "torchserve.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "torchserve.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/charts/torchserve-0.1.0/templates/deploy.yaml b/charts/torchserve-0.1.0/templates/deploy.yaml new file mode 100644 index 0000000..85f0314 --- /dev/null +++ b/charts/torchserve-0.1.0/templates/deploy.yaml @@ -0,0 +1,95 @@ +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "torchserve.fullname" . }} + labels: + {{- include "torchserve.labels" . 
| nindent 4 }}
+spec:
+  replicas: {{ .Values.deploy.replicas }}
+  selector:
+    matchLabels:
+      {{- include "torchserve.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        {{- include "torchserve.selectorLabels" . | nindent 8 }}
+    spec:
+      containers:
+        - name: torchserve
+          image: {{ .Values.deploy.image }}
+          args:
+            - 'torchserve'
+            - '--start'
+            - '--ts-config'
+            - {{ .Values.deploy.modelConfig }}
+            - '--model-store'
+            - 'model-store'
+            - '--workflow-store'
+            - 'model-store'
+            - '--models'
+            - {{ .Values.deploy.models }}
+          {{- if eq .Values.deploy.env.enabled true }}
+          envFrom:
+            - configMapRef:
+                name: {{ .Values.deploy.env.configMapName }}
+          {{- end }}
+          env:
+            - name: TS_DISABLE_TOKEN_AUTHORIZATION
+              value: "{{ .Values.deploy.tokens_disabled }}"
+          ports:
+            - name: rest-1
+              containerPort: 8080
+            - name: rest-2
+              containerPort: 8081
+            - name: rest-3
+              containerPort: 8082
+            - name: grpc-1
+              containerPort: 7070
+            - name: grpc-2
+              containerPort: 7071
+          volumeMounts:
+            {{- if .Values.deploy.storage.nfs.enabled }}
+            - name: model
+              mountPath: /home/model-server/model-store
+              subPath: {{ .Values.deploy.storage.nfs.subPath }}
+            {{- else }}
+            - name: model
+              mountPath: /home/model-server/model-store
+            {{- end }}
+          resources:
+            requests:
+              cpu: {{ .Values.deploy.resources.requests.cpu }}
+              memory: {{ .Values.deploy.resources.requests.memory }}
+            limits:
+              cpu: {{ .Values.deploy.resources.limits.cpu }}
+              memory: {{ .Values.deploy.resources.limits.memory }}
+      securityContext:
+        fsGroup: 1000
+        runAsUser: 1000
+      volumes:
+        {{- if .Values.deploy.storage.nfs.enabled }}
+        - name: model
+          nfs:
+            server: {{ .Values.deploy.storage.nfs.server }}
+            path: {{ .Values.deploy.storage.nfs.path }}
+            readOnly: {{ .Values.deploy.storage.nfs.readOnly }}
+        {{- else }}
+        - name: model
+          persistentVolumeClaim:
+            claimName: {{ include "torchserve.fullname" . }}-model-dir
+        {{- end }}
diff --git a/charts/torchserve-0.1.0/templates/pvc.yaml b/charts/torchserve-0.1.0/templates/pvc.yaml
new file mode 100644
index 0000000..37abcf7
--- /dev/null
+++ b/charts/torchserve-0.1.0/templates/pvc.yaml
@@ -0,0 +1,29 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+{{- if not .Values.deploy.storage.nfs.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "torchserve.fullname" . }}-model-dir
+  labels:
+    {{- include "torchserve.labels" . | nindent 4 }}
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: {{ .Values.pvc.size }}
+{{- end }}
diff --git a/charts/torchserve-0.1.0/templates/service.yaml b/charts/torchserve-0.1.0/templates/service.yaml
new file mode 100644
index 0000000..b203115
--- /dev/null
+++ b/charts/torchserve-0.1.0/templates/service.yaml
@@ -0,0 +1,61 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "torchserve.fullname" . }} + labels: + {{- include "torchserve.labels" . | nindent 4 }} +spec: + ports: + - name: rest-1 + {{- if eq .Values.service.type "NodePort" }} + nodePort: 30000 + {{- else }} + targetPort: 30000 + {{- end }} + port: 8080 + - name: rest-2 + {{- if eq .Values.service.type "NodePort" }} + nodePort: 30001 + {{- else }} + targetPort: 30001 + {{- end }} + port: 8081 + - name: rest-3 + {{- if eq .Values.service.type "NodePort" }} + nodePort: 30002 + {{- else }} + targetPort: 30002 + {{- end }} + port: 8082 + - name: grpc-1 + {{- if eq .Values.service.type "NodePort" }} + nodePort: 30003 + {{- else }} + targetPort: 30003 + {{- end }} + port: 7070 + - name: grpc-2 + {{- if eq .Values.service.type "NodePort" }} + nodePort: 30004 + {{- else }} + targetPort: 30004 + {{- end }} + port: 7071 + selector: + {{- include "torchserve.selectorLabels" . | nindent 6 }} + type: {{ .Values.service.type }} diff --git a/charts/torchserve-0.1.0/templates/tests/test-connection.yaml b/charts/torchserve-0.1.0/templates/tests/test-connection.yaml new file mode 100644 index 0000000..2172f7f --- /dev/null +++ b/charts/torchserve-0.1.0/templates/tests/test-connection.yaml @@ -0,0 +1,63 @@ +# Copyright (c) 2024 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "torchserve.fullname" . }}-test-connection" + labels: + {{- include "torchserve.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: ping + image: busybox + command: ['wget'] + args: ['-O', '-', '{{ include "torchserve.fullname" . }}:8080/ping'] + - name: register-model + image: curlimages/curl + {{- if eq .Values.deploy.env.enabled true }} + envFrom: + - configMapRef: + name: {{ .Values.deploy.env.configMapName }} + {{- end }} + command: ['sh', '-c'] + args: ['curl https://torchserve.pytorch.org/mar_files/squeezenet1_1.mar -o /home/model-server/model-store/squeezenet1_1.mar && + curl --noproxy "*" -X POST "{{ include "torchserve.fullname" . }}:8081/models?initial_workers=1&url=squeezenet1_1.mar" && + curl -O https://raw.githubusercontent.com/pytorch/serve/master/docs/images/kitten_small.jpg && + curl --noproxy "*" -X POST {{ include "torchserve.fullname" . 
}}:8080/v2/models/squeezenet1_1/infer -T kitten_small.jpg']
+      volumeMounts:
+        {{- if .Values.deploy.storage.nfs.enabled }}
+        - name: model
+          mountPath: /home/model-server/model-store
+          subPath: {{ .Values.deploy.storage.nfs.subPath }}
+        {{- else }}
+        - name: model
+          mountPath: /home/model-server/model-store
+        {{- end }}
+  restartPolicy: Never
+  volumes:
+    {{- if .Values.deploy.storage.nfs.enabled }}
+    - name: model
+      nfs:
+        server: {{ .Values.deploy.storage.nfs.server }}
+        path: {{ .Values.deploy.storage.nfs.path }}
+        readOnly: {{ .Values.deploy.storage.nfs.readOnly }}
+    {{- else }}
+    - name: model
+      persistentVolumeClaim:
+        claimName: {{ include "torchserve.fullname" . }}-model-dir
+    {{- end }}
diff --git a/charts/torchserve-0.1.0/values.yaml b/charts/torchserve-0.1.0/values.yaml
new file mode 100644
index 0000000..f59e1c4
--- /dev/null
+++ b/charts/torchserve-0.1.0/values.yaml
@@ -0,0 +1,56 @@
+# Copyright (c) 2024 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -- Name of the serving service
+nameOverride: ""
+# -- Fully qualified domain name
+fullnameOverride: ""
+deploy:
+  # -- Intel-optimized TorchServe image
+  image: intel/intel-optimized-pytorch:2.3.0-serving-cpu
+  # -- Add Environment mapping
+  env:
+    configMapName: intel-proxy-config
+    enabled: true
+  # -- Set token authentication on or off. Check out the latest [torchserve docs](https://github.com/pytorch/serve/blob/master/docs/token_authorization_api.md) for more details.
+  tokens_disabled: true
+  # -- Models to be loaded
+  models: all
+  # -- Model Server Configuration file location
+  modelConfig: /home/model-server/config.properties
+  # -- Number of pods
+  replicas: 1
+  resources:
+    # -- Maximum resources per pod
+    limits:
+      cpu: 4000m
+      memory: 1Gi
+    # -- Minimum resources per pod
+    requests:
+      cpu: 1000m
+      memory: 512Mi
+  storage:
+    # -- Network File System (NFS) storage for models
+    nfs:
+      enabled: false
+      server: nil
+      path: nil
+      readOnly: true
+      subPath: nil
+service:
+  # -- Type of service
+  type: NodePort
+pvc:
+  # -- Size of the storage
+  size: 1Gi
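
A quick way to exercise the chart after applying this patch (a sketch that assumes Helm 3, a cluster whose default StorageClass can satisfy the PVC, and either an existing `intel-proxy-config` ConfigMap or `deploy.env.enabled=false`):

    # static checks, no cluster changes required
    helm lint charts/torchserve-0.1.0
    helm template torchserve charts/torchserve-0.1.0 | kubectl apply --dry-run=client -f -

    # after `helm install` (see the chart README), run the bundled test pod
    helm test torchserve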