From 22a9cbcac3f37589ac47346eb584903287916cd3 Mon Sep 17 00:00:00 2001 From: Christopher Desiniotis Date: Fri, 31 Mar 2023 09:11:29 -0700 Subject: [PATCH] Bump version to v0.14.0 Signed-off-by: Christopher Desiniotis --- README.md | 45 ++++++++++--------- RELEASE.md | 2 +- .../helm/nvidia-device-plugin/Chart.lock | 6 +-- .../helm/nvidia-device-plugin/Chart.yaml | 6 +-- .../charts/gpu-feature-discovery/Chart.yaml | 4 +- ...xtensions-v1beta1-nvidia-device-plugin.yml | 2 +- ...a-device-plugin-compat-with-cpumanager.yml | 2 +- ...plugin-privileged-with-service-account.yml | 2 +- nvidia-device-plugin.yml | 2 +- versions.mk | 4 +- 10 files changed, 40 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 5fb9e7117..4195bfdaf 100644 --- a/README.md +++ b/README.md @@ -124,7 +124,7 @@ Once you have configured the options above on all the GPU nodes in your cluster, you can enable GPU support by deploying the following Daemonset: ```shell -$ kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.14.0-rc.3/nvidia-device-plugin.yml +$ kubectl create -f https://raw.githubusercontent.com/NVIDIA/k8s-device-plugin/v0.14.0/nvidia-device-plugin.yml ``` **Note:** This is a simple static daemonset meant to demonstrate the basic @@ -462,11 +462,11 @@ $ helm repo add nvdp https://nvidia.github.io/k8s-device-plugin $ helm repo update ``` -Then verify that the latest release (`v0.14.0-rc.3`) of the plugin is available: +Then verify that the latest release (`v0.14.0`) of the plugin is available: ``` $ helm search repo nvdp --devel NAME CHART VERSION APP VERSION DESCRIPTION -nvdp/nvidia-device-plugin 0.14.0-rc.3 0.14.0-rc.3 A Helm chart for ... +nvdp/nvidia-device-plugin 0.14.0 0.14.0 A Helm chart for ... ``` Once this repo is updated, you can begin installing packages from it to deploy @@ -477,7 +477,7 @@ The most basic installation command without any options is then: helm upgrade -i nvdp nvdp/nvidia-device-plugin \ --namespace nvidia-device-plugin \ --create-namespace \ - --version 0.14.0-rc.3 + --version 0.14.0 ``` **Note:** You only need the to pass the `--devel` flag to `helm search repo` @@ -486,7 +486,7 @@ version (e.g. `-rc.1`). Full releases will be listed without this. ### Configuring the device plugin's `helm` chart -The `helm` chart for the latest release of the plugin (`v0.14.0-rc.3`) includes +The `helm` chart for the latest release of the plugin (`v0.14.0`) includes a number of customizable values. Prior to `v0.12.0` the most commonly used values were those that had direct @@ -496,7 +496,7 @@ case of the original values is then to override an option from the `ConfigMap` if desired. Both methods are discussed in more detail below. The full set of values that can be set are found here: -[here](https://github.com/NVIDIA/k8s-device-plugin/blob/v0.14.0-rc.3/deployments/helm/nvidia-device-plugin/values.yaml). +[here](https://github.com/NVIDIA/k8s-device-plugin/blob/v0.14.0/deployments/helm/nvidia-device-plugin/values.yaml). #### Passing configuration to the plugin via a `ConfigMap`. 
@@ -535,7 +535,7 @@ EOF And deploy the device plugin via helm (pointing it at this config file and giving it a name): ``` $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set-file config.map.config=/tmp/dp-example-config0.yaml @@ -557,7 +557,7 @@ $ kubectl create cm -n nvidia-device-plugin nvidia-plugin-configs \ ``` ``` $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set config.name=nvidia-plugin-configs @@ -585,7 +585,7 @@ EOF And redeploy the device plugin via helm (pointing it at both configs with a specified default). ``` $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set config.default=config0 \ @@ -604,7 +604,7 @@ $ kubectl create cm -n nvidia-device-plugin nvidia-plugin-configs \ ``` ``` $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set config.default=config0 \ @@ -690,7 +690,7 @@ chart values that are commonly overridden are: ``` Please take a look in the -[`values.yaml`](https://github.com/NVIDIA/k8s-device-plugin/blob/v0.14.0-rc.3/deployments/helm/nvidia-device-plugin/values.yaml) +[`values.yaml`](https://github.com/NVIDIA/k8s-device-plugin/blob/v0.14.0/deployments/helm/nvidia-device-plugin/values.yaml) file to see the full set of overridable parameters for the device plugin. Examples of setting these options include: @@ -699,7 +699,7 @@ Enabling compatibility with the `CPUManager` and running with a request for 100ms of CPU time and a limit of 512MB of memory. ```shell $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set compatWithCPUManager=true \ @@ -710,7 +710,7 @@ $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ Using the legacy Daemonset API (only available on Kubernetes < `v1.16`): ```shell $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set legacyDaemonsetAPI=true @@ -719,7 +719,7 @@ $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ Enabling compatibility with the `CPUManager` and the `mixed` `migStrategy` ```shell $ helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set compatWithCPUManager=true \ @@ -738,7 +738,7 @@ Discovery to perform this labeling. To enable it, simply set `gfd.enabled=true` during helm install. ``` helm upgrade -i nvdp nvdp/nvidia-device-plugin \ - --version=0.14.0-rc.3 \ + --version=0.14.0 \ --namespace nvidia-device-plugin \ --create-namespace \ --set gfd.enabled=true @@ -793,14 +793,14 @@ Using the default values for the flags: $ helm upgrade -i nvdp \ --namespace nvidia-device-plugin \ --create-namespace \ - https://nvidia.github.io/k8s-device-plugin/stable/nvidia-device-plugin-0.14.0-rc.3.tgz + https://nvidia.github.io/k8s-device-plugin/stable/nvidia-device-plugin-0.14.0.tgz ``` ## Building and Running Locally The next sections are focused on building the device plugin locally and running it. It is intended purely for development and testing, and not required by most users. 
-It assumes you are pinning to the latest release tag (i.e. `v0.14.0-rc.3`), but can
+It assumes you are pinning to the latest release tag (i.e. `v0.14.0`), but can
 easily be modified to work with any available tag or branch.
 
 ### With Docker
@@ -808,8 +808,8 @@ easily be modified to work with any available tag or branch.
 #### Build
 Option 1, pull the prebuilt image from [Docker Hub](https://hub.docker.com/r/nvidia/k8s-device-plugin):
 ```shell
-$ docker pull nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3
-$ docker tag nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3 nvcr.io/nvidia/k8s-device-plugin:devel
+$ docker pull nvcr.io/nvidia/k8s-device-plugin:v0.14.0
+$ docker tag nvcr.io/nvidia/k8s-device-plugin:v0.14.0 nvcr.io/nvidia/k8s-device-plugin:devel
 ```
 
 Option 2, build without cloning the repository:
@@ -817,7 +817,7 @@ Option 2, build without cloning the repository:
 $ docker build \
     -t nvcr.io/nvidia/k8s-device-plugin:devel \
     -f deployments/container/Dockerfile.ubuntu \
-    https://github.com/NVIDIA/k8s-device-plugin.git#v0.14.0-rc.3
+    https://github.com/NVIDIA/k8s-device-plugin.git#v0.14.0
 ```
 
 Option 3, if you want to modify the code:
@@ -871,6 +871,11 @@ $ ./k8s-device-plugin --pass-device-specs
 
 ## Changelog
 
+### Version v0.14.0
+
+- Promoted v0.14.0-rc.3 to v0.14.0
+- Bumped `nvidia-container-toolkit` dependency to the latest version for newer CDI spec generation code
+
 ### Version v0.14.0-rc.3
 
 - Removed `--cdi-enabled` config option and instead trigger CDI injection based on `cdi-annotation` strategy.
diff --git a/RELEASE.md b/RELEASE.md
index a84564434..93bfb2409 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -9,7 +9,7 @@ Publishing the helm chart is currently manual, and we should move to an automate
 # Release Process Checklist
 
 - [ ] Update the README changelog
-- [ ] Update the README to change occurances of the old version (e.g: `v0.14.0-rc.3`) with the new version
+- [ ] Update the README to replace occurrences of the old version (e.g. `v0.14.0`) with the new version
 - [ ] Commit, Tag and Push to Gitlab
 - [ ] Build a new helm package with `helm package ./deployments/helm/nvidia-device-plugin`
 - [ ] Switch to the `gh-pages` branch and move the newly generated package to the `stable` helm repo
diff --git a/deployments/helm/nvidia-device-plugin/Chart.lock b/deployments/helm/nvidia-device-plugin/Chart.lock
index b6ae3322d..f3388f6b6 100644
--- a/deployments/helm/nvidia-device-plugin/Chart.lock
+++ b/deployments/helm/nvidia-device-plugin/Chart.lock
@@ -4,6 +4,6 @@ dependencies:
   version: 0.12.1
 - name: gpu-feature-discovery
   repository: ""
-  version: 0.8.0-rc.2
-digest: sha256:6831cb91438a837c3f23a499c9607ab463e6ec2aa8c5048aded93325492adf53
-generated: "2023-03-28T22:22:47.487194+02:00"
+  version: 0.8.0
+digest: sha256:5d8a4c5f04fd3c2a71fbe49b580c89964c3a3049dee5b792e7a50eef0ddf7a7d
+generated: "2023-03-31T11:32:52.478579-07:00"
diff --git a/deployments/helm/nvidia-device-plugin/Chart.yaml b/deployments/helm/nvidia-device-plugin/Chart.yaml
index e99aeaf89..82ce4d4cd 100644
--- a/deployments/helm/nvidia-device-plugin/Chart.yaml
+++ b/deployments/helm/nvidia-device-plugin/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
 name: nvidia-device-plugin
 type: application
 description: A Helm chart for the nvidia-device-plugin on Kubernetes
-version: "0.14.0-rc.3"
-appVersion: "0.14.0-rc.3"
+version: "0.14.0"
+appVersion: "0.14.0"
 kubeVersion: ">= 1.10.0-0"
 home: https://github.com/NVIDIA/k8s-device-plugin
 
@@ -15,5 +15,5 @@ dependencies:
     repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts
   - 
name: gpu-feature-discovery alias: gfd - version: "0.8.0-rc.2" + version: "0.8.0" condition: gfd.enabled diff --git a/deployments/helm/nvidia-device-plugin/charts/gpu-feature-discovery/Chart.yaml b/deployments/helm/nvidia-device-plugin/charts/gpu-feature-discovery/Chart.yaml index 732070529..35956ac00 100644 --- a/deployments/helm/nvidia-device-plugin/charts/gpu-feature-discovery/Chart.yaml +++ b/deployments/helm/nvidia-device-plugin/charts/gpu-feature-discovery/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: gpu-feature-discovery type: application description: A Helm chart for gpu-feature-discovery on Kubernetes -version: "0.8.0-rc.2" -appVersion: "0.8.0-rc.2" +version: "0.8.0" +appVersion: "0.8.0" kubeVersion: ">= 1.10.0-0" home: https://github.com/NVIDIA/gpu-feature-discovery diff --git a/deployments/static/extensions-v1beta1-nvidia-device-plugin.yml b/deployments/static/extensions-v1beta1-nvidia-device-plugin.yml index 52b1014e7..824ac7775 100644 --- a/deployments/static/extensions-v1beta1-nvidia-device-plugin.yml +++ b/deployments/static/extensions-v1beta1-nvidia-device-plugin.yml @@ -35,7 +35,7 @@ spec: # See https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ priorityClassName: "system-node-critical" containers: - - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3 + - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0 name: nvidia-device-plugin-ctr env: - name: FAIL_ON_INIT_ERROR diff --git a/deployments/static/nvidia-device-plugin-compat-with-cpumanager.yml b/deployments/static/nvidia-device-plugin-compat-with-cpumanager.yml index abda1ff56..33975a316 100644 --- a/deployments/static/nvidia-device-plugin-compat-with-cpumanager.yml +++ b/deployments/static/nvidia-device-plugin-compat-with-cpumanager.yml @@ -38,7 +38,7 @@ spec: # See https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ priorityClassName: "system-node-critical" containers: - - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3 + - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0 name: nvidia-device-plugin-ctr env: - name: FAIL_ON_INIT_ERROR diff --git a/deployments/static/nvidia-device-plugin-privileged-with-service-account.yml b/deployments/static/nvidia-device-plugin-privileged-with-service-account.yml index 36e71e643..65ab27756 100644 --- a/deployments/static/nvidia-device-plugin-privileged-with-service-account.yml +++ b/deployments/static/nvidia-device-plugin-privileged-with-service-account.yml @@ -124,7 +124,7 @@ spec: - env: - name: PASS_DEVICE_SPECS value: "true" - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3 + image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0 name: nvidia-device-plugin-ctr securityContext: privileged: true diff --git a/nvidia-device-plugin.yml b/nvidia-device-plugin.yml index 7750ce22f..d59691cf7 100644 --- a/nvidia-device-plugin.yml +++ b/nvidia-device-plugin.yml @@ -38,7 +38,7 @@ spec: # See https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ priorityClassName: "system-node-critical" containers: - - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0-rc.3 + - image: nvcr.io/nvidia/k8s-device-plugin:v0.14.0 name: nvidia-device-plugin-ctr env: - name: FAIL_ON_INIT_ERROR diff --git a/versions.mk b/versions.mk index fa64feee9..d8ccdd2ff 100644 --- a/versions.mk +++ b/versions.mk @@ -14,12 +14,12 @@ MODULE := github.com/NVIDIA/k8s-device-plugin -VERSION ?= v0.14.0-rc.3 +VERSION ?= v0.14.0 # vVERSION represents the version with a guaranteed v-prefix vVERSION := 
v$(VERSION:v%=%) CUDA_VERSION ?= 12.1.0 -GOLANG_VERSION ?= 1.20.1 +GOLANG_VERSION ?= 1.20.2 GIT_COMMIT ?= $(shell git describe --match="" --dirty --long --always --abbrev=40 2> /dev/null || echo "")
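
Once the tag is pushed and the chart and image are published per the `RELEASE.md` checklist above, a quick sanity check along the following lines can confirm that `v0.14.0` actually resolves. This is only a sketch, not part of the patch itself; it assumes the `nvdp` helm repo from the README has already been added and that `nvcr.io` is reachable.

```shell
# Hypothetical post-release check (not part of this patch): verify the chart
# and image referenced by the bumped manifests are published and pullable.
$ helm repo add nvdp https://nvidia.github.io/k8s-device-plugin
$ helm repo update
$ helm search repo nvdp/nvidia-device-plugin --version 0.14.0
$ docker pull nvcr.io/nvidia/k8s-device-plugin:v0.14.0
```

If the chart shows up without the `--devel` flag and the image pulls cleanly, the README instructions updated by this patch should work as written.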