diff --git a/.editorconfig b/.editorconfig index bc1dfe40c8faa..1a235f9c90853 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,5 +16,8 @@ indent_size = 2 indent_style = space indent_size = 4 +[*.{yaml}] +insert_final_newline = true + [Makefile] indent_style = tab diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 00c5032f3bf71..5b223b56fe489 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -12,7 +12,6 @@ aliases: sig-docs-localization-owners: # Admins for localization content - a-mccarthy - divya-mohan0209 - - jimangel - kbhawkey - natalisucks - onlydole @@ -22,15 +21,11 @@ aliases: - tengqm sig-docs-de-owners: # Admins for German content - bene2k1 - - mkorbi - rlenferink sig-docs-de-reviews: # PR reviews for German content - bene2k1 - - mkorbi - rlenferink sig-docs-en-owners: # Admins for English content - - annajung - - bradtopol - divya-mohan0209 - katcosgrove # RT 1.29 Docs Lead - kbhawkey @@ -42,7 +37,7 @@ aliases: - tengqm sig-docs-en-reviews: # PR reviews for English content - bradtopol - - dipesh-rawat + - dipesh-rawat - divya-mohan0209 - kbhawkey - mehabhalodiya @@ -54,6 +49,7 @@ aliases: - sftim - shannonxtreme - tengqm + - windsonsea sig-docs-es-owners: # Admins for Spanish content - 92nqb - krol3 @@ -111,20 +107,18 @@ aliases: - atoato88 - bells17 - kakts - - ptux - t-inu sig-docs-ko-owners: # Admins for Korean content - gochist - - ianychoi - jihoon-seo - seokho-son - yoonian - ysyukr sig-docs-ko-reviews: # PR reviews for Korean content - gochist - - ianychoi - jihoon-seo - jmyung + - jongwooo - seokho-son - yoonian - ysyukr @@ -151,7 +145,6 @@ aliases: sig-docs-zh-reviews: # PR reviews for Chinese content - asa3311 - chenrui333 - - chenxuc - howieyuen # idealhack - kinzhi @@ -206,16 +199,15 @@ aliases: - Arhell - idvoretskyi - MaxymVlasov - - Potapy4 # authoritative source: git.k8s.io/community/OWNERS_ALIASES committee-steering: # provide PR approvals for announcements - - cblecker - - cpanato - bentheelder - justaugustus - mrbobbytables + - pacoxu - palnabarun - - tpepper + - pohly + - soltysh # authoritative source: https://git.k8s.io/sig-release/OWNERS_ALIASES sig-release-leads: - cpanato # SIG Technical Lead diff --git a/README-hi.md b/README-hi.md index af64eae8f7c3f..958a44ead119c 100644 --- a/README-hi.md +++ b/README-hi.md @@ -3,7 +3,7 @@ [![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) -स्वागत है! इस रिपॉजिटरी में [कुबरनेट्स वेबसाइट और दस्तावेज़](https://kubernetes.io/) बनाने के लिए आवश्यक सभी संपत्तियां हैं। हम बहुत खुश हैं कि आप योगदान करना चाहते हैं! +स्वागत है! इस रिपॉजिटरी में [कुबरनेट्स वेबसाइट और दस्तावेज](https://kubernetes.io/) बनाने के लिए आवश्यक सभी संपत्तियाँ हैं। हम बहुत खुश हैं कि आप योगदान करना चाहते हैं! 
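For reference, the `[*.{yaml}]` stanza added to `.editorconfig` at the top of this patch asks editors to insert a final newline in YAML files. A quick way to spot files that would currently fail that rule, as a sketch (the `find` scope and single `.yaml` extension are assumptions, not part of the patch):

```bash
# List YAML files whose last byte is not a newline.
find . -name '*.yaml' -print0 | while IFS= read -r -d '' f; do
  [ -n "$(tail -c 1 "$f")" ] && echo "missing final newline: $f"
done
```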
## डॉक्स में योगदान देना @@ -37,8 +37,6 @@ > यदि आप विंडोज पर हैं, तो आपको कुछ और टूल्स की आवश्यकता होगी जिन्हें आप [Chocolatey](https://chocolatey.org) के साथ इंस्टॉल कर सकते हैं। -> यदि आप डॉकर के बिना स्थानीय रूप से वेबसाइट चलाना पसंद करते हैं, तो नीचे Hugo का उपयोग करके स्थानीय रूप से साइट चलाना देखें। - यदि आप डॉकर के बिना स्थानीय रूप से वेबसाइट चलाना पसंद करते हैं, तो नीचे दिए गए Hugo का उपयोग करके स्थानीय रूप से [साइट को चलाने](#hugo-का-उपयोग-करते-हुए-स्थानीय-रूप-से-साइट-चलाना) का तरीका देखें। यदि आप [डॉकर](https://www.docker.com/get-started) चला रहे हैं, तो स्थानीय रूप से `कुबेरनेट्स-ह्यूगो` Docker image बनाएँ: diff --git a/README-pl.md b/README-pl.md index 7544de45835a6..62dc2d0ee22f3 100644 --- a/README-pl.md +++ b/README-pl.md @@ -43,7 +43,7 @@ make container-image make container-serve ``` -Jeśli widzisz błędy, prawdopodobnie kontener z Hugo nie dysponuje wystarczającymi zasobami. Aby rozwiązać ten problem, zwiększ ilość dostępnych zasobów CPU i pamięci dla Dockera na Twojej maszynie ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) i [Windows](https://docs.docker.com/docker-for-windows/#resources)). +Jeśli widzisz błędy, prawdopodobnie kontener z Hugo nie dysponuje wystarczającymi zasobami. Aby rozwiązać ten problem, zwiększ ilość dostępnych zasobów CPU i pamięci dla Dockera na Twojej maszynie ([MacOS](https://docs.docker.com/desktop/settings/mac/) i [Windows](https://docs.docker.com/desktop/settings/windows/)). Aby obejrzeć zawartość serwisu, otwórz w przeglądarce adres . Po każdej zmianie plików źródłowych, Hugo automatycznie aktualizuje stronę i odświeża jej widok w przeglądarce. diff --git a/README-pt.md b/README-pt.md index 3de16340509e3..ae2f644ed869d 100644 --- a/README-pt.md +++ b/README-pt.md @@ -49,7 +49,7 @@ Para executar o build do website em um contêiner, execute o comando abaixo: make container-serve ``` -Caso ocorram erros, é provável que o contêiner que está executando o Hugo não tenha recursos suficientes. A solução é aumentar a quantidade de CPU e memória disponível para o Docker ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) e [Windows](https://docs.docker.com/docker-for-windows/#resources)). +Caso ocorram erros, é provável que o contêiner que está executando o Hugo não tenha recursos suficientes. A solução é aumentar a quantidade de CPU e memória disponível para o Docker ([MacOS](https://docs.docker.com/desktop/settings/mac/) e [Windows](https://docs.docker.com/desktop/settings/windows/)). Abra seu navegador em http://localhost:1313 para visualizar o website. Conforme você faz alterações nos arquivos fontes, o Hugo atualiza o website e força a atualização do navegador. diff --git a/README.md b/README.md index e4cdfdc8162bd..c5417a43f663d 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ This repository contains the assets required to build the [Kubernetes website and documentation](https://kubernetes.io/). We're glad that you want to contribute! 
- [Contributing to the docs](#contributing-to-the-docs) -- [Localization READMEs](#localization-readmemds) +- [Localization READMEs](#localization-readmes) ## Using this repository diff --git a/assets/scss/_base.scss b/assets/scss/_base.scss index 71983485eda66..c432b9d26addc 100644 --- a/assets/scss/_base.scss +++ b/assets/scss/_base.scss @@ -902,9 +902,16 @@ section#cncf { margin: 0; } +//Table Content +.tab-content table{ + border-collapse: separate; + border-spacing: 6px; +} + .tab-pane { border-radius: 0.25rem; padding: 0 16px 16px; + overflow: auto; border: 1px solid #dee2e6; &:first-of-type.active { diff --git a/assets/scss/_case-studies.scss b/assets/scss/_case-studies.scss index 4f44864127525..5c907d1c08809 100644 --- a/assets/scss/_case-studies.scss +++ b/assets/scss/_case-studies.scss @@ -1,7 +1,7 @@ // SASS for Case Studies pages go here: hr { - background-color: #999999; + background-color: #303030; margin-top: 0; } diff --git a/assets/scss/_custom.scss b/assets/scss/_custom.scss index 9b46f9eafd02b..6ec7f28f17b34 100644 --- a/assets/scss/_custom.scss +++ b/assets/scss/_custom.scss @@ -317,21 +317,64 @@ footer { /* DOCS */ .launch-cards { - button { - cursor: pointer; - box-sizing: border-box; - background: none; - margin: 0; - border: 0; - } + padding: 0; + display: grid; + grid-template-columns: repeat(3, 1fr); + row-gap: 1em; + .launch-card { + display: flex; + padding: 0 30px 0 0; + .card-content{ + width: fit-content; + display: flex; + flex-direction: column; + margin: 0; + row-gap: 1em; + h2 { + font-size: 1.75em; + padding: 0.5em 0; + margin: 0; + a { + display: none; + } + } + + p { + margin: 0; + } + + ul { + list-style: none; + height: fit-content; + line-height: 1.6; + padding: 0; + margin-block-end: auto; + } + + br { + display: none; + } + + button { + height: min-content; + width: auto; + padding: .5em 1em; + cursor: pointer; + box-sizing: border-box; + } + } + } - ul, - li { - list-style: none; - padding-left: 0; - } + @media only screen and (max-width: 1000px) { + grid-template-columns: 1fr; + + .launch-card { + width: 100%; + } +} } + // table of contents .td-toc { padding-top: 1.5rem !important; @@ -637,19 +680,6 @@ main.content { } } -/* COMMUNITY legacy styles */ -/* Leave these in place until localizations are caught up */ - -.newcommunitywrapper { - .news { - margin-left: 0; - - @media screen and (min-width: 768px) { - margin-left: 10%; - } - } -} - /* CASE-STUDIES */ // Many of the case studies have small variations in markup and styles; @@ -954,6 +984,16 @@ div.alert > em.javascript-required { #bing-results-container { padding: 1em; } +.bing-result { + margin-bottom: 1em; +} +.bing-result-url { + font-size: 14px; +} +.bing-result-snippet { + color: #666666; + font-size: 14px; +} #bing-pagination-container { padding: 1em; margin-bottom: 1em; diff --git a/content/de/_index.html b/content/de/_index.html index ab7427938fafa..04d0b01e4e545 100644 --- a/content/de/_index.html +++ b/content/de/_index.html @@ -4,6 +4,7 @@ cid: home --- +{{< site-searchbar >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} @@ -42,12 +43,12 @@
Die Herausforderungen bei der Migration von über 150 Microservices auf Kubernetes
- Besuche die KubeCon Europe vom 18. bis 21. April 2023
+ Besuche die KubeCon + CloudNativeCon Europe vom 19. bis 22. März 2024
- Besuche die KubeCon North America vom 6. bis 9. November 2023
+ Besuche die KubeCon + CloudNativeCon North America vom 12. bis 15. November 2024
diff --git a/content/de/community/_index.html b/content/de/community/_index.html index 77d6005a37b2c..9cf2822e8d891 100644 --- a/content/de/community/_index.html +++ b/content/de/community/_index.html @@ -246,7 +246,7 @@
Aktuelle Neuigkeiten
[link markup changed in this hunk; HTML stripped in extraction]
diff --git a/content/de/docs/home/_index.md b/content/de/docs/home/_index.md index 128cd67c2e2e3..a65d99458c967 100644 --- a/content/de/docs/home/_index.md +++ b/content/de/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Home" +linkTitle: "Dokumentation" main_menu: true weight: 10 hide_feedback: true diff --git a/content/de/docs/tasks/tools/install-kubectl-linux.md b/content/de/docs/tasks/tools/install-kubectl-linux.md index 93126b9e4a7d3..78d31379f87ae 100644 --- a/content/de/docs/tasks/tools/install-kubectl-linux.md +++ b/content/de/docs/tasks/tools/install-kubectl-linux.md @@ -51,7 +51,7 @@ Um kubectl auf Linux zu installieren, gibt es die folgenden Möglichkeiten: Download der kubectl Checksum-Datei: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" ``` Kubectl Binary mit der Checksum-Datei validieren: @@ -236,7 +236,7 @@ Untenstehend ist beschrieben, wie die Autovervollständigungen für Fish und Zsh Download der kubectl-convert Checksum-Datei: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" ``` Kubectl-convert Binary mit der Checksum-Datei validieren: diff --git a/content/de/docs/tutorials/_index.md b/content/de/docs/tutorials/_index.md index 3ab69b607377c..e38355c294fbb 100644 --- a/content/de/docs/tutorials/_index.md +++ b/content/de/docs/tutorials/_index.md @@ -12,18 +12,13 @@ Ein Tutorial zeigt, wie Sie ein Ziel erreichen, das größer ist als eine einzel Ein Tutorial besteht normalerweise aus mehreren Abschnitten, die jeweils eine Abfolge von Schritten haben. Bevor Sie die einzelnen Lernprogramme durchgehen, möchten Sie möglicherweise ein Lesezeichen zur Seite mit dem [Standardisierten Glossar](/docs/reference/glossary/) setzen um später Informationen nachzuschlagen. - - ## Grundlagen * [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) ist ein ausführliches interaktives Lernprogramm, das Ihnen hilft, das Kubernetes-System zu verstehen und einige grundlegende Kubernetes-Funktionen auszuprobieren. 
- * [Scalable Microservices mit Kubernetes (Udacity)](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615) (Englisch) - * [Einführung in Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) (Englisch) - * [Hello Minikube](/docs/tutorials/hello-minikube/) ## Konfiguration @@ -33,36 +28,26 @@ Bevor Sie die einzelnen Lernprogramme durchgehen, möchten Sie möglicherweise e ## Stateless Anwendungen * [Freigeben einer externen IP-Adresse für den Zugriff auf eine Anwendung in einem Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/) - * [Beispiel: Bereitstellung der PHP-Gästebuchanwendung mit Redis](/docs/tutorials/stateless-application/guestbook/) ## Stateful Anwendungen * [StatefulSet Grundlagen](/docs/tutorials/stateful-application/basic-stateful-set/) - * [Beispiel: WordPress und MySQL mit persistenten Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) - * [Beispiel: Bereitstellen von Cassandra mit Stateful-Sets](/docs/tutorials/stateful-application/cassandra/) - * [ZooKeeper, ein verteiltes CP-System](/docs/tutorials/stateful-application/zookeeper/) ## Clusters * [AppArmor](/docs/tutorials/clusters/apparmor/) - -* [seccomp](/docs/tutorials/clusters/seccomp/) +* [Seccomp](/docs/tutorials/clusters/seccomp/) ## Services * [Source IP verwenden](/docs/tutorials/services/source-ip/) - - ## {{% heading "whatsnext" %}} - Wenn Sie ein Tutorial schreiben möchten, lesen Sie [Seitenvorlagen verwenden](/docs/home/contribute/page-templates/) für weitere Informationen zum Typ der Tutorial-Seite und zur Tutorial-Vorlage. - - diff --git a/content/en/_index.html b/content/en/_index.html index 7ad3ba6752237..e615cae35bda3 100644 --- a/content/en/_index.html +++ b/content/en/_index.html @@ -47,12 +47,12 @@
The Challenges of Migrating 150+ Microservices to Kubernetes
- Attend KubeCon + CloudNativeCon Europe on April 18-21, 2023
+ Attend KubeCon + CloudNativeCon Europe on March 19-22, 2024
- Attend KubeCon + CloudNativeCon North America on November 6-9, 2023
+ Attend KubeCon + CloudNativeCon North America on November 12-15, 2024
diff --git a/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md b/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md index 68855e40788c4..044f12bc3daaa 100644 --- a/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md +++ b/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md @@ -20,7 +20,7 @@ Real time visualization is a strength that UI’s have over CLI’s, and with 1. Based on user research with Kubernetes’ predecessor [Borg](http://research.google.com/pubs/pub43438.html) and continued community feedback, we know logs are tremendously important to users. For this reason we’re constantly looking for ways to improve these features in Dashboard. This release includes a fix for an issue wherein large numbers of logs would crash the system, as well as the introduction of the ability to view logs by date. **Showing More Resources** -The previous release brought all workloads to Dashboard: Pods, Pet Sets, Daemon Sets, Replication Controllers, Replica Set, Services, & Deployments. With 1.4, we expand upon that set of objects by including Services, Ingresses, Persistent Volume Claims, Secrets, & Config Maps. We’ve also introduced an “Admin” section with the Namespace-independent global objects of Namespaces, Nodes, and Persistent Volumes. With the addition of roles, these will be shown only to cluster operators, and developers’ side nav will begin with the Namespace dropdown. +The previous release brought all workloads to Dashboard: Pods, Pet Sets, Daemon Sets, Replication Controllers, Replica Set, Services, & Deployments. With 1.4, we expand upon that set of objects by including Services, Ingresses, Persistent Volume Claims, Secrets, & ConfigMaps. We’ve also introduced an “Admin” section with the Namespace-independent global objects of Namespaces, Nodes, and Persistent Volumes. With the addition of roles, these will be shown only to cluster operators, and developers’ side nav will begin with the Namespace dropdown. Like glue binding together a loose stack of papers into a book, we needed some way to impose order on these resources for their value to be realized, so one of the features we’re most excited to announce in 1.4 is navigation. diff --git a/content/en/blog/_posts/2018-11-08-kubernetes-docs-update-i18n.md b/content/en/blog/_posts/2018-11-08-kubernetes-docs-update-i18n.md index 4c9af11a9eb09..d9ac11759e922 100644 --- a/content/en/blog/_posts/2018-11-08-kubernetes-docs-update-i18n.md +++ b/content/en/blog/_posts/2018-11-08-kubernetes-docs-update-i18n.md @@ -37,7 +37,7 @@ Prow automatically applies language labels based on file path. Thanks to SIG Doc /language ko ``` -These repo labels let reviewers filter for PRs and issues by language. For example, you can now filter the k/website dashboard for [PRs with Chinese content](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+label%3Alanguage%2Fzh). +These repo labels let reviewers filter for PRs and issues by language. For example, you can now filter the kubernetes/website dashboard for [PRs with Chinese content](https://github.com/kubernetes/website/pulls?utf8=%E2%9C%93&q=is%3Aopen+is%3Apr+label%3Alanguage%2Fzh). 
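The label filter in that URL can also be applied from the command line. As a hypothetical equivalent (the GitHub CLI is not mentioned in the original post), the same query could be run with `gh`:

```bash
# List open kubernetes/website PRs carrying the Chinese-language label.
gh pr list --repo kubernetes/website --label "language/zh" --state open
```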
### Team review diff --git a/content/en/blog/_posts/2020-08-26-kubernetes-release-1.19.md b/content/en/blog/_posts/2020-08-26-kubernetes-release-1.19.md index 39559ffe4ca5f..16129272bffe3 100644 --- a/content/en/blog/_posts/2020-08-26-kubernetes-release-1.19.md +++ b/content/en/blog/_posts/2020-08-26-kubernetes-release-1.19.md @@ -77,7 +77,7 @@ Check out the full details of the Kubernetes 1.19 release in our [release notes] ## Availability -Kubernetes 1.19 is available for download on [GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.19.0). To get started with Kubernetes, check out these [interactive tutorials](https://kubernetes.io/docs/tutorials/) or run local Kubernetes clusters using Docker container “nodes” with [KinD](https://kind.sigs.k8s.io/) (Kubernetes in Docker). You can also easily install 1.19 using [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/). +Kubernetes 1.19 is available for download on [GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.19.0). To get started with Kubernetes, check out these [interactive tutorials](https://kubernetes.io/docs/tutorials/) or run local Kubernetes clusters using Docker container “nodes” with [kind](https://kind.sigs.k8s.io/) (Kubernetes in Docker). You can also easily install 1.19 using [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/). ## Release Team This release is made possible through the efforts of hundreds of individuals who contributed both technical and non-technical content. Special thanks to the [release team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.19/release_team.md) led by Taylor Dolezal, Senior Developer Advocate at HashiCorp. The 34 release team members coordinated many aspects of the release, from documentation to testing, validation, and feature completeness. diff --git a/content/en/blog/_posts/2020-09-03-warnings/index.md b/content/en/blog/_posts/2020-09-03-warnings/index.md index a5cfb9f710db7..5d31aedef2f41 100644 --- a/content/en/blog/_posts/2020-09-03-warnings/index.md +++ b/content/en/blog/_posts/2020-09-03-warnings/index.md @@ -63,7 +63,7 @@ This metric has labels for the API `group`, `version`, `resource`, and `subresou and a `removed_release` label that indicates the Kubernetes release in which the API will no longer be served. This is an example query using `kubectl`, [prom2json](https://github.com/prometheus/prom2json), -and [jq](https://stedolan.github.io/jq/) to determine which deprecated APIs have been requested +and [jq](https://jqlang.github.io/jq/) to determine which deprecated APIs have been requested from the current instance of the API server: ```sh diff --git a/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md b/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md index 43488fc11f9fd..722d26372d624 100644 --- a/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md +++ b/content/en/blog/_posts/2021-07-15-SIG-Usability-Spotlight.md @@ -5,7 +5,18 @@ date: 2021-07-15 slug: sig-usability-spotlight-2021 --- -**Author:** Kunal Kushwaha, Civo +**Author:** Kunal Kushwaha (Civo) + +{{< note >}} +SIG Usability, which is featured in this Spotlight blog, has been deprecated and is no longer active. +As a result, the links and information provided in this blog post may no longer be valid or relevant. +Should there be renewed interest and increased participation in the future, the SIG may be revived. 
+However, as of August 2023 the SIG is inactive per the Kubernetes community policy. +The Kubernetes project encourages you to explore other +[SIGs](https://github.com/kubernetes/community/blob/master/sig-list.md#special-interest-groups) +and resources available on the Kubernetes website to stay up-to-date with the latest developments +and enhancements in Kubernetes. +{{< /note >}} ## Introduction diff --git a/content/en/blog/_posts/2021-10-18-kpng-specialized-proxiers.md b/content/en/blog/_posts/2021-10-18-kpng-specialized-proxiers.md index 2c60c12f3f079..1e1b32b265ce3 100644 --- a/content/en/blog/_posts/2021-10-18-kpng-specialized-proxiers.md +++ b/content/en/blog/_posts/2021-10-18-kpng-specialized-proxiers.md @@ -210,7 +210,7 @@ podip=$(cat /tmp/out | jq -r '.Endpoints[]|select(.Local == true)|select(.IPs.V6 ip6tables -t nat -A PREROUTING -d $xip/128 -j DNAT --to-destination $podip ``` -Assuming the JSON output above is stored in `/tmp/out` ([jq](https://stedolan.github.io/jq/) is an *awesome* program!). +Assuming the JSON output above is stored in `/tmp/out` ([jq](https://jqlang.github.io/jq/) is an *awesome* program!). As this is an example we make it really simple for ourselves by using diff --git a/content/en/blog/_posts/2022-07-13-gateway-api-in-beta.md b/content/en/blog/_posts/2022-07-13-gateway-api-in-beta.md index c7605230deece..878c0c4fbf8d0 100644 --- a/content/en/blog/_posts/2022-07-13-gateway-api-in-beta.md +++ b/content/en/blog/_posts/2022-07-13-gateway-api-in-beta.md @@ -169,7 +169,7 @@ If there's something on this list you want to get involved in, or there's something not on this list that you want to advocate for to get on the roadmap please join us in the #sig-network-gateway-api channel on Kubernetes Slack or our weekly [community calls](https://gateway-api.sigs.k8s.io/contributing/community/#meetings). -[gep1016]:https://github.com/kubernetes-sigs/gateway-api/blob/master/site-src/geps/gep-1016.md +[gep1016]:https://github.com/kubernetes-sigs/gateway-api/blob/main/geps/gep-1016.md [grpc]:https://grpc.io/ [pr1085]:https://github.com/kubernetes-sigs/gateway-api/pull/1085 [tcpr]:https://github.com/kubernetes-sigs/gateway-api/blob/main/apis/v1alpha2/tcproute_types.go diff --git a/content/en/blog/_posts/2022-08-31-cgroupv2-ga.md b/content/en/blog/_posts/2022-08-31-cgroupv2-ga.md index d4345195746b8..4071d4458160d 100644 --- a/content/en/blog/_posts/2022-08-31-cgroupv2-ga.md +++ b/content/en/blog/_posts/2022-08-31-cgroupv2-ga.md @@ -118,8 +118,8 @@ Scenarios in which you might need to update to cgroup v2 include the following: DaemonSet for monitoring pods and containers, update it to v0.43.0 or later. 
* If you deploy Java applications, prefer to use versions which fully support cgroup v2:
  * [OpenJDK / HotSpot](https://bugs.openjdk.org/browse/JDK-8230305): jdk8u372, 11.0.16, 15 and later
- * [IBM Semeru Runtimes](https://www.eclipse.org/openj9/docs/version0.33/#control-groups-v2-support): jdk8u345-b01, 11.0.16.0, 17.0.4.0, 18.0.2.0 and later
- * [IBM Java](https://www.ibm.com/docs/en/sdk-java-technology/8?topic=new-service-refresh-7#whatsnew_sr7__fp15): 8.0.7.15 and later
+ * [IBM Semeru Runtimes](https://www.ibm.com/support/pages/apar/IJ46681): 8.0.382.0, 11.0.20.0, 17.0.8.0, and later
+ * [IBM Java](https://www.ibm.com/support/pages/apar/IJ46681): 8.0.8.6 and later

## Learn more

diff --git a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-best-effort.svg b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-best-effort.svg
index cf9283885855e..e35b2f39509bb 100644
[SVG redrawn; vector markup omitted in this excerpt]
diff --git a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-limit.svg b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-limit.svg
index 3a545f20dd85f..a2ba00c58fd4e 100644
[SVG redrawn; vector markup omitted in this excerpt]
diff --git a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-no-limits.svg b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-no-limits.svg
index 845f5d0d07bb2..57b207b80a0be 100644
[SVG redrawn; vector markup omitted in this excerpt]
diff --git a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high.svg b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high.svg
index 02357ef901582..4ba0b15957a28 100644
[SVG redrawn; vector markup omitted in this excerpt]
diff --git a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md
index 01e7b957775a7..ff72afe083322 100644
--- a/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md
+++ b/content/en/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md
@@ -10,18 +10,19 @@ slug: qos-memory-resources

 Kubernetes v1.27, released in April 2023, introduced changes to Memory QoS (alpha) to improve memory management capabilities in Linux nodes.

-Support for Memory QoS was initially added in Kubernetes v1.22, and later some
-[limitations](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos#reasons-for-changing-the-formula-of-memoryhigh-calculation-in-alpha-v127)
-around the formula for calculating `memory.high` were identified. These limitations are
+Support for Memory QoS was initially added in Kubernetes v1.22, and later some
+[limitations](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos#reasons-for-changing-the-formula-of-memoryhigh-calculation-in-alpha-v127)
+around the formula for calculating `memory.high` were identified. These limitations are
 addressed in Kubernetes v1.27.

 ## Background

 Kubernetes allows you to optionally specify how much of each resource a container needs
-in the Pod specification. The most common resources to specify are CPU and Memory.
+in the Pod specification.
The most common resources to specify are CPU and Memory. For example, a Pod manifest that defines container resource requirements could look like: -``` + +```yaml apiVersion: v1 kind: Pod metadata: @@ -40,19 +41,19 @@ spec: * `spec.containers[].resources.requests` - When you specify the resource request for containers in a Pod, the + When you specify the resource request for containers in a Pod, the [Kubernetes scheduler](/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler) uses this information to decide which node to place the Pod on. The scheduler - ensures that for each resource type, the sum of the resource requests of the + ensures that for each resource type, the sum of the resource requests of the scheduled containers is less than the total allocatable resources on the node. * `spec.containers[].resources.limits` - When you specify the resource limit for containers in a Pod, the kubelet enforces - those limits so that the running containers are not allowed to use more of those + When you specify the resource limit for containers in a Pod, the kubelet enforces + those limits so that the running containers are not allowed to use more of those resources than the limits you set. -When the kubelet starts a container as a part of a Pod, kubelet passes the +When the kubelet starts a container as a part of a Pod, kubelet passes the container's requests and limits for CPU and memory to the container runtime. The container runtime assigns both CPU request and CPU limit to a container. Provided the system has free CPU time, the containers are guaranteed to be @@ -61,9 +62,9 @@ the configured limit i.e. containers CPU usage will be throttled if they use more CPU than the specified limit within a given time slice. Prior to Memory QoS feature, the container runtime only used the memory -limit and discarded the memory `request` (requests were, and still are, +limit and discarded the memory `request` (requests were, and still are, also used to influence [scheduling](/docs/concepts/scheduling-eviction/#scheduling)). -If a container uses more memory than the configured limit, +If a container uses more memory than the configured limit, the Linux Out Of Memory (OOM) killer will be invoked. Let's compare how the container runtime on Linux typically configures memory @@ -71,26 +72,26 @@ request and limit in cgroups, with and without Memory QoS feature: * **Memory request** - The memory request is mainly used by kube-scheduler during (Kubernetes) Pod + The memory request is mainly used by kube-scheduler during (Kubernetes) Pod scheduling. In cgroups v1, there are no controls to specify the minimum amount of memory the cgroups must always retain. Hence, the container runtime did not use the value of requested memory set in the Pod spec. - cgroups v2 introduced a `memory.min` setting, used to specify the minimum + cgroups v2 introduced a `memory.min` setting, used to specify the minimum amount of memory that should remain available to the processes within a given cgroup. If the memory usage of a cgroup is within its effective min boundary, the cgroup’s memory won’t be reclaimed under any conditions. - If the kernel cannot maintain at least `memory.min` bytes of memory for the + If the kernel cannot maintain at least `memory.min` bytes of memory for the processes within the cgroup, the kernel invokes its OOM killer. 
In other words, - the kernel guarantees at least this much memory is available or terminates + the kernel guarantees at least this much memory is available or terminates processes (which may be outside the cgroup) in order to make memory more available. Memory QoS maps `memory.min` to `spec.containers[].resources.requests.memory` - to ensure the availability of memory for containers in Kubernetes Pods. + to ensure the availability of memory for containers in Kubernetes Pods. * **Memory limit** The `memory.limit` specifies the memory limit, beyond which if the container tries - to allocate more memory, Linux kernel will terminate a process with an + to allocate more memory, Linux kernel will terminate a process with an OOM (Out of Memory) kill. If the terminated process was the main (or only) process inside the container, the container may exit. @@ -103,7 +104,7 @@ request and limit in cgroups, with and without Memory QoS feature: specify the hard limit for memory usage. If the memory consumption goes above this level, the kernel invokes its OOM Killer. - cgroups v2 also added `memory.high` configuration . Memory QoS uses `memory.high` + cgroups v2 also added `memory.high` configuration. Memory QoS uses `memory.high` to set memory usage throttle limit. If the `memory.high` limit is breached, the offending cgroups are throttled, and the kernel tries to reclaim memory which may avoid an OOM kill. @@ -113,25 +114,25 @@ request and limit in cgroups, with and without Memory QoS feature: ### Cgroups v2 memory controller interfaces & Kubernetes container resources mapping Memory QoS uses the memory controller of cgroups v2 to guarantee memory resources in -Kubernetes. cgroupv2 interfaces that this feature uses are: +Kubernetes. cgroupv2 interfaces that this feature uses are: + * `memory.max` * `memory.min` * `memory.high`. {{< figure src="/blog/2023/05/05/qos-memory-resources/memory-qos-cal.svg" title="Memory QoS Levels" alt="Memory QoS Levels" >}} -`memory.max` is mapped to `limits.memory` specified in the Pod spec. The kubelet and -the container runtime configure the limit in the respective cgroup. The kernel +`memory.max` is mapped to `limits.memory` specified in the Pod spec. The kubelet and +the container runtime configure the limit in the respective cgroup. The kernel enforces the limit to prevent the container from using more than the configured -resource limit. If a process in a container tries to consume more than the -specified limit, kernel terminates a process(es) with an out of -memory Out of Memory (OOM) error. +resource limit. If a process in a container tries to consume more than the +specified limit, kernel terminates a process(es) with an Out of Memory (OOM) error. {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-max.svg" title="memory.max maps to limits.memory" alt="memory.max maps to limits.memory" >}} `memory.min` is mapped to `requests.memory`, which results in reservation of memory resources -that should never be reclaimed by the kernel. This is how Memory QoS ensures the availability of -memory for Kubernetes pods. If there's no unprotected reclaimable memory available, the OOM +that should never be reclaimed by the kernel. This is how Memory QoS ensures the availability of +memory for Kubernetes pods. If there's no unprotected reclaimable memory available, the OOM killer is invoked to make more memory available. 
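As a quick way to see the values that the kubelet and container runtime actually write, you can read the cgroup files directly on a node. This is a sketch that assumes a cgroup v2 node using the systemd cgroup driver; cgroup paths vary across distros and runtimes:

```bash
# Print memory.min and memory.high for every pod-level and container-level cgroup.
find /sys/fs/cgroup/kubepods.slice \( -name memory.min -o -name memory.high \) |
while read -r f; do
  printf '%s: %s\n' "$f" "$(cat "$f")"
done
```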
{{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-min.svg" title="memory.min maps to requests.memory" alt="memory.min maps to requests.memory" >}} @@ -139,14 +140,16 @@ killer is invoked to make more memory available. For memory protection, in addition to the original way of limiting memory usage, Memory QoS throttles workload approaching its memory limit, ensuring that the system is not overwhelmed by sporadic increases in memory usage. A new field, `memoryThrottlingFactor`, is available in -the KubeletConfiguration when you enable MemoryQoS feature. It is set to 0.9 by default. +the KubeletConfiguration when you enable MemoryQoS feature. It is set to 0.9 by default. `memory.high` is mapped to throttling limit calculated by using `memoryThrottlingFactor`, -`requests.memory` and `limits.memory` as in the formula below, and rounding down the +`requests.memory` and `limits.memory` as in the formula below, and rounding down the value to the nearest page size: {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high.svg" title="memory.high formula" alt="memory.high formula" >}} -**Note**: If a container has no memory limits specified, `limits.memory` is substituted for node allocatable memory. +{{< note >}} +If a container has no memory limits specified, `limits.memory` is substituted for node allocatable memory. +{{< /note >}} **Summary:** @@ -158,8 +161,8 @@ value to the nearest page size: - -
memory.max: memory.max specifies the maximum memory limit a container is allowed to use. If a process within the container tries to consume more memory than the configured limit, the kernel terminates the process with an Out of Memory (OOM) error. It is mapped to the container's memory limit specified in the Pod manifest.
@@ -167,7 +170,7 @@
memory.min: memory.min specifies a minimum amount of memory the cgroups must always retain, i.e., memory that should never be reclaimed by the system. If there's no unprotected reclaimable memory available, OOM kill is invoked. It is mapped to the container's memory request specified in the Pod manifest.
@@ -178,8 +181,8 @@
memory.high: memory.high specifies the memory usage throttle limit. This is the main mechanism to control a cgroup's memory use. If cgroups memory use goes over the high boundary specified here, the cgroups processes are throttled and put under heavy reclaim pressure.
@@ -193,51 +196,56 @@ value to the nearest page size:
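To make the throttling limit concrete, the calculation described above (and shown in the `memory.high` formula figure) works out as follows; the request and limit values here are hypothetical:

```formula
memory.high = floor[(requests.memory + memoryThrottlingFactor * (limits.memory - requests.memory)) / pageSize] * pageSize

e.g. with requests.memory = 500Mi, limits.memory = 1000Mi, memoryThrottlingFactor = 0.9:
memory.high = 500Mi + 0.9 * (1000Mi - 500Mi) = 950Mi (already a multiple of the 4 KiB page size)
```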
-**Note** `memory.high` is set only on container level cgroups while `memory.min` is set on
+{{< note >}}
+`memory.high` is set only on container level cgroups while `memory.min` is set on
 container, pod, and node level cgroups.
+{{< /note >}}

 ### `memory.min` calculations for cgroups hierarchy

 When container memory requests are made, kubelet passes `memory.min` to the back-end
 CRI runtime (such as containerd or CRI-O) via the `Unified` field in CRI during
-container creation. The `memory.min` in container level cgroups will be set to:
-
-$memory.min = pod.spec.containers[i].resources.requests[memory]$
-for every ith container in a pod
-
-Since the `memory.min` interface requires that the ancestor cgroups directories are all
-set, the pod and node cgroups directories need to be set correctly.
-
-`memory.min` in pod level cgroup:
-$memory.min = \sum_{i=0}^{no. of pods}pod.spec.containers[i].resources.requests[memory]$
-for every ith container in a pod
-
-`memory.min` in node level cgroup:
-$memory.min = \sum_{i}^{no. of nodes}\sum_{j}^{no. of pods}pod[i].spec.containers[j].resources.requests[memory]$
-for every jth container in every ith pod on a node
-
-Kubelet will manage the cgroups hierarchy of the pod level and node level cgroups +container creation. For every ith container in a pod, the `memory.min` +in container level cgroups will be set to: + +```formula +memory.min = pod.spec.containers[i].resources.requests[memory] +``` + +Since the `memory.min` interface requires that the ancestor cgroups directories are all +set, the pod and node cgroups directories need to be set correctly. + +For every ith container in a pod, `memory.min` in pod level cgroup: + +```formula +memory.min = \sum_{i=0}^{no. of pods}pod.spec.containers[i].resources.requests[memory] +``` + +For every jth container in every ith pod on a node, `memory.min` in node level cgroup: + +```formula +memory.min = \sum_{i}^{no. of nodes}\sum_{j}^{no. of pods}pod[i].spec.containers[j].resources.requests[memory] +``` + +Kubelet will manage the cgroups hierarchy of the pod level and node level cgroups directly using the libcontainer library (from the runc project), while container cgroups limits are managed by the container runtime. ### Support for Pod QoS classes -Based on user feedback for the Alpha feature in Kubernetes v1.22, some users would like +Based on user feedback for the Alpha feature in Kubernetes v1.22, some users would like to opt out of MemoryQoS on a per-pod basis to ensure there is no early memory throttling. -Therefore, in Kubernetes v1.27 Memory QOS also supports memory.high to be set as per +Therefore, in Kubernetes v1.27 Memory QOS also supports memory.high to be set as per Quality of Service(QoS) for Pod classes. Following are the different cases for memory.high as per QOS classes: -1. **Guaranteed pods** by their QoS definition require memory requests=memory limits and are -not overcommitted. Hence MemoryQoS feature is disabled on those pods by not setting -memory.high. This ensures that Guaranteed pods can fully use their memory requests up -to their set limit, and not hit any throttling. +1. **Guaranteed pods** by their QoS definition require memory requests=memory limits and are + not overcommitted. Hence MemoryQoS feature is disabled on those pods by not setting + memory.high. This ensures that Guaranteed pods can fully use their memory requests up + to their set limit, and not hit any throttling. -2. **Burstable pods** by their QoS definition require at least one container in the Pod with -CPU or memory request or limit set. +1. **Burstable pods** by their QoS definition require at least one container in the Pod with + CPU or memory request or limit set. * When requests.memory and limits.memory are set, the formula is used as-is: @@ -248,7 +256,7 @@ CPU or memory request or limit set. {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high-no-limits.svg" title="memory.high when requests and limits are not set" alt="memory.high when requests and limits are not set" >}} -3. **BestEffort** by their QoS definition do not require any memory or CPU limits or requests. +1. **BestEffort** by their QoS definition do not require any memory or CPU limits or requests. For this case, kubernetes sets requests.memory = 0 and substitute limits.memory for node allocatable memory in the formula: @@ -261,10 +269,10 @@ Guaranteed QoS pods do not set `memory.high` as their memory is guaranteed. The prerequisites for enabling Memory QoS feature on your Linux node are: -1. Verify the [requirements](/docs/concepts/architecture/cgroups/#requirements) +1. 
Verify the [requirements](/docs/concepts/architecture/cgroups/#requirements) related to [Kubernetes support for cgroups v2](/docs/concepts/architecture/cgroups) - are met. -2. Ensure CRI Runtime supports Memory QoS. At the time of writing, only containerd + are met. +1. Ensure CRI Runtime supports Memory QoS. At the time of writing, only containerd and CRI-O provide support compatible with Memory QoS (alpha). This was implemented in the following PRs: * Containerd: [Feature: containerd-cri support LinuxContainerResources.Unified #5627](https://github.com/containerd/containerd/pull/5627). @@ -291,8 +299,9 @@ and review of this feature: * David Porter([bobbypage](https://github.com/bobbypage)) * Mrunal Patel([mrunalp](https://github.com/mrunalp)) -For those interested in getting involved in future discussions on Memory QoS feature, +For those interested in getting involved in future discussions on Memory QoS feature, you can reach out SIG Node by several means: + - Slack: [#sig-node](https://kubernetes.slack.com/messages/sig-node) - [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-node) - [Open Community Issues/PRs](https://github.com/kubernetes/community/labels/sig%2Fnode) diff --git a/content/en/blog/_posts/2023-07-20-sig-cli-spotlight.md b/content/en/blog/_posts/2023-07-20-sig-cli-spotlight.md index ba0f3e48399ce..2db123fbf2e8b 100644 --- a/content/en/blog/_posts/2023-07-20-sig-cli-spotlight.md +++ b/content/en/blog/_posts/2023-07-20-sig-cli-spotlight.md @@ -3,7 +3,7 @@ layout: blog title: "Spotlight on SIG CLI" date: 2023-07-20 slug: sig-cli-spotlight-2023 -canonicalUrl: https://www.kubernetes.dev/blog/2023/07/13/sig-cli-spotlight-2023/ +canonicalUrl: https://www.kubernetes.dev/blog/2023/07/20/sig-cli-spotlight-2023/ --- **Author**: Arpit Agrawal diff --git a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md index dc178954e964f..38ec5fd9db8c4 100644 --- a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md +++ b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md @@ -134,7 +134,7 @@ There are three significant differences that you should be aware of: packages as the Google-hosted repository - Kubernetes repositories for v1.28 and onwards are going to have published only versions that are used by that Kubernetes minor release - - Speaking of v1.28, only kubernetes-cni 1.2.0 and cri-tols v1.28 are going + - Speaking of v1.28, only kubernetes-cni 1.2.0 and cri-tools v1.28 are going to be available in the repository for Kubernetes v1.28 - Similar for v1.29, we only plan on publishing cri-tools v1.29 and whatever kubernetes-cni version is going to be used by Kubernetes v1.29 diff --git a/content/en/blog/_posts/2023-08-31-legacy-package-repository-deprecation/index.md b/content/en/blog/_posts/2023-08-31-legacy-package-repository-deprecation/index.md index da7e2b3ed6d92..00ae52cd93a98 100644 --- a/content/en/blog/_posts/2023-08-31-legacy-package-repository-deprecation/index.md +++ b/content/en/blog/_posts/2023-08-31-legacy-package-repository-deprecation/index.md @@ -100,10 +100,12 @@ community-owned repositories (`pkgs.k8s.io`). ## Can I continue to use the legacy package repositories? -The existing packages in the legacy repositories will be available for the foreseeable +~~The existing packages in the legacy repositories will be available for the foreseeable future. However, the Kubernetes project can't provide _any_ guarantees on how long is that going to be. 
The deprecated legacy repositories, and their contents, might -be removed at any time in the future and without a further notice period. +be removed at any time in the future and without a further notice period.~~ + +**UPDATE**: The legacy packages are expected to go away in January 2024. The Kubernetes project **strongly recommends** migrating to the new community-owned repositories **as soon as possible**. diff --git a/content/en/blog/_posts/2023-09-25-kubeadm-use-etcd-learner-mode.md b/content/en/blog/_posts/2023-09-25-kubeadm-use-etcd-learner-mode.md index f812fd8c38408..13b4ef25b9eb4 100644 --- a/content/en/blog/_posts/2023-09-25-kubeadm-use-etcd-learner-mode.md +++ b/content/en/blog/_posts/2023-09-25-kubeadm-use-etcd-learner-mode.md @@ -9,7 +9,7 @@ slug: kubeadm-use-etcd-learner-mode The [`kubeadm`](/docs/reference/setup-tools/kubeadm/) tool now supports etcd learner mode, which allows you to enhance the resilience and stability -of your Kubernetes clusters by leveraging the [learner mode](https://etcd.io/docs/v3.4/learning/design-learner/#appendix-learner-implementation-in-v34) +of your Kubernetes clusters by leveraging the [learner mode](https://etcd.io/docs/v3.4/learning/design-learner/#appendix-learner-implementation-in-v34) feature introduced in etcd version 3.4. This guide will walk you through using etcd learner mode with kubeadm. By default, kubeadm runs a local etcd instance on each control plane node. @@ -23,19 +23,19 @@ promoted to a voting member only after the etcd data are fully aligned. etcd learner mode offers several compelling reasons to consider its adoption in Kubernetes clusters: - 1. **Enhanced Resilience**: etcd learner nodes are non-voting members that catch up with - the leader's logs before becoming fully operational. This prevents new cluster members - from disrupting the quorum or causing leader elections, making the cluster more resilient - during membership changes. - 2. **Reduced Cluster Unavailability**: Traditional approaches to adding new members often - result in cluster unavailability periods, especially in slow infrastructure or misconfigurations. - etcd learner mode minimizes such disruptions. - 3. **Simplified Maintenance**: Learner nodes provide a safer and reversible way to add or replace - cluster members. This reduces the risk of accidental cluster outages due to misconfigurations or - missteps during member additions. - 4. **Improved Network Tolerance**: In scenarios involving network partitions, learner mode allows - for more graceful handling. Depending on the partition a new member lands, it can seamlessly - integrate with the existing cluster without causing disruptions. +1. **Enhanced Resilience**: etcd learner nodes are non-voting members that catch up with + the leader's logs before becoming fully operational. This prevents new cluster members + from disrupting the quorum or causing leader elections, making the cluster more resilient + during membership changes. +1. **Reduced Cluster Unavailability**: Traditional approaches to adding new members often + result in cluster unavailability periods, especially in slow infrastructure or misconfigurations. + etcd learner mode minimizes such disruptions. +1. **Simplified Maintenance**: Learner nodes provide a safer and reversible way to add or replace + cluster members. This reduces the risk of accidental cluster outages due to misconfigurations or + missteps during member additions. +1. 
**Improved Network Tolerance**: In scenarios involving network partitions, learner mode allows + for more graceful handling. Depending on the partition a new member lands, it can seamlessly + integrate with the existing cluster without causing disruptions. In summary, the etcd learner mode improves the reliability and manageability of Kubernetes clusters during member additions and changes, making it a valuable feature for cluster operators. @@ -80,7 +80,7 @@ ETCDCTL_API=3 etcdctl --endpoints 127.0.0.1:2379 \ --cert=/etc/kubernetes/pki/etcd/server.crt \ --key=/etc/kubernetes/pki/etcd/server.key \ --cacert=/etc/kubernetes/pki/etcd/ca.crt \ - member list + member list ... dc543c4d307fadb9, started, node1, https://10.6.177.40:2380, https://10.6.177.40:2379, false ``` @@ -88,20 +88,24 @@ dc543c4d307fadb9, started, node1, https://10.6.177.40:2380, https://10.6.177.40: To check if the Kubernetes control plane is healthy, run `kubectl get node -l node-role.kubernetes.io/control-plane=` and check if the nodes are ready. -Note: It is recommended to have an odd number of members in a etcd cluster. +{{< note >}} +It is recommended to have an odd number of members in an etcd cluster. +{{< /note >}} Before joining a worker node to the new Kubernetes cluster, ensure that the control plane nodes are healthy. ## What's next - The feature gate `EtcdLearnerMode` is alpha in v1.27 and we expect it to graduate to beta in the next - minor release of Kubernetes (v1.29). -- etcd has an open issue that may make the process more automatic: + minor release of Kubernetes (v1.29). +- etcd has an open issue that may make the process more automatic: [Support auto-promoting a learner member to a voting member](https://github.com/etcd-io/etcd/issues/15107). -- Learn more about the kubeadm [configuration format](/docs/reference/config-api/kubeadm-config.v1beta3/) here. +- Learn more about the kubeadm [configuration format](/docs/reference/config-api/kubeadm-config.v1beta3/). ## Feedback Was this guide helpful? If you have any feedback or encounter any issues, please let us know. Your feedback is always welcome! Join the bi-weekly [SIG Cluster Lifecycle meeting](https://docs.google.com/document/d/1Gmc7LyCIL_148a9Tft7pdhdee0NBHdOfHS1SAF0duI4/edit) -or weekly [kubeadm office hours](https://docs.google.com/document/d/130_kiXjG7graFNSnIAgtMS1G8zPDwpkshgfRYS0nggo/edit). Or reach us via [Slack](https://slack.k8s.io/) (channel **#kubeadm**), or the [SIG's mailing list](https://groups.google.com/g/kubernetes-sig-cluster-lifecycle). +or weekly [kubeadm office hours](https://docs.google.com/document/d/130_kiXjG7graFNSnIAgtMS1G8zPDwpkshgfRYS0nggo/edit). +Or reach us via [Slack](https://slack.k8s.io/) (channel **#kubeadm**), or the +[SIG's mailing list](https://groups.google.com/g/kubernetes-sig-cluster-lifecycle). diff --git a/content/en/blog/_posts/2023-10-02-steering-committee-results-2023.md b/content/en/blog/_posts/2023-10-02-steering-committee-results-2023.md new file mode 100644 index 0000000000000..09a58993a4fea --- /dev/null +++ b/content/en/blog/_posts/2023-10-02-steering-committee-results-2023.md @@ -0,0 +1,60 @@ +--- +layout: blog +title: "Announcing the 2023 Steering Committee Election Results" +date: 2023-10-02 +slug: steering-committee-results-2023 +canonicalUrl: https://www.kubernetes.dev/blog/2023/10/02/steering-committee-results-2023/ +--- + +**Author**: Kaslin Fields + +The [2023 Steering Committee Election](https://github.com/kubernetes/community/tree/master/elections/steering/2023) is now complete. 
The Kubernetes Steering Committee consists of 7 seats, 4 of which were up for election in 2023. Incoming committee members serve a term of 2 years, and all members are elected by the Kubernetes Community.
+
+This community body is significant since it oversees the governance of the entire Kubernetes project. With that great power comes great responsibility. You can learn more about the steering committee’s role in their [charter](https://github.com/kubernetes/steering/blob/master/charter.md).
+
+Thank you to everyone who voted in the election; your participation helps support the community’s continued health and success.
+
+## Results
+
+Congratulations to the elected committee members whose two year terms begin immediately (listed in alphabetical order by GitHub handle):
+
+* **Stephen Augustus ([@justaugustus](https://github.com/justaugustus)), Cisco**
+* **Paco Xu 徐俊杰 ([@pacoxu](https://github.com/pacoxu)), DaoCloud**
+* **Patrick Ohly ([@pohly](https://github.com/pohly)), Intel**
+* **Maciej Szulik ([@soltysh](https://github.com/soltysh)), Red Hat**
+
+They join continuing members:
+
+* **Benjamin Elder ([@bentheelder](https://github.com/bentheelder)), Google**
+* **Bob Killen ([@mrbobbytables](https://github.com/mrbobbytables)), Google**
+* **Nabarun Pal ([@palnabarun](https://github.com/palnabarun)), VMware**
+
+Stephen Augustus is a returning Steering Committee Member.
+
+## Big Thanks!
+
+Thank you and congratulations on a successful election to this round’s election officers:
+
+* Bridget Kromhout ([@bridgetkromhout](https://github.com/bridgetkromhout))
+* Davanum Srinivas ([@dims](https://github.com/dims))
+* Kaslin Fields ([@kaslin](https://github.com/kaslin))
+
+Thanks to the Emeritus Steering Committee Members. Your service is appreciated by the community:
+
+* Christoph Blecker ([@cblecker](https://github.com/cblecker))
+* Carlos Tadeu Panato Jr. ([@cpanato](https://github.com/cpanato))
+* Tim Pepper ([@tpepper](https://github.com/tpepper))
+
+And thank you to all the candidates who came forward to run for election.
+
+## Get Involved with the Steering Committee
+
+This governing body, like all of Kubernetes, is open to all. You can follow along with Steering Committee [backlog items](https://github.com/orgs/kubernetes/projects/40) and weigh in by filing an issue or creating a PR against their [repo](https://github.com/kubernetes/steering). They have an open meeting on [the first Monday at 9:30am PT of every month](https://github.com/kubernetes/steering). They can also be contacted at their public mailing list steering@kubernetes.io.
+
+You can see what the Steering Committee meetings are all about by watching past meetings on the [YouTube Playlist](https://www.youtube.com/playlist?list=PL69nYSiGNLP1yP1B_nd9-drjoxp0Q14qM).
+
+If you want to meet some of the newly elected Steering Committee members, join us for the Steering AMA at the [Kubernetes Contributor Summit in Chicago](https://k8s.dev/summit).
+
+---
+
+_This post was written by the [Contributor Comms Subproject](https://github.com/kubernetes/community/tree/master/communication/contributor-comms).
If you want to write stories about the Kubernetes community, learn more about us._
diff --git a/content/en/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md b/content/en/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md
new file mode 100644
index 0000000000000..dbfd3d67a98ab
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md
@@ -0,0 +1,197 @@
+---
+layout: blog
+title: "Spotlight on SIG Architecture: Conformance"
+slug: sig-architecture-conformance-spotlight-2023
+date: 2023-10-05
+canonicalUrl: https://www.k8s.dev/blog/2023/10/05/sig-architecture-conformance-spotlight-2023/
+---
+
+
+**Author**: Frederico Muñoz (SAS Institute)
+
+_This is the first interview of a SIG Architecture Spotlight series
+that will cover the different subprojects. We start with the SIG
+Architecture: Conformance subproject_
+
+In this [SIG
+Architecture](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md)
+spotlight, we talked with [Riaan
+Kleinhans](https://github.com/Riaankl) (ii-Team), Lead for the
+[Conformance
+sub-project](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#conformance-definition-1).
+
+## About SIG Architecture and the Conformance subproject
+
+**Frederico (FSM)**: Hello Riaan, and welcome! For starters, tell us a
+bit about yourself, your role and how you got involved in Kubernetes.
+
+**Riaan Kleinhans (RK)**: Hi! My name is Riaan Kleinhans and I live in
+South Africa. I am the Project manager for the [ii-Team](https://ii.nz) in New
+Zealand. When I joined ii the plan was to move to New Zealand in April
+2020 and then Covid happened. Fortunately, being a flexible and
+dynamic team we were able to make it work remotely and in very
+different time zones.
+
+The ii team have been tasked with managing the Kubernetes Conformance
+testing technical debt and writing tests to clear the technical
+debt. I stepped into the role of project manager to be the link
+between monitoring, test writing and the community. Through that work
+I had the privilege of meeting [Dan Kohn](https://github.com/dankohn)
+in those first months; his enthusiasm about the work we were doing was
+a great inspiration.
+
+**FSM**: Thank you - so, your involvement in SIG Architecture started
+because of the conformance work?
+
+**RK**: SIG Architecture is the home for the Kubernetes Conformance
+subproject. Initially, most of my interactions were directly with SIG
+Architecture through the Conformance sub-project. However, as we
+began organizing the work by SIG, we started engaging directly with
+each individual SIG. These engagements with the SIGs that own the
+untested APIs have helped us accelerate our work.
+
+**FSM**: How would you describe the main goals and
+areas of intervention of the Conformance sub-project?
+
+**RK**: The Kubernetes Conformance sub-project focuses on guaranteeing
+compatibility and adherence to the Kubernetes specification by
+developing and maintaining a comprehensive conformance test suite. Its
+main goals include assuring compatibility across different Kubernetes
+implementations, verifying adherence to the API specification,
+supporting the ecosystem by encouraging conformance certification, and
+fostering collaboration within the Kubernetes community. By providing
+standardised tests and promoting consistent behaviour and
+functionality, the Conformance subproject ensures a reliable and
+compatible Kubernetes ecosystem for developers and users alike.
+
+## More on the Conformance Test Suite
+
+**FSM**: A part of providing those standardised tests is, I believe,
+the [Conformance Test
+Suite](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md). Could
+you explain what it is and its importance?
+
+**RK**: The Kubernetes Conformance Test Suite checks if Kubernetes
+distributions meet the project's specifications, ensuring
+compatibility across different implementations. It covers various
+features like APIs, networking, storage, scheduling, and
+security. Passing the tests confirms proper implementation and
+promotes a consistent and portable container orchestration platform.
+
+**FSM**: Right, the tests are important in the way they define the
+minimum features that any Kubernetes cluster must support. Could you
+describe the process around determining which features are considered
+for inclusion? Is there any tension between a more minimal approach,
+and proposals from the other SIGs?
+
+**RK**: The requirements for each endpoint that undergoes conformance
+testing are clearly defined by SIG Architecture. Only API endpoints
+that are generally available and non-optional features are eligible
+for conformance. Over the years, there have been several discussions
+regarding conformance profiles, exploring the possibility of including
+optional endpoints like RBAC, which are widely used by most end users,
+in specific profiles. However, this aspect is still a work in
+progress.
+
+Endpoints that do not meet the conformance criteria are listed in
+[ineligible_endpoints.yaml](https://github.com/kubernetes/kubernetes/blob/master/test/conformance/testdata/ineligible_endpoints.yaml),
+which is publicly accessible in the Kubernetes repo. This file can be
+updated to add or remove endpoints as their status or requirements
+change. These ineligible endpoints are also visible on
+[APISnoop](https://apisnoop.cncf.io/).
+
+Ensuring transparency and incorporating community input regarding the
+eligibility or ineligibility of endpoints is of utmost importance to
+SIG Architecture.
+
+**FSM**: Writing tests for new features is something that generally
+requires some kind of enforcement. How do you see the evolution of
+this in Kubernetes? Was there a specific effort to improve the process
+in a way that required tests would be a first-class citizen, or was
+that never an issue?
+
+**RK**: When discussions surrounding the Kubernetes conformance
+programme began in 2018, only approximately 11% of endpoints were
+covered by tests. At that time, the CNCF's governing board requested
+that if funding were to be provided for the work to cover missing
+conformance tests, the Kubernetes Community should adopt a policy of
+not allowing new features to be added unless they include conformance
+tests for their stable APIs.
+
+SIG Architecture is responsible for stewarding this requirement, and
+[APISnoop](https://apisnoop.cncf.io/) has proven to be an invaluable
+tool in this regard. Through automation, APISnoop generates a pull
+request every weekend to highlight any discrepancies in Conformance
+coverage. If any endpoints are promoted to General Availability
+without a conformance test, it will be promptly identified. This
+approach helps prevent the accumulation of new technical debt.
+
+Additionally, there are plans in the near future to create a release
+informing job, which will add an additional layer to prevent any new
+technical debt.
+ +**FSM**: I see, tooling and automation play an important role +there. What are, in your opinion, the areas that, conformance-wise, +still require some work to be done? In other words, what are the +current priority areas marked for improvement? + +**RK**: We have reached the “100% Conformance Tested” milestone in +release 1.27! + +At that point, the community took another look at all the endpoints +that were listed as ineligible for conformance. The list was populated +through community input over several years. Several endpoints +that were previously deemed ineligible for conformance have been +identified and relocated to a new dedicated list, which is currently +receiving focused attention for conformance test development. Again, +that list can also be checked on apisnoop.cncf.io. + +To ensure the avoidance of new technical debt in the conformance +project, there are upcoming plans to establish a release informing job +as an additional preventive measure. + +While APISnoop is currently hosted on CNCF infrastructure, the project +has been generously donated to the Kubernetes community. Consequently, +it will be transferred to community-owned infrastructure before the +end of 2023. + +**FSM**: That's great news! For anyone wanting to help, what are the +venues for collaboration that you would highlight? Do all of them +require solid knowledge of Kubernetes as a whole, or are there ways +someone newer to the project can contribute? + +**RK**: Contributing to conformance testing is akin to the task of +"washing the dishes" – it may not be highly visible, but it remains +incredibly important. It necessitates a strong understanding of +Kubernetes, particularly in the areas where the endpoints need to be +tested. This is why working with each SIG that owns the API endpoint +being tested is so important. + +As part of our commitment to making test writing accessible to +everyone, the ii team is currently engaged in the development of a +"click and deploy" solution. This solution aims to enable anyone to +swiftly create a working environment on real hardware within +minutes. We will share updates regarding this development as soon as +we are ready. + +**FSM**: That's very helpful, thank you. Any final comments you would +like to share with our readers? + +**RK**: Conformance testing is a collaborative community endeavour that +involves extensive cooperation among SIGs. SIG Architecture has +spearheaded the initiative and provided guidance. However, the +progress of the work relies heavily on the support of all SIGs in +reviewing, enhancing, and endorsing the tests. + +I would like to extend my sincere appreciation to the ii team for +their unwavering commitment to resolving technical debt over the +years. In particular, [Hippie Hacker](https://github.com/hh)'s +guidance and stewardship of the vision has been +invaluable. Additionally, I want to give special recognition to +Stephen Heywood for shouldering the majority of the test writing +workload in recent releases, as well as to Zach Mandeville for his +contributions to APISnoop. + +**FSM**: Many thanks for your availability and insightful comments, +I've personally learned quite a bit with it and I'm sure our readers +will as well. 
diff --git a/content/en/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md b/content/en/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md
new file mode 100644
index 0000000000000..14c5e198ab286
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md
@@ -0,0 +1,189 @@
+---
+layout: blog
+title: "CRI-O is moving towards pkgs.k8s.io"
+date: 2023-10-10
+slug: cri-o-community-package-infrastructure
+---
+
+**Author:** Sascha Grunert
+
+The Kubernetes community [recently announced](/blog/2023/08/31/legacy-package-repository-deprecation/)
+that their legacy package repositories are frozen, and that they have moved to the newly
+[introduced community-owned package repositories](/blog/2023/08/15/pkgs-k8s-io-introduction) powered by the
+[OpenBuildService (OBS)](https://build.opensuse.org/project/subprojects/isv:kubernetes).
+CRI-O has a long history of utilizing
+[OBS for their package builds](https://github.com/cri-o/cri-o/blob/e292f17/install.md#install-packaged-versions-of-cri-o),
+but all of the packaging efforts have been done manually so far.
+
+The CRI-O community absolutely loves Kubernetes, which means that they're
+delighted to announce that:
+
+**All future CRI-O packages will be shipped as part of the officially supported
+Kubernetes infrastructure hosted on pkgs.k8s.io!**
+
+There will be a deprecation phase for the existing packages, which is currently
+being [discussed in the CRI-O community](https://github.com/cri-o/cri-o/discussions/7315).
+The new infrastructure will only support releases of CRI-O `>= v1.28.2` as well as
+release branches newer than `release-1.28`.
+
+## How to use the new packages
+
+In the same way as the Kubernetes community, CRI-O provides `deb` and `rpm`
+packages as part of a dedicated subproject in OBS, called
+[`isv:kubernetes:addons:cri-o`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o).
+This project acts as an umbrella and provides `stable` (for CRI-O tags) as well as
+`prerelease` (for CRI-O `release-1.y` and `main` branches) package builds.
+
+**Stable Releases:**
+
+- [`isv:kubernetes:addons:cri-o:stable`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable): Stable Packages
+  - [`isv:kubernetes:addons:cri-o:stable:v1.29`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable:v1.29): `v1.29.z` tags
+  - [`isv:kubernetes:addons:cri-o:stable:v1.28`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable:v1.28): `v1.28.z` tags
+
+**Prereleases:**
+
+- [`isv:kubernetes:addons:cri-o:prerelease`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease): Prerelease Packages
+  - [`isv:kubernetes:addons:cri-o:prerelease:main`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:main): [`main`](https://github.com/cri-o/cri-o/commits/main) branch
+  - [`isv:kubernetes:addons:cri-o:prerelease:v1.29`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:v1.29): [`release-1.29`](https://github.com/cri-o/cri-o/commits/release-1.29) branch
+  - [`isv:kubernetes:addons:cri-o:prerelease:v1.28`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:v1.28): [`release-1.28`](https://github.com/cri-o/cri-o/commits/release-1.28) branch
+
+There are no stable releases available in the v1.29 repository yet, because
+v1.29.0 will be released in December.
The CRI-O community will also **not**
+support release branches older than `release-1.28`, because there have been CI
+requirements merged into `main` which could only be backported to `release-1.28`
+with appropriate efforts.
+
+For example, if an end-user would like to install the latest available version
+of the CRI-O `main` branch, then they can add the repository in the same way as
+they do for Kubernetes.
+
+### `rpm` Based Distributions
+
+For `rpm` based distributions, you can run the following commands as a `root` user
+to install CRI-O together with Kubernetes:
+
+#### Add the Kubernetes repo
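+A sketch of what that repository definition typically looks like on an `rpm` based
+system is shown below; the exact `baseurl` and `gpgkey` paths here are assumptions
+for illustration, so confirm them against the current pkgs.k8s.io documentation for
+your Kubernetes minor version before using them:
+
+```bash
+# Sketch only: verify the repository paths against the official
+# pkgs.k8s.io documentation for your Kubernetes minor version.
+cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/
+enabled=1
+gpgcheck=1
+gpgkey=https://pkgs.k8s.io/core:/stable:/v1.28/rpm/repodata/repomd.xml.key
+EOF
+```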
[airgap-vm.svg (new file in this diff): diagram text labels: Host Laptop, Airgap VM, Host Network Adapter, Bridge Interface, 10.1.1.1, 10.1.1.2, Local LAN, 192.168.1.2, Internet, Bidirectional Communication, No Communication]
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/example_production_topology.svg b/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/example_production_topology.svg
new file mode 100644
index 0000000000000..3e70fc16f1209
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/example_production_topology.svg
@@ -0,0 +1,4 @@
[example_production_topology.svg: diagram text labels: Control Plane Node 01-03, Worker Node 1-n, Docker Registry, Workstation (x2), IT Admin, Internet, Air Gapped Environment, "Physically carry artifacts across the air gap", "Download required artifacts from the insecure side of the air gap", "Load software artifacts onto the proper nodes on the secure side of the air gap"]
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/index.md b/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/index.md
new file mode 100644
index 0000000000000..f10b4eeec7472
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-12-bootstrap-an-air-gapped-cluster-with-kubeadm/index.md
@@ -0,0 +1,698 @@
+---
+layout: blog
+title: "Bootstrap an Air Gapped Cluster With Kubeadm"
+date: 2023-10-12
+slug: bootstrap-an-air-gapped-cluster-with-kubeadm
+---
+
+**Author:** Rob Mengert (Defense Unicorns)
+
+Ever wonder how software gets deployed onto a system that is deliberately disconnected from the Internet and other networks? These systems are typically disconnected due to their sensitive nature. Sensitive as in utilities (power/water), banking, healthcare, weapons systems, other government use cases, etc. Sometimes it's technically a water gap, if you're running Kubernetes on an underwater vessel. Still, these environments need software to operate. This concept of deployment in a disconnected state is what it means to deploy to the other side of an [air gap](https://en.wikipedia.org/wiki/Air_gap_(networking)).
+
+Again, despite this posture, software still needs to run in these environments. Traditionally, software artifacts are physically carried across the air gap on hard drives, USB sticks, CDs, or floppy disks (for ancient systems, it still happens). Kubernetes lends itself particularly well to running software behind an air gap for several reasons, largely due to its declarative nature.
+
+In this blog article, I will walk through the process of bootstrapping a Kubernetes
+cluster in an air-gapped lab environment using Fedora Linux and kubeadm.
+
+## The Air Gap VM Setup
+
+A real air-gapped network can take some effort to set up, so for this post, I will use an example VM on a laptop and do some network modifications. Below is the topology:
+
+{{< figure src="airgap-vm.svg" alt="Topology on the host/laptop which shows that connectivity to the internet from the air gap VM is not possible. However, connectivity between the host/laptop and the VM is possible" >}}
+
+### Local topology
+
+This VM will have its network connectivity disabled but in a way that doesn't shut down the VM's virtual NIC. Instead, its network will be downed by injecting a default route to a dummy interface, making anything internet-hosted unreachable. However, the VM still has a connected route to the bridge interface on the host, which means that network connectivity to the host is still working. This posture means that data can be transferred from the host/laptop to the VM via `scp`, even with the default route on the VM black-holing all traffic that isn't destined for the local bridge subnet. This type of transfer is analogous to carrying data across the air gap and will be used throughout this post.
+
+Other details about the lab setup:
+
+**VM OS:** Fedora 37
+**Kubernetes Version:** v1.27.3
+**CNI Plugins Version:** v1.3.0
+**CNI Provider and Version:** Flannel v0.22.0
+
+While this single VM lab is a simplified example, the below diagram more closely approximates what a real air-gapped environment could look like:
+
+{{< figure src="example_production_topology.svg" alt="Example production topology which shows 3 control plane Kubernetes nodes and 'n' worker nodes along with a Docker registry in an air-gapped environment.
Additionally shows two workstations, one on each side of the air gap, and an IT admin who physically carries the artifacts across." >}}
+
+Note, there is still intentional isolation between the environment and the internet. There are also some things that are not shown in order to keep the diagram simple, for example malware scanning on the secure side of the air gap.
+
+Back to the single VM lab environment.
+
+## Identifying the required software artifacts
+
+I have gone through the trouble of identifying all of the required software components that need to be carried across the air gap in order for this cluster to be stood up:
+
+- Docker (to host an internal container image registry)
+- Containerd
+- libcgroup
+- socat
+- conntrack-tools
+- CNI plugins
+- crictl
+- kubeadm
+- kubelet
+- kubectl and k9s (strictly speaking, these aren't required to bootstrap a cluster but they are handy to interact with one)
+- kubelet.service systemd file
+- kubeadm configuration file
+- Docker registry container image
+- Kubernetes component container images
+- CNI network plugin container images ([Flannel](https://github.com/flannel-io/flannel) will be used for this lab)
+- CNI network plugin manifests
+- CNI tooling container images
+
+The way I identified these was by trying to do the installation and working through all of the errors that are thrown when an additional dependency is required. In a real air-gapped scenario, each transport of artifacts across the air gap could represent anywhere from 20 minutes to several weeks of time spent by the installer. That is to say that the target system could be located in a data center on the same floor as your desk, at a satellite downlink facility in the middle of nowhere, or on a submarine that's out to sea. Knowing what is on that system at any given time is important so you know what you have to bring.
+
+## Prepare the Node for K8s
+
+Before downloading and moving the artifacts to the VM, let's first prep that VM to run Kubernetes.
+
+### VM preparation
+
+_Run these steps as a normal user_
+
+**Make destination directory for software artifacts**
+
+```bash
+mkdir ~/tmp
+```
+_Run the following steps as the superuser_ (`root`)
+
+Write to `/etc/sysctl.d/99-k8s-cri.conf`:
+
+```bash
+cat > /etc/sysctl.d/99-k8s-cri.conf << EOF
+net.bridge.bridge-nf-call-iptables=1
+net.ipv4.ip_forward=1
+net.bridge.bridge-nf-call-ip6tables=1
+EOF
+```
+
+Write to `/etc/modules-load.d/k8s.conf` (enable `overlay` and `br_netfilter`):
+```bash
+echo -e overlay\\nbr_netfilter > /etc/modules-load.d/k8s.conf
+```
+Install iptables:
+```bash
+dnf -y install iptables-legacy
+```
+Set iptables to use legacy mode (not `nft` emulating `iptables`):
+```bash
+update-alternatives --set iptables /usr/sbin/iptables-legacy
+```
+
+Turn off swap:
+```bash
+touch /etc/systemd/zram-generator.conf
+systemctl mask systemd-zram-setup@.service
+sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
+```
+
+Disable `firewalld` (this is OK in a demo context):
+```bash
+systemctl disable --now firewalld
+```
+
+Disable `systemd-resolved`:
+```bash
+systemctl disable --now systemd-resolved
+```
+
+Configure DNS defaults for NetworkManager:
+```bash
+sed -i '/\[main\]/a dns=default' /etc/NetworkManager/NetworkManager.conf
+```
+
+Blank the system-level DNS resolver configuration:
+
+```bash
+unlink /etc/resolv.conf || true
+touch /etc/resolv.conf
+```
+
+Disable SELinux _(just for a demo - check before doing this in production!)_:
+
+```bash
+setenforce 0
+```
+
+**Reboot so that all of the above changes take effect**
+```bash
+reboot
+```
+## Download all the artifacts
+
+On the laptop/host machine, download all of the artifacts enumerated in the previous section. Since the air gapped VM is running Fedora 37, all of the dependencies shown in this part are for Fedora 37. Note, this procedure will only work on AArch64 or AMD64 CPU architectures as they are the most popular and widely available. You can execute this procedure anywhere you have write permissions; your home directory is a perfectly suitable choice.
+
+Note, operating system packages for the Kubernetes artifacts that need to be carried across can now be found at [pkgs.k8s.io](/blog/2023/08/15/pkgs-k8s-io-introduction/). This blog post will use a combination of Fedora repositories and GitHub in order to download all of the required artifacts. When you’re doing this on your own cluster, you should decide whether to use the official Kubernetes packages, or the official packages from your operating system distribution - both are valid choices.
+
+
+```bash
+# Set architecture variables
+UARCH=$(uname -m)
+
+if [[ "$UARCH" == "arm64" || "$UARCH" == "aarch64" ]]; then
+
+    ARCH="aarch64"
+    K8s_ARCH="arm64"
+
+else
+
+    ARCH="x86_64"
+    K8s_ARCH="amd64"
+
+fi
+```
+
+Set environment variables for software versions to use:
+
+```bash
+CNI_PLUGINS_VERSION="v1.3.0"
+CRICTL_VERSION="v1.27.0"
+KUBE_RELEASE="v1.27.3"
+RELEASE_VERSION="v0.15.1"
+K9S_VERSION="v0.27.4"
+```
+
+**Create a `download` directory, change into it, and download all of the RPMs and configuration files**
+```bash
+mkdir download && cd download
+
+curl -O https://download.docker.com/linux/fedora/37/${ARCH}/stable/Packages/docker-ce-cli-23.0.2-1.fc37.${ARCH}.rpm
+
+curl -O https://download.docker.com/linux/fedora/37/${ARCH}/stable/Packages/containerd.io-1.6.19-3.1.fc37.${ARCH}.rpm
+
+curl -O https://download.docker.com/linux/fedora/37/${ARCH}/stable/Packages/docker-compose-plugin-2.17.2-1.fc37.${ARCH}.rpm
+
+curl -O https://download.docker.com/linux/fedora/37/${ARCH}/stable/Packages/docker-ce-rootless-extras-23.0.2-1.fc37.${ARCH}.rpm
+
+curl -O https://download.docker.com/linux/fedora/37/${ARCH}/stable/Packages/docker-ce-23.0.2-1.fc37.${ARCH}.rpm
+
+curl -O https://download-ib01.fedoraproject.org/pub/fedora/linux/releases/37/Everything/${ARCH}/os/Packages/l/libcgroup-3.0-1.fc37.${ARCH}.rpm
+
+echo -e "\nDownload Kubernetes Binaries"
+
+curl -L -O "https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${K8s_ARCH}-${CNI_PLUGINS_VERSION}.tgz"
+
+curl -L -O "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${K8s_ARCH}.tar.gz"
+
+curl -L --remote-name-all https://dl.k8s.io/release/${KUBE_RELEASE}/bin/linux/${K8s_ARCH}/{kubeadm,kubelet}
+
+curl -L -O "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service"
+
+curl -L -O "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf"
+
+curl -L -O "https://dl.k8s.io/release/${KUBE_RELEASE}/bin/linux/${K8s_ARCH}/kubectl"
+
+echo -e "\nDownload dependencies"
+
+curl -O "https://dl.fedoraproject.org/pub/fedora/linux/releases/37/Everything/${ARCH}/os/Packages/s/socat-1.7.4.2-3.fc37.${ARCH}.rpm"
+
+curl -O "https://dl.fedoraproject.org/pub/fedora/linux/releases/37/Everything/${ARCH}/os/Packages/l/libcgroup-3.0-1.fc37.${ARCH}.rpm"
+
+curl -O "https://dl.fedoraproject.org/pub/fedora/linux/releases/37/Everything/${ARCH}/os/Packages/c/conntrack-tools-1.4.6-4.fc37.${ARCH}.rpm"
+
+curl -LO "https://github.com/derailed/k9s/releases/download/${K9S_VERSION}/k9s_Linux_${K8s_ARCH}.tar.gz"
+
+curl -LO "https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml"
+```
+**Download all of the necessary container images:**
+
+```bash
+images=(
+    "registry.k8s.io/kube-apiserver:${KUBE_RELEASE}"
+    "registry.k8s.io/kube-controller-manager:${KUBE_RELEASE}"
+    "registry.k8s.io/kube-scheduler:${KUBE_RELEASE}"
+    "registry.k8s.io/kube-proxy:${KUBE_RELEASE}"
+    "registry.k8s.io/pause:3.9"
+    "registry.k8s.io/etcd:3.5.7-0"
+    "registry.k8s.io/coredns/coredns:v1.10.1"
+    "registry:2.8.2"
+    "flannel/flannel:v0.22.0"
+    "flannel/flannel-cni-plugin:v1.1.2"
+)
+
+for image in "${images[@]}"; do
+    # Pull the image from the registry
+    docker pull "$image"
+
+    # Save the image to a tar file on the local disk
+    image_name=$(echo "$image" | sed 's|/|_|g' | sed 's/:/_/g')
+    docker save -o "${image_name}.tar" "$image"
+
+done
+```
+
+The above commands will take a look at the CPU architecture for the current host/laptop, create and change into a directory called download, and finally download all of the dependencies. Each of these files must then be transported over the air gap via scp. The exact syntax of the command will vary depending on the user on the VM, if you created an SSH key, and the IP of your air gap VM. The rough syntax is:
+```bash
+scp -i <<SSH_KEY>> <<FILE>> <<AIRGAP_VM_USER>>@<<AIRGAP_VM_IP>>:~/tmp/
+```
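+
+As a concrete illustration of the rough syntax above, suppose the artifacts live in
+`./download`, the VM user is `fedora`, and the VM is reachable at `10.1.1.2` (the
+bridge address from the topology diagram); all three values are assumptions for this
+sketch, so substitute your own. Recording a checksum manifest first also lets you
+verify on the far side of the air gap that everything arrived intact:
+
+```bash
+# Sketch with assumed values: the key path, user, and IP are placeholders.
+# Record what is being carried across the air gap, then copy everything.
+cd download
+sha256sum * | tee artifact-manifest.sha256
+scp -i ~/.ssh/id_ed25519 ./* fedora@10.1.1.2:~/tmp/
+
+# Later, on the VM (from ~/tmp), verify the transfer:
+#   sha256sum -c artifact-manifest.sha256
+```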
+Once all of the files have been transported to the air gapped VM, the rest of the blog post will take place from the VM. Open a terminal session to that system.
+
+### Put the artifacts in place
+
+Everything that is needed in order to bootstrap a Kubernetes cluster now exists on the air-gapped VM. This section is a lot more complicated since various types of artifacts are now on disk on the air-gapped VM. Get a root shell on the air gap VM as the rest of this section will be executed from there. Let's start by setting the same architecture and environment variables as were set on the host/laptop and then install all of the RPM packages:
+
+```bash
+UARCH=$(uname -m)
+# Set architecture variables
+
+if [[ "$UARCH" == "arm64" || "$UARCH" == "aarch64" ]]; then
+
+    ARCH="aarch64"
+    K8s_ARCH="arm64"
+
+else
+
+    ARCH="x86_64"
+    K8s_ARCH="amd64"
+
+fi
+
+# Set environment variables
+CNI_PLUGINS_VERSION="v1.3.0"
+CRICTL_VERSION="v1.27.0"
+KUBE_RELEASE="v1.27.3"
+RELEASE_VERSION="v0.15.1"
+K9S_VERSION="v0.27.4"
+
+cd ~/tmp/
+
+dnf -y install ./*.rpm
+```
+
+Next, install the CNI plugins and `crictl`:
+
+```bash
+mkdir -p /opt/cni/bin
+tar -C /opt/cni/bin -xz -f "cni-plugins-linux-${K8s_ARCH}-${CNI_PLUGINS_VERSION}.tgz"
+tar -C /usr/local/bin -xz -f "crictl-${CRICTL_VERSION}-linux-${K8s_ARCH}.tar.gz"
+```
+
+Make kubeadm, kubelet and kubectl executable and move them from the `~/tmp`
+directory to `/usr/local/bin`:
+```bash
+chmod +x kubeadm kubelet kubectl
+mv kubeadm kubelet kubectl /usr/local/bin
+```
+
+Define an override for the systemd kubelet service file, and move it to the proper location:
+
+```bash
+mkdir -p /etc/systemd/system/kubelet.service.d
+
+sed "s:/usr/bin:/usr/local/bin:g" 10-kubeadm.conf > /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+```
+
+The CRI plugin for containerd is disabled by default; enable it:
+```bash
+sed -i 's/^disabled_plugins = \["cri"\]/#&/' /etc/containerd/config.toml
+```
+
+Put a custom `/etc/docker/daemon.json` file in place:
+```bash
+echo '{
+    "exec-opts": ["native.cgroupdriver=systemd"],
+    "insecure-registries" : ["localhost:5000"],
+    "allow-nondistributable-artifacts": ["localhost:5000"],
+    "log-driver": "json-file",
+    "log-opts": {
+        "max-size": "100m"
+    },
+    "group": "rnd",
+    "storage-driver": "overlay2",
+    "storage-opts": [
+        "overlay2.override_kernel_check=true"
+    ]
+}' > /etc/docker/daemon.json
+```
+Two important items to highlight in the Docker `daemon.json` configuration file. The `insecure-registries` line means that the registry in brackets does not support TLS. Even inside an air gapped environment, this isn't a good practice but is fine for the purposes of this lab. The `allow-nondistributable-artifacts` line tells Docker to permit pushing nondistributable artifacts to this registry. Docker by default does not push these layers to avoid potential issues around licensing or distribution rights. A good example of this is the Windows base container image. This line will allow layers that Docker marks as "foreign" to be pushed to the registry.
+While not a big deal for this article, that line could be required for some air gapped environments. All layers have to exist locally since nothing inside the air gapped environment can reach out to a public container image registry to get what it needs.
+
+(Re)start Docker and enable it so it starts at system boot:
+
+```bash
+systemctl restart docker
+systemctl enable docker
+```
+
+Start, and enable, containerd and the kubelet:
+
+```bash
+systemctl enable --now containerd
+systemctl enable --now kubelet
+```
+
+The container image registry that runs in Docker is only required for any CNI related containers and subsequent workload containers. This registry is **not** used to house the Kubernetes component containers. Note, nerdctl would have also worked here as an alternative to Docker and would have allowed for direct interaction with containerd. Docker was chosen for its familiarity.
+
+Start a container image registry inside Docker:
+
+```bash
+docker load -i registry_2.8.2.tar
+docker run -d -p 5000:5000 --restart=always --name registry registry:2.8.2
+```
+
+### Load Flannel containers into the Docker registry
+
+**Note**: _Flannel was chosen for this lab due to familiarity. Choose whatever CNI works best in your environment._
+
+```bash
+docker load -i flannel_flannel_v0.22.0.tar
+docker load -i flannel_flannel-cni-plugin_v1.1.2.tar
+docker tag flannel/flannel:v0.22.0 localhost:5000/flannel/flannel:v0.22.0
+docker tag flannel/flannel-cni-plugin:v1.1.2 localhost:5000/flannel/flannel-cni-plugin:v1.1.2
+docker push localhost:5000/flannel/flannel:v0.22.0
+docker push localhost:5000/flannel/flannel-cni-plugin:v1.1.2
+```
+
+Load container images for Kubernetes components, via `ctr`. The tar file names below follow the naming scheme used when the images were saved on the host/laptop:
+
+```bash
+image_files=(
+    "registry.k8s.io_kube-apiserver_${KUBE_RELEASE}.tar"
+    "registry.k8s.io_kube-controller-manager_${KUBE_RELEASE}.tar"
+    "registry.k8s.io_kube-scheduler_${KUBE_RELEASE}.tar"
+    "registry.k8s.io_kube-proxy_${KUBE_RELEASE}.tar"
+    "registry.k8s.io_pause_3.9.tar"
+    "registry.k8s.io_etcd_3.5.7-0.tar"
+    "registry.k8s.io_coredns_coredns_v1.10.1.tar"
+)
+
+for index in "${!image_files[@]}"; do
+
+    if [[ -f "${image_files[$index]}" ]]; then
+
+        # The below line loads the images where they need to be on the VM
+        ctr -n k8s.io images import "${image_files[$index]}"
+
+    else
+
+        echo "File ${image_files[$index]} not found!" 1>&2
+
+    fi
+
+done
+```
+
+A totally reasonable question here could be "Why not use the Docker registry that was just stood up to house the K8s component images?" This simply didn't work even with the proper modification to the configuration file that gets passed to kubeadm.
+
+### Spin up the Kubernetes cluster
+
+Check if a cluster is already running and tear it down if it is:
+
+```bash
+if systemctl is-active --quiet kubelet; then
+
+    # Reset the Kubernetes cluster
+
+    echo "A Kubernetes cluster is already running. Resetting the cluster..."
+
+    kubeadm reset -f
+
+fi
+```
+
+Log into the Docker registry from inside the air-gapped VM:
+
+```bash
+# OK for a demo; use secure credentials in production!
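+# Note: the credentials below are placeholders. The local registry started
+# above was not configured with authentication, so any username/password
+# pair will be accepted; a production registry should sit behind TLS with
+# real credentials.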
+
+DOCKER_USER=user
+DOCKER_PASS=pass
+echo ${DOCKER_PASS} | docker login --username=${DOCKER_USER} --password-stdin localhost:5000
+```
+
+Create a cluster configuration file and initialize the cluster:
+
+```bash
+echo "---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: ClusterConfiguration
+clusterName: kubernetes
+kubernetesVersion: v1.27.3
+networking:
+  dnsDomain: cluster.local
+  podSubnet: 10.244.0.0/16 # --pod-network-cidr
+  serviceSubnet: 10.96.0.0/12
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+kind: InitConfiguration
+localAPIEndpoint:
+  advertiseAddress: 10.10.10.10 # Update to the IP address of the air gap VM
+  bindPort: 6443
+nodeRegistration:
+  criSocket: unix:///run/containerd/containerd.sock # or rely on autodetection
+  name: airgap # this must match the hostname of the air gap VM
+# Since this is a single node cluster, this taint has to be commented out,
+# otherwise the coredns pods will not come up.
+# taints:
+# - effect: NoSchedule
+#   key: node-role.kubernetes.io/master" > kubeadm_config.yaml
+
+kubeadm init --config kubeadm_config.yaml
+```
+
+Set `$KUBECONFIG` and use `kubectl` to wait until the API server is healthy:
+
+```bash
+export KUBECONFIG=/etc/kubernetes/admin.conf
+
+until kubectl get nodes; do
+    echo -e "\nWaiting for API server to respond..." 1>&2
+    sleep 5
+
+done
+```
+
+### Set up networking
+
+Update Flannel image locations in the Flannel manifest, and apply it:
+
+```bash
+sed -i 's/image: docker\.io/image: localhost:5000/g' kube-flannel.yml
+kubectl apply -f kube-flannel.yml
+```
+
+Run `kubectl get pods -A --watch` until all pods are up and running.
+
+## Run an example Pod
+
+With a cluster operational, the next step is a workload. For this simple demonstration, the [Podinfo](https://github.com/stefanprodan/podinfo) application will be deployed.
+
+### Install Helm
+
+This first part of the procedure must be executed from the host/laptop. If not already present, install Helm following [Installing Helm](https://helm.sh/docs/intro/install/).
+
+Next, download the helm binary for Linux:
+
+```bash
+UARCH=$(uname -m)
+# Reset the architecture variables if needed
+if [[ "$UARCH" == "arm64" || "$UARCH" == "aarch64" ]]; then
+
+    ARCH="aarch64"
+    K8s_ARCH="arm64"
+
+else
+
+    ARCH="x86_64"
+    K8s_ARCH="amd64"
+
+fi
+
+curl -LO https://get.helm.sh/helm-v3.12.2-linux-${K8s_ARCH}.tar.gz
+```
+
+Add the Podinfo helm repository, download the Podinfo helm chart, download the Podinfo container image, and then finally save it to the local disk:
+
+```bash
+helm repo add podinfo https://stefanprodan.github.io/podinfo
+helm fetch podinfo/podinfo --version 6.4.0
+docker pull ghcr.io/stefanprodan/podinfo:6.4.0
+```
+
+### Save the podinfo image to a tar file on the local disk
+```bash
+docker save -o podinfo_podinfo-6.4.0.tar ghcr.io/stefanprodan/podinfo:6.4.0
+```
+
+### Transfer the image across the air gap
+
+Reuse the `~/tmp` directory created on the air gapped VM to transport these artifacts across the air gap:
+
+```bash
+scp -i <<SSH_KEY>> <<FILE>> <<AIRGAP_VM_USER>>@<<AIRGAP_VM_IP>>:~/tmp/
+```
+
+### Continue on the isolated side
+
+_Now pivot over to the air gap VM for the rest of the installation procedure._
+
+Switch into `~/tmp`:
+```bash
+cd ~/tmp
+```
+
+Extract and move the `helm` binary:
+
+```bash
+tar -zxvf helm-v3.12.2-linux-${K8s_ARCH}.tar.gz
+mv linux-${K8s_ARCH}/helm /usr/local/bin/helm
+```
+
+Load the Podinfo container image into the local Docker registry:
+
+```bash
+docker load -i podinfo_podinfo-6.4.0.tar
+docker tag ghcr.io/stefanprodan/podinfo:6.4.0 localhost:5000/podinfo/podinfo:6.4.0
+docker push localhost:5000/podinfo/podinfo:6.4.0
+```
+
+Ensure `$KUBECONFIG` is set correctly, then install the Podinfo Helm chart:
+
+```bash
+# Outside of a demo or lab environment, use lower (or even least) privilege
+# credentials to manage your workloads.
+export KUBECONFIG=/etc/kubernetes/admin.conf
+helm install podinfo ./podinfo-6.4.0.tgz --set image.repository=localhost:5000/podinfo/podinfo
+```
+
+Verify that the Podinfo application comes up:
+```bash
+kubectl get pods -n default
+```
+Or run k9s (a terminal user interface for Kubernetes):
+```bash
+k9s
+```
+## Zarf
+
+Zarf is an open-source tool that takes a declarative approach to software packaging and delivery, including air gap. This same podinfo application will be installed onto the air gap VM using Zarf in this section. The first step is to install [Zarf](https://zarf.dev/install/) on the host/laptop.
+
+Alternatively, a prebuilt binary can be downloaded onto the host/laptop from [GitHub](https://github.com/defenseunicorns/zarf/releases/) for various OS/CPU architectures.
+
+A binary is also needed across the air gap on the VM:
+```bash
+UARCH=$(uname -m)
+# Set the architecture variables if needed
+if [[ "$UARCH" == "arm64" || "$UARCH" == "aarch64" ]]; then
+
+    ARCH="aarch64"
+    K8s_ARCH="arm64"
+
+else
+
+    ARCH="x86_64"
+    K8s_ARCH="amd64"
+
+fi
+
+export ZARF_VERSION=v0.28.3
+
+curl -LO "https://github.com/defenseunicorns/zarf/releases/download/${ZARF_VERSION}/zarf_${ZARF_VERSION}_Linux_${K8s_ARCH}"
+```
+Zarf needs to bootstrap itself into a Kubernetes cluster through the use of an init package. That also needs to be transported across the air gap so let's download it onto the host/laptop:
+```bash
+curl -LO "https://github.com/defenseunicorns/zarf/releases/download/${ZARF_VERSION}/zarf-init-${K8s_ARCH}-${ZARF_VERSION}.tar.zst"
+```
+The way that Zarf is declarative is through the use of a zarf.yaml file. Here is the zarf.yaml file that will be used for this Podinfo installation.
Write it to whatever directory you have write access to on your host/laptop; your home directory is fine:
+```bash
+echo 'kind: ZarfPackageConfig
+metadata:
+  name: podinfo
+  description: "Deploy helm chart for the podinfo application in K8s via zarf"
+components:
+  - name: podinfo
+    required: true
+    charts:
+      - name: podinfo
+        version: 6.4.0
+        namespace: podinfo-helm-namespace
+        releaseName: podinfo
+        url: https://stefanprodan.github.io/podinfo
+    images:
+      - ghcr.io/stefanprodan/podinfo:6.4.0' > zarf.yaml
+```
+The next step is to build the Podinfo package. This must be done from the same directory location where the zarf.yaml file is located.
+```bash
+zarf package create --confirm
+```
+This command will download the defined helm chart and image and put them into a single file written to disk. This single file is all that needs to be carried across the air gap:
+
+```bash
+ls zarf-package-*
+```
+Sample output:
+```bash
+zarf-package-podinfo-arm64.tar.zst
+```
+
+Transport the linux zarf binary, zarf init package and Podinfo package over to the air gapped VM:
+
+```bash
+scp -i <<SSH_KEY>> <<FILE>> <<AIRGAP_VM_USER>>@<<AIRGAP_VM_IP>>:~/tmp/
+```
+
+From the air gapped VM, switch into the ~/tmp directory where all of the artifacts were placed:
+```bash
+cd ~/tmp
+```
+Set `$KUBECONFIG` to a file with credentials for the local cluster; also set the Zarf version to match the binary that was carried across:
+```bash
+export KUBECONFIG=/etc/kubernetes/admin.conf
+
+export ZARF_VERSION=v0.28.3
+```
+Make the `zarf` binary executable and (as `root`) move it to `/usr/bin`:
+```bash
+chmod +x zarf_${ZARF_VERSION}_Linux_${K8s_ARCH}
+sudo mv zarf_${ZARF_VERSION}_Linux_${K8s_ARCH} /usr/bin/zarf
+```
+
+Likewise, move the Zarf init package to `/usr/bin`:
+```bash
+mv zarf-init-${K8s_ARCH}-${ZARF_VERSION}.tar.zst /usr/bin
+```
+
+Initialize Zarf into the cluster:
+```bash
+zarf init --confirm --components=git-server
+```
+When this command is done, a Zarf package is ready to be deployed.
+```bash
+zarf package deploy
+```
+This command will search the current directory for a Zarf package. Select the podinfo package (zarf-package-podinfo-${K8s_ARCH}.tar.zst) and continue. Once the package deployment is complete, run `zarf tools monitor` in order to bring up k9s to view the cluster.
+
+## Conclusion
+
+This is one method that can be used to spin up an air-gapped cluster, and two methods to deploy
+a mission application. Your mileage may vary on different operating systems regarding the
+exact software artifacts that need to be carried across the air gap, but conceptually this procedure is still valid.
+
+This demo also created an artificial air-gapped environment. In the real world, every missed dependency
+could represent hours, if not days or weeks, of lost time to get running software in the air-gapped environment.
+This artificial air gap also obscured some common methods of air gap software delivery, such as using a
+_data diode_. Depending on the environment, the diode can be very expensive to use.
+Also, none of the artifacts were scanned before being carried across the air gap.
+The presence of the air gap in general means that the workload running there is more sensitive, and nothing should be carried across unless it's known to be safe.
\ No newline at end of file diff --git a/content/en/blog/_posts/2023-10-20-kcs-shanghai/index.md b/content/en/blog/_posts/2023-10-20-kcs-shanghai/index.md new file mode 100644 index 0000000000000..e32bdcb0df615 --- /dev/null +++ b/content/en/blog/_posts/2023-10-20-kcs-shanghai/index.md @@ -0,0 +1,114 @@ +--- +layout: blog +title: "A Quick Recap of 2023 China Kubernetes Contributor Summit" +slug: kcs-shanghai +date: 2023-10-20 +canonicalUrl: https://www.kubernetes.dev/blog/2023/10/20/kcs-shanghai/ +--- + +**Author:** Paco Xu and Michael Yao (DaoCloud) + +On September 26, 2023, the first day of +[KubeCon + CloudNativeCon + Open Source Summit China 2023](https://www.lfasiallc.com/kubecon-cloudnativecon-open-source-summit-china/), +nearly 50 contributors gathered in Shanghai for the Kubernetes Contributor Summit. + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs04.jpeg" alt="All participants in the 2023 Kubernetes Contributor Summit" caption="All participants in the 2023 Kubernetes Contributor Summit" >}} + +This marked the first in-person offline gathering held in China after three years of the pandemic. + +## A joyful meetup + +The event began with welcome speeches from [Kevin Wang](https://github.com/kevin-wangzefeng) from Huawei Cloud, +one of the co-chairs of KubeCon, and [Puja](https://github.com/puja108) from Giant Swarm. + +Following the opening remarks, the contributors introduced themselves briefly. Most attendees were from China, +while some contributors had made the journey from Europe and the United States specifically for the conference. +Technical experts from companies such as Microsoft, Intel, Huawei, as well as emerging forces like DaoCloud, +were present. Laughter and cheerful voices filled the room, regardless of whether English was spoken with +European or American accents or if conversations were carried out in authentic Chinese language. This created +an atmosphere of comfort, joy, respect, and anticipation. Past contributions brought everyone closer, and +mutual recognition and accomplishments made this offline gathering possible. + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs06.jpeg" alt="Face to face meeting in Shanghai" caption="Face to face meeting in Shanghai" >}} + +The attending contributors were no longer just GitHub IDs; they transformed into vivid faces. +From sitting together and capturing group photos to attempting to identify "Who is who," +a loosely connected collective emerged. This team structure, although loosely knit and free-spirited, +was established to pursue shared dreams. + +As the saying goes, "You reap what you sow." Each effort has been diligently documented within +the Kubernetes community contributions. Regardless of the passage of time, the community will +not erase those shining traces. Brilliance can be found in your PRs, issues, or comments. +It can also be seen in the smiling faces captured in meetup photos or heard through stories +passed down among contributors. + +## Technical sharing and discussions + +Next, there were three technical sharing sessions: + +- [sig-multi-cluster](https://github.com/kubernetes/community/blob/master/sig-multicluster/README.md): + [Hongcai Ren](https://github.com/RainbowMango), a maintainer of Karmada, provided an introduction to + the responsibilities and roles of this SIG. Their focus is on designing, discussing, implementing, + and maintaining APIs, tools, and documentation related to multi-cluster management. + Cluster Federation, one of Karmada's core concepts, is also part of their work. 
+ +- [helmfile](https://github.com/helmfile/helmfile): [yxxhero](https://github.com/yxxhero) + from [GitLab](https://gitlab.cn/) presented how to deploy Kubernetes manifests declaratively, + customize configurations, and leverage the latest features of Helm, including Helmfile. + +- [sig-scheduling](https://github.com/kubernetes/community/blob/master/sig-scheduling/README.md): + [william-wang](https://github.com/william-wang) from Huawei Cloud shared the recent updates and + future plans of SIG Scheduling. This SIG is responsible for designing, developing, and testing + components related to Pod scheduling. + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs03.jpeg" alt="A technical session about sig-multi-cluster" caption="A technical session about sig-multi-cluster" >}} + +Following the sessions, a video featuring a call for contributors by [Sergey Kanzhelev](https://github.com/SergeyKanzhelev), +the SIG-Node Chair, was played. The purpose was to encourage more contributors to join the Kubernetes community, +with a special emphasis on the popular SIG-Node. + +Lastly, Kevin hosted an Unconference collective discussion session covering topics such as +multi-cluster management, scheduling, elasticity, AI, and more. For detailed minutes of +the Unconference meeting, please refer to . + +## China's contributor statistics + +The contributor summit took place in Shanghai, with 90% of the attendees being Chinese. +Within the Cloud Native Computing Foundation (CNCF) ecosystem, contributions from China have been steadily increasing. Currently: + +- Chinese contributors account for 9% of the total. +- Contributions from China make up 11.7% of the overall volume. +- China ranks second globally in terms of contributions. + +{{< note >}} +The data is from KubeCon keynotes by Chris Aniszczyk, CTO of Cloud Native Computing Foundation, +on September 26, 2023. This probably understates Chinese contributions. A lot of Chinese contributors +use VPNs and may not show up as being from China in the stats accurately. +{{< /note >}} + +The Kubernetes Contributor Summit is an inclusive meetup that welcomes all community contributors, including: + +- New Contributors +- Current Contributors + - docs + - code + - community management +- Subproject members +- Members of Special Interest Group (SIG) / Working Group (WG) +- Active Contributors +- Casual Contributors + +## Acknowledgments + +We would like to express our gratitude to the organizers of this event: + +- [Kevin Wang](https://github.com/kevin-wangzefeng), the co-chair of KubeCon and the lead of the kubernetes contributor summit. +- [Paco Xu](https://github.com/pacoxu), who actively coordinated the venue, meals, invited contributors from both China and + international sources, and established WeChat groups to collect agenda topics. They also shared details of the event + before and after its occurrence through [pre and post announcements](https://github.com/kubernetes/community/issues/7510). +- [Mengjiao Liu](https://github.com/mengjiao-liu), who was responsible for organizing, coordinating, + and facilitating various matters related to the summit. + +We extend our appreciation to all the contributors who attended the China Kubernetes Contributor Summit in Shanghai. +Your dedication and commitment to the Kubernetes community are invaluable. +Together, we continue to push the boundaries of cloud native technology and shape the future of this ecosystem. 
diff --git a/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg
new file mode 100644
index 0000000000000..c6131bfc911f2
Binary files /dev/null and b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg differ
diff --git a/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg
new file mode 100644
index 0000000000000..61cb7ef8526fe
Binary files /dev/null and b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg differ
diff --git a/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg
new file mode 100644
index 0000000000000..f66c505e7a8d4
Binary files /dev/null and b/content/en/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg differ
diff --git a/content/en/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md b/content/en/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md
new file mode 100644
index 0000000000000..7efeae7f3f1f3
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md
@@ -0,0 +1,104 @@
+---
+layout: blog
+title: PersistentVolume Last Phase Transition Time in Kubernetes
+date: 2023-10-23
+slug: persistent-volume-last-phase-transition-time
+---
+
+**Author:** Roman Bednář (Red Hat)
+
+In the recent Kubernetes v1.28 release, we (SIG Storage) introduced a new alpha feature that aims to improve PersistentVolume (PV)
+storage management and help cluster administrators gain better insights into the lifecycle of PVs.
+With the addition of the `lastPhaseTransitionTime` field into the status of a PV,
+cluster administrators are now able to track the last time a PV transitioned to a different
+[phase](/docs/concepts/storage/persistent-volumes/#phase), allowing for more efficient
+and informed resource management.
+
+## Why do we need a new PV field? {#why-new-field}
+
+PersistentVolumes in Kubernetes play a crucial role in providing storage resources to workloads running in the cluster.
+However, managing these PVs effectively can be challenging, especially when it comes
+to determining the last time a PV transitioned between different phases, such as
+`Pending`, `Bound` or `Released`.
+Administrators often need to know when a PV was last used or transitioned to certain
+phases; for instance, to implement retention policies, perform cleanup, or monitor storage health.
+
+In the past, Kubernetes users have faced data loss issues when using the `Delete` retain policy and had to resort to the safer `Retain` policy.
+When we planned the work to introduce the new `lastPhaseTransitionTime` field, we
+wanted to provide a more generic solution that can be used for various use cases,
+including manual cleanup based on the time a volume was last used or producing alerts based on phase transition times.
+
+## How lastPhaseTransitionTime helps
+
+Provided you've enabled the feature gate (see [How to use it](#how-to-use-it)), the new `.status.lastPhaseTransitionTime` field of a PersistentVolume (PV)
+is updated every time that PV transitions from one phase to another.
+Whether it's transitioning from `Pending` to `Bound`, `Bound` to `Released`, or any other phase transition, the `lastPhaseTransitionTime` will be recorded.
+For newly created PVs the phase will be set to `Pending` and the `lastPhaseTransitionTime` will be recorded as well.
+
+This feature allows cluster administrators to:
+
+1. Implement Retention Policies
+
+   With the `lastPhaseTransitionTime`, administrators can now track when a PV was last used or transitioned to the `Released` phase.
+   This information can be crucial for implementing retention policies to clean up resources that have been in the `Released` phase for a specific duration.
+   For example, it is now trivial to write a script or a policy that deletes all PVs that have been in the `Released` phase for a week (see the sketch after this list).
+
+2. Monitor Storage Health
+
+   By analyzing the phase transition times of PVs, administrators can monitor storage health more effectively.
+   For example, they can identify PVs that have been in the `Pending` phase for an unusually long time, which may indicate underlying issues with the storage provisioner.
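+As a rough sketch of that first use case: the pipeline below (an illustrative
+example, not an official tool) lists every PV that has been `Released` for more
+than seven days; the `jq` dependency and the seven-day cutoff are assumptions,
+so adjust both to your environment:
+
+```bash
+# List PVs that have been Released for more than 7 days (604800 seconds).
+kubectl get pv -o json | jq -r --argjson cutoff "$(( $(date +%s) - 604800 ))" '
+  .items[]
+  | select(.status.phase == "Released")
+  | select(.status.lastPhaseTransitionTime != null)
+  | select((.status.lastPhaseTransitionTime | fromdateiso8601) < $cutoff)
+  | .metadata.name'
+
+# Once the list looks right, the same pipeline can feed a deletion:
+#   ... | xargs -r kubectl delete pv
+```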
+## How to use it
+
+The `lastPhaseTransitionTime` field is alpha starting from Kubernetes v1.28, so it requires
+the `PersistentVolumeLastPhaseTransitionTime` feature gate to be enabled.
+
+If you want to test the feature whilst it's alpha, you need to enable this feature gate on the `kube-controller-manager` and the `kube-apiserver`.
+
+Use the `--feature-gates` command line argument:
+
+```shell
+--feature-gates="...,PersistentVolumeLastPhaseTransitionTime=true"
+```
+
+Keep in mind that the feature enablement does not have immediate effect; the new field will be populated whenever a PV is updated and transitions between phases.
+Administrators can then access the new field through the PV status, which can be retrieved using standard Kubernetes API calls or through Kubernetes client libraries.
+
+Here is an example of how to retrieve the `lastPhaseTransitionTime` for a specific PV using the `kubectl` command-line tool:
+
+```shell
+kubectl get pv <pv-name> -o jsonpath='{.status.lastPhaseTransitionTime}'
+```
+
+## Going forward
+
+This feature was initially introduced as an alpha feature, behind a feature gate that is disabled by default.
+During the alpha phase, we (Kubernetes SIG Storage) will collect feedback from the end user community and address any issues or improvements identified.
+
+Once sufficient feedback has been received, or no issues are reported, the feature can move to beta.
+The beta phase will allow us to further validate the implementation and ensure its stability.
+
+At least two Kubernetes releases will happen between the release where this field graduates
+to beta and the release that graduates the field to general availability (GA). That means that
+the earliest release where this field could be generally available is Kubernetes 1.32,
+likely to be scheduled for early 2025.
+
+## Getting involved
+
+We always welcome new contributors, so if you would like to get involved you can
+join our [Kubernetes Storage Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-storage) (SIG).
+
+If you would like to share feedback, you can do so on our
+[public Slack channel](https://app.slack.com/client/T09NY5SBT/C09QZFCE5).
+If you're not already part of that Slack workspace, you can visit https://slack.k8s.io/ for an invitation.
+ +Special thanks to all the contributors that provided great reviews, shared valuable insight and helped implement this feature (alphabetical order): + +- Han Kang ([logicalhan](https://github.com/logicalhan)) +- Jan Šafránek ([jsafrane](https://github.com/jsafrane)) +- Jordan Liggitt ([liggitt](https://github.com/liggitt)) +- Kiki ([carlory](https://github.com/carlory)) +- Michelle Au ([msau42](https://github.com/msau42)) +- Tim Bannister ([sftim](https://github.com/sftim)) +- Wojciech Tyczynski ([wojtek-t](https://github.com/wojtek-t)) +- Xing Yang ([xing-yang](https://github.com/xing-yang)) diff --git a/content/en/blog/_posts/2023-10-24-kubernetes-1.28-release-interview.md b/content/en/blog/_posts/2023-10-24-kubernetes-1.28-release-interview.md new file mode 100644 index 0000000000000..6311b7808714b --- /dev/null +++ b/content/en/blog/_posts/2023-10-24-kubernetes-1.28-release-interview.md @@ -0,0 +1,223 @@ +--- +layout: blog +title: "Plants, process and parties: the Kubernetes 1.28 release interview" +date: 2023-10-24 +--- + +**Author**: Craig Box + +Since 2018, one of my favourite contributions to the Kubernetes community has been to [share the story of each release](https://www.google.com/search?q=%22release+interview%22+site%3Akubernetes.io%2Fblog). Many of these stories were told on behalf of a past employer; by popular demand, I've brought them back, now under my own name. If you were a fan of the old show, I would be delighted if you would [subscribe](https://craigbox.substack.com/about). + +Back in August, [we welcomed the release of Kubernetes 1.28](/blog/2023/08/15/kubernetes-v1-28-release/). That release was led by [Grace Nguyen](https://twitter.com/gracenng), a CS student at the University of Waterloo. Grace joined me for the traditional release interview, and while you can read her story below, [I encourage you to listen to it if you can](https://craigbox.substack.com/p/the-kubernetes-128-release-interview). + +*This transcript has been lightly edited and condensed for clarity.* + +--- + +**You're a student at the University of Waterloo, so I want to spend the first two minutes of this interview talking about the Greater Kitchener-Waterloo region. It's August, so this is one of the four months of the year when there's no snow visible on the ground?**
+Well, it's not that bad. I think the East Coast has it kind of good. I grew up in Calgary, but I do love summer here in Waterloo. We have a [petting zoo](https://goo.gl/maps/W1nM7LjNZPv) close to our university campus, so I go and see the llamas sometimes. + +**Is that a new thing?**
+I'm not sure, it seems like it's been around five-ish years, the Waterloo Park? + +**I lived there in 2007, for a couple of years, just to set the scene for why we're talking about this. I think they were building a lot of the park then. I do remember, of course, that [Kitchener holds the second largest Oktoberfest in the world](https://www.oktoberfest.ca/). Is that something you've had a chance to check out?**
+I have not. I actually didn't know that was a fact. + +**The local civic organization is going to have to do a bit more work, I feel. Do you like ribs?**
+I have mixed feelings about ribs. It's kind of a hit or miss situation for me so far. + +**Again, that might be something that's changed over the last few years. The Ribfests used to have a lot of trophies with little pigs on top of them, but I feel that the shifting dining habits of the world might mean they have to offer some vegan or vegetarian options, to please the modern palette.**
+[LAUGHS] For sure. Do you recommend the Oktoberfest here? Have you been? + +**I went a couple of times. It was a lot of fun.**
+Okay. + +**It's basically just drinking. I would have recommended it back then; I'm not sure it would be quite what I'd be doing today.**
+All right, good to know. + +**The Ribfest, however, I would go back just for that.**
+Oh, ok. + +**And the great thing about Ribfests as a concept is that they have one in every little town. [The Kitchener Ribfest](https://kitchenerribandbeerfest.com/), I looked it up, it's in July; you've just missed that. But, you could go to the [Waterloo Ribfest](https://northernheatribseries.ca/waterloo/) in September.**
+Oh, it is in September? They have their own Ribfest? + +**They do. I think Guelph has one, and Cambridge has one. That's the advantage of the region — there are lots of little cities. Kitchener and Waterloo are two cities that grew into each other — they do call them the Twin Cities. I hear that they finally built the light rail link between the two of them?**
+It is fantastic, and makes the city so much more walkable. + +**Yes, you can go from one mall to the other. That's Canada for you.**
+Well, Uptown is really nice. I quite like it. It's quite cozy. + +**Do you ever cross the border over into Kitchener? Or only when you've lost a bet?**
+Yeah, not a lot. Only for farmer's market, I say. + +**It's worthwhile. There's a lot of good food there, I remember.**
+Yeah. Quite lovely. + +**Now we've got all that out of the way, let's travel back in time a little bit. You mentioned there that you went to high school in Calgary?**
+I did. I had not been to Ontario before I went to university. Calgary was frankly too cold and not walkable enough for me. + +**I basically say the same thing about Waterloo and that's why I moved to England.**
+Fascinating. Gets better. + +**How did you get into tech?**
+I took a computer science class in high school. I was one of maybe only three women in the class, and I kind of stuck with it since. + +**Was the gender distribution part of your thought process at the time?**
+Yeah, I think I was drawn to it partially because I didn't see a lot of people who looked like me in the class. + +**You followed it through to university. What is it that you're studying?**
+I am studying computer engineering, so a lot of hardware stuff. + +**You're involved in the [UW Cybersecurity Club](https://www.facebook.com/groups/uwcyber/). What can you tell me about that without having to kill me?**
+Oh, we are very nice and friendly people! I told myself I'm going to have a nice and chill summer and then I got chosen to lead the release and also ended up running the Waterloo Cybersecurity Club. The club kind of died out during the pandemic, because we weren't on campus, but we have so many smart and amazing people who are in cybersecurity, so it's great to get them together and I learned so many things. + +**Is that like the modern equivalent of the [LAN party](https://en.wikipedia.org/wiki/LAN_party)? You're all getting into a dark room and trying to hack the Gibson?**
+[LAUGHS] Well, you'll have to explain to me again what a LAN party is. Do you bring your own PC? + +**You used to. Back in the day it was incomprehensible that you could communicate with a different person in a different place at a fast enough speed, so you had to physically sit next to somebody and plug a cable in between you.**
+Okay, well kind of the same, I guess. We bring our own laptop and we go to CTF competitions together. + +**They didn't have laptops back in the days of LAN parties. You'd bring a giant 19-inch square monitor, and everything. It was a badge of honor what you could carry.**
+Okay. Can't relate, but good to know. [LAUGHS] + +**One of the more unique aspects of UW is its [co-op system](https://uwaterloo.ca/future-students/co-op). Tell us a little bit about that?**
As part of my degree, I am required to do a minimum of five and a maximum of six co-ops. I've done all six of them. Two of them were in Kubernetes and that's how I got started.

**A co-op is a placement, as opposed to something you do on campus?**
+Right, so co-op is basically an internship. My first one was at the Canada Revenue Agency. We didn't have wifi and I had my own cubicle, which is interesting. They don't do that anymore, they have open office space. But my second was at Ericsson, where I learned about Kubernetes. It was during the pandemic. KubeCon offered virtual attendance for students and I signed up and I poked around and I have been around since. + +**What was it like going through university during the COVID years? What did that mean in terms of the fact you would previously have traveled to these internships? Did you do them all from home?**
+I'm not totally sure what I missed out on. For sure, a lot of relationship building, but also that we do have to move a lot as part of the co-op experience. Last fall I was in San Francisco, I was in Palo Alto earlier this year. A lot of that dynamic has already been the case. + +**Definitely different weather systems, Palo Alto versus Waterloo.**
+Oh, for sure. Yes, yes. Really glad I was there over the winter. + +**The first snow would fall in Ontario about the end of October and it would pile up over the next few months. There were still piles that hadn't melted by June. That's why I say, there were only four months of the year, July through September, where there was no snow on the ground.**
+ That's true. Didn't catch any snow in Palo Alto, and honestly, that's great. [CHUCKLES] + +**Thank you, global warming, I guess.**
+Oh no! [LAUGHS] + +**Tell me about the co-op term that you did working with Kubernetes at Ericsson?**
+This was such a long time ago, but we were trying to build some sort of pipeline to deploy testing. It was running inside a cluster, and I learned Helm charts and all that good stuff. And then, for the co-op after that, I worked at a Canadian startup in FinTech. It was 24/7 Kubernetes, [building their secret injection system, using ArgoCD to automatically pull secrets from 1Password](https://medium.com/@nng.grace/automated-kubernetes-secret-injection-with-1password-secret-automation-and-hashicorp-vault-8db826c50c1d). + +**How did that lead you on to involvement with the release team?**
+It was over the pandemic, so I didn't have a lot to do, I went to the conference, saw so many cool talks. One that really stuck out to me was [a Kubernetes hacking talk by Tabitha Sable and V Korbes](https://www.youtube.com/watch?v=-4W3ChRVeLI). I thought it was the most amazing thing and it was so cool. One of my friends was on the release team at the time, and she showed me what she does. I applied and thankfully got in. I didn't have any open source experience. It was fully like one of those things where someone took a chance on me. + +**How would you characterize the experience that you've had to date? You have had involvement with pretty much every release since then.**
+Yeah, I think it was a really formative experience, and the community has been such a big part of it. + +**You started as an enhancement shadow with Kubernetes 1.22, eventually moving up to enhancements lead, then you moved on to be the release lead shadow. Obviously, you are the lead for 1.28, but for 1.27 you did something a bit different. What was that, and why did you do it?**
For 1.25 and 1.26, I was release lead shadow, so I had an understanding of what that role was like. I wanted to shadow another team, and at that time CI Signal was a big black box to me. I joined the team, but since I also had capacity for other things, I joined as a branch manager associate as well.

**What is the difference between that role and the traditional release team roles we think about?**
Yeah, that's a great question. So the branch management role is a more constant role. They don't necessarily get swapped out every release. You shadow as an associate, so you do things like cut releases, distribute them, update distros, things like that. It's a really important role, and the folks that are in there are more technical. So if you have been on the release team for a long time and are looking for a more permanent role, I recommend looking into that.

**Congratulations again on [the release of 1.28 today](/blog/2023/08/15/kubernetes-v1-28-release/).**
+Yeah, thank you. + +**What is the best new feature in Kubernetes 1.28, and why is it [sidecar container support](/blog/2023/08/25/native-sidecar-containers/)?**
+Great question. I am as excited as you. In 1.28, we have a new feature in alpha, which is sidecar container support. We introduced a new field called restartPolicy for init containers, that allows the containers to live throughout the life cycle of the pod and not block the pod from terminating. Craig, you know a lot about this, but there are so many use cases for this. It is a very common pattern. You use it for logging, monitoring, metrics; also configs and secrets as well. + +**And the service mesh!**
+And the service mesh. + +**Very popular. I will say that the Sidecar pattern was called out very early on, in [a blog post Brendan Burns wrote](/blog/2015/06/the-distributed-system-toolkit-patterns/), talking about how you can achieve some of the things you just mentioned. Support for it in Kubernetes has been— it's been a while, shall we say. I've been doing these interviews since 2018, and September 2019 was when [I first had a conversation with a release manager](/blog/2019/12/06/when-youre-in-the-release-team-youre-family-the-kubernetes-1.16-release-interview/) who felt they had to apologize for Sidecar containers not shipping in that release.**
+Well, here we are! + +**Thank you for not letting the side down.**
+[LAUGHS] + +**There are a bunch of other features that are going to GA in 1.28. Tell me about what's new with [kubectl events](https://github.com/kubernetes/enhancements/issues/1440)?**
It got a new CLI and now it is separate from `kubectl get`. I think that changes in the CLI are always a little bit more apparent because they are user-facing.

**Are there a lot of other user-facing changes, or are most of the things in the release very much behind the scenes?**
+I would say it's a good mix of both; it depends on what you're interested in. + +**I am interested, of course, in [non-graceful node shutdown support](https://github.com/kubernetes/enhancements/issues/2268). What can you tell us about that?**
+Right, so for situations where you have a hardware failure or a broken OS, we have added additional support for a better graceful shutdown. + +**If someone trips over the power cord at your LAN party and your cluster goes offline as a result?**
+Right, exactly. More availability! That's always good. + +**And if it's not someone tripping over your power cord, it's probably DNS that broke your cluster. What's changed in terms of DNS configuration?**
Oh, we introduced [a new feature gate to allow more DNS search paths](https://github.com/kubernetes/enhancements/issues/2595).

**Is that all there is to it?**
That's pretty much it. [LAUGHING] Yeah, you can have more and longer DNS search paths.

**It can never be long enough. Just search everything! If .com doesn't work, try .net and try .io after that.**
+Surely. + +**Those are a few of the big features that are moving to stable. Obviously, over the course of the last few releases, features come in, moving from Alpha to Beta and so on. New features coming in today might not be available to people for a while. As you mentioned, there are feature gates that you can enable to allow people to have access to these. What are some of the newest features that have been introduced that are in Alpha, that are particularly interesting to you personally?**
+I have two. The first one is [`kubectl delete --interactive`](https://github.com/kubernetes/enhancements/issues/3895). I'm always nervous when I delete something, you know, it's going to be a typo or it's going to be on the wrong tab. So we have an `--interactive` flag for that now. + +**So you can get feedback on what you're about to delete before you do it?**
+Right; confirmation is good! + +**You mentioned two there, what was the second one?**
+Right; this one is close to my heart. It is a SIG Release KEP, [publishing on community infrastructure](https://github.com/kubernetes/enhancements/issues/1731). I'm not sure if you know, but as part of my branch management associate role in 1.27, I had the opportunity to cut a few releases. It takes up to 12 hours sometimes. And now, we are hoping that the process only includes release managers, so we don't have to call up the folks at Google and, you know, lengthen that process anymore. + +**Is 12 hours the expected length for software of this size, or is there work in place to try and bring that down?**
+There's so much work in place to bring that down. I think 12 hours is on the shorter end of it. Unfortunately, we have had a situation where we have to, you know, switch the release manager because it's just so late at night for them. + +**They've fallen asleep halfway through?**
+Exactly, yeah. 6 to 12 hours, I think, is our status quo. + +**The theme for this release is "[Planternetes](/blog/2023/08/15/kubernetes-v1-28-release/#release-theme-and-logo)". That's going to need some explanation, I feel.**
+Okay. I had full creative control over this. It is summer in the northern hemisphere, and I am a big house plant fanatic. It's always a little sad when I have to move cities for co-op and can't take my plants with me. + +**Is that a border control thing? They don't let you take them over the border?**
+It's not even that; they're just so clunky and fragile. It's usually not worth the effort. But I think our community is very much like a garden. We have very critical roles in the ecosystem and we all have to work together. + +**Will you be posting seeds out to contributors and growing something together all around the world?**
+That would be so cool if we had merch, like a little card with seeds embedded in it. I don't think we have the budget for that though. [LAUGHS] + +**You say that. There are people who are inspired in many different areas. I love talking to the release managers and hearing the things that they're interested in. You should think about taking some seeds off one of your plants, and just spreading them around the world. People can take pictures, and tag you in them on Instagram.**
That's cool. You know how we have a SIG Beard? We can have a SIG Plant.

**You worked for a long time with the release lead for 1.27, Xander Grzywinski. One of the benefits of having [done my interview with him in writing](https://craigbox.substack.com/p/kubernetes-and-chill) and not as a podcast is I didn't have to try and butcher pronouncing his surname. Can you help me out here?**
+I unfortunately cannot. I don't want to butcher it either! + +**Anyway, Xander told me that he suspected that in this release you would have to deal with some very last-minute PRs, as is tradition. Was that the case?**
+I vividly remember the last minute PRs from last release because I was trying to cut the releases, as part of the branch management team. Thankfully, that was not the case this release. We have had other challenges, of course. + +**Can you tell me some of those challenges?**
I think improving documentation is always a big part. The KEP process can be very daunting to new contributors. How do you get people to review your KEPs? How do you opt in? All that stuff. We're improving the documentation for that.

**As someone who has been through a lot of releases, I've been feeling, like you've said, that the last-minute nature has slowed down a little. The process is perhaps improving. Do you see that, or do you think there's still a long way to go for the leads to improve it?**
+I think we've come very far. When I started in 1.22, we were using spreadsheets to track a hundred enhancements. It was a monster; I was terrified to touch it. Now, we're on GitHub boards. As a result of that, we are actually merging the bug triage and CI Signal team in 1.29. + +**What's the impact of that?**
+The bug triage team is now using the GitHub board to track issues, which is much more efficient. We are able to merge the two teams together. + +**I have heard a rumor that GitHub boards are powered by spreadsheets underneath.**
+Honestly, even if that's true, the fact that it's on the same platform and it has better version control is just magical. + +**At this time, the next release lead has not yet been announced, but tradition dictates that you write down your feelings, best wishes and instructions to them in an envelope, which you'll leave in their desk drawer. What are you going to put inside that envelope?**
+Our 1.28 release lead is fantastic and they're so capable of handling the release— + +**That's you, isn't it?**
+1.29? [LAUGHS] No, I'm too tired. I need to catch up on my sleep. My advice for them? It's going to be okay. It's all going to be okay. I was going to echo Leo's and Cici's words, to overcommunicate, but I think that has been said enough times already. + +**You've communicated enough. Stop! No more communication!**
+Yeah, no more communication. [LAUGHS] It's going to be okay. And honestly, shout out to my emeritus advisor, Leo, for reminding me that. Sometimes there are a lot of fires and it can be overwhelming, but it will be okay. + +**As we've alluded to a little bit throughout our conversation, there are a lot of people in the Kubernetes community who, for want of a better term, have had "a lot of experience" at running these systems. Then there are, of course, a lot of people who are just at the beginning of their careers; like yourself, at university. How do you see the difference between how those groups interact? Is there one team throughout, or what do you think that each can learn from the other?**
+I think the diversity of the team is one of its strengths and I really enjoy it. I learn so much from folks who have been doing this for 20 years or folks who are new to the industry like I am. + +**I know the CNCF goes to a lot of effort to enable new people to take part. Is there anything that you can say about how people might get involved?**
+Firstly, I think SIG Release has started a wonderful tradition, or system, of [helping new folks join the release team as a shadow](https://github.com/kubernetes/sig-release/blob/master/release-team/shadows.md), and helping them grow into bigger positions, like leads. I think other SIGs are also following that template as well. But a big part of me joining and sticking with the community has been the ability to go to conferences. As I said, my first conference was KubeCon, when I was not involved in the community at all. And so a big shout-out to the CNCF and the companies that sponsor the Dan Kohn and the speaker scholarships. They have been the sole reason that I was able to attend KubeCon, meet people, and feel the power of the community. + +**Last year's KubeCon in North America was in Detroit?**
+Detroit, [I was there, yeah](https://medium.com/@nng.grace/kubecon-in-the-motor-city-4e23e0446751). + +**That's quite a long drive?**
+I was in SF, so I flew over. + +**You live right next door! If only you'd been in Waterloo.**
Yeah, but who knows? Maybe I'll do a road trip from Waterloo to Chicago this year.

---

_[Grace Nguyen](https://twitter.com/GraceNNG) is a student at the University of Waterloo, and was the release team lead for Kubernetes 1.28. Subscribe to [Let's Get To The News](https://craigbox.substack.com/about#§follow-the-podcast), or search for it wherever you get your podcasts._
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/gateway-api-resources.svg b/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/gateway-api-resources.svg
new file mode 100644
index 0000000000000..3484bb01e67d7
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/gateway-api-resources.svg
@@ -0,0 +1,1539 @@
[SVG markup omitted: a diagram of the Gateway API resource model. Recoverable text: "A modern set of APIs for deploying layer 4 and layer 7 routing in Kubernetes" and "Designed to be generic, expressive, extensible, and role-oriented".]
diff --git a/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/index.md b/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/index.md
new file mode 100644
index 0000000000000..3e726fd30e034
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-25-introducing-ingress2gateway/index.md
@@ -0,0 +1,203 @@
---
layout: blog
title: "Introducing ingress2gateway; Simplifying Upgrades to Gateway API"
date: 2023-10-25T10:00:00-08:00
slug: introducing-ingress2gateway
---

***Authors:*** Lior Lieberman (Google), Kobi Levi (independent)

Today we are releasing [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway), a tool that can help you migrate from [Ingress](/docs/concepts/services-networking/ingress/) to [Gateway API](https://gateway-api.sigs.k8s.io). Gateway API is just weeks away from graduating to GA; if you haven't upgraded yet, now's the time to think about it!

## Background

In the ever-evolving world of Kubernetes, networking plays a pivotal role. As more applications are deployed in Kubernetes clusters, effective exposure of these services to clients becomes a critical concern. If you've been working with Kubernetes, you're likely familiar with the [Ingress API], which has been the go-to solution for managing external access to services.

[Ingress API]:/docs/concepts/services-networking/ingress/

The Ingress API provides a way to route external traffic to your applications within the cluster, making it an indispensable tool for many Kubernetes users. Ingress has its limitations, however, and as applications become more complex and the demands on your Kubernetes clusters increase, these limitations can become bottlenecks.

Some of the limitations are:

- **Insufficient common denominator** - by attempting to establish a common denominator for various HTTP proxies, Ingress can only accommodate basic HTTP routing, forcing more features of contemporary proxies like traffic splitting and header matching into provider-specific, non-transferable annotations.
- **Inadequate permission model** - the Ingress spec configures both infrastructure and application configuration in one object. With Ingress, the cluster operator and application developer operate on the same Ingress object without being aware of each other's roles. This results in an insufficient role-based access control model and a high potential for setup errors.
- **Lack of protocol diversity** - Ingress primarily focuses on HTTP(S) routing and does not provide native support for other protocols, such as TCP, UDP and gRPC. This limitation makes it less suitable for handling non-HTTP workloads.

## Gateway API

To overcome these limitations, Gateway API is designed to provide a more flexible, extensible, and powerful way to manage traffic to your services.

Gateway API is just weeks away from a GA (General Availability) release. It provides a standard Kubernetes API for ingress traffic control. It offers extended functionality, improved customization, and greater flexibility. By focusing on modular and expressive API resources, Gateway API makes it possible to describe a wider array of routing configurations and models.

The transition from Ingress API to Gateway API in Kubernetes is driven by the advantages and advanced functionality that Gateway API offers, with its foundation built on four core principles: a role-oriented approach, portability, expressiveness and extensibility.

### A role-oriented approach

Gateway API employs a role-oriented approach that aligns with the conventional roles within organizations involved in configuring Kubernetes service networking. This approach enables infrastructure engineers, cluster operators, and application developers to collectively address different aspects of Gateway API.

For instance, infrastructure engineers play a pivotal role in deploying GatewayClasses, cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them, laying the groundwork for robust service networking.

Subsequently, cluster operators utilize these GatewayClasses to deploy gateways. A Gateway in Kubernetes' Gateway API defines how external traffic can be directed to Services within the cluster, essentially bridging non-Kubernetes sources to Kubernetes-aware destinations. It represents a request for a load balancer configuration aligned with a GatewayClass' specification. The Gateway spec may not be exhaustive as some details can be supplied by the GatewayClass controller, ensuring portability. Additionally, a Gateway can be linked to multiple Route references to channel specific traffic subsets to designated services.

Lastly, application developers configure route resources (such as HTTPRoutes) to manage configuration (e.g. timeouts, request matching/filters) and Service composition (e.g. path routing to backends). Route resources define protocol-specific rules for mapping requests from a Gateway to Kubernetes Services. HTTPRoute is for multiplexing HTTP or terminated HTTPS connections. It's intended for use in cases where you want to inspect the HTTP stream and use HTTP request data for either routing or modification, for example using HTTP headers for routing, or modifying them in-flight.

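To make this concrete, here is a minimal sketch of an HTTPRoute that sends requests carrying a particular header to a canary Service. The resource names, the header, and the backend Services are all invented for this example, and `v1beta1` was the current HTTPRoute API version at the time of writing:

```shell
# Hypothetical example: requests with the header "env: canary" go to a
# canary Service; all other traffic falls through to the stable backend.
kubectl apply -f - <<EOF
apiVersion: gateway.networking.k8s.io/v1beta1
kind: HTTPRoute
metadata:
  name: example-route           # hypothetical name
spec:
  parentRefs:
  - name: example-gateway       # hypothetical Gateway deployed by the cluster operator
  rules:
  - matches:
    - headers:
      - name: env
        value: canary
    backendRefs:
    - name: app-canary          # hypothetical Service
      port: 8080
  - backendRefs:
    - name: app-stable          # hypothetical Service
      port: 8080
EOF
```

With Ingress, a header match like this would typically need a provider-specific annotation; in Gateway API it is part of the standard spec.
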
{{< figure src="gateway-api-resources.svg" alt="Diagram showing the key resources that make up Gateway API and how they relate to each other. The resources shown are GatewayClass, Gateway, and HTTPRoute; the Service API is also shown" class="diagram-medium" >}}

### Portability

With more than 20 [API implementations](https://gateway-api.sigs.k8s.io/implementations/#implementations), Gateway API is designed to be more portable across different implementations, clusters and environments. It helps reduce Ingress' reliance on non-portable, provider-specific annotations, making your configurations more consistent and easier to manage across multiple clusters.

Gateway API commits to supporting the 5 latest Kubernetes minor versions. That means that Gateway API currently supports Kubernetes 1.24+.

### Expressiveness

Gateway API provides standard, Kubernetes-backed support for a wide range of features, such as header-based matching, traffic splitting, weight-based routing, request mirroring and more. With Ingress, these features need custom provider-specific annotations.

### Extensibility

Gateway API is designed with extensibility as a core feature. Rather than enforcing a one-size-fits-all model, it offers the flexibility to link custom resources at multiple layers within the API's framework. This layered approach to customization ensures that users can tailor configurations to their specific needs without overwhelming the main structure. By doing so, Gateway API facilitates more granular and context-sensitive adjustments, allowing for a fine-tuned balance between standardization and adaptability. This becomes particularly valuable in complex cloud-native environments where specific use cases require nuanced configurations. A critical difference is that Gateway API has a much broader base set of features and a standard pattern for extensions that can be more expressive than annotations were on Ingress.

## Upgrading to Gateway

Migrating from Ingress to Gateway API may seem intimidating, but luckily Kubernetes just released a tool to simplify the process. [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway) assists in the migration by converting your existing Ingress resources into Gateway API resources. Here is how you can get started with Gateway API and ingress2gateway:

1. [Install a Gateway controller](https://gateway-api.sigs.k8s.io/guides/#installing-a-gateway-controller) OR [install the Gateway API CRDs manually](https://gateway-api.sigs.k8s.io/guides/#installing-gateway-api).

2. Install [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway).

   If you have a Go development environment locally, you can install `ingress2gateway` with:

   ```
   go install github.com/kubernetes-sigs/ingress2gateway@v0.1.0
   ```

   This installs `ingress2gateway` to `$(go env GOPATH)/bin/ingress2gateway`.

   Alternatively, follow the installation guide [here](https://github.com/kubernetes-sigs/ingress2gateway#installation).

3. Once the tool is installed, you can use it to convert the ingress resources in your cluster to Gateway API resources.

   ```
   ingress2gateway print
   ```

   The above command will:

   1. Load your current Kubernetes client config, including the active context, namespace and authentication details.
   2. Search for ingresses and provider-specific resources in that namespace.
   3. Convert them to Gateway API resources (currently only Gateways and HTTPRoutes). For other options, you can run the tool with `-h`, or refer to [https://github.com/kubernetes-sigs/ingress2gateway#options](https://github.com/kubernetes-sigs/ingress2gateway#options).

4. Review the converted Gateway API resources, validate them, and then apply them to your cluster.

5. Send test requests to your Gateway to check that it is working. You could get your gateway address using `kubectl get gateway <gateway-name> -n <namespace> -o jsonpath='{.status.addresses}{"\n"}'`.

6. Update your DNS to point to the new Gateway.

7. Once you've confirmed that no more traffic is going through your Ingress configuration, you can safely delete it.

## Wrapping up

Achieving reliable, scalable and extensible networking has always been a challenging objective. The Gateway API is designed to improve the current Kubernetes networking standards, like Ingress, and reduce the need for implementation-specific annotations and CRDs.

It is a Kubernetes standard API, consistent across different platforms and implementations, and, most importantly, it is future-proof. Gateway API is the next generation of the Ingress API, but has a larger scope than that, expanding to tackle mesh and layer 4 routing as well. Gateway API and ingress2gateway are supported by a dedicated team under SIG Network that actively works on them and manages the ecosystem. The project is also likely to receive more updates and community support.

### The Road Ahead

ingress2gateway is just getting started. We're planning to onboard more providers, introduce support for more types of Gateway API routes, and make sure everything syncs up smoothly with the ongoing development of Gateway API.

Excitingly, Gateway API is also making significant strides. While v1.0 is about to launch, there's still a lot of work ahead. This release incorporates many new experimental features, with additional functionalities currently in the early stages of planning and development.

If you're interested in helping to contribute, we would love to have you! Please check out the [community page](https://gateway-api.sigs.k8s.io/contributing/community/), which includes links to the Slack channel and community meetings. We look forward to seeing you!

### Useful Links

- Get involved with the Ingress2Gateway project on [GitHub](https://github.com/kubernetes-sigs/ingress2gateway)
- Open a new issue - [ingress2gateway](https://github.com/kubernetes-sigs/ingress2gateway/issues/new/choose), [Gateway API](https://github.com/kubernetes-sigs/gateway-api/issues/new/choose).
- Join our [discussions](https://github.com/kubernetes-sigs/gateway-api/discussions).
- [Gateway API Getting Started](https://gateway-api.sigs.k8s.io/guides/)
- [Gateway API Implementations](https://gateway-api.sigs.k8s.io/implementations/#gateways)
diff --git a/content/en/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png b/content/en/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png
new file mode 100644
index 0000000000000..5a2215397f327
Binary files /dev/null and b/content/en/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png differ
diff --git a/content/en/blog/_posts/2023-10-31-Gateway-API-GA/index.md b/content/en/blog/_posts/2023-10-31-Gateway-API-GA/index.md
new file mode 100644
index 0000000000000..2575379f77e1c
--- /dev/null
+++ b/content/en/blog/_posts/2023-10-31-Gateway-API-GA/index.md
@@ -0,0 +1,153 @@
---
layout: blog
title: "Gateway API v1.0: GA Release"
date: 2023-10-31T10:00:00-08:00
slug: gateway-api-ga
---

**Authors:** Shane Utt (Kong), Nick Young (Isovalent), Rob Scott (Google)

On behalf of Kubernetes SIG Network, we are pleased to announce the v1.0 release of [Gateway API](https://gateway-api.sigs.k8s.io/)! This release marks a huge milestone for this project. Several key APIs are graduating to GA (generally available), while other significant features have been added to the Experimental channel.

## What's new

### Graduation to v1
This release includes the graduation of [Gateway](https://gateway-api.sigs.k8s.io/api-types/gateway/), [GatewayClass](https://gateway-api.sigs.k8s.io/api-types/gatewayclass/), and [HTTPRoute](https://gateway-api.sigs.k8s.io/api-types/httproute/) to v1, which means they are now generally available (GA). This API version denotes a high level of confidence in the API surface and provides guarantees of backwards compatibility. Note that although the versions of these APIs included in the Standard channel are now considered stable, that does not mean that they are complete. These APIs will continue to receive new features via the Experimental channel as they meet graduation criteria. For more information on how all of this works, refer to the [Gateway API Versioning Policy](https://gateway-api.sigs.k8s.io/concepts/versioning/).

### Logo
Gateway API now has a logo! This logo was designed through a collaborative process, and is intended to represent the idea that this is a set of Kubernetes APIs for routing traffic both north-south and east-west:

![Gateway API Logo](gateway-api-logo.png "Gateway API Logo")

### CEL Validation
Historically, Gateway API has bundled a validating webhook as part of installing the API. Starting in v1.0, webhook installation is optional and only recommended for Kubernetes 1.24. Gateway API now includes [CEL](/docs/reference/using-api/cel/) validation rules as part of the [CRDs](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). This new form of validation is supported in Kubernetes 1.25+, and thus the validating webhook is no longer required in most installations.

### Standard channel
This release was primarily focused on ensuring that the existing beta APIs were well defined and sufficiently stable to graduate to GA. That led to a variety of spec clarifications, as well as some improvements to status to improve the overall UX when interacting with Gateway API.

### Experimental channel
Most of the changes included in this release were limited to the experimental channel. These include HTTPRoute timeouts, TLS config from Gateways to backends, WebSocket support, Gateway infrastructure labels, and more.
Stay tuned for a follow-up blog post that will cover each of these new features in detail.

### Everything else
For a full list of the changes included in this release, please refer to the [v1.0.0 release notes](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v1.0.0).

## How we got here

The idea of Gateway API was initially [proposed](https://youtu.be/Ne9UJL6irXY?si=wgtC9w8PMB5ZHil2) 4 years ago at KubeCon San Diego as the next generation of the Ingress API. Since then, an incredible community has formed to develop what has likely become the most collaborative API in Kubernetes history. Over 170 people have contributed to this API so far, and that number continues to grow.

A special thank you to the 20+ [community members who agreed to take on an official role in the project](https://github.com/kubernetes-sigs/gateway-api/blob/main/OWNERS_ALIASES), providing some time for reviews and sharing the load of maintaining the project!

We especially want to highlight the emeritus maintainers that played a pivotal role in the early development of this project:

* [Bowei Du](https://github.com/bowei)
* [Daneyon Hansen](https://github.com/danehans)
* [Harry Bagdi](https://github.com/hbagdi)

## Try it out

Unlike other Kubernetes APIs, you don't need to upgrade to the latest version of Kubernetes to get the latest version of Gateway API. As long as you're running one of the 5 most recent minor versions of Kubernetes (1.24+), you'll be able to get up and running with the latest version of Gateway API.

To try out the API, follow our [Getting Started guide](https://gateway-api.sigs.k8s.io/guides/).

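If you just want the CRDs on an existing cluster, installation is a single `kubectl apply`. A minimal sketch, using the standard channel release artifact for v1.0.0 as described in the Getting Started guide:

```shell
# Install the Gateway API v1.0.0 CRDs (standard channel).
# Use the experimental channel artifact instead if you want the
# experimental features mentioned above.
kubectl apply -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.0.0/standard-install.yaml
```
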
## What's next

This release is just the beginning of a much larger journey for Gateway API, and there are still plenty of new features and new ideas in flight for future releases of the API.

One of our key goals going forward is to work to stabilize and graduate other experimental features of the API. These include [support for service mesh](https://gateway-api.sigs.k8s.io/concepts/gamma/), additional route types ([GRPCRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute), [TCPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute), [TLSRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TLSRoute), [UDPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.UDPRoute)), and a variety of experimental features.

We've also been working towards moving [ReferenceGrant](https://gateway-api.sigs.k8s.io/api-types/referencegrant/) into a built-in Kubernetes API that can be used for more than just Gateway API. Within Gateway API, we've used this resource to safely enable cross-namespace references, and that concept is now being adopted by other SIGs. The new version of this API will be owned by SIG Auth and will likely include at least some modifications as it migrates to a built-in Kubernetes API.

### Gateway API at KubeCon + CloudNativeCon

At [KubeCon North America (Chicago)](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/) and the adjacent [Contributor Summit](https://www.kubernetes.dev/events/2023/kcsna/) there are several talks related to Gateway API that will go into more detail on these topics. If you're attending either of these events this year, consider adding these to your schedule.

**Contributor Summit:**

- [Lessons Learned Building a GA API with CRDs](https://sched.co/1Sp9u)
- [Conformance Profiles: Building a generic conformance test reporting framework](https://sched.co/1Sp9l)
- [Gateway API: Beyond GA](https://sched.co/1SpA9)

**KubeCon Main Event:**

- [Gateway API: The Most Collaborative API in Kubernetes History Is GA](https://sched.co/1R2qM)

**KubeCon Office Hours:**

Gateway API maintainers will be holding office hours sessions at KubeCon if you'd like to discuss or brainstorm any related topics. To get the latest updates on these sessions, join the `#sig-network-gateway-api` channel on [Kubernetes Slack](https://slack.kubernetes.io/).

## Get involved

We've only barely scratched the surface of what's in flight with Gateway API. There are lots of opportunities to get involved and help define the future of Kubernetes routing APIs for both Ingress and Mesh.

If this is interesting to you, please [join us in the community](https://gateway-api.sigs.k8s.io/contributing/) and help us build the future of Gateway API together!
diff --git a/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/index.md b/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/index.md
new file mode 100644
index 0000000000000..ff6d2cad50ce6
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/index.md
@@ -0,0 +1,176 @@
---
layout: blog
title: "Kubernetes Contributor Summit: Behind-the-scenes"
slug: k8s-contributor-summit-behind-the-scenes
date: 2023-11-03
canonicalUrl: https://www.k8s.dev/blog/2023/11/03/k8s-contributor-summit-behind-the-scenes/
---

**Author**: Frederico Muñoz (SAS Institute)

Every year, just before the official start of KubeCon+CloudNativeCon, there's a special event that has a very special place in the hearts of those organizing and participating in it: the Kubernetes Contributor Summit. To find out why, and to provide a behind-the-scenes perspective, we interview Noah Abrahams, who, amongst other roles, was the co-lead for the Kubernetes Contributor Summit in 2023.

**Frederico Muñoz (FSM)**: Hello Noah, and welcome. Could you start by introducing yourself and telling us how you got involved in Kubernetes?

**Noah Abrahams (NA)**: I’ve been in this space for quite a while. I got started in IT in the mid 90's, and I’ve been working in the "Cloud" space for about 15 years. It was, frankly, through a combination of sheer luck (being in the right place at the right time) and having good mentors to pull me into those places (thanks, Tim!) that I ended up at a startup called Apprenda in 2016. While I was there, they pivoted into Kubernetes, and it was the best thing that could have happened to my career. It was around v1.2 and someone asked me if I could give a presentation on Kubernetes concepts at "my local meetup" in Las Vegas. The meetup didn’t exist yet, so I created it, and got involved in the wider community. One thing led to another, and soon I was involved in ContribEx, joined the release team, was doing booth duty for the CNCF, became an ambassador, and here we are today.

## The Contributor Summit

![KCSEU 2023 group photo](kcseu2023-group.jpg)

**FM**: Before leading the organisation of the KCSEU 2023, how many other Contributor Summits were you a part of?

**NA**: I was involved in four or five before taking the lead.
If I'm recalling correctly, I +attended the summit in Copenhagen, then sometime in 2018 I joined the wrong meeting, because the +summit staff meeting was listed on the ContribEx calendar. Instead of dropping out of the call, I +listened a bit, then volunteered to take on some work that didn't look like it had anybody yet +dedicated to it. I ended up running Ops in Seattle and helping run the New Contributor Workshop in +Shanghai, that year. Since then, I’ve been involved in all but two, since I missed both Barcelona +and Valencia. + +**FM**: Have you noticed any major changes in terms of how the conference is organized throughout +the years? Namely in terms of number of participants, venues, speakers, themes... + +**NA**: The summit changes over the years with the ebb and flow of the desires of the contributors +that attend. While we can typically expect about the same number of attendees, depending on the +region that the event is held in, we adapt the style and content greatly based on the feedback that +we receive at the end of each event. Some years, contributors ask for more free-style or +unconference type sessions, and we plan on having more of those, but some years, people ask for more +planned sessions or workshops, so that's what we facilitate. We also have to continually adapt to +the venue that we have, the number of rooms we're allotted, how we're going to share the space with +other events and so forth. That all goes into the planning ahead of time, from how many talk tracks +we’ll have, to what types of tables and how many microphones we want in a room. + +There has been one very significant change over the years, though, and that is that we no longer run +the New Contributor Workshop. While the content was valuable, running the session during the summit +never led to any people who weren’t already contributing to the project becoming dedicated +contributors to the project, so we removed it from the schedule. We'll deliver that content another +way, while we’ll keep the summit focused on existing contributors. + +## What makes it special + +**FM**: Going back to the introduction I made, I’ve heard several participants saying that KubeCon +is great, but that the Contributor Summit is for them the main event. In your opinion, why do you +think that makes it so? + +**NA**: I think part of it ties into what I mentioned a moment ago, the flexibility in our content +types. For many contributors, I think the summit is basically "How Kubecon used to be", back when +it was primarily a gathering of the contributors to talk about the health of the project and the +work that needed to be done. So, in that context, if the contributors want to discuss, say, a new +Working Group, then they have dedicated space to do so in the summit. They also have the space to +sit down and hack on a tough problem, discuss architectural philosophy, bring potential problems to +more people’s attention, refine our methods, and so forth. Plus, the unconference aspect allows for +some malleability on the day-of, for whatever is most important right then and there. Whatever +folks want to get out of this environment is what we’ll provide, and having a space and time +specifically to address your particular needs is always going to be well received. + +Let's not forget the social aspect, too. Despite the fact that we're a global community and work +together remotely and asynchronously, it's still easier to work together when you have a personal +connection, and can put a face to a Github handle. 
Zoom meetings are a good start, but even a +single instance of in-person time makes a big difference in how people work together. So, getting +folks together a couple times a year makes the project run more smoothly. + +## Organizing the Summit + +**FM**: In terms of the organization team itself, could you share with us a general overview of the +staffing process? Who are the people that make it happen? How many different teams are involved? + +**NA**: There's a bit of the "usual suspects" involved in making this happen, many of whom you'll +find in the ContribEx meetings, but really it comes down to whoever is going to step up and do the +work. We start with a general call out for volunteers from the org. There's a Github issue where +we'll track the staffing and that will get shouted out to all the usual comms channels: slack, +k-dev, etc. + +From there, there's a handful of different teams, overseeing content/program committee, +registration, communications, day-of operations, the awards the SIGs present to their members, the +after-summit social event, and so on. The leads for each team/role are generally picked from folks +who have stepped up and worked the event before, either as a shadow, or a previous lead, so we know +we can rely on them, which is a recurring theme. The leads pick their shadows from whoever pipes up +on the issue, and the teams move forward, operating according to their role books, which we try to +update at the end of each summit, with what we've learned over the past few months. It's expected +that a shadow will be in line to lead that role at some point in a future summit, so we always have +a good bench of folks available to make this event happen. A couple of the roles also have some +non-shadow volunteers where people can step in to help a bit, like as an on-site room monitor, and +get a feel for how things are put together without having to give a serious up-front commitment, but +most of the folks working the event are dedicated to both making the summit successful, and coming +back to do so in the future. Of course, the roster can change over time, or even suddenly, as +people gain or lose travel budget, get new jobs, only attend Europe or North America or Asia, etc. +It's a constant dance, relying 100% on the people who want to make this project successful. + +Last, but not least, is the Summit lead. They have to keep the entire process moving forward, be +willing to step in to keep bike-shedding from derailing our deadlines, make sure the right people +are talking to one another, lead all our meetings to make sure everyone gets a voice, etc. In some +cases, the lead has to even be willing to take over an entirely separate role, in case someone gets +sick or has any other extenuating circumstances, to make sure absolutely nothing falls through the +cracks. The lead is only allowed to volunteer after they’ve been through this a few times and know +what the event entails. Event planning is not for the faint of heart. + + +**FM**: The participation of volunteers is essential, but there's also the topic of CNCF support: +how does this dynamic play out in practice? + +**NA**: This event would not happen in its current form without our CNCF liaison. They provide us +with space, make sure we are fed and caffeinated and cared for, bring us outside spaces to evaluate, +so we have somewhere to hold the social gathering, get us the budget so we have t-shirts and patches +and the like, and generally make it possible for us to put this event together. 
They're even +responsible for the signage and arrows, so the attendees know where to go. They're the ones sitting +at the front desk, keeping an eye on everything and answering people's questions. At the same time, +they're along to facilitate, and try to avoid influencing our planning. + +There's a ton of work that goes into making the summit happen that is easy to overlook, as an +attendee, because people tend to expect things to just work. It is not exaggerating to say this +event would not have happened like it has over the years, without the help from our liaisons, like +Brienne and Deb. They are an integral part of the team. + +## A look ahead + +**FM**: Currently, we’re preparing the NA 2023 summit, how is it going? Any changes in format +compared with previous ones? + +**NA**: I would say it's going great, though I'm sort of emeritus lead for this event, mostly +picking up the things that I see need to be done and don't have someone assigned to it. We're +always learning from our past experiences and making small changes to continually be better, from +how many people need to be on a particular rotation to how far in advance we open and close the CFP. +There's no major changes right now, just continually providing the content that the contributors +want. + +**FM**: For our readers that might be interested in joining in the Kubernetes Contributor Summit, is +there anything they should know? + +**NA**: First of all, the summit is an event by and for Org members. If you're not already an org +member, you should be getting involved before trying to attend the summit, as the content is curated +specifically towards the contributors and maintainers of the project. That applies to the staff, as +well, as all the decisions should be made with the interests and health of kubernetes contributors +being the end goal. We get a lot of people who show interest in helping out, but then aren't ready +to make any sort of commitment, and that just makes more work for us. If you're not already a +proven and committed member of this community, it’s difficult for us to place you in a position that +requires reliability. We have made some rare exceptions when we need someone local to help us out, +but those are few and far between. + +If you are, however, already a member, we'd love to have you. The more people that are involved, +the better the event becomes. That applies to both dedicated staff, and those in attendance +bringing CFPs, unconference topics, and just contributing to the discussions. If you're part of +this community and you're going to be at KubeCon, I would highly urge you to attend, and if you're +not yet an org member, let's make that happen! + +**FM**: Indeed! Any final comments you would like to share? + +**NA**: Just that the Contributor Summit is, for me, the ultimate manifestation of the Hallway +Track. By being here, you're part of the conversations that move this project forward. It's good +for you, and it's good for Kubernetes. I hope to see you all in Chicago! 
diff --git a/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/kcseu2023-group.jpg b/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/kcseu2023-group.jpg new file mode 100644 index 0000000000000..88f6abdaf3a34 Binary files /dev/null and b/content/en/blog/_posts/2023-11-02-kcseu2023-spotlight/kcseu2023-group.jpg differ diff --git a/content/en/blog/_posts/2023-11-02-sig-architecture-prod-readiness-spotlight.md b/content/en/blog/_posts/2023-11-02-sig-architecture-prod-readiness-spotlight.md new file mode 100644 index 0000000000000..94515e46b25fe --- /dev/null +++ b/content/en/blog/_posts/2023-11-02-sig-architecture-prod-readiness-spotlight.md @@ -0,0 +1,139 @@ +--- +layout: blog +title: "Spotlight on SIG Architecture: Production Readiness" +slug: sig-architecture-production-readiness-spotlight-2023 +date: 2023-11-02 +canonicalUrl: https://www.k8s.dev/blog/2023/11/02/sig-architecture-production-readiness-spotlight-2023/ +--- + +**Author**: Frederico Muñoz (SAS Institute) + +_This is the second interview of a SIG Architecture Spotlight series that will cover the different +subprojects. In this blog, we will cover the [SIG Architecture: Production Readiness +subproject](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#production-readiness-1)_. + +In this SIG Architecture spotlight, we talked with [Wojciech Tyczynski](https://github.com/wojtek-t) +(Google), lead of the Production Readiness subproject. + +## About SIG Architecture and the Production Readiness subproject + +**Frederico (FSM)**: Hello Wojciech, could you tell us a bit about yourself, your role and how you +got involved in Kubernetes? + +**Wojciech Tyczynski (WT)**: I started contributing to Kubernetes in January 2015. At that time, +Google (where I was and still am working) decided to start a Kubernetes team in the Warsaw office +(in addition to already existing teams in California and Seattle). I was lucky enough to be one of +the seeding engineers for that team. + +After two months of onboarding and helping with different tasks across the project towards 1.0 +launch, I took ownership of the scalability area and I was leading Kubernetes to support clusters +with 5000 nodes. I’m still involved in [SIG Scalability](https://github.com/kubernetes/community/blob/master/sig-scalability/README.md) +as its Technical Lead. That was the start of a journey since scalability is such a cross-cutting topic, +and I started contributing to many other areas including, over time, to SIG Architecture. + +**FSM**: In SIG Architecture, why specifically the Production Readiness subproject? Was it something +you had in mind from the start, or was it an unexpected consequence of your initial involvement in +scalability? + +**WT**: After reaching that milestone of [Kubernetes supporting 5000-node clusters](https://kubernetes.io/blog/2017/03/scalability-updates-in-kubernetes-1-6/), +one of the goals was to ensure that Kubernetes would not degrade its scalability properties over time. While +non-scalable implementation is always fixable, designing non-scalable APIs or contracts is +problematic. I was looking for a way to ensure that people are thinking about +scalability when they create new features and capabilities without introducing too much overhead. + +This is when I joined forces with [John Belamaric](https://github.com/johnbelamaric) and +[David Eads](https://github.com/deads2k) and created a Production Readiness subproject within SIG +Architecture. 
While setting the bar for scalability was only one of a few motivations for it, it
+ended up fitting quite well. At the same time, I was already involved in the overall reliability of
+the system internally, so the other goals of Production Readiness were also close to my heart.
+
+**FSM**: To anyone new to how SIG Architecture works, how would you describe the main goals and
+areas of intervention of the Production Readiness subproject?
+
+**WT**: The goal of the Production Readiness subproject is to ensure that any feature added to
+Kubernetes can be reliably used in production clusters. This primarily means that those features
+are observable, scalable, and supportable, and that they can always be safely enabled and, in case
+of production issues, also disabled.
+
+## Production readiness and the Kubernetes project
+
+**FSM**: Architectural consistency being one of the goals of the SIG, is this made more challenging
+by the [distributed and open nature of Kubernetes](https://www.cncf.io/reports/kubernetes-project-journey-report/)?
+Do you feel this impacts the approach that Production Readiness has to take?
+
+**WT**: The distributed nature of Kubernetes certainly impacts Production Readiness, because it
+makes thinking about aspects like enablement/disablement or scalability more challenging. To be
+more precise, when enabling or disabling features that span multiple components, you need to think
+about version skew between them and design for it. For scalability, changes in one component may
+actually result in problems for a completely different one, so it requires a good understanding of
+the whole system, not just individual components. But it’s also what makes this project so
+interesting.
+
+**FSM**: Those running Kubernetes in production will have their own perspective on things; how do
+you capture this feedback?
+
+**WT**: Fortunately, we aren’t talking about _"them"_ here, we’re talking about _"us"_: all of us
+work for companies that manage large fleets of Kubernetes clusters, and we’re involved in that too,
+so we suffer from those problems ourselves.
+
+So while we’re trying to get feedback (our annual PRR survey is very important for us), it rarely
+reveals completely new problems - rather, it shows their scale. And we try to react to it - changes
+like "Beta APIs off by default" happen in reaction to the data that we observe.
+
+**FSM**: On the topic of reaction, that made me think of how the [Kubernetes Enhancement Proposal (KEP)](https://github.com/kubernetes/enhancements/blob/master/keps/NNNN-kep-template/README.md)
+template has a Production Readiness Review (PRR) section, which is tied to the graduation
+process. Was this something born out of identified shortcomings? How would you describe the
+results?
+
+**WT**: As mentioned above, the overall goal of the Production Readiness subproject is to ensure
+that every newly added feature can be reliably used in production. It’s not possible for a central
+team to enforce that alone - we need to make it everyone's problem.
+
+To achieve this, we wanted to ensure that everyone designing a new feature thinks about safe
+enablement, scalability, observability, supportability, and so on from the very beginning - which
+means not when the implementation starts, but during the design. Given that KEPs are effectively
+Kubernetes design docs, making it part of the KEP template was the way to achieve that goal.
+
+**FSM**: So, in a way, making sure that feature owners have thought about the implications of their
+proposal.
+
+**WT**: Exactly. We have already observed that just by forcing feature owners to think through the
+PRR aspects (by having them fill in the PRR questionnaire), many of the original issues go away.
+Sure - as PRR approvers we’re still catching gaps, but even the initial versions of KEPs are better
+now than they were a couple of years ago when it comes to thinking about productionisation aspects.
+That is exactly what we wanted to achieve: spreading the culture of thinking about reliability in
+its widest possible meaning.
+
+**FSM**: We've been talking about the PRR process; could you describe it for our readers?
+
+**WT**: The [PRR process](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md)
+is fairly simple - we just want to ensure that you think through the productionisation aspects of
+your feature early enough. If you have done your job, it’s just a matter of answering some
+questions in the KEP template and getting approval from a PRR approver (in addition to the regular
+SIG approval). If you didn’t think about those aspects earlier, it may require spending more time
+and potentially revising some decisions, but that’s exactly what we need to make the Kubernetes
+project reliable.
+
+## Helping with Production Readiness
+
+**FSM**: Production Readiness seems to be one area where a good deal of prior exposure is required
+in order to be an effective contributor. Are there also ways for someone newer to the project to
+contribute?
+
+**WT**: PRR approvers have to have a deep understanding of the whole Kubernetes project to catch
+potential issues. Kubernetes is now such a large project, with so many nuances, that people who are
+new to the project can simply miss the context, no matter how senior they are.
+
+That said, there are many ways you may implicitly help. Increasing the reliability of particular
+areas of the project by improving their observability and debuggability, increasing test coverage,
+and building new kinds of tests (upgrade, downgrade, chaos, etc.) will help us a lot. Note that the
+PRR subproject is focused on keeping the bar at the design level, but we should care equally about
+the implementation. For that, we rely on individual SIGs and code approvers, so having people there
+who are aware of productionisation aspects, and who deeply care about them, will help the project a
+lot.
+
+**FSM**: Thank you! Any final comments you would like to share with our readers?
+
+**WT**: I would like to highlight and thank all contributors for their cooperation. While the PRR
+adds some additional work for them, we see that people care about it, and what’s even more
+encouraging is that with every release the quality of the answers improves. Questions like "do I
+really need a metric reflecting whether my feature works" or "is downgrade really that important"
+don’t really appear anymore.
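+
+For readers curious what such an approval looks like in practice: each stage's PRR sign-off is
+recorded in a small YAML file in the `kubernetes/enhancements` repository, alongside the KEP. A
+hedged sketch (the KEP number, path, and approver below are placeholders):
+
+```yaml
+# keps/prod-readiness/sig-example/1234.yaml (illustrative path)
+kep-number: 1234
+alpha:
+  approver: "@wojtek-t"   # a PRR approver signs off per graduation stage
+beta:
+  approver: "@wojtek-t"
+```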
diff --git a/content/en/blog/_posts/2023-11-07-introducing-sig-etcd.md b/content/en/blog/_posts/2023-11-07-introducing-sig-etcd.md
new file mode 100644
index 0000000000000..b7889f75af9e9
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-07-introducing-sig-etcd.md
@@ -0,0 +1,35 @@
+---
+layout: blog
+title: "Introducing SIG etcd"
+slug: introducing-sig-etcd
+date: 2023-11-07
+canonicalUrl: https://etcd.io/blog/2023/introducing-sig-etcd/
+---
+
+**Authors**: Han Kang (Google), Marek Siarkowicz (Google), Frederico Muñoz (SAS Institute)
+
+Special Interest Groups (SIGs) are a fundamental part of the Kubernetes project, with a substantial share of the community activity happening within them. When the need arises, [new SIGs can be created](https://github.com/kubernetes/community/blob/master/sig-wg-lifecycle.md), and that was precisely what happened recently.
+
+[SIG etcd](https://github.com/kubernetes/community/blob/master/sig-etcd/README.md) is the most recent addition to the list of Kubernetes SIGs. In this article we will get to know it a bit better and understand its origins, scope, and plans.
+
+## The critical role of etcd
+
+If we look inside the control plane of a Kubernetes cluster, we will find [etcd](https://kubernetes.io/docs/concepts/overview/components/#etcd), a consistent and highly-available key-value store used as Kubernetes' backing store for all cluster data -- this description alone highlights the critical role that etcd plays, and its importance within the Kubernetes ecosystem.
+
+This critical role makes the health of the etcd project and community an important consideration, and [concerns about the state of the project](https://groups.google.com/a/kubernetes.io/g/steering/c/e-O-tVSCJOk/m/N9IkiWLEAgAJ) in early 2022 did not go unnoticed. Changes in the maintainer team, amongst other factors, contributed to a situation that needed to be addressed.
+
+## Why a special interest group
+
+With the critical role of etcd in mind, it was proposed that the way forward would be to create a new special interest group. Since etcd was already at the heart of Kubernetes, creating a dedicated SIG not only recognises that role, it also makes etcd a first-class citizen of the Kubernetes community.
+
+Establishing SIG etcd creates a dedicated space to make explicit the contract between etcd and Kubernetes API machinery, and to prevent, on the etcd level, changes which violate this contract. Additionally, etcd will be able to adopt the processes that Kubernetes offers its SIGs ([KEPs](https://www.kubernetes.dev/resources/keps/), [PRR](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md), [phased feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/), amongst others) in order to improve the consistency and reliability of the codebase. Being able to use these processes will be a substantial benefit to the etcd community.
+
+As a SIG, etcd will also be able to draw contributor support from Kubernetes proper: active contributions to etcd from Kubernetes maintainers would decrease the likelihood of changes that break Kubernetes, through the increased number of potential reviewers and integration with the existing testing framework. This will not only benefit Kubernetes, which will be able to better participate in and shape the direction of etcd given the critical role it plays, but also etcd as a whole.
+
+## About SIG etcd
+
+The recently created SIG is already working towards its goals, defined in its [Charter](https://github.com/kubernetes/community/blob/master/sig-etcd/charter.md) and [Vision](https://github.com/kubernetes/community/blob/master/sig-etcd/vision.md). The purpose is clear: to ensure etcd is a reliable, simple, and scalable production-ready store for building cloud-native distributed systems and managing cloud-native infrastructure via orchestrators like Kubernetes.
+
+The scope of SIG etcd is not exclusively about etcd as a Kubernetes component; it also covers etcd as a standard solution. Our goal is to make etcd the most reliable key-value store, one that can be used anywhere, unconstrained by any Kubernetes-specific limits and scaling to meet the requirements of many diverse use cases.
+
+We are confident that the creation of SIG etcd constitutes an important milestone in the lifecycle of the project, simultaneously improving etcd itself and the integration of etcd with Kubernetes. We invite everyone interested in etcd to [visit our page](https://github.com/kubernetes/community/blob/master/sig-etcd/README.md), [join us at our Slack channel](https://kubernetes.slack.com/messages/etcd), and get involved in this new stage of etcd's life.
diff --git a/content/en/blog/_posts/2023-11-16-mid-cycle-1.29.md b/content/en/blog/_posts/2023-11-16-mid-cycle-1.29.md
new file mode 100644
index 0000000000000..82d0bd795e54a
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-16-mid-cycle-1.29.md
@@ -0,0 +1,72 @@
+---
+layout: blog
+title: 'Kubernetes Removals, Deprecations, and Major Changes in Kubernetes 1.29'
+date: 2023-11-16
+slug: kubernetes-1-29-upcoming-changes
+---
+
+**Authors:** Carol Valencia, Kristin Martin, Abigail McCarthy, James Quigley, Hosam Kamel
+
+
+As with every release, Kubernetes v1.29 will introduce feature deprecations and removals. Our continued ability to produce high-quality releases is a testament to our robust development cycle and healthy community. The following are some of the deprecations and removals coming in the Kubernetes 1.29 release.
+
+## The Kubernetes API removal and deprecation process
+
+The Kubernetes project has a well-documented deprecation policy for features. This policy states that stable APIs may only be deprecated when a newer, stable version of that same API is available and that APIs have a minimum lifetime for each stability level. A deprecated API is one that has been marked for removal in a future Kubernetes release; it will continue to function until removal (at least one year from the deprecation), but usage will result in a warning being displayed. Removed APIs are no longer available in the current version; once an API is removed, you must migrate to using the replacement.
+
+* Generally available (GA) or stable API versions may be marked as deprecated, but must not be removed within a major version of Kubernetes.
+* Beta or pre-release API versions must be supported for 3 releases after deprecation.
+* Alpha or experimental API versions may be removed in any release without prior deprecation notice.
+
+Whether an API is removed as a result of a feature graduating from beta to stable or because that API simply did not succeed, all removals comply with this deprecation policy. Whenever an API is removed, migration options are communicated in the documentation.
+
+## A note about the k8s.gcr.io redirect to registry.k8s.io
+
+To host its container images, the Kubernetes project uses a community-owned image registry called registry.k8s.io.
Starting last March, traffic to the old k8s.gcr.io registry began being redirected to registry.k8s.io. The deprecated k8s.gcr.io registry will eventually be phased out. For more details on this change or to see if you are impacted, please read [k8s.gcr.io Redirect to registry.k8s.io - What You Need to Know](/blog/2023/03/10/image-registry-redirect/).
+
+## A note about the Kubernetes community-owned package repositories
+
+Earlier in 2023, the Kubernetes project [introduced](/blog/2023/08/15/pkgs-k8s-io-introduction/) `pkgs.k8s.io`, community-owned software repositories for Debian and RPM packages. The community-owned repositories replaced the legacy Google-owned repositories (`apt.kubernetes.io` and `yum.kubernetes.io`).
+On September 13, 2023, those legacy repositories were formally deprecated and their contents frozen.
+
+For more information on this change or to see if you are impacted, please read the [deprecation announcement](/blog/2023/08/31/legacy-package-repository-deprecation/).
+
+## Deprecations and removals for Kubernetes v1.29
+
+See the official [API removals list](/docs/reference/using-api/deprecation-guide/#v1-29) for a full accounting of the planned deprecations and removals in Kubernetes v1.29.
+
+### Removal of in-tree integrations with cloud providers ([KEP-2395](https://kep.k8s.io/2395))
+
+The [feature gates](/docs/reference/command-line-tools-reference/feature-gates/) `DisableCloudProviders` and `DisableKubeletCloudCredentialProviders` will both be set to `true` by default for Kubernetes v1.29. This change will require that users who are currently using in-tree cloud provider integrations (Azure, GCE, or vSphere) enable external cloud controller managers, or opt in to the legacy integration by setting the associated feature gates to `false`.
+
+Enabling external cloud controller managers means you must run a suitable cloud controller manager within your cluster's control plane; it also requires setting the command line argument `--cloud-provider=external` for the kubelet (on every relevant node) and across the control plane (kube-apiserver and kube-controller-manager).
+
+For more information about how to enable and run external cloud controller managers, read [Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/) and [Migrate Replicated Control Plane To Use Cloud Controller Manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/).
+
+For general information about cloud controller managers, please see
+[Cloud Controller Manager](/docs/concepts/architecture/cloud-controller/) in the Kubernetes documentation.
+
+### Removal of the `v1beta2` flow control API group
+
+The _flowcontrol.apiserver.k8s.io/v1beta2_ API version of FlowSchema and PriorityLevelConfiguration will [no longer be served](/docs/reference/using-api/deprecation-guide/#v1-29) in Kubernetes v1.29.
+
+To prepare for this, you can edit your existing manifests and rewrite client software to use the `flowcontrol.apiserver.k8s.io/v1beta3` API version, available since v1.26. All existing persisted objects are accessible via the new API. Notable changes in `flowcontrol.apiserver.k8s.io/v1beta3` include
+that the PriorityLevelConfiguration `spec.limited.assuredConcurrencyShares` field was renamed to `spec.limited.nominalConcurrencyShares`.
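+
+As an illustrative sketch of the rename, a minimal PriorityLevelConfiguration written against the `v1beta3` API might look like this (the object name and share value below are placeholders, not taken from the release notes):
+
+```yaml
+apiVersion: flowcontrol.apiserver.k8s.io/v1beta3
+kind: PriorityLevelConfiguration
+metadata:
+  name: example-priority-level   # placeholder name
+spec:
+  type: Limited
+  limited:
+    # formerly spec.limited.assuredConcurrencyShares in v1beta2
+    nominalConcurrencyShares: 30
+    limitResponse:
+      type: Reject
+```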
+
+
+### Deprecation of the `status.nodeInfo.kubeProxyVersion` field for Node
+
+The `.status.kubeProxyVersion` field for Node objects will be [marked as deprecated](https://github.com/kubernetes/enhancements/issues/4004) in v1.29 in preparation for its removal in a future release. This field is not accurate and is set by the kubelet, which does not actually know the kube-proxy version, or even whether kube-proxy is running.
+
+## Want to know more?
+
+Deprecations are announced in the Kubernetes release notes. You can see the announcements of pending deprecations in the release notes for:
+
+* [Kubernetes v1.25](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#deprecation)
+* [Kubernetes v1.26](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#deprecation)
+* [Kubernetes v1.27](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#deprecation)
+* [Kubernetes v1.28](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#deprecation)
+
+We will formally announce the deprecations that come with [Kubernetes v1.29](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#deprecation) as part of the CHANGELOG for that release.
+
+For information on the deprecation and removal process, refer to the official Kubernetes [deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-parts-of-the-api) document.
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/index.md b/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/index.md
new file mode 100644
index 0000000000000..6e0fe50c14f19
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/index.md
@@ -0,0 +1,50 @@
+---
+layout: blog
+title: "The Case for Kubernetes Resource Limits: Predictability vs. Efficiency"
+date: 2023-11-16
+slug: the-case-for-kubernetes-resource-limits
+---
+
+**Author:** Milan Plžík (Grafana Labs)
+
+There have been quite a lot of posts suggesting that not using Kubernetes resource limits might be a fairly useful thing (for example, [For the Love of God, Stop Using CPU Limits on Kubernetes](https://home.robusta.dev/blog/stop-using-cpu-limits/) or [Kubernetes: Make your services faster by removing CPU limits](https://erickhun.com/posts/kubernetes-faster-services-no-cpu-limits/)). The points made there are totally valid – it doesn’t make much sense to pay for compute power that will not be used due to limits, nor to artificially increase latency. This post strives to argue that limits have their legitimate use as well.
+
+As a Site Reliability Engineer on the [Grafana Labs](https://grafana.com/) platform team, which maintains and improves internal infrastructure and tooling used by the product teams, I primarily try to make Kubernetes upgrades as smooth as possible. But I also spend a lot of time going down the rabbit hole of various interesting Kubernetes issues. This article reflects my personal opinion, and others in the community may disagree.
+
+Let’s flip the problem upside down. Every pod in a Kubernetes cluster has inherent resource limits – the actual CPU, memory, and other resources of the machine it’s running on. If those physical limits are reached by a pod, it will experience throttling similar to what is caused by reaching Kubernetes limits.
+
+## The problem
+Pods without (or with generous) limits can easily consume the extra resources on the node.
This, however, has a hidden cost – the amount of extra resources available often heavily depends on the pods scheduled on the particular node and their actual load. These extra resources make each pod a special snowflake when it comes to real resource allocation. Even worse, it’s fairly hard to figure out the resources that the pod had at its disposal at any given moment – certainly not without unwieldy data mining of the pods running on a particular node, their resource consumption, and similar. And finally, even if we pass this obstacle, we can only have data sampled up to a certain rate and get profiles only for a certain fraction of our calls. This can be scaled up, but the amount of observability data generated might easily reach diminishing returns. Thus, there’s no easy way to tell whether a pod had a quick spike and, for a short period of time, used twice as much memory as usual to handle a request burst.
+
+Now, with Black Friday and Cyber Monday approaching, businesses expect a surge in traffic. Good data/benchmarks of past performance allow businesses to plan for some extra capacity. But is data about pods without limits reliable? With memory or CPU instant spikes handled by the extra resources, everything might look good according to past data. But once the pod bin-packing changes and the extra resources get more scarce, everything might start looking different – ranging from request latencies rising negligibly to requests slowly snowballing and causing pod OOM kills. While almost no one actually cares about the former, the latter is a serious issue that requires instant capacity increase.
+
+## Configuring the limits
+Not using limits is a tradeoff – it opportunistically improves the performance if there are extra resources available, but lowers the predictability of that performance, which might strike back in the future. There are a few approaches that can be used to increase the predictability again. Let’s pick two of them to analyze:
+
+- **Configure workload limits to be a fixed (and small) percentage more than the requests** – I'll call it _fixed-fraction headroom_. This allows the use of some extra shared resources, but keeps the per-node overcommit bounded and can be used to guide worst-case estimates for the workload. Note that the bigger the limits percentage is, the bigger the variance in performance that might happen across the workloads.
+- **Configure workloads with `requests` = `limits`**. From some point of view, this is equivalent to giving each pod its own tiny machine with constrained resources; the performance is fairly predictable. This also puts the pod into the _Guaranteed_ QoS class, which makes it get evicted only after _BestEffort_ and _Burstable_ pods have been evicted by a node under resource pressure (see [Quality of Service for Pods](/docs/concepts/workloads/pods/pod-qos/)).
+
+Some other cases might also be considered, but these are probably the two simplest ones to discuss; a concrete sketch of both configurations appears at the end of this post.
+
+
+## Cluster resource economy
+Note that in both cases discussed above, we’re effectively preventing the workloads from using some of the cluster resources they would otherwise have, at the cost of getting more predictability – which might sound like a steep price to pay for a bit more stable performance. Let’s try to quantify the impact there.
+
+### Bin-packing and cluster resource allocation
+Firstly, let’s discuss bin-packing and cluster resource allocation. There’s some inherent cluster inefficiency that comes into play – it’s hard to achieve 100% resource allocation in a Kubernetes cluster.
Thus, some percentage will be left unallocated.
+
+When configuring fixed-fraction headroom limits, a proportional amount of this will be available to the pods. If the percentage of unallocated resources in the cluster is lower than the constant we use for setting fixed-fraction headroom limits (see the figure, line 2), all the pods together are able to theoretically use up all the node’s resources; otherwise there are some resources that will inevitably be wasted (see the figure, line 1). In order to eliminate the inevitable resource waste, the percentage for fixed-fraction headroom limits should be configured so that it’s at least equal to the expected percentage of unallocated resources.
+
+{{< figure src="requests-limits-configurations.svg" alt="Possible requests/limits configurations" >}}
+
+For requests = limits (see the figure, line 3), this does not hold: unless we’re able to allocate all of a node’s resources, some resources will inevitably be wasted. Without any knobs to turn on the requests/limits side, the only suitable approach here is to ensure efficient bin-packing on the nodes by configuring correct machine profiles. This can be done either manually or by using a variety of cloud service provider tooling – for example [Karpenter](https://karpenter.sh/) for EKS or [GKE Node auto provisioning](https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning).
+
+### Optimizing actual resource utilization
+Free resources also come in the form of unused resources of other pods (reserved vs. actual CPU utilization, etc.), and their availability can’t be predicted in any reasonable way. Configuring limits makes it next to impossible to utilize these. Looking at this from a different perspective, if a workload wastes a significant amount of the resources it has requested, revisiting its own resource requests might be a fair thing to do. Looking at past data and picking more fitting resource requests might help to make the packing tighter (although at the price of worsening its performance – for example, increasing long-tail latencies).
+
+## Conclusion
+Optimizing resource requests and limits is hard. Although it’s much easier to break things when setting limits, those breakages might help prevent a catastrophe later by giving more insights into how the workload behaves in borderline conditions. There are cases where setting limits makes less sense: batch workloads (which are not latency-sensitive – for example, non-live video encoding), best-effort services (which don’t need that level of availability and can be preempted), or clusters that have a lot of spare resources by design (various cases of specialty workloads – for example, services that handle spikes by design).
+
+On the other hand, setting limits shouldn’t be avoided at all costs – even though figuring out the “right” value for limits is harder, and configuring a wrong value yields less forgiving situations. Configuring limits helps you learn about a workload’s behavior in corner cases, and there are simple strategies that can help when reasoning about the right value. It’s a tradeoff between efficient resource usage and performance predictability, and it should be considered as such.
+
+There’s also an economic aspect of workloads with spiky resource usage. Having “freebie” resources always at hand does not serve as an incentive for the product team to improve performance. Big enough spikes might easily trigger efficiency issues or even problems when trying to defend a product’s SLA – and thus might be a good candidate to mention when assessing any risks.
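+
+As a parting illustration, here is a hedged sketch of what the two approaches from the _Configuring the limits_ section look like in a Pod spec (the names, images, and values below are illustrative placeholders, not recommendations):
+
+```yaml
+# Strategy 1: fixed-fraction headroom – limits sit ~25% above requests.
+apiVersion: v1
+kind: Pod
+metadata:
+  name: headroom-example          # placeholder name
+spec:
+  containers:
+  - name: app
+    image: registry.example/app:latest   # placeholder image
+    resources:
+      requests:
+        cpu: 200m
+        memory: 256Mi
+      limits:
+        cpu: 250m        # 1.25x the CPU request
+        memory: 320Mi    # 1.25x the memory request
+---
+# Strategy 2: requests = limits – predictable performance, Guaranteed QoS.
+apiVersion: v1
+kind: Pod
+metadata:
+  name: guaranteed-example        # placeholder name
+spec:
+  containers:
+  - name: app
+    image: registry.example/app:latest   # placeholder image
+    resources:
+      requests:
+        cpu: 200m
+        memory: 256Mi
+      limits:
+        cpu: 200m
+        memory: 256Mi
+```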
diff --git a/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/requests-limits-configurations.svg b/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/requests-limits-configurations.svg
new file mode 100644
index 0000000000000..082561532cb02
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-16-the-case-for-kubernetes-limits/requests-limits-configurations.svg
@@ -0,0 +1,281 @@
+<!-- 281 lines of SVG markup omitted (requests/limits configurations diagram) -->
diff --git a/content/en/blog/_posts/2023-11-24-sig-testing-spotlight.md b/content/en/blog/_posts/2023-11-24-sig-testing-spotlight.md
new file mode 100644
index 0000000000000..2da6bbb2a17ac
--- /dev/null
+++ b/content/en/blog/_posts/2023-11-24-sig-testing-spotlight.md
@@ -0,0 +1,204 @@
+---
+layout: blog
+title: "Spotlight on SIG Testing"
+slug: sig-testing-spotlight-2023
+date: 2023-11-24
+canonicalUrl: https://www.kubernetes.dev/blog/2023/11/24/sig-testing-spotlight-2023/
+---
+
+**Author:** Sandipan Panda
+
+Welcome to another edition of the _SIG spotlight_ blog series, where we
+highlight the incredible work being done by various Special Interest
+Groups (SIGs) within the Kubernetes project. In this edition, we turn
+our attention to [SIG Testing](https://github.com/kubernetes/community/tree/master/sig-testing#readme),
+a group interested in effective testing of Kubernetes and automating
+away project toil. SIG Testing focuses on creating and running tools and
+infrastructure that make it easier for the community to write and run
+tests, and to contribute, analyze and act upon test results.
+
+To gain some insights into SIG Testing, [Sandipan
+Panda](https://github.com/sandipanpanda) spoke with [Michelle Shepardson](https://github.com/michelle192837),
+a senior software engineer at Google and a chair of SIG Testing, and
+[Patrick Ohly](https://github.com/pohly), a software engineer and architect at
+Intel and a SIG Testing Tech Lead.
+
+## Meet the contributors
+
+**Sandipan:** Could you tell us a bit about yourself, your role, and
+how you got involved in the Kubernetes project and SIG Testing?
+
+**Michelle:** Hi! I'm Michelle, a senior software engineer at
+Google. I first got involved in Kubernetes through working on tooling
+for SIG Testing, like the external instance of TestGrid. I'm part of
+oncall for TestGrid and Prow, and am now a chair for the SIG.
+
+**Patrick:** Hello! I work as a software engineer and architect in a
+team at Intel which focuses on open source Cloud Native projects. When
+I ramped up on Kubernetes to develop a storage driver, my very first
+question was "how do I test it in a cluster and how do I log
+information?" That interest led to various enhancement proposals, and
+eventually I had (re)written enough code that I took over official
+roles as SIG Testing Tech Lead (for the [E2E framework](https://github.com/kubernetes-sigs/e2e-framework)) and
+structured logging WG lead.
+
+## Testing practices and tools
+
+**Sandipan:** Testing is a field in which multiple approaches and
+tools exist; how did you arrive at the existing practices?
+
+**Patrick:** I can’t speak about the early days because I wasn’t
+around yet 😆, but looking back at some of the commit history it’s
+pretty obvious that developers just took what was available and
+started using it. For E2E testing, that was
+[Ginkgo+Gomega](https://github.com/onsi/ginkgo). Some hacks were
+necessary, for example around cleanup after a test run and for
+categorising tests.
Eventually this led to Ginkgo v2 and [revised best
+practices for E2E testing](https://www.kubernetes.dev/blog/2023/04/12/e2e-testing-best-practices-reloaded/).
+Regarding unit testing, opinions are pretty diverse: some maintainers
+prefer to use just the Go standard library with hand-written
+checks. Others use helper packages like stretchr/testify. That
+diversity is okay because unit tests are self-contained - contributors
+just have to be flexible when working on many different areas.
+Integration testing falls somewhere in the middle. It’s based on Go
+unit tests, but needs complex helper packages to bring up an apiserver
+and other components, then runs tests that are more like E2E tests.
+
+## Subprojects owned by SIG Testing
+
+**Sandipan:** SIG Testing is pretty diverse. Can you give a brief
+overview of the various subprojects owned by SIG Testing?
+
+**Michelle:** Broadly, we have subprojects related to testing
+frameworks and infrastructure, though they definitely overlap. For
+the former, there's
+[e2e-framework](https://pkg.go.dev/sigs.k8s.io/e2e-framework) (used
+externally),
+[test/e2e/framework](https://pkg.go.dev/k8s.io/kubernetes/test/e2e/framework)
+(used for Kubernetes itself) and kubetest2 for end-to-end testing,
+as well as boskos (resource rental for e2e tests),
+[KIND](https://kind.sigs.k8s.io/) (Kubernetes-in-Docker, for local
+testing and development), and the cloud provider for KIND. For the
+latter, there's [Prow](https://docs.prow.k8s.io/) (K8s-based CI/CD and
+chatops), and a litany of other tools and utilities for triage,
+analysis, coverage, Prow/TestGrid config generation, and more in the
+test-infra repo.
+
+*If you are willing to learn more and get involved with any of the SIG
+Testing subprojects, check out the [SIG Testing README](https://github.com/kubernetes/community/tree/master/sig-testing#subprojects).*
+
+## Key challenges and accomplishments
+
+**Sandipan:** What are some of the key challenges you face?
+
+**Michelle:** Kubernetes is a gigantic project in every aspect, from
+contributors to code to users and more. Testing and infrastructure
+have to meet that scale, keeping up with every change from every repo
+under Kubernetes while facilitating the development, improvement, and
+release of the project as much as possible, though of course, we're
+not the only SIG involved in that. I think another challenge is
+staffing subprojects. SIG Testing has a number of subprojects that
+have existed for years, but many of the original maintainers for them
+have moved on to other areas or no longer have the time to maintain
+them. We need to grow long-term expertise and owners in those
+subprojects.
+
+**Patrick:** As Michelle said, the sheer size can be a challenge. It’s
+not just the infrastructure - our processes must also scale with the
+number of contributors. It’s good to document best practices, but not
+good enough: we have many new contributors, which is good, but having
+reviewers explain best practices doesn’t scale - assuming that the
+reviewers even know about them! It also doesn’t help that existing
+code cannot get updated immediately because there is so much of it, in
+particular for E2E testing. The initiative to [apply stricter linting to new or modified code](https://groups.google.com/a/kubernetes.io/g/dev/c/myGiml72IbM/m/QdO5bgQiAQAJ)
+while accepting that existing code doesn’t pass those same linter
+checks helps a bit.
+
+**Sandipan:** Any SIG accomplishments that you are proud of and would
+like to highlight?
+
+**Patrick:** I am biased because I have been driving this, but I think
+that the [E2E framework](https://github.com/kubernetes-sigs/e2e-framework) and linting are now in much better shape than
+they used to be. We may soon be able to run integration tests with
+race detection enabled, which is important because we currently only
+have that for unit tests and those tend to be less complex.
+
+**Sandipan:** Testing is always important, but is there anything
+specific to your work in terms of the Kubernetes release process?
+
+**Patrick:** [Test flakes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/flaky-tests.md)…
+if we have too many of those, development velocity goes down because
+PRs cannot be merged without clean test runs, and those become less
+likely. Developers also lose trust in testing and just "retest" until
+they have a clean run, without checking whether failures might indeed
+be related to a regression in their current change.
+
+## The people and the scope
+
+**Sandipan:** What are some of your favourite things about this SIG?
+
+**Michelle:** The people, of course 🙂. Aside from that, I like the
+broad scope SIG Testing has. I feel like even small changes can make a
+big difference for fellow contributors, and even if my interests
+change over time, I'll never run out of projects to work on.
+
+**Patrick:** I can work on things that make my life and the life of my
+fellow developers better, like the tooling that we have to use every
+day while working on some new feature elsewhere.
+
+**Sandipan:** Are there any funny / cool / TIL anecdotes that you
+could tell us?
+
+**Patrick:** I started working on E2E framework enhancements five
+years ago, then was less active there for a while. When I came back
+and wanted to test some new enhancement, I asked about how to write
+unit tests for the new code and was pointed to some existing tests
+which looked vaguely familiar, as if I had *seen* them before. I
+looked at the commit history and found that I had *written* them! I’ll
+let you decide whether that says something about my failing long-term
+memory or is simply normal… Anyway, folks, remember to write good
+commit messages and comments; someone will need them at some point -
+it might even be yourself!
+
+## Looking ahead
+
+**Sandipan:** What areas and/or subprojects does your SIG need help with?
+
+**Michelle:** Some subprojects aren't staffed at the moment and could
+use folks willing to learn more about
+them. [boskos](https://github.com/kubernetes-sigs/boskos#boskos) and
+[kubetest2](https://github.com/kubernetes-sigs/kubetest2#kubetest2)
+especially stand out to me, since both are important for testing but
+lack dedicated owners.
+
+**Sandipan:** Are there any useful skills that new contributors to SIG
+Testing can bring to the table? What are some things that people can
+do to help this SIG if they come from a background that isn’t directly
+linked to programming?
+
+**Michelle:** I think user empathy, writing clear feedback, and
+recognizing patterns are really useful skills - for example, someone
+who uses the test framework or tooling and can outline pain points
+with clear examples, or who can recognize a wider issue in the project
+and pull data to inform solutions for it.
+
+**Sandipan:** What’s next for SIG Testing?
+
+**Patrick:** Stricter linting will soon become mandatory for new
+code. There are several E2E framework sub-packages that could be
+modernised, if someone wants to take on that work.
I also see an +opportunity to unify some of our helper code for E2E and integration +testing, but that needs more thought and discussion. + +**Michelle:** I'm looking forward to making some usability +improvements for some of our tools and infra, and to supporting more +long-term contributions and growth of contributors into long-term +roles within the SIG. If you're interested, hit us up! + +Looking ahead, SIG Testing has exciting plans in store. You can get in +touch with the folks at SIG Testing in their [Slack channel](https://kubernetes.slack.com/messages/sig-testing) or attend +one of their regular [bi-weekly meetings on Tuesdays](https://github.com/kubernetes/community/tree/master/sig-testing#meetings). If +you are interested in making it easier for the community to run tests +and contribute test results, to ensure Kubernetes is stable across a +variety of cluster configurations and cloud providers, join the SIG +Testing community today! diff --git a/content/en/blog/_posts/2023-11-28-Gateway-API-Future.md b/content/en/blog/_posts/2023-11-28-Gateway-API-Future.md new file mode 100644 index 0000000000000..69023b8a33fb2 --- /dev/null +++ b/content/en/blog/_posts/2023-11-28-Gateway-API-Future.md @@ -0,0 +1,259 @@ +--- +layout: blog +title: "New Experimental Features in Gateway API v1.0" +date: 2023-11-28T10:00:00-08:00 +slug: gateway-api-ga +--- + +***Authors:*** Candace Holman (Red Hat), Dave Protasowski (VMware), Gaurav K Ghildiyal (Google), John Howard (Google), Simone Rodigari (IBM) + +Recently, the [Gateway API](https://gateway-api.sigs.k8s.io/) [announced its v1.0 GA release](/blog/2023/10/31/gateway-api-ga/), marking a huge milestone for the project. + +Along with stabilizing some of the core functionality in the API, a number of exciting new *experimental* features have been added. + +## Backend TLS Policy + +`BackendTLSPolicy` is a new Gateway API type used for specifying the TLS configuration of the connection from the Gateway to backend Pods via the Service API object. +It is specified as a [Direct PolicyAttachment](https://gateway-api.sigs.k8s.io/geps/gep-713/#direct-policy-attachment) without defaults or overrides, applied to a Service that accesses a backend, where the BackendTLSPolicy resides in the same namespace as the Service to which it is applied. +All Gateway API Routes that point to a referenced Service should respect a configured `BackendTLSPolicy`. + +While there were existing ways provided for [TLS to be configured for edge and passthrough termination](https://gateway-api.sigs.k8s.io/guides/tls/#tls-configuration), this new API object specifically addresses the configuration of TLS in order to convey HTTPS from the Gateway dataplane to the backend. +This is referred to as "backend TLS termination" and enables the Gateway to know how to connect to a backend Pod that has its own certificate. + +![Termination Types](https://gateway-api.sigs.k8s.io/geps/images/1897-TLStermtypes.png) + +The specification of a `BackendTLSPolicy` consists of: +- `targetRef` - Defines the targeted API object of the policy. Only Service is allowed. +- `tls` - Defines the configuration for TLS, including `hostname`, `caCertRefs`, and `wellKnownCACerts`. Either `caCertRefs` or `wellKnownCACerts` may be specified, but not both. +- `hostname` - Defines the Server Name Indication (SNI) that the Gateway uses to connect to the backend. The certificate served by the backend must match this SNI. 
+- `caCertRefs` - Defines one or more references to objects that contain PEM-encoded TLS certificates, which are used to establish a TLS handshake between the Gateway and backend.
+- `wellKnownCACerts` - Specifies whether or not system CA certificates may be used in the TLS handshake between the Gateway and backend.
+
+### Examples
+
+#### Using System Certificates
+
+In this example, the `BackendTLSPolicy` is configured to use system certificates to connect with a TLS-encrypted upstream connection where Pods backing the `dev` Service are expected to serve a valid certificate for `dev.example.com`.
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1alpha2
+kind: BackendTLSPolicy
+metadata:
+  name: tls-upstream-dev
+spec:
+  targetRef:
+    kind: Service
+    name: dev-service
+    group: ""
+  tls:
+    wellKnownCACerts: "System"
+    hostname: dev.example.com
+```
+
+#### Using Explicit CA Certificates
+
+In this example, the `BackendTLSPolicy` is configured to use certificates defined in the configuration map `auth-cert` to connect with a TLS-encrypted upstream connection where Pods backing the `auth` Service are expected to serve a valid certificate for `auth.example.com`.
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1alpha2
+kind: BackendTLSPolicy
+metadata:
+  name: tls-upstream-auth
+spec:
+  targetRef:
+    kind: Service
+    name: auth-service
+    group: ""
+  tls:
+    caCertRefs:
+    - kind: ConfigMapReference
+      name: auth-cert
+      group: ""
+    hostname: auth.example.com
+```
+
+The following illustrates a BackendTLSPolicy that configures TLS for a Service serving a backend:
+
+{{< mermaid >}}
+flowchart LR
+    client(["client"])
+    gateway["Gateway"]
+    style gateway fill:#02f,color:#fff
+    httproute["HTTP <br> Route"]
+    style httproute fill:#02f,color:#fff
+    service["Service"]
+    style service fill:#02f,color:#fff
+    pod1["Pod"]
+    style pod1 fill:#02f,color:#fff
+    pod2["Pod"]
+    style pod2 fill:#02f,color:#fff
+    client -.->|HTTP <br> request| gateway
+    gateway --> httproute
+    httproute -.->|BackendTLSPolicy|service
+    service --> pod1 & pod2
+{{< /mermaid >}}
+
+For more information, refer to the [documentation for TLS](https://gateway-api.sigs.k8s.io/guides/tls).
+
+## HTTPRoute Timeouts
+
+A key enhancement in Gateway API's latest release (v1.0) is the introduction of the `timeouts` field within HTTPRoute Rules. This feature offers a dynamic way to manage timeouts for incoming HTTP requests, adding precision and reliability to your gateway setups.
+
+With Timeouts, developers can fine-tune their Gateway API's behavior in two fundamental ways:
+
+1. **Request Timeout**:
+
+   The request timeout is the duration within which the Gateway API implementation must send a response to a client's HTTP request.
+   It allows flexibility in specifying when this timeout starts, either before or after the entire client request stream is received, making it implementation-specific.
+   This timeout efficiently covers the entire request-response transaction, enhancing the responsiveness of your services.
+
+1. **Backend Request Timeout**:
+
+   The backendRequest timeout is a game-changer for those dealing with backends.
+   It sets a timeout for a single request sent from the Gateway to a backend service.
+   This timeout spans from the initiation of the request to the reception of the full response from the backend.
+   This feature is particularly helpful in scenarios where the Gateway needs to retry connections to a backend, ensuring smooth communication under various conditions.
+
+Notably, the `request` timeout encompasses the `backendRequest` timeout. Hence, the value of `backendRequest` should never exceed the value of the `request` timeout.
+
+The ability to configure these timeouts adds a new layer of reliability to your Kubernetes services.
+Whether it's ensuring client requests are processed within a specified timeframe or managing backend service communications, Gateway API's Timeouts offer the control and predictability you need.
+
+To get started, you can define timeouts in your HTTPRoute Rules using the Timeouts field, specifying their type as Duration.
+A zero-valued timeout (`0s`) disables the timeout, while a valid non-zero-valued timeout should be at least 1ms.
+
+Here's an example of setting request and backendRequest timeouts in an HTTPRoute:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: timeout-example
+spec:
+  parentRefs:
+  - name: example-gateway
+  rules:
+  - matches:
+    - path:
+        type: PathPrefix
+        value: /timeout
+    timeouts:
+      request: 10s
+      backendRequest: 2s
+    backendRefs:
+    - name: timeout-svc
+      port: 8080
+```
+
+In this example, a `request` timeout of 10 seconds is defined, ensuring that client requests are processed within that timeframe.
+Additionally, a 2-second `backendRequest` timeout is set for individual requests from the Gateway to a backend service called timeout-svc.
+
+These new HTTPRoute Timeouts provide Kubernetes users with more control and flexibility in managing network communications, helping ensure a smoother and more predictable experience for both clients and backends.
+For additional details and examples, refer to the [official timeouts API documentation](https://gateway-api.sigs.k8s.io/api-types/httproute/#timeouts-optional).
+
+## Gateway Infrastructure Labels
+
+While Gateway API provides a common API for different implementations, each implementation will have different resources created under the hood to apply users' intent.
+This could be configuring cloud load balancers, creating in-cluster Pods and Services, or more.
+
+While the API has always provided an extension point -- `parametersRef` in `GatewayClass` -- to customize implementation-specific things, there was no core way to express common infrastructure customizations.
+
+Gateway API v1.0 paves the way for this with a new `infrastructure` field on the `Gateway` object, allowing customization of the underlying infrastructure.
+For now, this starts small with two critical fields: labels and annotations.
+When these are set, any generated infrastructure will have the provided labels and annotations set on them.
+
+For example, I may want to group all my resources for one application together:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: hello-world
+spec:
+  infrastructure:
+    labels:
+      app.kubernetes.io/name: hello-world
+```
+
+In the future, we are looking into more common infrastructure configurations, such as resource sizing.
+
+For more information, refer to the [documentation](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.GatewayInfrastructure) for this feature.
+
+## Support for WebSockets, HTTP/2 and more!
+
+Not all implementations of Gateway API support automatic protocol selection.
+In some cases, protocols are disabled without an explicit opt-in.
+
+When a Route's backend references a Kubernetes Service, application developers can specify the protocol using the `ServicePort` [`appProtocol`][appProtocol] field.
+
+For example, the following `store` Kubernetes Service indicates that port `8080` supports HTTP/2 Prior Knowledge.
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: store
+spec:
+  selector:
+    app: store
+  ports:
+  - protocol: TCP
+    appProtocol: kubernetes.io/h2c
+    port: 8080
+    targetPort: 8080
+```
+
+Currently, Gateway API has conformance testing for:
+
+- `kubernetes.io/h2c` - HTTP/2 Prior Knowledge
+- `kubernetes.io/ws` - WebSocket over HTTP
+
+For more information, refer to the documentation for [Backend Protocol Selection](https://gateway-api.sigs.k8s.io/guides/backend-protocol).
+
+[appProtocol]: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol
+
+## `gwctl`, our new Gateway API command line tool
+
+`gwctl` is a command line tool that aims to be a `kubectl` replacement for viewing Gateway API resources.
+
+The initial release of `gwctl` that comes bundled with the Gateway API v1.0 release includes helpful features for managing Gateway API Policies.
+Gateway API Policies serve as powerful extension mechanisms for modifying the behavior of Gateway resources.
+One challenge with using policies is that it may be hard to discover which policies are affecting which Gateway resources.
+`gwctl` helps bridge this gap by answering questions like:
+
+* Which policies are available for use in the Kubernetes cluster?
+* Which policies are attached to a particular Gateway, HTTPRoute, etc.?
+* If policies are applied to multiple resources in the Gateway resource hierarchy, what is the effective policy that is affecting a particular resource? (For example, if an HTTP request timeout policy is applied to both an HTTPRoute and its parent Gateway, what is the effective timeout for the HTTPRoute?)
+
+`gwctl` is still in the very early phases of development and hence may be a bit rough around the edges.
+Follow the instructions in [the repository](https://github.com/kubernetes-sigs/gateway-api/tree/main/gwctl#try-it-out) to install and try out `gwctl`. + +### Examples + +Here are some examples of how `gwctl` can be used: + +```bash +# List all policies in the cluster. This will also give the resource they bind +# to. +gwctl get policies -A +# List all available policy types. +gwctl get policycrds +# Describe all HTTPRoutes in namespace ns2. (Output includes effective policies) +gwctl describe httproutes -n ns2 +# Describe a single HTTPRoute in the default namespace. (Output includes +# effective policies) +gwctl describe httproutes my-httproute-1 +# Describe all Gateways across all namespaces. (Output includes effective +# policies) +gwctl describe gateways -A +# Describe a single GatewayClass. (Output includes effective policies) +gwctl describe gatewayclasses foo-com-external-gateway-class +``` + +## Get involved + +These projects, and many more, continue to be improved in Gateway API. +There are lots of opportunities to get involved and help define the future of Kubernetes routing APIs for both Ingress and Mesh. + +If this is interesting to you, please [join us in the community](https://gateway-api.sigs.k8s.io/contributing/) and help us build the future of Gateway API together! + diff --git a/content/en/blog/_posts/2023-12-13-kubernetes-1.29.md b/content/en/blog/_posts/2023-12-13-kubernetes-1.29.md new file mode 100644 index 0000000000000..1a2a8b8593e24 --- /dev/null +++ b/content/en/blog/_posts/2023-12-13-kubernetes-1.29.md @@ -0,0 +1,236 @@ +--- +layout: blog +title: 'Kubernetes v1.29: Mandala' +date: 2023-12-13 +slug: kubernetes-v1-29-release +--- + +**Authors:** [Kubernetes v1.29 Release Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.29/release-team.md) + +**Editors:** Carol Valencia, Kristin Martin, Abigail McCarthy, James Quigley + +Announcing the release of Kubernetes v1.29: Mandala (The Universe), the last release of 2023! + +Similar to previous releases, the release of Kubernetes v1.29 introduces new stable, beta, and alpha features. The consistent delivery of top-notch releases underscores the strength of our development cycle and the vibrant support from our community. + +This release consists of 49 enhancements. Of those enhancements, 11 have graduated to Stable, 19 are entering Beta and 19 have graduated to Alpha. + +## Release theme and logo + +Kubernetes v1.29: *Mandala (The Universe)* ✨🌌 + +{{< figure src="/images/blog/2023-12-13-kubernetes-1.29-release/k8s-1.29.png" alt="Kubernetes 1.29 Mandala logo" class="release-logo" >}} + +Join us on a cosmic journey with Kubernetes v1.29! + +This release is inspired by the beautiful art form that is Mandala—a symbol of the universe in its perfection. Our tight-knit universe of around 40 Release Team members, backed by hundreds of community contributors, has worked tirelessly to turn challenges into joy for millions worldwide. + +The Mandala theme reflects our community’s interconnectedness—a vibrant tapestry woven by enthusiasts and experts alike. Each contributor is a crucial part, adding their unique energy, much like the diverse patterns in Mandala art. Kubernetes thrives on collaboration, echoing the harmony in Mandala creations. + +The release logo, made by [Mario Jason Braganza](https://janusworx.com) (base Mandala art, courtesy - [Fibrel Ojalá](https://pixabay.com/users/fibrel-3502541/)), symbolizes the little universe that is the Kubernetes project and all its people. 
+
+In the spirit of Mandala’s transformative symbolism, Kubernetes v1.29 celebrates our project’s evolution. Like stars in the Kubernetes universe, each contributor, user, and supporter lights the way. Together, we create a universe of possibilities—one release at a time.
+
+## Improvements that graduated to stable in Kubernetes v1.29 {#graduations-to-stable}
+
+_This is a selection of some of the improvements that are now stable following the v1.29 release._
+
+### ReadWriteOncePod PersistentVolume access mode ([SIG Storage](https://github.com/kubernetes/community/tree/master/sig-storage)) {#readwriteoncepod-pv-access-mode}
+
+In Kubernetes, volume [access modes](/docs/concepts/storage/persistent-volumes/#access-modes)
+are the way you can define how durable storage is consumed. These access modes are a part of the spec for PersistentVolumes (PVs) and PersistentVolumeClaims (PVCs). When using storage, there are different ways to model how that storage is consumed. For example, a storage system like a network file share can have many users all reading and writing data simultaneously. In other cases, maybe everyone is allowed to read data but not write it. For highly sensitive data, maybe only one user is allowed to read and write data but nobody else.
+
+Before v1.22, Kubernetes offered three access modes for PVs and PVCs:
+* ReadWriteOnce – the volume can be mounted as read-write by a single node
+* ReadOnlyMany – the volume can be mounted read-only by many nodes
+* ReadWriteMany – the volume can be mounted as read-write by many nodes
+
+The ReadWriteOnce access mode restricts volume access to a single node, which means it is possible for multiple pods on the same node to read from and write to the same volume. This could potentially be a major problem for some applications, especially if they require at most one writer for data safety guarantees.
+
+To address this problem, a fourth access mode, ReadWriteOncePod, was introduced as an Alpha feature in v1.22 for CSI volumes. If you create a pod with a PVC that uses the ReadWriteOncePod access mode, Kubernetes ensures that pod is the only pod across your whole cluster that can read that PVC or write to it. In v1.29, this feature became Generally Available.
+
+### Node volume expansion Secret support for CSI drivers ([SIG Storage](https://github.com/kubernetes/community/tree/master/sig-storage)) {#csi-node-volume-expansion-secrets}
+
+In Kubernetes, a volume expansion operation may include the expansion of the volume on the node, which involves filesystem resize. Some CSI drivers require secrets, for example a credential for accessing a SAN fabric, during the node expansion for the following use cases:
+* When a PersistentVolume represents encrypted block storage, for example using LUKS, you may need to provide a passphrase in order to expand the device.
+* For various validations, the CSI driver needs to have credentials to communicate with the backend storage system at time of node expansion.
+
+To meet this requirement, the CSI Node Expand Secret feature was introduced in Kubernetes v1.25. This allows an optional secret field to be sent as part of the NodeExpandVolumeRequest by CSI drivers, so that the node volume expansion operation can be performed with the underlying storage system. In Kubernetes v1.29, this feature became generally available.
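+
+As a hedged sketch of how this is typically wired up, a cluster operator can point the CSI driver at the secret through StorageClass parameters (the driver, secret, and namespace names below are placeholders):
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: expandable-with-secret    # placeholder name
+provisioner: example.csi.vendor.io   # placeholder CSI driver
+allowVolumeExpansion: true
+parameters:
+  csi.storage.k8s.io/node-expand-secret-name: expansion-secret
+  csi.storage.k8s.io/node-expand-secret-namespace: default
+```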
+
+### KMS v2 encryption at rest generally available ([SIG Auth](https://github.com/kubernetes/community/tree/master/sig-auth)) {#kms-v2-api-encryption}
+
+One of the first things to consider when securing a Kubernetes cluster is encrypting persisted
+API data at rest. KMS provides an interface for a provider to utilize a key stored in an external
+key service to perform this encryption. With Kubernetes v1.29, KMS v2 has become
+a stable feature, bringing numerous improvements in performance, key rotation,
+health checks and status, and observability.
+These enhancements provide users with a reliable solution to encrypt all resources in their Kubernetes clusters. You can read more about this in [KEP-3299](https://kep.k8s.io/3299).
+
+Using KMS v2 is recommended. The KMS v1 feature gate is disabled by default, so you will have to opt in to continue using it.
+
+## Improvements that graduated to beta in Kubernetes v1.29 {#graduations-to-beta}
+
+_This is a selection of some of the improvements that are now beta following the v1.29 release._
+
+### QueueingHint brings a new possibility to optimize Pod scheduling ([SIG Scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling))
+
+The throughput of the scheduler is our eternal challenge. The QueueingHint feature brings a new possibility to optimize the efficiency of requeueing, which could significantly reduce useless scheduling retries.
+
+### Node lifecycle separated from taint management ([SIG Scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling))
+
+As the title describes, this change decouples `TaintManager`, which performs taint-based pod eviction, from `NodeLifecycleController`, making them two separate controllers: `NodeLifecycleController` adds taints to unhealthy nodes, and `TaintManager` performs pod deletion on nodes tainted with the NoExecute effect.
+
+### Clean up for legacy Secret-based ServiceAccount tokens ([SIG Auth](https://github.com/kubernetes/community/tree/master/sig-auth)) {#serviceaccount-token-clean-up}
+
+Kubernetes switched to using more secure service account tokens, which are time-limited and bound to specific pods, in v1.22. It stopped auto-generating legacy secret-based service account tokens in v1.24, and in v1.27 started labeling remaining auto-generated secret-based tokens still in use with their last-used date.
+
+In v1.29, to reduce potential attack surface, the LegacyServiceAccountTokenCleanUp feature labels legacy auto-generated secret-based tokens as invalid if they have not been used for a long time (1 year by default), and automatically removes them if use is not attempted for a long time after being marked as invalid (1 additional year by default). See [KEP-2799](https://kep.k8s.io/2799) for more details.
+
+## New alpha features
+
+### Define Pod affinity or anti-affinity using `matchLabelKeys` ([SIG Scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling)) {#match-label-keys-pod-affinity}
+
+Kubernetes v1.29 introduces `matchLabelKeys` for PodAffinity and PodAntiAffinity as an alpha enhancement. It increases the accuracy of the affinity calculation during rolling updates.
+
+### nftables backend for kube-proxy ([SIG Network](https://github.com/kubernetes/community/tree/master/sig-network)) {#kube-proxy-nftables}
+
+The default kube-proxy implementation on Linux is currently based on iptables. This was the preferred packet filtering and processing system in the Linux kernel for many years (starting with the 2.4 kernel in 2001). However, unsolvable problems with iptables led to the development of a successor, nftables. Development on iptables has mostly stopped, with new features and performance improvements primarily going into nftables instead.
+
+This feature adds a new backend to kube-proxy based on nftables, since some Linux distributions have already started to deprecate and remove iptables, and nftables claims to solve the main performance problems of iptables.
+
+### APIs to manage IP address ranges for Services ([SIG Network](https://github.com/kubernetes/community/tree/master/sig-network)) {#ip-address-range-apis}
+
+Services are an abstract way to expose an application running on a set of Pods. Services can have a cluster-scoped virtual IP address that is allocated from a predefined CIDR defined in the kube-apiserver flags. However, users may want to add, remove, or resize existing IP ranges allocated for Services without having to restart the kube-apiserver.
+
+This feature implements a new allocator logic that uses two new API objects: ServiceCIDR and IPAddress, allowing users to dynamically increase the number of Service IPs available by creating new ServiceCIDRs. This helps to resolve problems like IP exhaustion or IP renumbering.
+
+### Add support to containerd/kubelet/CRI to support image pull per runtime class ([SIG Windows](https://github.com/kubernetes/community/tree/master/sig-windows)) {#image-pull-per-runtimeclass}
+
+Kubernetes v1.29 adds support to pull container images based on the RuntimeClass of the Pod that uses them.
+This feature is off by default in v1.29 under a feature gate called `RuntimeClassInImageCriApi`.
+
+Container images can either be a manifest or an index. When the image being pulled is an index (image index has a list of image manifests ordered by platform), platform matching logic in the container runtime is used to pull an appropriate image manifest from the index. By default, the platform matching logic picks a manifest that matches the host that the image pull is being executed from. This can be limiting for VM-based containers where a user could pull an image with the intention of running it as a VM-based container, for example, Windows Hyper-V containers.
+
+The image pull per runtime class feature adds support to pull different images based on the runtime class specified. This is achieved by referencing an image by a tuple of (`imageID`, `runtimeClass`), instead of just the `imageName` or `imageID`. Container runtimes could choose to add support for this feature if they'd like. If they do not, the default behavior of kubelet that existed prior to Kubernetes v1.29 will be retained.
+
+### In-place updates for Pod resources, for Windows Pods ([SIG Windows](https://github.com/kubernetes/community/tree/master/sig-windows))
+
+As an alpha feature, Kubernetes Pods can be mutable with respect to their `resources`, allowing users to change the _desired_ resource requests and limits for a Pod without the need to restart the Pod. With v1.29, this feature is now supported for Windows containers.
+
+## Graduations, deprecations and removals for Kubernetes v1.29
+
+### Graduated to stable
+
+This lists all the features that graduated to stable (also known as _general availability_).
+For a full list of updates including new features and graduations from alpha to beta, see the
+[release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md).
+
+This release includes a total of 11 enhancements promoted to Stable:
+
+- [Remove transient node predicates from KCCM's service controller](https://kep.k8s.io/3458)
+- [Reserve nodeport ranges for dynamic and static allocation](https://kep.k8s.io/3668)
+- [Priority and Fairness for API Server Requests](https://kep.k8s.io/1040)
+- [KMS v2 Improvements](https://kep.k8s.io/3299)
+- [Support paged LIST queries from the Kubernetes API](https://kep.k8s.io/365)
+- [ReadWriteOncePod PersistentVolume Access Mode](https://kep.k8s.io/2485)
+- [Kubernetes Component Health SLIs](https://kep.k8s.io/3466)
+- [CRD Validation Expression Language](https://kep.k8s.io/2876)
+- [Introduce nodeExpandSecret in CSI PV source](https://kep.k8s.io/3107)
+- [Track Ready Pods in Job status](https://kep.k8s.io/2879)
+- [Kubelet Resource Metrics Endpoint](https://kep.k8s.io/727)
+
+### Deprecations and removals
+
+#### Removal of in-tree integrations with cloud providers ([SIG Cloud Provider](https://github.com/kubernetes/community/tree/master/sig-cloud-provider)) {#in-tree-cloud-provider-integration-removal}
+
+Kubernetes v1.29 defaults to operating _without_ a built-in integration to any cloud provider.
+If you have previously been relying on in-tree cloud provider integrations (with Azure, GCE, or vSphere) then you can either:
+- enable an equivalent external [cloud controller manager](/docs/concepts/architecture/cloud-controller/)
+  integration _(recommended)_
+- opt back in to the legacy integration by setting the associated feature gates to `false`; the feature
+  gates to change are `DisableCloudProviders` and `DisableKubeletCloudCredentialProviders`
+
+Enabling external cloud controller managers means you must run a suitable cloud controller manager within your cluster's control plane; it also requires setting the command line argument `--cloud-provider=external` for the kubelet (on every relevant node), and across the control plane (kube-apiserver and kube-controller-manager).
+
+For more information about how to enable and run external cloud controller managers, read [Cloud Controller Manager Administration](/docs/tasks/administer-cluster/running-cloud-controller/) and [Migrate Replicated Control Plane To Use Cloud Controller Manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/).
+
+If you need a cloud controller manager for one of the legacy in-tree providers, please see the following links:
+* [Cloud provider AWS](https://github.com/kubernetes/cloud-provider-aws)
+* [Cloud provider Azure](https://github.com/kubernetes-sigs/cloud-provider-azure)
+* [Cloud provider GCE](https://github.com/kubernetes/cloud-provider-gcp)
+* [Cloud provider OpenStack](https://github.com/kubernetes/cloud-provider-openstack)
+* [Cloud provider vSphere](https://github.com/kubernetes/cloud-provider-vsphere)
+
+There are more details in [KEP-2395](https://kep.k8s.io/2395).
+
+#### Removal of the `v1beta2` flow control API group
+
+The deprecated _flowcontrol.apiserver.k8s.io/v1beta2_ API version of FlowSchema and
+PriorityLevelConfiguration is no longer served in Kubernetes v1.29.
+
+If you have manifests or client software that uses the deprecated beta API group, you should change
+these before you upgrade to v1.29.
+See the [deprecated API migration guide](/docs/reference/using-api/deprecation-guide/#v1-29)
+for details and advice; the kind of edit involved is sketched below.
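+
+For manifests, the migration is typically just a change to the `apiVersion` line. Here is a
+minimal sketch of the kind of edit involved (the object name is hypothetical; for
+PriorityLevelConfiguration, also note that `spec.limited.assuredConcurrencyShares` was renamed
+to `spec.limited.nominalConcurrencyShares` in _v1beta3_):
+
+```yaml
+# Before: this API version is no longer served by a v1.29 API server
+apiVersion: flowcontrol.apiserver.k8s.io/v1beta2
+kind: FlowSchema
+metadata:
+  name: example-flowschema
+---
+# After: only the apiVersion line changes for FlowSchema objects;
+# v1beta3 has been served since v1.26, and Kubernetes v1.29 also adds
+# flowcontrol.apiserver.k8s.io/v1
+apiVersion: flowcontrol.apiserver.k8s.io/v1beta3
+kind: FlowSchema
+metadata:
+  name: example-flowschema
+```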
+
+#### Deprecation of the `status.nodeInfo.kubeProxyVersion` field for Node
+
+The `.status.nodeInfo.kubeProxyVersion` field for Node objects is now deprecated, and the Kubernetes project
+is proposing to remove that field in a future release. The deprecated field is not accurate and has historically
+been managed by kubelet - which does not actually know the kube-proxy version, or even whether kube-proxy
+is running.
+
+If you've been using this field in client software, stop - the information isn't reliable and the field is now
+deprecated.
+
+#### Legacy Linux package repositories
+
+Please note that in August of 2023, the legacy package repositories (`apt.kubernetes.io` and
+`yum.kubernetes.io`) were formally deprecated and the Kubernetes project announced the
+general availability of the community-owned package repositories for Debian and RPM packages,
+available at `https://pkgs.k8s.io`.
+
+These legacy repositories were frozen in September of 2023, and
+will go away entirely in January of 2024. If you are currently relying on them, you **must** migrate.
+
+_This deprecation is not directly related to the v1.29 release._ For more details, including how these changes may affect you and what to do if you are affected, please read the [legacy package repository deprecation announcement](/blog/2023/08/31/legacy-package-repository-deprecation/).
+
+## Release notes
+
+Check out the full details of the Kubernetes v1.29 release in our [release notes](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md).
+
+## Availability
+
+Kubernetes v1.29 is available for download on [GitHub](https://github.com/kubernetes/kubernetes/releases/tag/v1.29.0). To get started with Kubernetes, check out these [interactive tutorials](/docs/tutorials) or run local Kubernetes clusters using [minikube](https://minikube.sigs.k8s.io/). You can also easily install v1.29 using [kubeadm](/docs/setup/independent/create-cluster-kubeadm).
+
+## Release team
+
+Kubernetes is only possible with the support, commitment, and hard work of its community. Each release team is made up of dedicated community volunteers who work together to build the many pieces that make up the Kubernetes releases you rely on. This requires the specialized skills of people from all corners of our community, from the code itself to its documentation and project management.
+
+We would like to thank the entire [release team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.29/release-team.md) for the hours spent hard at work to deliver the Kubernetes v1.29 release for our community. A very special thanks is in order for our release lead, [Priyanka Saggu](https://github.com/Priyankasaggu11929), for supporting and guiding us through a successful release cycle, making sure that we could all contribute in the best way possible, and challenging us to improve the release process.
+
+## Project velocity
+
+The CNCF K8s DevStats project aggregates a number of interesting data points related to the velocity of Kubernetes and various sub-projects. This includes everything from individual contributions to the number of companies that are contributing and is an illustration of the depth and breadth of effort that goes into evolving this ecosystem.
+
+In the v1.29 release cycle, which [ran for 14 weeks](https://github.com/kubernetes/sig-release/tree/master/releases/release-1.29) (September 6 to December 13), we saw contributions from [888 companies](https://k8s.devstats.cncf.io/d/9/companies-table?orgId=1&var-period_name=v1.28.0%20-%20now&var-metric=contributions) and [1422 individuals](https://k8s.devstats.cncf.io/d/66/developer-activity-counts-by-companies?orgId=1&var-period_name=v1.28.0%20-%20now&var-metric=contributions&var-repogroup_name=Kubernetes&var-repo_name=kubernetes%2Fkubernetes&var-country_name=All&var-companies=All).
+
+## Ecosystem updates
+
+- KubeCon + CloudNativeCon Europe 2024 will take place in Paris, France, from **19 – 22 March 2024**! You can find more information about the conference and registration on the [event site](https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/).
+
+## Upcoming release webinar {#release-webinar}
+
+Join members of the Kubernetes v1.29 release team on Friday, December 15th, 2023, at 11am PT (2pm Eastern) to learn about the major features of this release, as well as deprecations and removals to help plan for upgrades. For more information and registration, visit the [event page](https://community.cncf.io/events/details/cncf-cncf-online-programs-presents-cncf-live-webinar-kubernetes-129-release/) on the CNCF Online Programs site.
+
+### Get involved
+
+The simplest way to get involved with Kubernetes is by joining one of the many [Special Interest Groups](https://github.com/kubernetes/community/blob/master/sig-list.md) (SIGs) that align with your interests. Have something you’d like to broadcast to the Kubernetes community? Share your voice at our weekly [community meeting](https://github.com/kubernetes/community/tree/master/communication), and through the channels below. Thank you for your continued feedback and support.

+- Follow us on Twitter [@Kubernetesio](https://twitter.com/kubernetesio) for the latest updates
+- Join the community discussion on [Discuss](https://discuss.kubernetes.io/)
+- Join the community on [Slack](http://slack.k8s.io/)
+- Post questions (or answer questions) on [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes)
+- Share your Kubernetes [story](https://docs.google.com/a/linuxfoundation.org/forms/d/e/1FAIpQLScuI7Ye3VQHQTwBASrgkjQDSS5TP0g3AXfFhwSM9YpHgxRKFA/viewform)
+- Read more about what’s happening with Kubernetes on the [blog](https://kubernetes.io/blog/)
+- Learn more about the [Kubernetes Release Team](https://github.com/kubernetes/sig-release/tree/master/release-team)
diff --git a/content/en/blog/_posts/2023-12-14-disabling-in-tree-cloud-provider-goes-beta.md b/content/en/blog/_posts/2023-12-14-disabling-in-tree-cloud-provider-goes-beta.md
new file mode 100644
index 0000000000000..5a3fc0214d36a
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-14-disabling-in-tree-cloud-provider-goes-beta.md
@@ -0,0 +1,218 @@
+---
+layout: blog
+title: "Kubernetes 1.29: Cloud Provider Integrations Are Now Separate Components"
+date: 2023-12-14T09:30:00-08:00
+slug: cloud-provider-integration-changes
+---
+
+**Authors:** Michael McCune (Red Hat), Andrew Sy Kim (Google)
+
+For Kubernetes v1.29, you need to use additional components to integrate your
+Kubernetes cluster with a cloud infrastructure provider. By default, Kubernetes
+v1.29 components **abort** if you try to specify integration with any cloud provider using
+one of the legacy compiled-in cloud provider integrations.
If you want to use a legacy
+integration, you have to opt back in - and a future release will remove even that option.
+
+In 2018, the [Kubernetes community agreed to form the Cloud Provider Special
+Interest Group (SIG)][oldblog], with a mission to externalize all cloud provider
+integrations and remove all the existing in-tree cloud provider integrations.
+In January 2019, the Kubernetes community approved the initial draft of
+[KEP-2395: Removing In-Tree Cloud Provider Code][kep2395]. This KEP defines a
+process by which we can remove cloud provider specific code from the core
+Kubernetes source tree. From the KEP:
+
+> Motiviation [sic] behind this effort is to allow cloud providers to develop and
+> make releases independent from the core Kubernetes release cycle. The
+> de-coupling of cloud provider code allows for separation of concern between
+> "Kubernetes core" and the cloud providers within the ecosystem. In addition,
+> this ensures all cloud providers in the ecosystem are integrating with
+> Kubernetes in a consistent and extendable way.
+
+After many years of development and collaboration across many contributors,
+the default behavior for legacy cloud provider integrations is changing.
+This means that users will need to confirm their Kubernetes configurations,
+and in some cases run external cloud controller managers. These changes are
+taking effect in Kubernetes version 1.29; read on to learn if you are affected
+and what changes you will need to make.
+
+These updated default settings affect a large proportion of Kubernetes users,
+and **will require changes** for users who were previously using the in-tree
+provider integrations. The legacy integrations offered compatibility with
+Azure, AWS, GCE, OpenStack, and vSphere; however, for OpenStack and AWS the
+compiled-in integrations were removed in Kubernetes versions 1.26 and 1.27,
+respectively.
+
+## What has changed?
+
+At the most basic level, two [feature gates][fg] are changing their default
+value from false to true. Those feature gates, `DisableCloudProviders` and
+`DisableKubeletCloudCredentialProviders`, control the way that the
+[kube-apiserver][kapi], [kube-controller-manager][kcm], and [kubelet][kubelet]
+invoke the cloud provider related code that is included in those components.
+When these feature gates are true (the default), the only recognized value for
+the `--cloud-provider` command line argument is `external`.
+
+Let's see what the [official Kubernetes documentation][fg] says about these
+feature gates:
+
+> `DisableCloudProviders`: Disables any functionality in `kube-apiserver`,
+> `kube-controller-manager` and `kubelet` related to the `--cloud-provider`
+> component flag.
+
+> `DisableKubeletCloudCredentialProviders`: Disable the in-tree functionality
+> in kubelet to authenticate to a cloud provider container registry for image
+> pull credentials.
+
+The next stage beyond beta will be full removal; for that release onwards, you
+won't be able to override those feature gates back to false.
+
+## What do you need to do?
+
+If you are upgrading from Kubernetes 1.28+ and are not on Azure, GCE, or
+vSphere, then there are no changes you will need to make. If
+you **are** on Azure, GCE, or vSphere, or you are upgrading from a version
+older than 1.28, then read on.
+
+Historically, Kubernetes has included code for a set of cloud providers that
+included AWS, Azure, GCE, OpenStack, and vSphere.
Since the inception of
+[KEP-2395][kep2395] the community has been moving towards removal of that
+cloud provider code. The OpenStack provider code was removed in version 1.26,
+and the AWS provider code was removed in version 1.27. This means that users
+who are upgrading from one of the affected cloud providers and versions will
+need to modify their deployments.
+
+### Upgrading on Azure, GCE, or vSphere
+
+There are two options for upgrading in this configuration: migrate to external
+cloud controller managers, or continue using the in-tree provider code.
+Although migrating to external cloud controller managers is recommended,
+there are scenarios where continuing with the current behavior is desired.
+Please choose the best option for your needs.
+
+#### Migrate to external cloud controller managers
+
+Migrating to use external cloud controller managers is the recommended upgrade
+path, when possible in your situation. To do this you will need to
+enable the `--cloud-provider=external` command line flag for the
+`kube-apiserver`, `kube-controller-manager`, and `kubelet` components. In
+addition, you will need to deploy a cloud controller manager for your provider.
+
+Installing and running cloud controller managers is a larger topic than this
+post can address; if you would like more information on this process please
+read the documentation for [Cloud Controller Manager Administration][ccmadmin]
+and [Migrate Replicated Control Plane To Use Cloud Controller Manager][ccmha].
+See [below](#cloud-provider-integrations) for links to specific cloud provider
+implementations.
+
+#### Continue using in-tree provider code
+
+If you wish to continue using Kubernetes with the in-tree cloud provider code,
+you will need to modify the command line parameters for `kube-apiserver`,
+`kube-controller-manager`, and `kubelet` to disable the feature gates for
+`DisableCloudProviders` and `DisableKubeletCloudCredentialProviders`. To do
+this, add the following command line flag to the arguments for the previously
+listed commands:
+
+```
+--feature-gates=DisableCloudProviders=false,DisableKubeletCloudCredentialProviders=false
+```
+
+_Please note that if you have other feature gate modifications on the command
+line, they will need to include these two feature gates._
+
+**Note**: These feature gates will be locked to `true` in an upcoming
+release. Setting these feature gates to `false` should be used as a last
+resort. It is highly recommended to migrate to an external cloud controller
+manager as the in-tree providers are planned for removal as early as Kubernetes
+version 1.31.
+
+### Upgrading on other providers
+
+For providers other than Azure, GCE, or vSphere, there is good news: the external cloud
+controller manager should already be in use. You can confirm this by inspecting
+the `--cloud-provider` flag for the kubelets in your cluster; they will have
+the value `external` if an external provider is in use (one quick way to check is
+sketched at the end of this section). The code for AWS and OpenStack
+providers was removed from Kubernetes before version 1.27 was released.
+Providers beyond AWS, Azure, GCE, OpenStack, and vSphere were never
+included in Kubernetes, and as such they began their life as external cloud
+controller managers.
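+
+If you want to double-check a node, one quick way to inspect the flag is sketched below
+(this assumes a systemd-managed kubelet; the unit name and drop-in files vary by distribution):
+
+```bash
+# Show the kubelet unit, including drop-ins, and look for the flag:
+systemctl cat kubelet | grep -- '--cloud-provider'
+
+# Or inspect the running process directly:
+ps -ef | grep kubelet | grep -o -- '--cloud-provider=[^ ]*'
+```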
+ +### Upgrading from older Kubernetes versions + +If you are upgrading from a Kubernetes release older than 1.26, and you are on +AWS, Azure, GCE, OpenStack, or vSphere then you will need to enable the +`--cloud-provider=external` flag, and follow the advice for installing and +running a cloud controller manager for your provider. + +Please read the documentation for +[Cloud Controller Manager Administration][ccmadmin] and +[Migrate Replicated Control Plane To Use Cloud Controller Manager][ccmha]. See +below for links to specific cloud provider implementations. + +## Where to find a cloud controller manager? + +At its core, this announcement is about the cloud provider integrations that +were previously included in Kubernetes. As these components move out of the +core Kubernetes code and into their own repositories, it is important to note +a few things: + +First, SIG Cloud Provider offers a reference framework for developers who +wish to create cloud controller managers for any provider. See the +[cloud-provider repository][cloud-provider] for more information about how +these controllers work and how to get started creating your own. + +Second, there are many cloud controller managers available for Kubernetes. +This post is addressing the provider integrations that have been historically +included with Kubernetes but are now in the process of being removed. If you +need a cloud controller manager for your provider and do not see it listed here, +please reach out to the cloud provider you are integrating with or the +[Kubernetes SIG Cloud Provider community][sig] for help and advice. It is +worth noting that while most cloud controller managers are open source today, +this may not always be the case. Users should always contact their cloud +provider to learn if there are preferred solutions to utilize on their +infrastructure. + +### Cloud provider integrations provided by the Kubernetes project {#cloud-provider-integrations} + +* AWS - https://github.com/kubernetes/cloud-provider-aws +* Azure - https://github.com/kubernetes-sigs/cloud-provider-azure +* GCE - https://github.com/kubernetes/cloud-provider-gcp +* OpenStack - https://github.com/kubernetes/cloud-provider-openstack +* vSphere - https://github.com/kubernetes/cloud-provider-vsphere + +If you are looking for an automated approach to installing cloud controller +managers in your clusters, the [kOps][kops] project provides a convenient +solution for managing production-ready clusters. + +## Want to learn more? + +Cloud providers and cloud controller managers serve a core function in +Kubernetes. Cloud providers are often the substrate upon which Kubernetes is +operated, and the cloud controller managers supply the essential lifeline +between Kubernetes clusters and their physical infrastructure. + +This post covers one aspect of how the Kubernetes community interacts with +the world of cloud infrastructure providers. If you are curious about this +topic and want to learn more, the Cloud Provider Special Interest Group (SIG) +is the place to go. SIG Cloud Provider hosts bi-weekly meetings to discuss all +manner of topics related to cloud providers and cloud controller managers in +Kubernetes. + +### SIG Cloud Provider + +* Regular SIG Meeting: [Wednesdays at 9:00 PT (Pacific Time)](https://zoom.us/j/508079177?pwd=ZmEvMksxdTFTc0N1eXFLRm91QUlyUT09) (biweekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=9:00&tz=PT%20%28Pacific%20Time%29). 
+* [Kubernetes slack][kslack] channel `#sig-cloud-provider`
+* [SIG Community page][sig]
+
+[kep2395]: https://github.com/kubernetes/enhancements/tree/master/keps/sig-cloud-provider/2395-removing-in-tree-cloud-providers
+[fg]: https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/
+[kubelet]: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/
+[kcm]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/
+[kapi]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
+[ccmadmin]: https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/
+[ccmha]: https://kubernetes.io/docs/tasks/administer-cluster/controller-manager-leader-migration/
+[kslack]: https://kubernetes.slack.com
+[sig]: https://github.com/kubernetes/community/tree/master/sig-cloud-provider
+[cloud-provider]: https://github.com/kubernetes/cloud-provider
+[oldblog]: https://kubernetes.io/blog/2019/04/17/the-future-of-cloud-providers-in-kubernetes/
+[kops]: https://github.com/kubernetes/kops
diff --git a/content/en/blog/_posts/2023-12-15-node-expand-secret-moves-ga.md b/content/en/blog/_posts/2023-12-15-node-expand-secret-moves-ga.md
new file mode 100644
index 0000000000000..3847bd3230985
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-15-node-expand-secret-moves-ga.md
@@ -0,0 +1,92 @@
+---
+layout: blog
+title: "Kubernetes 1.29: CSI Storage Resizing Authenticated and Generally Available in v1.29"
+date: 2023-12-15
+slug: csi-node-expand-secret-support-ga
+---
+**Authors:** Humble Chirammal (VMware), Louis Koo (deeproute.ai)
+
+Kubernetes version v1.29 brings generally available support for authentication
+during CSI (Container Storage Interface) storage resize operations.
+
+Let's embark on the evolution of this feature, initially introduced in alpha in
+Kubernetes v1.25, and unravel the changes accompanying its transition to GA.
+
+## Authenticated CSI storage resizing unveiled
+
+Kubernetes harnesses the capabilities of CSI to seamlessly integrate with third-party
+storage systems, empowering your cluster to expand storage volumes
+managed by the CSI driver. The recent elevation of authentication secret support
+for resizes from Beta to GA ushers in new horizons, enabling volume expansion in
+scenarios where the underlying storage operation demands credentials for backend
+cluster operations – such as accessing a SAN/NAS fabric. This enhancement addresses
+a critical limitation for CSI drivers, allowing volume expansion at the node level,
+especially in cases necessitating authentication for resize operations.
+
+The challenges extend beyond node-level expansion. Within the Special Interest
+Group (SIG) Storage, use cases have surfaced, including scenarios where the
+CSI driver needs to validate the actual size of backend block storage before
+initiating a node-level filesystem expand operation. This validation prevents
+false positive returns from the backend storage cluster during file system expansion.
+Additionally, for PersistentVolumes representing encrypted block storage (e.g., using LUKS),
+a passphrase is mandated to expand the device and grow the filesystem, underscoring
+the necessity for authenticated resizing.
+
+## What's new for Kubernetes v1.29
+
+With the graduation to GA, the feature remains enabled by default. Support for
+node-level volume expansion secrets has been seamlessly integrated into the CSI
+external-provisioner sidecar controller.
To take advantage, ensure your external
+CSI storage provisioner sidecar controller is operating at v3.3.0 or above.
+
+## Navigating Authenticated CSI Storage Resizing
+
+Assuming all requisite components, including the CSI driver, are deployed and operational
+on your cluster, and you have a CSI driver supporting resizing, you can initiate a
+`NodeExpand` operation on a CSI volume. Credentials for the CSI `NodeExpand` operation
+can be conveniently provided as a Kubernetes Secret, specifying the Secret via the
+StorageClass. Here's an illustrative manifest for a Secret holding credentials:
+
+```yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: test-secret
+  namespace: default
+stringData:
+  username: admin
+  password: t0p-Secret
+```
+
+And here's an example manifest for a StorageClass referencing those credentials:
+
+```yaml
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: csi-blockstorage-sc
+parameters:
+  csi.storage.k8s.io/node-expand-secret-name: test-secret
+  csi.storage.k8s.io/node-expand-secret-namespace: default
+provisioner: blockstorage.cloudprovider.example
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
+allowVolumeExpansion: true
+```
+
+Upon successful creation of the PersistentVolumeClaim (PVC), you can verify the
+configuration within the `.spec.csi` field of the PersistentVolume. To confirm,
+execute `kubectl get persistentvolume -o yaml`.
+
+## Engage with the Evolution!
+
+For those enthusiastic about contributing or delving deeper into the technical
+intricacies, the enhancement proposal contains exhaustive details about the
+feature's history and implementation. Explore StorageClass-based
+dynamic provisioning in Kubernetes by referring to the
+[storage class documentation](/docs/concepts/storage/persistent-volumes/#class)
+and the overarching [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) documentation.
+
+Join the Kubernetes Storage SIG (Special Interest Group) to actively participate
+in elevating this feature. Your insights are invaluable, and we eagerly anticipate
+welcoming more contributors to shape the future of Kubernetes storage!
+
diff --git a/content/en/blog/_posts/2023-12-15-volume-attributes-class/index.md b/content/en/blog/_posts/2023-12-15-volume-attributes-class/index.md
new file mode 100644
index 0000000000000..b1ee197dca3a0
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-15-volume-attributes-class/index.md
@@ -0,0 +1,177 @@
+---
+layout: blog
+title: "Kubernetes 1.29: VolumeAttributesClass for Volume Modification"
+date: 2023-12-15
+slug: kubernetes-1-29-volume-attributes-class
+---
+
+**Author**: Sunny Song (Google)
+
+The v1.29 release of Kubernetes introduced an alpha feature to support modifying a volume
+by changing the `volumeAttributesClassName` that was specified for a PersistentVolumeClaim (PVC).
+With the feature enabled, Kubernetes can handle updates of volume attributes other than capacity.
+Allowing volume attributes to be changed without managing them directly through different
+providers' APIs simplifies the current flow.
+
+You can read about VolumeAttributesClass usage details in the Kubernetes documentation
+or you can read on to learn about why the Kubernetes project is supporting this feature.
+
+
+## VolumeAttributesClass
+
+The new `storage.k8s.io/v1alpha1` API group provides two new types:
+
+**VolumeAttributesClass**
+
+Represents a specification of mutable volume attributes defined by the CSI driver.
+The class can be specified during dynamic provisioning of PersistentVolumeClaims,
+and changed in the PersistentVolumeClaim spec after provisioning.
+
+**ModifyVolumeStatus**
+
+Represents the status object of a `ControllerModifyVolume` operation.
+
+With this alpha feature enabled, the PersistentVolumeClaim spec gains a `volumeAttributesClassName`
+field that names the class used by that PVC. At volume provisioning, the `CreateVolume` operation will apply the parameters in the
+VolumeAttributesClass along with the parameters in the StorageClass.
+
+When there is a change of `volumeAttributesClassName` in the PVC spec,
+the external-resizer sidecar will get an informer event. Based on the current state of the configuration,
+the resizer will trigger a CSI `ControllerModifyVolume` operation.
+More details can be found in [KEP-3751](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/3751-volume-attributes-class/README.md).
+
+## How to use it
+
+If you want to test the feature whilst it's alpha, you need to enable the relevant feature gate
+in the `kube-controller-manager` and the `kube-apiserver`. Use the `--feature-gates` command line argument:
+
+```
+--feature-gates="...,VolumeAttributesClass=true"
+```
+
+It also requires that the CSI driver has implemented the `ModifyVolume` API.
+
+### User flow
+
+If you would like to see the feature in action and verify it works fine in your cluster, here's what you can try:
+
+1. Define a StorageClass and VolumeAttributesClass
+
+   ```yaml
+   apiVersion: storage.k8s.io/v1
+   kind: StorageClass
+   metadata:
+     name: csi-sc-example
+   provisioner: pd.csi.storage.gke.io
+   parameters:
+     disk-type: "hyperdisk-balanced"
+   volumeBindingMode: WaitForFirstConsumer
+   ```

+   ```yaml
+   apiVersion: storage.k8s.io/v1alpha1
+   kind: VolumeAttributesClass
+   metadata:
+     name: silver
+   driverName: pd.csi.storage.gke.io
+   parameters:
+     provisioned-iops: "3000"
+     provisioned-throughput: "50"
+   ```

+2. Define and create the PersistentVolumeClaim
+
+   ```yaml
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: test-pv-claim
+   spec:
+     storageClassName: csi-sc-example
+     volumeAttributesClassName: silver
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 64Gi
+   ```

+3. Verify that the PersistentVolumeClaim is now provisioned correctly with:
+
+   ```
+   kubectl get pvc
+   ```

+4. Create a new VolumeAttributesClass gold:
+
+   ```yaml
+   apiVersion: storage.k8s.io/v1alpha1
+   kind: VolumeAttributesClass
+   metadata:
+     name: gold
+   driverName: pd.csi.storage.gke.io
+   parameters:
+     iops: "4000"
+     throughput: "60"
+   ```

+5. Update the PVC with the new VolumeAttributesClass and apply:
+
+   ```yaml
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: test-pv-claim
+   spec:
+     storageClassName: csi-sc-example
+     volumeAttributesClassName: gold
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 64Gi
+   ```

+6. 
Verify that the PersistentVolumeClaim has the updated VolumeAttributesClass parameters with:
+
+   ```
+   kubectl describe pvc
+   ```
+
+## Next steps
+
+* See the [VolumeAttributesClass KEP](https://kep.k8s.io/3751) for more information on the design
+* You can view or comment on the [project board](https://github.com/orgs/kubernetes-csi/projects/72) for VolumeAttributesClass
+* In order to move this feature towards beta, we need feedback from the community,
+  so here's a call to action: add support to the CSI drivers, try out this feature,
+  consider how it can help with problems that your users are having…
+
+## Getting involved
+
+We always welcome new contributors. So, if you would like to get involved, you can join our [Kubernetes Storage Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-storage) (SIG).
+
+If you would like to share feedback, you can do so on our [public Slack channel](https://app.slack.com/client/T09NY5SBT/C09QZFCE5).
+
+Special thanks to all the contributors who provided great reviews, shared valuable insight and helped implement this feature (alphabetical order):
+
+* Baofa Fan (calory)
+* Ben Swartzlander (bswartz)
+* Connor Catlett (ConnorJC3)
+* Hemant Kumar (gnufied)
+* Jan Šafránek (jsafrane)
+* Joe Betz (jpbetz)
+* Jordan Liggitt (liggitt)
+* Matthew Cary (mattcary)
+* Michelle Au (msau42)
+* Xing Yang (xing-yang)
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-12-18-kubernetes-1-29-feature-loadbalancer-ip-mode-alpha.md b/content/en/blog/_posts/2023-12-18-kubernetes-1-29-feature-loadbalancer-ip-mode-alpha.md
new file mode 100644
index 0000000000000..14767cd5b999e
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-18-kubernetes-1-29-feature-loadbalancer-ip-mode-alpha.md
@@ -0,0 +1,110 @@
+---
+layout: blog
+title: "Kubernetes 1.29: New (alpha) Feature, Load Balancer IP Mode for Services"
+date: 2023-12-18
+slug: kubernetes-1-29-feature-loadbalancer-ip-mode-alpha
+---
+
+**Author:** [Aohan Yang](https://github.com/RyanAoh)
+
+This blog introduces a new alpha feature in Kubernetes 1.29.
+It provides a configurable approach to define how Service implementations,
+exemplified in this blog by kube-proxy,
+handle traffic from pods to a Service within the cluster.
+
+## Background
+
+In older Kubernetes releases, the kube-proxy would intercept traffic that was destined for the IP
+address associated with a Service of `type: LoadBalancer`. This happened whatever mode you used
+for `kube-proxy`.
+The interception implemented the expected behavior (traffic eventually reaching the expected
+endpoints behind the Service). The mechanism to make that work depended on the mode for kube-proxy;
+on Linux, kube-proxy in iptables mode would redirect packets directly to the endpoint; in ipvs mode,
+kube-proxy would assign the load balancer's IP address to an interface on the node.
+The motivation for implementing that interception was twofold:
+
+1. **Traffic path optimization:** Efficiently redirecting pod traffic - when a container in a pod sends an outbound
+   packet that is destined for the load balancer's IP address -
+   directly to the backend service by bypassing the load balancer.
+
+2. **Handling load balancer packets:** Some load balancers send packets with the destination IP set to
+the load balancer's IP address. As a result, these packets need to be routed directly to the correct backend (which
+might not be local to that node), in order to avoid loops.
+
+## Problems
+
+However, there are several problems with the aforementioned behavior:
+
+1. **[Source IP](https://github.com/kubernetes/kubernetes/issues/79783):**
+   Some cloud providers use the load balancer's IP as the source IP when
+   transmitting packets to the node. In the ipvs mode of kube-proxy,
+   health checks from the load balancer then never return. This occurs because the reply packets
+   would be forwarded to the local interface `kube-ipvs0` (where the load balancer's IP is bound)
+   and be subsequently ignored.
+
+2. **[Feature loss at load balancer level](https://github.com/kubernetes/kubernetes/issues/66607):**
+   Certain cloud providers offer features (such as TLS termination, proxy protocol, etc.) at the
+   load balancer level.
+   Bypassing the load balancer results in the loss of these features when the packet reaches the service
+   (leading to protocol errors).
+
+Even with the new alpha behavior disabled (the default), there is a
+[workaround](https://github.com/kubernetes/kubernetes/issues/66607#issuecomment-474513060)
+that involves setting `.status.loadBalancer.ingress.hostname` for the Service, in order
+to bypass kube-proxy binding.
+But this is just a makeshift solution.
+
+## Solution
+
+In summary, providing an option for cloud providers to disable the current behavior would be highly beneficial.
+
+To address this, Kubernetes v1.29 introduces a new (alpha) `.status.loadBalancer.ingress.ipMode`
+field for a Service.
+This field specifies how the load balancer IP behaves and can be specified only when
+the `.status.loadBalancer.ingress.ip` field is also specified.
+
+Two values are possible for `.status.loadBalancer.ingress.ipMode`: `"VIP"` and `"Proxy"`.
+The default value is `"VIP"`, meaning that traffic delivered to the node
+with the destination set to the load balancer's IP and port will be redirected to the backend service by kube-proxy.
+This preserves the existing behavior of kube-proxy.
+The `"Proxy"` value is intended to prevent kube-proxy from binding the load balancer's IP address
+to the node in both ipvs and iptables modes.
+Consequently, traffic is sent directly to the load balancer and then forwarded to the destination node.
+The destination setting for forwarded packets varies depending on how the cloud provider's load balancer delivers traffic:
+
+- If the traffic is delivered to the node then DNATed to the pod, the destination would be set to the node's IP and node port;
+- If the traffic is delivered directly to the pod, the destination would be set to the pod's IP and port.
+
+## Usage
+
+Here are the necessary steps to enable this feature:
+
+- Download the [latest Kubernetes project](https://kubernetes.io/releases/download/) (version `v1.29.0` or later).
+- Enable the feature gate with the command line flag `--feature-gates=LoadBalancerIPMode=true`
+on kube-proxy, kube-apiserver, and cloud-controller-manager.
+- For Services with `type: LoadBalancer`, set `ipMode` to the appropriate value.
+This step is likely handled by your chosen cloud-controller-manager during the `EnsureLoadBalancer` process.
+
+## More information
+
+- Read [Specifying IPMode of load balancer status](/docs/concepts/services-networking/service/#load-balancer-ip-mode).
+- Read [KEP-1860](https://kep.k8s.io/1860) - [Make Kubernetes aware of the LoadBalancer behaviour](https://github.com/kubernetes/enhancements/tree/b103a6b0992439f996be4314caf3bf7b75652366/keps/sig-network/1860-kube-proxy-IP-node-binding#kep-1860-make-kubernetes-aware-of-the-loadbalancer-behaviour) _(sic)_. + +## Getting involved + +Reach us on [Slack](https://slack.k8s.io/): [#sig-network](https://kubernetes.slack.com/messages/sig-network), +or through the [mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-network). + +## Acknowledgments + +Huge thanks to [@Sh4d1](https://github.com/Sh4d1) for the original KEP and initial implementation code. +I took over midway and completed the work. Similarly, immense gratitude to other contributors +who have assisted in the design, implementation, and review of this feature (alphabetical order): + +- [@aojea](https://github.com/aojea) +- [@danwinship](https://github.com/danwinship) +- [@sftim](https://github.com/sftim) +- [@tengqm](https://github.com/tengqm) +- [@thockin](https://github.com/thockin) +- [@wojtek-t](https://github.com/wojtek-t) \ No newline at end of file diff --git a/content/en/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md b/content/en/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md new file mode 100644 index 0000000000000..72b58a3be08fc --- /dev/null +++ b/content/en/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md @@ -0,0 +1,99 @@ +--- +layout: blog +title: "Kubernetes 1.29: Single Pod Access Mode for PersistentVolumes Graduates to Stable" +date: 2023-12-18 +slug: read-write-once-pod-access-mode-ga +--- + +**Author:** Chris Henzie (Google) + +With the release of Kubernetes v1.29, the `ReadWriteOncePod` volume access mode +has graduated to general availability: it's part of Kubernetes' stable API. In +this blog post, I'll take a closer look at this access mode and what it does. + +## What is `ReadWriteOncePod`? + +`ReadWriteOncePod` is an access mode for +[PersistentVolumes](/docs/concepts/storage/persistent-volumes/#persistent-volumes) (PVs) +and [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) (PVCs) +introduced in Kubernetes v1.22. This access mode enables you to restrict volume +access to a single pod in the cluster, ensuring that only one pod can write to +the volume at a time. This can be particularly useful for stateful workloads +that require single-writer access to storage. + +For more context on access modes and how `ReadWriteOncePod` works read +[What are access modes and why are they important?](/blog/2021/09/13/read-write-once-pod-access-mode-alpha/#what-are-access-modes-and-why-are-they-important) +in the _Introducing Single Pod Access Mode for PersistentVolumes_ article from 2021. + +## How can I start using `ReadWriteOncePod`? + +The `ReadWriteOncePod` volume access mode is available by default in Kubernetes +versions v1.27 and beyond. In Kubernetes v1.29 and later, the Kubernetes API +always recognizes this access mode. 
+ +Note that `ReadWriteOncePod` is +[only supported for CSI volumes](/docs/concepts/storage/persistent-volumes/#access-modes), +and before using this feature, you will need to update the following +[CSI sidecars](https://kubernetes-csi.github.io/docs/sidecar-containers.html) +to these versions or greater: + +- [csi-provisioner:v3.0.0+](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v3.0.0) +- [csi-attacher:v3.3.0+](https://github.com/kubernetes-csi/external-attacher/releases/tag/v3.3.0) +- [csi-resizer:v1.3.0+](https://github.com/kubernetes-csi/external-resizer/releases/tag/v1.3.0) + +To start using `ReadWriteOncePod`, you need to create a PVC with the +`ReadWriteOncePod` access mode: + +```yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: single-writer-only +spec: + accessModes: + - ReadWriteOncePod # Allows only a single pod to access single-writer-only. + resources: + requests: + storage: 1Gi +``` + +If your storage plugin supports +[Dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/), then +new PersistentVolumes will be created with the `ReadWriteOncePod` access mode +applied. + +Read [Migrating existing PersistentVolumes](/blog/2021/09/13/read-write-once-pod-access-mode-alpha/#migrating-existing-persistentvolumes) +for details on migrating existing volumes to use `ReadWriteOncePod`. + +## How can I learn more? + +Please see the blog posts [alpha](/blog/2021/09/13/read-write-once-pod-access-mode-alpha), +[beta](/blog/2023/04/20/read-write-once-pod-access-mode-beta), and +[KEP-2485](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/2485-read-write-once-pod-pv-access-mode/README.md) +for more details on the `ReadWriteOncePod` access mode and motivations for CSI +spec changes. + +## How do I get involved? + +The [Kubernetes #csi Slack channel](https://kubernetes.slack.com/messages/csi) +and any of the standard +[SIG Storage communication channels](https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact) +are great methods to reach out to the SIG Storage and the CSI teams. + +Special thanks to the following people whose thoughtful reviews and feedback helped shape this feature: + +* Abdullah Gharaibeh (ahg-g) +* Aldo Culquicondor (alculquicondor) +* Antonio Ojea (aojea) +* David Eads (deads2k) +* Jan Šafránek (jsafrane) +* Joe Betz (jpbetz) +* Kante Yin (kerthcet) +* Michelle Au (msau42) +* Tim Bannister (sftim) +* Xing Yang (xing-yang) + +If you’re interested in getting involved with the design and development of CSI +or any part of the Kubernetes storage system, join the +[Kubernetes Storage Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-storage) (SIG). +We’re rapidly growing and always welcome new contributors. 
diff --git a/content/en/blog/_posts/2023-12-19-PodReadyToStartContainersCondition-in-beta.md b/content/en/blog/_posts/2023-12-19-PodReadyToStartContainersCondition-in-beta.md
new file mode 100644
index 0000000000000..1af071c69b544
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-19-PodReadyToStartContainersCondition-in-beta.md
@@ -0,0 +1,62 @@
+---
+layout: blog
+title: "Kubernetes 1.29: PodReadyToStartContainers Condition Moves to Beta"
+date: 2023-12-19
+slug: pod-ready-to-start-containers-condition-now-in-beta
+---
+
+**Authors**: Zefeng Chen (independent), Kevin Hannon (Red Hat)
+
+
+With the recent release of Kubernetes 1.29, the `PodReadyToStartContainers`
+[condition](/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions) is
+available by default.
+The kubelet manages the value for that condition throughout a Pod's lifecycle,
+in the status field of a Pod. The kubelet will use the `PodReadyToStartContainers`
+condition to accurately surface the initialization state of a Pod,
+from the perspective of Pod sandbox creation and network configuration by a container runtime.
+
+## What's the motivation for this feature?
+
+Cluster administrators did not have a clear and easily accessible way to view the completion of a Pod's sandbox creation
+and initialization. As of 1.28, the `Initialized` condition in Pods tracks the execution of init containers.
+However, it has limitations in accurately reflecting the completion of sandbox creation and readiness to start containers for all Pods in a cluster.
+This distinction is particularly important in multi-tenant clusters where tenants own the Pod specifications, including the set of init containers,
+while cluster administrators manage storage plugins, networking plugins, and container runtime handlers.
+Therefore, there is a need for an improved mechanism to provide cluster administrators with a clear and
+comprehensive view of Pod sandbox creation completion and container readiness.
+
+## What's the benefit?
+
+1. Improved Visibility: Cluster administrators gain a clearer and more comprehensive view of Pod sandbox
+   creation completion and container readiness.
+   This enhanced visibility allows them to make better-informed decisions and troubleshoot issues more effectively.
+2. Metric Collection and Monitoring: Monitoring services can leverage the fields associated with
+   the `PodReadyToStartContainers` condition to report sandbox creation state and latency.
+   Metrics can be collected at per-Pod cardinality or aggregated based on various
+   properties of the Pod, such as `volumes`, `runtimeClassName`, custom annotations for CNI
+   and IPAM plugins or arbitrary labels and annotations, and `storageClassName` of
+   PersistentVolumeClaims.
+   This enables comprehensive monitoring and analysis of Pod readiness across the cluster.
+3. Enhanced Troubleshooting: With a more accurate representation of Pod sandbox creation and container readiness,
+   cluster administrators can quickly identify and address any issues that may arise during the initialization process.
+   This leads to improved troubleshooting capabilities and reduced downtime.
+
+### What’s next?
+
+Due to feedback and adoption, the Kubernetes team promoted `PodReadyToStartContainersCondition` to Beta in 1.29.
+Your comments will help determine if this condition continues forward to get promoted to GA,
+so please submit additional feedback on this feature!
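+
+If you want to see the new condition on one of your Pods, here is one way to query it with
+kubectl (a minimal sketch; substitute the name of a Pod in your cluster):
+
+```bash
+kubectl get pod <pod-name> -o jsonpath='{.status.conditions[?(@.type=="PodReadyToStartContainers")]}'
+```
+
+### How can I learn more?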
+
+Please check out the
+[documentation](/docs/concepts/workloads/pods/pod-lifecycle/) for the
+`PodReadyToStartContainersCondition` to learn more about it and how it fits in relation to
+other Pod conditions.
+
+### How to get involved?
+
+This feature is driven by the SIG Node community. Please join us to connect with
+the community and share your ideas and feedback around the above feature and
+beyond. We look forward to hearing from you!
\ No newline at end of file
diff --git a/content/en/blog/_posts/2023-12-19-taint-eviction-controller.md b/content/en/blog/_posts/2023-12-19-taint-eviction-controller.md
new file mode 100644
index 0000000000000..695339c100841
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-19-taint-eviction-controller.md
@@ -0,0 +1,85 @@
+---
+layout: blog
+title: "Kubernetes 1.29: Decoupling taint-manager from node-lifecycle-controller"
+date: 2023-12-19
+slug: kubernetes-1-29-taint-eviction-controller
+---
+
+**Authors:** Yuan Chen (Apple), Andrea Tosatto (Apple)
+
+This blog discusses a new feature in Kubernetes 1.29 to improve the handling of taint-based pod eviction.
+
+## Background
+
+In Kubernetes 1.29, an improvement has been introduced to enhance the taint-based pod eviction handling on nodes.
+This blog discusses the changes made to node-lifecycle-controller
+to separate its responsibilities and improve overall code maintainability.
+
+## Summary of changes
+
+node-lifecycle-controller previously combined two independent functions:
+
+- Adding a pre-defined set of `NoExecute` taints to Nodes based on a Node's condition.
+- Performing pod eviction on the `NoExecute` taint.
+
+With the Kubernetes 1.29 release, the taint-based eviction implementation has been
+moved out of node-lifecycle-controller into a separate and independent component called taint-eviction-controller.
+This separation aims to disentangle code, enhance code maintainability,
+and facilitate future extensions to either component.
+
+As part of the change, additional metrics were introduced to help you monitor taint-based pod evictions:
+
+- `pod_deletion_duration_seconds` measures the latency between the time when a taint effect
+has been activated for the Pod and its deletion via taint-eviction-controller.
+- `pod_deletions_total` reports the total number of Pods deleted by taint-eviction-controller since its start.
+
+## How to use the new feature?
+
+A new feature gate, `SeparateTaintEvictionController`, has been added. The feature is enabled by default as Beta in Kubernetes 1.29.
+Please refer to the [feature gate document](/docs/reference/command-line-tools-reference/feature-gates/).
+
+When this feature is enabled, users can optionally disable taint-based eviction by setting `--controllers=-taint-eviction-controller`
+in kube-controller-manager.
+
+To disable the new feature and use the old taint-manager within node-lifecycle-controller, users can set the feature gate `SeparateTaintEvictionController=false`.
+
+## Use cases
+
+This new feature will allow cluster administrators to extend and enhance the default
+taint-eviction-controller and even replace the default taint-eviction-controller with a
+custom implementation to meet different needs. An example is to better support
+stateful workloads that use PersistentVolume on local disks.
+
+## FAQ
+
+**Does this feature change the existing behavior of taint-based pod evictions?**
+
+No, the taint-based pod eviction behavior remains unchanged.
If the feature gate
+`SeparateTaintEvictionController` is turned off, the legacy node-lifecycle-controller with taint-manager will continue to be used.
+
+**Will enabling/using this feature result in an increase in the time taken by any operations covered by existing SLIs/SLOs?**
+
+No.
+
+**Will enabling/using this feature result in an increase in resource usage (CPU, RAM, disk, IO, ...)?**
+
+The increase in resource usage by running a separate `taint-eviction-controller` will be negligible.
+
+## Learn more
+
+For more details, refer to the [KEP](http://kep.k8s.io/3902).
+
+## Acknowledgments
+
+As with any Kubernetes feature, multiple community members have contributed, from
+writing the KEP to implementing the new controller and reviewing the KEP and code. Special thanks to:
+
+- Aldo Culquicondor (@alculquicondor)
+- Maciej Szulik (@soltysh)
+- Filip Křepinský (@atiratree)
+- Han Kang (@logicalhan)
+- Wei Huang (@Huang-Wei)
+- Sergey Kanzhelev (@SergeyKanzhelev)
+- Ravi Gudimetla (@ravisantoshgudimetla)
+- Deep Debroy (@ddebroy)
diff --git a/content/en/blog/_posts/2023-12-20-contextual-logging-in-kubernetes-1-29.md b/content/en/blog/_posts/2023-12-20-contextual-logging-in-kubernetes-1-29.md
new file mode 100644
index 0000000000000..9849e43525a45
--- /dev/null
+++ b/content/en/blog/_posts/2023-12-20-contextual-logging-in-kubernetes-1-29.md
@@ -0,0 +1,147 @@
+---
+layout: blog
+title: "Contextual logging in Kubernetes 1.29: Better troubleshooting and enhanced logging"
+slug: contextual-logging-in-kubernetes-1-29
+date: 2023-12-20T09:30:00-08:00
+canonicalUrl: https://www.kubernetes.dev/blog/2023/12/20/contextual-logging/
+---
+
+**Authors**: [Mengjiao Liu](https://github.com/mengjiao-liu/) (DaoCloud), [Patrick Ohly](https://github.com/pohly) (Intel)
+
+On behalf of the [Structured Logging Working Group](https://github.com/kubernetes/community/blob/master/wg-structured-logging/README.md)
+and [SIG Instrumentation](https://github.com/kubernetes/community/tree/master/sig-instrumentation#readme),
+we are pleased to announce that the contextual logging feature
+introduced in Kubernetes v1.24 has now been successfully migrated to
+two components (kube-scheduler and kube-controller-manager)
+as well as some directories. This feature aims to provide more useful logs
+for better troubleshooting of Kubernetes and to empower developers to enhance Kubernetes.
+
+## What is contextual logging?
+
+[Contextual logging](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/3077-contextual-logging)
+is based on the [go-logr](https://github.com/go-logr/logr#a-minimal-logging-api-for-go) API.
+The key idea is that libraries are passed a logger instance by their caller
+and use that for logging instead of accessing a global logger.
+The binary decides the logging implementation, not the libraries.
+The go-logr API is designed around structured logging and supports attaching
+additional information to a logger. A minimal Go sketch at the end of this post
+illustrates the pattern.
+
+This enables additional use cases:
+
+- The caller can attach additional information to a logger:
+  - [WithName](https://pkg.go.dev/github.com/go-logr/logr#Logger.WithName) adds a "logger" key with the names concatenated by a dot as value
+  - [WithValues](https://pkg.go.dev/github.com/go-logr/logr#Logger.WithValues) adds key/value pairs
+
+  When passing this extended logger into a function, and the function uses it
+  instead of the global logger, the additional information is then included
+  in all log entries, without having to modify the code that generates the log entries.
+  This is useful in highly parallel applications where it can become hard to identify
+  all log entries for a certain operation, because the output from different operations gets interleaved.
+
+- When running unit tests, log output can be associated with the current test.
+  Then, when a test fails, only the log output of the failed test gets shown by go test.
+  That output can also be more verbose by default because it will not get shown for successful tests.
+  Tests can be run in parallel without interleaving their output.
+
+One of the design decisions for contextual logging was to allow attaching a logger as value to a `context.Context`.
+Since the logger encapsulates all aspects of the intended logging for the call,
+it is *part* of the context, and not just *using* it. A practical advantage is that many APIs
+already have a `ctx` parameter or can add one. This provides additional advantages, like being able to
+get rid of `context.TODO()` calls inside the functions.
+
+## How to use it
+
+The contextual logging feature is alpha starting from Kubernetes v1.24,
+so it requires the `ContextualLogging` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be enabled.
+If you want to test the feature while it is alpha, you need to enable this feature gate
+on the `kube-controller-manager` and the `kube-scheduler`.
+
+For the `kube-scheduler`, there is one thing to note: in addition to enabling
+the `ContextualLogging` feature gate, instrumentation also depends on log verbosity.
+To avoid slowing down the scheduler with the logging instrumentation for contextual logging added for 1.29,
+it is important to choose carefully when to add additional information:
+- At `-v3` or lower, only `WithValues("pod")` is used once per scheduling cycle.
+  This has the intended effect that all log messages for the cycle include the pod information.
+  Once contextual logging is GA, "pod" key/value pairs can be removed from all log calls.
+- At `-v4` or higher, richer log entries get produced where `WithValues` is also used for the node (when applicable)
+  and `WithName` is used for the current operation and plugin.
+
+Here is an example that demonstrates the effect:
+> I1113 08:43:37.029524   87144 default_binder.go:53] "Attempting to bind pod to node" **logger="Bind.DefaultBinder"** **pod**="kube-system/coredns-69cbfb9798-ms4pq" **node**="127.0.0.1"
+
+The immediate benefit is that the operation and plugin name are visible in `logger`.
+`pod` and `node` are already logged as parameters in individual log calls in `kube-scheduler` code.
+Once contextual logging is supported by more packages outside of `kube-scheduler`,
+they will also be visible there (for example, client-go). Once it is GA,
+log calls can be simplified to avoid repeating those values.
+
+In `kube-controller-manager`, `WithName` is used to add the user-visible controller name to log output,
+for example:
+
+> I1113 08:43:29.284360   87141 graph_builder.go:285] "garbage controller monitor not synced: no monitors" **logger="garbage-collector-controller"**
+
+The `logger="garbage-collector-controller"` was added by the `kube-controller-manager` core
+when instantiating that controller and appears in all of its log entries - at least as long as the code
+that it calls supports contextual logging. Further work is needed to convert shared packages like client-go.
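To make the pattern concrete, here is a minimal, self-contained sketch using `k8s.io/klog/v2` (the logger name and the pod value are copied from the example output above; `doWork` is a hypothetical function):

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// doWork reads the logger from the context instead of using a global logger,
// so whatever names and key/value pairs the caller attached appear automatically.
func doWork(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Attempting to bind pod to node")
}

func main() {
	// Attach the operation name and the pod once, at the call site.
	logger := klog.Background().WithName("Bind.DefaultBinder")
	logger = logger.WithValues("pod", "kube-system/coredns-69cbfb9798-ms4pq")

	// Store the logger in the context and hand it down the call chain.
	ctx := klog.NewContext(context.Background(), logger)
	doWork(ctx) // output includes logger="Bind.DefaultBinder" pod="kube-system/coredns-69cbfb9798-ms4pq"
}
```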
+## Performance impact
+
+Supporting contextual logging in a package, i.e. accepting a logger from a caller, is cheap.
+No performance impact was observed for the `kube-scheduler`. As noted above,
+adding `WithName` and `WithValues` needs to be done more carefully.
+
+In Kubernetes 1.29, enabling contextual logging at production verbosity (`-v3` or lower)
+caused no measurable slowdown for the `kube-scheduler` and is not expected for the `kube-controller-manager` either.
+At debug levels, a 28% slowdown for some test cases is still reasonable given that the resulting logs make debugging easier.
+For details, see the [discussion around promoting the feature to beta](https://github.com/kubernetes/enhancements/pull/4219#issuecomment-1807811995).
+
+## Impact on downstream users
+
+Log output is not part of the Kubernetes API and changes regularly in each release,
+whether it is because developers work on the code or because of the ongoing conversion
+to structured and contextual logging.
+
+If downstream users have dependencies on specific logs,
+they need to be aware of how this change affects them.
+
+## Further reading
+
+- Read the [Contextual Logging in Kubernetes 1.24](https://www.kubernetes.dev/blog/2022/05/25/contextual-logging/) article.
+- Read the [KEP-3077: contextual logging](https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/3077-contextual-logging).
+
+## Get involved
+
+If you're interested in getting involved, we always welcome new contributors to join us.
+Contextual logging provides a fantastic opportunity for you to contribute to Kubernetes development and make a meaningful impact.
+By joining [Structured Logging WG](https://github.com/kubernetes/community/tree/master/wg-structured-logging),
+you can actively participate in the development of Kubernetes and make your first contribution.
+It's a great way to learn and engage with the community while gaining valuable experience.
+
+We encourage you to explore the repository and familiarize yourself with the ongoing discussions and projects.
+It's a collaborative environment where you can exchange ideas, ask questions, and work together with other contributors.
+
+If you have any questions or need guidance, don't hesitate to reach out to us
+and you can do so on our [public Slack channel](https://kubernetes.slack.com/messages/wg-structured-logging).
+If you're not already part of that Slack workspace, you can visit [https://slack.k8s.io/](https://slack.k8s.io/)
+for an invitation.
+
+We would like to express our gratitude to all the contributors who provided excellent reviews,
+shared valuable insights, and assisted in the implementation of this feature (in alphabetical order):
+
+- Aldo Culquicondor ([alculquicondor](https://github.com/alculquicondor))
+- Andy Goldstein ([ncdc](https://github.com/ncdc))
+- Feruzjon Muyassarov ([fmuyassarov](https://github.com/fmuyassarov))
+- Freddie ([freddie400](https://github.com/freddie400))
+- JUN YANG ([yangjunmyfm192085](https://github.com/yangjunmyfm192085))
+- Kante Yin ([kerthcet](https://github.com/kerthcet))
+- Kiki ([carlory](https://github.com/carlory))
+- Lucas Severo Alves ([knelasevero](https://github.com/knelasevero))
+- Maciej Szulik ([soltysh](https://github.com/soltysh))
+- Mengjiao Liu ([mengjiao-liu](https://github.com/mengjiao-liu))
+- Naman Lakhwani ([Namanl2001](https://github.com/Namanl2001))
+- Oksana Baranova ([oxxenix](https://github.com/oxxenix))
+- Patrick Ohly ([pohly](https://github.com/pohly))
+- songxiao-wang87 ([songxiao-wang87](https://github.com/songxiao-wang87))
+- Tim Allclair ([tallclair](https://github.com/tallclair))
+- ZhangYu ([Octopusjust](https://github.com/Octopusjust))
+- Ziqi Zhao ([fatsheep9146](https://github.com/fatsheep9146))
+- Zac ([249043822](https://github.com/249043822))
diff --git a/content/en/case-studies/vsco/index.html b/content/en/case-studies/vsco/index.html
index 95d8ae011c1e7..03101669f2adf 100644
--- a/content/en/case-studies/vsco/index.html
+++ b/content/en/case-studies/vsco/index.html
@@ -18,7 +18,7 @@

Challenge

-

After moving from Rackspace to AWS in 2015, VSCO began building Node.js and Go microservices in addition to running its PHP monolith. The team containerized the microservices using Docker, but "they were all in separate groups of EC2 instances that were dedicated per service," says Melinda Lu, Engineering Manager for the Machine Learning Team. Adds Naveen Gattu, Senior Software Engineer on the Community Team: "That yielded a lot of wasted resources. We started looking for a way to consolidate and be more efficient in the AWS EC2 instances."

+

After moving from Rackspace to AWS in 2015, VSCO began building Node.js and Go microservices in addition to running its PHP monolith. The team containerized the microservices using Docker, but "they were all in separate groups of EC2 instances that were dedicated per service," says Melinda Lu, Engineering Manager for the Machine Learning Team. Adds Naveen Gattu, Senior Software Engineer on the Community Team: "That yielded a lot of wasted resources. We started looking for a way to consolidate and be more efficient in the AWS EC2 instances."

Solution

diff --git a/content/en/community/_index.html b/content/en/community/_index.html index de7fcee9a45ee..8fa4c2461411a 100644 --- a/content/en/community/_index.html +++ b/content/en/community/_index.html @@ -122,9 +122,9 @@

Discussions

- Twitter + 𝕏.org - Twitter ▶ + 𝕏 ▶

#kubernetesio

Real-time announcements of blog posts, events, news, ideas.

@@ -178,6 +178,6 @@

Global community

diff --git a/content/en/docs/concepts/architecture/_index.md b/content/en/docs/concepts/architecture/_index.md
index 61fb48e7142b7..7c9a45c71e294 100644
--- a/content/en/docs/concepts/architecture/_index.md
+++ b/content/en/docs/concepts/architecture/_index.md
@@ -5,3 +5,4 @@ description: >
   The architectural concepts behind Kubernetes.
---
+{{< figure src="/images/docs/kubernetes-cluster-architecture.svg" alt="Components of Kubernetes" caption="Kubernetes cluster architecture" class="diagram-large" >}}
diff --git a/content/en/docs/concepts/architecture/cgroups.md b/content/en/docs/concepts/architecture/cgroups.md
index b0a98af6604b0..b96d89e0d6dd4 100644
--- a/content/en/docs/concepts/architecture/cgroups.md
+++ b/content/en/docs/concepts/architecture/cgroups.md
@@ -104,8 +104,8 @@ updated to newer versions that support cgroup v2. For example:
   DaemonSet for monitoring pods and containers, update it to v0.43.0 or later.
* If you deploy Java applications, prefer to use versions which fully support cgroup v2:
  * [OpenJDK / HotSpot](https://bugs.openjdk.org/browse/JDK-8230305): jdk8u372, 11.0.16, 15 and later
-  * [IBM Semeru Runtimes](https://www.eclipse.org/openj9/docs/version0.33/#control-groups-v2-support): jdk8u345-b01, 11.0.16.0, 17.0.4.0, 18.0.2.0 and later
-  * [IBM Java](https://www.ibm.com/docs/en/sdk-java-technology/8?topic=new-service-refresh-7#whatsnew_sr7__fp15): 8.0.7.15 and later
+  * [IBM Semeru Runtimes](https://www.ibm.com/support/pages/apar/IJ46681): 8.0.382.0, 11.0.20.0, 17.0.8.0, and later
+  * [IBM Java](https://www.ibm.com/support/pages/apar/IJ46681): 8.0.8.6 and later
* If you are using the [uber-go/automaxprocs](https://github.com/uber-go/automaxprocs) package, make sure the version you use is v1.5.1 or higher.
diff --git a/content/en/docs/concepts/architecture/controller.md b/content/en/docs/concepts/architecture/controller.md
index b89f6a077c2bd..9eae9ff42a54e 100644
--- a/content/en/docs/concepts/architecture/controller.md
+++ b/content/en/docs/concepts/architecture/controller.md
@@ -165,6 +165,6 @@ controller does.
* Discover some of the basic [Kubernetes objects](/docs/concepts/overview/working-with-objects/)
* Learn more about the [Kubernetes API](/docs/concepts/overview/kubernetes-api/)
* If you want to write your own controller, see
-  [Extension Patterns](/docs/concepts/extend-kubernetes/#extension-patterns)
-  in Extending Kubernetes.
+  [Kubernetes extension patterns](/docs/concepts/extend-kubernetes/#extension-patterns)
+  and the [sample-controller](https://github.com/kubernetes/sample-controller) repository.
diff --git a/content/en/docs/concepts/architecture/garbage-collection.md b/content/en/docs/concepts/architecture/garbage-collection.md
index 947ad515fc90b..4b36d850b55bb 100644
--- a/content/en/docs/concepts/architecture/garbage-collection.md
+++ b/content/en/docs/concepts/architecture/garbage-collection.md
@@ -111,7 +111,7 @@ to override this behaviour, see [Delete owner objects and orphan dependents](/do
## Garbage collection of unused containers and images {#containers-images}
The {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} performs garbage
-collection on unused images every five minutes and on unused containers every
+collection on unused images every two minutes and on unused containers every
minute. You should avoid using external garbage collection tools, as these can
break the kubelet behavior and remove containers that should exist.
@@ -137,6 +137,20 @@ collection, which deletes images in order based on the last time they were used,
starting with the oldest first. The kubelet deletes images until disk usage reaches the `LowThresholdPercent` value.
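As a sketch, the two thresholds appear in the kubelet configuration file like this (the percentage values are assumptions matching the commonly documented defaults):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Image garbage collection starts once disk usage exceeds the high
# threshold, and deletes images until usage drops below the low threshold.
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
```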
+#### Garbage collection for unused container images {#image-maximum-age-gc}
+
+{{< feature-state for_k8s_version="v1.29" state="alpha" >}}
+
+As an alpha feature, you can specify the maximum time a local image can be unused for,
+regardless of disk usage. This is a kubelet setting that you configure for each node.
+
+To configure the setting, enable the `ImageMaximumGCAge`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the kubelet,
+and also set a value for the `ImageMaximumGCAge` field in the kubelet configuration file.
+
+The value is specified as a Kubernetes _duration_; for example, you can set the configuration
+field to `3d12h`, which means 3 days and 12 hours.
+
### Container garbage collection {#container-image-garbage-collection}
The kubelet garbage collects unused containers based on the following variables,
@@ -178,4 +192,4 @@ configure garbage collection:
* Learn more about [ownership of Kubernetes objects](/docs/concepts/overview/working-with-objects/owners-dependents/).
* Learn more about Kubernetes [finalizers](/docs/concepts/overview/working-with-objects/finalizers/).
-* Learn about the [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) that cleans up finished Jobs.
+* Learn about the [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) that cleans up finished Jobs.
\ No newline at end of file
diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md
index 962fe32ac1103..6473d35a17e25 100644
--- a/content/en/docs/concepts/architecture/nodes.md
+++ b/content/en/docs/concepts/architecture/nodes.md
@@ -280,7 +280,7 @@ If you want to explicitly reserve resources for non-Pod processes, see
## Node topology
-{{< feature-state state="beta" for_k8s_version="v1.18" >}}
+{{< feature-state state="stable" for_k8s_version="v1.27" >}}
If you have enabled the `TopologyManager`
[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), then
diff --git a/content/en/docs/concepts/cluster-administration/addons.md b/content/en/docs/concepts/cluster-administration/addons.md
index f01736c10351c..7c2ab269e4a63 100644
--- a/content/en/docs/concepts/cluster-administration/addons.md
+++ b/content/en/docs/concepts/cluster-administration/addons.md
@@ -37,7 +37,7 @@ installation instructions. The list does not try to be exhaustive.
  network policies on L3-L7 using an identity-based security model that is decoupled
  from network addressing. Cilium can act as a replacement for kube-proxy; it also offers
  additional, opt-in observability and security features.
-  Cilium is a [CNCF project at the Incubation level](https://www.cncf.io/projects/cilium/).
+  Cilium is a [CNCF project at the Graduated level](https://www.cncf.io/projects/cilium/).
* [CNI-Genie](https://github.com/cni-genie/CNI-Genie) enables Kubernetes to seamlessly
  connect to a choice of CNI plugins, such as Calico, Canal, Flannel, or Weave.
  CNI-Genie is a [CNCF project at the Sandbox level](https://www.cncf.io/projects/cni-genie/).
@@ -54,6 +54,9 @@ installation instructions. The list does not try to be exhaustive.
  and bare metal workloads.
* [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) is an
  overlay network provider that can be used with Kubernetes.
+* [Gateway API](/docs/concepts/services-networking/gateway/) is an open source project managed by
+  the [SIG Network](https://github.com/kubernetes/community/tree/master/sig-network) community and
+  provides an expressive, extensible, and role-oriented API for modeling service networking.
* [Knitter](https://github.com/ZTE/Knitter/) is a plugin to support multiple network
  interfaces in a Kubernetes pod.
* [Multus](https://github.com/k8snetworkplumbingwg/multus-cni) is a Multi plugin for
@@ -76,6 +79,9 @@ installation instructions. The list does not try to be exhaustive.
  Pods and non-Kubernetes environments with visibility and security monitoring.
* [Romana](https://github.com/romana) is a Layer 3 networking solution for pod
  networks that also supports the [NetworkPolicy](/docs/concepts/services-networking/network-policies/) API.
+* [Spiderpool](https://github.com/spidernet-io/spiderpool) is an underlay and RDMA
+  networking solution for Kubernetes. Spiderpool is supported on bare metal, virtual machines,
+  and public cloud environments.
* [Weave Net](https://www.weave.works/docs/net/latest/kubernetes/kube-addon/)
  provides networking and network policy, will carry on working on both sides
  of a network partition, and does not require an external database.
diff --git a/content/en/docs/concepts/cluster-administration/flow-control.md b/content/en/docs/concepts/cluster-administration/flow-control.md
index 7b44cf4e5dc9e..859e841d0b73c 100644
--- a/content/en/docs/concepts/cluster-administration/flow-control.md
+++ b/content/en/docs/concepts/cluster-administration/flow-control.md
@@ -7,7 +7,7 @@ weight: 110
-{{< feature-state state="beta" for_k8s_version="v1.20" >}}
+{{< feature-state state="stable" for_k8s_version="v1.29" >}}
Controlling the behavior of the Kubernetes API server in an overload situation
is a key task for cluster administrators. The {{< glossary_tooltip
@@ -45,30 +45,27 @@ are not subject to the `--max-requests-inflight` limit.
## Enabling/Disabling API Priority and Fairness
-The API Priority and Fairness feature is controlled by a feature gate
-and is enabled by default. See [Feature
-Gates](/docs/reference/command-line-tools-reference/feature-gates/)
-for a general explanation of feature gates and how to enable and
-disable them. The name of the feature gate for APF is
-"APIPriorityAndFairness". This feature also involves an {{<
-glossary_tooltip term_id="api-group" text="API Group" >}} with: (a) a
-`v1alpha1` version and a `v1beta1` version, disabled by default, and
-(b) `v1beta2` and `v1beta3` versions, enabled by default. You can
-disable the feature gate and API group beta versions by adding the
+The API Priority and Fairness feature is controlled by a command-line flag
+and is enabled by default. See
+[Options](/docs/reference/command-line-tools-reference/kube-apiserver/options/)
+for a general explanation of the available kube-apiserver command-line
+options and how to enable and disable them. The name of the
+command-line option for APF is "--enable-priority-and-fairness". This feature
+also involves an {{< glossary_tooltip term_id="api-group" text="API Group" >}}
+with: (a) a stable `v1` version, introduced in 1.29 and
+enabled by default, and (b) a `v1beta3` version, enabled by default but
+deprecated in v1.29.
You can +disable the API group beta version `v1beta3` by adding the following command-line flags to your `kube-apiserver` invocation: ```shell kube-apiserver \ ---feature-gates=APIPriorityAndFairness=false \ ---runtime-config=flowcontrol.apiserver.k8s.io/v1beta2=false,flowcontrol.apiserver.k8s.io/v1beta3=false \ +--runtime-config=flowcontrol.apiserver.k8s.io/v1beta3=false \ # …and other flags as usual ``` -Alternatively, you can enable the v1alpha1 and v1beta1 versions of the API group -with `--runtime-config=flowcontrol.apiserver.k8s.io/v1alpha1=true,flowcontrol.apiserver.k8s.io/v1beta1=true`. - The command-line flag `--enable-priority-and-fairness=false` will disable the -API Priority and Fairness feature, even if other flags have enabled it. +API Priority and Fairness feature. ## Concepts @@ -178,14 +175,12 @@ server. ## Resources The flow control API involves two kinds of resources. -[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1beta2-flowcontrol-apiserver-k8s-io) +[PriorityLevelConfigurations](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#prioritylevelconfiguration-v1-flowcontrol-apiserver-k8s-io) define the available priority levels, the share of the available concurrency budget that each can handle, and allow for fine-tuning queuing behavior. -[FlowSchemas](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1beta2-flowcontrol-apiserver-k8s-io) +[FlowSchemas](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#flowschema-v1-flowcontrol-apiserver-k8s-io) are used to classify individual inbound requests, matching each to a -single PriorityLevelConfiguration. There is also a `v1alpha1` version -of the same API group, and it has the same Kinds with the same syntax and -semantics. +single PriorityLevelConfiguration. ### PriorityLevelConfiguration @@ -488,6 +483,8 @@ exports additional metrics. Monitoring these can help you determine whether your configuration is inappropriately throttling important traffic, or find poorly-behaved workloads that may be harming system health. +#### Maturity level BETA + * `apiserver_flowcontrol_rejected_requests_total` is a counter vector (cumulative since server start) of requests that were rejected, broken down by the labels `flow_schema` (indicating the one that @@ -509,6 +506,37 @@ poorly-behaved workloads that may be harming system health. vector (cumulative since server start) of requests that began executing, broken down by `flow_schema` and `priority_level`. +* `apiserver_flowcontrol_current_inqueue_requests` is a gauge vector + holding the instantaneous number of queued (not executing) requests, + broken down by `priority_level` and `flow_schema`. + +* `apiserver_flowcontrol_current_executing_requests` is a gauge vector + holding the instantaneous number of executing (not waiting in a + queue) requests, broken down by `priority_level` and `flow_schema`. + +* `apiserver_flowcontrol_current_executing_seats` is a gauge vector + holding the instantaneous number of occupied seats, broken down by + `priority_level` and `flow_schema`. + +* `apiserver_flowcontrol_request_wait_duration_seconds` is a histogram + vector of how long requests spent queued, broken down by the labels + `flow_schema`, `priority_level`, and `execute`. The `execute` label + indicates whether the request has started executing. 
+ + {{< note >}} + Since each FlowSchema always assigns requests to a single + PriorityLevelConfiguration, you can add the histograms for all the + FlowSchemas for one priority level to get the effective histogram for + requests assigned to that priority level. + {{< /note >}} + +* `apiserver_flowcontrol_nominal_limit_seats` is a gauge vector + holding each priority level's nominal concurrency limit, computed + from the API server's total concurrency limit and the priority + level's configured nominal concurrency shares. + +#### Maturity level ALPHA + * `apiserver_current_inqueue_requests` is a gauge vector of recent high water marks of the number of queued requests, grouped by a label named `request_kind` whose value is `mutating` or `readOnly`. @@ -518,6 +546,10 @@ poorly-behaved workloads that may be harming system health. last window's high water mark of number of requests actively being served. +* `apiserver_current_inqueue_seats` is a gauge vector of the sum over + queued requests of the largest number of seats each will occupy, + grouped by labels named `flow_schema` and `priority_level`. + * `apiserver_flowcontrol_read_vs_write_current_requests` is a histogram vector of observations, made at the end of every nanosecond, of the number of requests broken down by the labels @@ -528,14 +560,6 @@ poorly-behaved workloads that may be harming system health. number of requests (queue volume limit for waiting and concurrency limit for executing). -* `apiserver_flowcontrol_current_inqueue_requests` is a gauge vector - holding the instantaneous number of queued (not executing) requests, - broken down by `priority_level` and `flow_schema`. - -* `apiserver_flowcontrol_current_executing_requests` is a gauge vector - holding the instantaneous number of executing (not waiting in a - queue) requests, broken down by `priority_level` and `flow_schema`. - * `apiserver_flowcontrol_request_concurrency_in_use` is a gauge vector holding the instantaneous number of occupied seats, broken down by `priority_level` and `flow_schema`. @@ -584,11 +608,6 @@ poorly-behaved workloads that may be harming system health. was always equal to `apiserver_flowcontrol_current_limit_seats` (which did not exist as a distinct metric). -* `apiserver_flowcontrol_nominal_limit_seats` is a gauge vector - holding each priority level's nominal concurrency limit, computed - from the API server's total concurrency limit and the priority - level's configured nominal concurrency shares. - * `apiserver_flowcontrol_lower_limit_seats` is a gauge vector holding the lower bound on each priority level's dynamic concurrency limit. @@ -631,18 +650,6 @@ poorly-behaved workloads that may be harming system health. holding, for each priority level, the dynamic concurrency limit derived in the last adjustment. -* `apiserver_flowcontrol_request_wait_duration_seconds` is a histogram - vector of how long requests spent queued, broken down by the labels - `flow_schema`, `priority_level`, and `execute`. The `execute` label - indicates whether the request has started executing. - - {{< note >}} - Since each FlowSchema always assigns requests to a single - PriorityLevelConfiguration, you can add the histograms for all the - FlowSchemas for one priority level to get the effective histogram for - requests assigned to that priority level. - {{< /note >}} - * `apiserver_flowcontrol_request_execution_seconds` is a histogram vector of how long requests took to actually execute, broken down by `flow_schema` and `priority_level`. 
@@ -661,6 +668,11 @@ poorly-behaved workloads that may be harming system health.
  to a request being dispatched but did not, due to lack of available
  concurrency, broken down by `flow_schema` and `priority_level`.
+* `apiserver_flowcontrol_epoch_advance_total` is a counter vector of
+  the number of attempts to jump a priority level's progress meter
+  backward to avoid numeric overflow, grouped by `priority_level` and
+  `success`.
+
## Good practices for using API Priority and Fairness
When a given priority level exceeds its permitted concurrency, requests can
diff --git a/content/en/docs/concepts/cluster-administration/networking.md b/content/en/docs/concepts/cluster-administration/networking.md
index b788b67a14bd5..273f5ed69cbf1 100644
--- a/content/en/docs/concepts/cluster-administration/networking.md
+++ b/content/en/docs/concepts/cluster-administration/networking.md
@@ -34,12 +34,21 @@ To learn about the Kubernetes networking model, see [here](/docs/concepts/servic
## How to implement the Kubernetes network model
-The network model is implemented by the container runtime on each node. The most common container runtimes use [Container Network Interface](https://github.com/containernetworking/cni) (CNI) plugins to manage their network and security capabilities. Many different CNI plugins exist from many different vendors. Some of these provide only basic features of adding and removing network interfaces, while others provide more sophisticated solutions, such as integration with other container orchestration systems, running multiple CNI plugins, advanced IPAM features etc.
+The network model is implemented by the container runtime on each node. The most common container
+runtimes use [Container Network Interface](https://github.com/containernetworking/cni) (CNI)
+plugins to manage their network and security capabilities. Many different CNI plugins exist from
+many different vendors. Some of these provide only basic features of adding and removing network
+interfaces, while others provide more sophisticated solutions, such as integration with other
+container orchestration systems, running multiple CNI plugins, advanced IPAM features, etc.
-See [this page](/docs/concepts/cluster-administration/addons/#networking-and-network-policy) for a non-exhaustive list of networking addons supported by Kubernetes.
+See [this page](/docs/concepts/cluster-administration/addons/#networking-and-network-policy)
+for a non-exhaustive list of networking addons supported by Kubernetes.
## {{% heading "whatsnext" %}}
-The early design of the networking model and its rationale, and some future
-plans are described in more detail in the
+The early design of the networking model and its rationale are described in more detail in the
[networking design document](https://git.k8s.io/design-proposals-archive/network/networking.md).
+For future plans and some ongoing efforts that aim to improve Kubernetes networking, please
+refer to the SIG-Network
+[KEPs](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network).
+
diff --git a/content/en/docs/concepts/cluster-administration/system-logs.md b/content/en/docs/concepts/cluster-administration/system-logs.md
index d2a9d46bbd7e6..1feeecd3db7e5 100644
--- a/content/en/docs/concepts/cluster-administration/system-logs.md
+++ b/content/en/docs/concepts/cluster-administration/system-logs.md
@@ -17,6 +17,13 @@ scheduler decisions).
+{{< warning >}}
+In contrast to the command line flags described here, the *log
+output* itself does *not* fall under the Kubernetes API stability guarantees:
+individual log entries and their formatting may change from one release
+to the next!
+{{< /warning >}}
+
## Klog
klog is the Kubernetes logging library. [klog](https://github.com/kubernetes/klog)
diff --git a/content/en/docs/concepts/cluster-administration/system-metrics.md b/content/en/docs/concepts/cluster-administration/system-metrics.md
index 8fac9f7631795..ff6b41bbcd0ef 100644
--- a/content/en/docs/concepts/cluster-administration/system-metrics.md
+++ b/content/en/docs/concepts/cluster-administration/system-metrics.md
@@ -21,7 +21,7 @@ This format is structured plain text, designed so that people and machines can b
## Metrics in Kubernetes
In most cases metrics are available on `/metrics` endpoint of the HTTP server. For components that
-doesn't expose endpoint by default it can be enabled using `--bind-address` flag.
+don't expose the endpoint by default, it can be enabled using the `--bind-address` flag.
Examples of those components:
@@ -202,10 +202,23 @@ Here is an example:
--allow-label-value number_count_metric,odd_number='1,3,5', number_count_metric,even_number='2,4,6', date_gauge_metric,weekend='Saturday,Sunday'
```
+In addition to specifying this from the CLI, this can also be done within a configuration file. You
+can specify the path to that configuration file using the `--allow-metric-labels-manifest` command
+line argument to a component. Here's an example of the contents of that configuration file:
+
+```yaml
+allow-list:
+- "metric1,label2": "v1,v2,v3"
+- "metric2,label1": "v1,v2,v3"
+```
+
+Additionally, the `cardinality_enforcement_unexpected_categorizations_total` meta-metric records the
+count of unexpected categorizations during cardinality enforcement, that is, whenever a label value
+is encountered that is not allowed with respect to the allow-list constraints.
+
## {{% heading "whatsnext" %}}
* Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics
* See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml)
-* Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior)
-
+* Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior)
\ No newline at end of file
diff --git a/content/en/docs/concepts/configuration/manage-resources-containers.md b/content/en/docs/concepts/configuration/manage-resources-containers.md
index f5ec08df7001c..d5c4134fd31df 100644
--- a/content/en/docs/concepts/configuration/manage-resources-containers.md
+++ b/content/en/docs/concepts/configuration/manage-resources-containers.md
@@ -116,8 +116,13 @@ runs on a single-core, dual-core, or 48-core machine.
{{< note >}}
Kubernetes doesn't allow you to specify CPU resources with a precision finer than
-`1m`. Because of this, it's useful to specify CPU units less than `1.0` or `1000m` using
-the milliCPU form; for example, `5m` rather than `0.005`.
+`1m` or `0.001` CPU. To avoid accidentally using an invalid CPU quantity, it's useful to specify CPU units using the milliCPU form
+instead of the decimal form when using less than 1 CPU unit.
+
+For example, suppose you have a Pod that uses `5m` or `0.005` CPU and would like to decrease
+its CPU resources. By using the decimal form, it's harder to spot that `0.0005` CPU
+is an invalid value, while by using the milliCPU form, it's easier to spot that
+`0.5m` is an invalid value.
{{< /note >}}
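As an illustrative sketch of a container spec fragment (all values here are arbitrary examples):

```yaml
resources:
  requests:
    cpu: 5m       # the milliCPU form; equivalent to 0.005
    memory: 64Mi
  limits:
    cpu: 100m     # equivalent to 0.1, but typos are easier to spot
    memory: 128Mi
```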
### Memory resource units {#meaning-of-memory}
@@ -571,7 +576,7 @@ Cluster-level extended resources are not tied to nodes. They are usually managed
by scheduler extenders, which handle the resource consumption and resource quota.
You can specify the extended resources that are handled by scheduler extenders
-in [scheduler configuration](/docs/reference/config-api/kube-scheduler-config.v1beta3/)
+in [scheduler configuration](/docs/reference/config-api/kube-scheduler-config.v1/)
**Example:**
@@ -817,6 +822,6 @@ memory limit (and possibly request) for that container.
* Read how the API reference defines a [container](/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container)
  and its [resource requirements](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)
* Read about [project quotas](https://www.linux.org/docs/man8/xfs_quota.html) in XFS
-* Read more about the [kube-scheduler configuration reference (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/)
+* Read more about the [kube-scheduler configuration reference (v1)](/docs/reference/config-api/kube-scheduler-config.v1/)
* Read more about [Quality of Service classes for Pods](/docs/concepts/workloads/pods/pod-qos/)
diff --git a/content/en/docs/concepts/configuration/secret.md b/content/en/docs/concepts/configuration/secret.md
index d546ec12e4964..19389f6c6fd18 100644
--- a/content/en/docs/concepts/configuration/secret.md
+++ b/content/en/docs/concepts/configuration/secret.md
@@ -6,8 +6,8 @@ content_type: concept
feature:
  title: Secret and configuration management
  description: >
-    Deploy and update secrets and application configuration without rebuilding your image
-    and without exposing secrets in your stack configuration.
+    Deploy and update Secrets and application configuration without rebuilding your image
+    and without exposing Secrets in your stack configuration.
weight: 30
---
@@ -24,7 +24,7 @@ Because Secrets can be created independently of the Pods that use them,
there is less risk of the Secret (and its data) being exposed during the
workflow of creating, viewing, and editing Pods. Kubernetes, and applications
that run in your cluster, can also take additional precautions with Secrets,
such as avoiding
-writing secret data to nonvolatile storage.
+writing sensitive data to nonvolatile storage.
Secrets are similar to {{< glossary_tooltip text="ConfigMaps" term_id="configmap" >}}
but are specifically intended to hold confidential data.
@@ -68,7 +68,7 @@ help automate node registration.
### Use case: dotfiles in a secret volume
You can make your data "hidden" by defining a key that begins with a dot.
-This key represents a dotfile or "hidden" file. For example, when the following secret
+This key represents a dotfile or "hidden" file. For example, when the following Secret
is mounted into a volume, `secret-volume`, the volume will contain a single file,
called `.secret-file`, and the `dotfile-test-container` will have this file
present at the path `/etc/secret-volume/.secret-file`.
@@ -78,35 +78,7 @@ Files beginning with dot characters are hidden from the output of `ls -l`;
you must use `ls -la` to see them when listing directory contents.
{{< /note >}} -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: dotfile-secret -data: - .secret-file: dmFsdWUtMg0KDQo= ---- -apiVersion: v1 -kind: Pod -metadata: - name: secret-dotfiles-pod -spec: - volumes: - - name: secret-volume - secret: - secretName: dotfile-secret - containers: - - name: dotfile-test-container - image: registry.k8s.io/busybox - command: - - ls - - "-l" - - "/etc/secret-volume" - volumeMounts: - - name: secret-volume - readOnly: true - mountPath: "/etc/secret-volume" -``` +{{% code language="yaml" file="secret/dotfile-secret.yaml" %}} ### Use case: Secret visible to one container in a Pod @@ -135,8 +107,8 @@ Here are some of your options: [ServiceAccount](/docs/reference/access-authn-authz/authentication/#service-account-tokens) and its tokens to identify your client. - There are third-party tools that you can run, either within or outside your cluster, - that provide secrets management. For example, a service that Pods access over HTTPS, - that reveals a secret if the client correctly authenticates (for example, with a ServiceAccount + that manage sensitive data. For example, a service that Pods access over HTTPS, + that reveals a Secret if the client correctly authenticates (for example, with a ServiceAccount token). - For authentication, you can implement a custom signer for X.509 certificates, and use [CertificateSigningRequests](/docs/reference/access-authn-authz/certificate-signing-requests/) @@ -251,18 +223,7 @@ fills in some other fields such as the `kubernetes.io/service-account.uid` annot The following example configuration declares a ServiceAccount token Secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: secret-sa-sample - annotations: - kubernetes.io/service-account.name: "sa-name" -type: kubernetes.io/service-account-token -data: - # You can include additional key value pairs as you do with Opaque Secrets - extra: YmFyCg== -``` +{{% code language="yaml" file="secret/serviceaccount-token-secret.yaml" %}} After creating the Secret, wait for Kubernetes to populate the `token` key in the `data` field. @@ -290,16 +251,7 @@ you must use one of the following `type` values for that Secret: Below is an example for a `kubernetes.io/dockercfg` type of Secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: secret-dockercfg -type: kubernetes.io/dockercfg -data: - .dockercfg: | - "" -``` +{{% code language="yaml" file="secret/dockercfg-secret.yaml" %}} {{< note >}} If you do not want to perform the base64 encoding, you can choose to use the @@ -369,16 +321,11 @@ Secret manifest. The following manifest is an example of a basic authentication Secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: secret-basic-auth -type: kubernetes.io/basic-auth -stringData: - username: admin # required field for kubernetes.io/basic-auth - password: t0p-Secret # required field for kubernetes.io/basic-auth -``` +{{% code language="yaml" file="secret/basicauth-secret.yaml" %}} + +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} The basic authentication Secret type is provided only for convenience. You can create an `Opaque` type for credentials used for basic authentication. @@ -397,17 +344,7 @@ as the SSH credential to use. 
The following manifest is an example of a Secret used for SSH public/private key authentication: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: secret-ssh-auth -type: kubernetes.io/ssh-auth -data: - # the data is abbreviated in this example - ssh-privatekey: | - MIIEpQIBAAKCAQEAulqb/Y ... -``` +{{% code language="yaml" file="secret/ssh-auth-secret.yaml" %}} The SSH authentication Secret type is provided only for convenience. You can create an `Opaque` type for credentials used for SSH authentication. @@ -440,25 +377,11 @@ the base64 encoded certificate and private key. For details, see The following YAML contains an example config for a TLS Secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: secret-tls -type: kubernetes.io/tls -stringData: - # the data is abbreviated in this example - tls.crt: | - --------BEGIN CERTIFICATE----- - MIIC2DCCAcCgAwIBAgIBATANBgkqh ... - tls.key: | - -----BEGIN RSA PRIVATE KEY----- - MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ ... -``` +{{% code language="yaml" file="secret/tls-auth-secret.yaml" %}} The TLS Secret type is provided only for convenience. You can create an `Opaque` type for credentials used for TLS authentication. -However, using the defined and public Secret type (`kubernetes.io/ssh-auth`) +However, using the defined and public Secret type (`kubernetes.io/tls`) helps ensure the consistency of Secret format in your project. The API server verifies if the required keys are set for a Secret of this type. @@ -486,26 +409,12 @@ string of the token ID. As a Kubernetes manifest, a bootstrap token Secret might look like the following: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: bootstrap-token-5emitj - namespace: kube-system -type: bootstrap.kubernetes.io/token -data: - auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4= - expiration: MjAyMC0wOS0xM1QwNDozOToxMFo= - token-id: NWVtaXRq - token-secret: a3E0Z2lodnN6emduMXAwcg== - usage-bootstrap-authentication: dHJ1ZQ== - usage-bootstrap-signing: dHJ1ZQ== -``` +{{% code language="yaml" file="secret/bootstrap-token-secret-base64.yaml" %}} A bootstrap token Secret has the following keys specified under `data`: - `token-id`: A random 6 character string as the token identifier. Required. -- `token-secret`: A random 16 character string as the actual token secret. Required. +- `token-secret`: A random 16 character string as the actual token Secret. Required. - `description`: A human-readable string that describes what the token is used for. Optional. 
- `expiration`: An absolute UTC time using [RFC3339](https://datatracker.ietf.org/doc/html/rfc3339) specifying when the token @@ -518,26 +427,11 @@ A bootstrap token Secret has the following keys specified under `data`: You can alternatively provide the values in the `stringData` field of the Secret without base64 encoding them: -```yaml -apiVersion: v1 -kind: Secret -metadata: - # Note how the Secret is named - name: bootstrap-token-5emitj - # A bootstrap token Secret usually resides in the kube-system namespace - namespace: kube-system -type: bootstrap.kubernetes.io/token -stringData: - auth-extra-groups: "system:bootstrappers:kubeadm:default-node-token" - expiration: "2020-09-13T04:39:10Z" - # This token ID is used in the name - token-id: "5emitj" - token-secret: "kq4gihvszzgn1p0r" - # This token can be used for authentication - usage-bootstrap-authentication: "true" - # and it can be used for signing - usage-bootstrap-signing: "true" -``` +{{% code language="yaml" file="secret/bootstrap-token-secret-literal.yaml" %}} + +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} ## Working with Secrets @@ -568,9 +462,9 @@ precedence. #### Size limit {#restriction-data-size} -Individual secrets are limited to 1MiB in size. This is to discourage creation -of very large secrets that could exhaust the API server and kubelet memory. -However, creation of many smaller secrets could also exhaust memory. You can +Individual Secrets are limited to 1MiB in size. This is to discourage creation +of very large Secrets that could exhaust the API server and kubelet memory. +However, creation of many smaller Secrets could also exhaust memory. You can use a [resource quota](/docs/concepts/policy/resource-quotas/) to limit the number of Secrets (or other resources) in a namespace. @@ -613,25 +507,7 @@ When you reference a Secret in a Pod, you can mark the Secret as _optional_, such as in the following example. If an optional Secret doesn't exist, Kubernetes ignores it. -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: mypod -spec: - containers: - - name: mypod - image: redis - volumeMounts: - - name: foo - mountPath: "/etc/foo" - readOnly: true - volumes: - - name: foo - secret: - secretName: mysecret - optional: true -``` +{{% code language="yaml" file="secret/optional-secret.yaml" %}} By default, Secrets are required. None of a Pod's containers will start until all non-optional Secrets are available. @@ -708,17 +584,17 @@ LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT 0s 0s 1 dapi-test-pod Pod Warning InvalidEnvironmentVariableNames kubelet, 127.0.0.1 Keys [1badkey, 2alsobad] from the EnvFrom secret default/mysecret were skipped since they are considered invalid environment variable names. ``` -### Container image pull secrets {#using-imagepullsecrets} +### Container image pull Secrets {#using-imagepullsecrets} If you want to fetch container images from a private repository, you need a way for the kubelet on each node to authenticate to that repository. You can configure -_image pull secrets_ to make this possible. These secrets are configured at the Pod +_image pull Secrets_ to make this possible. These Secrets are configured at the Pod level. #### Using imagePullSecrets -The `imagePullSecrets` field is a list of references to secrets in the same namespace. -You can use an `imagePullSecrets` to pass a secret that contains a Docker (or other) image registry +The `imagePullSecrets` field is a list of references to Secrets in the same namespace. 
+You can use `imagePullSecrets` to pass a Secret that contains a Docker (or other) image registry
password to the kubelet. The kubelet uses this information to pull a private image on behalf of your Pod.
See the [PodSpec API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core)
for more information about the `imagePullSecrets` field.
@@ -787,7 +663,7 @@ Secrets it expects to interact with, other apps within the same namespace can
render those assumptions invalid.
A Secret is only sent to a node if a Pod on that node requires it.
-For mounting secrets into Pods, the kubelet stores a copy of the data into a `tmpfs`
+For mounting Secrets into Pods, the kubelet stores a copy of the data into a `tmpfs`
so that the confidential data is not written to durable storage.
Once the Pod that depends on the Secret is deleted, the kubelet deletes its local copy
of the confidential data from the Secret.
@@ -801,6 +677,13 @@ There may be Secrets for several Pods on the same node. However, only the
Secrets that a Pod requests are potentially visible within its containers.
Therefore, one Pod does not have access to the Secrets of another Pod.
+### Configure least-privilege access to Secrets
+
+To enhance the security measures around Secrets, Kubernetes provides a mechanism: you can
+annotate a ServiceAccount as `kubernetes.io/enforce-mountable-secrets: "true"`.
+
+For more information, you can refer to the [documentation about this annotation](/docs/concepts/security/service-accounts/#enforce-mountable-secrets).
+
{{< warning >}}
Any containers that run with `privileged: true` on a node can access all
Secrets used on that node.
diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md
index 8e1cd2eb59690..038121a5d0049 100644
--- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md
+++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md
@@ -55,12 +55,15 @@ There are two types of hook handlers that can be implemented for Containers:
* Exec - Executes a specific command, such as `pre-stop.sh`, inside the cgroups and namespaces of the Container.
  Resources consumed by the command are counted against the Container.
* HTTP - Executes an HTTP request against a specific endpoint on the Container.
+* Sleep - Pauses the container for a specified duration.
+  The "Sleep" action is available when the [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+  `PodLifecycleSleepAction` is enabled.
### Hook handler execution
When a Container lifecycle management hook is called,
the Kubernetes management system executes the handler according to the hook action,
-`httpGet` and `tcpSocket` are executed by the kubelet process, and `exec` is executed in the container.
+`httpGet`, `tcpSocket`, and `sleep` are executed by the kubelet process, and `exec` is executed in the container.
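For example, a `preStop` hook that uses the new sleep action might be declared like the following sketch (the Pod name and image are illustrative, and the `PodLifecycleSleepAction` feature gate must be enabled):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sleep-hook-demo        # hypothetical name
spec:
  containers:
  - name: app
    image: nginx               # illustrative image
    lifecycle:
      preStop:
        sleep:
          seconds: 5           # pause for 5 seconds before the container receives the stop signal
```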
Hook handler calls are synchronous within the context of the Pod containing the Container.
This means that for a `PostStart` hook,
diff --git a/content/en/docs/concepts/containers/images.md b/content/en/docs/concepts/containers/images.md
index b01b2fd112eef..9b36a6b72803d 100644
--- a/content/en/docs/concepts/containers/images.md
+++ b/content/en/docs/concepts/containers/images.md
@@ -159,6 +159,17 @@ that Kubernetes will keep trying to pull the image, with an increasing back-off
Kubernetes raises the delay between each attempt until it reaches a compiled-in limit,
which is 300 seconds (5 minutes).
+### Image pull per runtime class
+
+{{< feature-state for_k8s_version="v1.29" state="alpha" >}}
+Kubernetes includes alpha support for performing image pulls based on the RuntimeClass of a Pod.
+
+If you enable the `RuntimeClassInImageCriApi` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/),
+the kubelet references container images by a tuple of (image name, runtime handler) rather than just the
+image name or digest. Your {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}
+may adapt its behavior based on the selected runtime handler.
+Pulling images based on runtime class is helpful for VM-based containers, such as Windows Hyper-V containers.
+
## Serial and parallel image pulls
By default, kubelet pulls images serially. In other words, kubelet sends only
@@ -265,38 +276,26 @@ See [Configure a kubelet image credential provider](/docs/tasks/administer-clust
The interpretation of `config.json` varies between the original Docker
implementation and the Kubernetes interpretation. In Docker, the `auths` keys
can only specify root URLs, whereas Kubernetes allows glob URLs as well as
-prefix-matched paths. This means that a `config.json` like this is valid:
+prefix-matched paths. The only limitation is that glob patterns (`*`) have to
+include the dot (`.`) for each subdomain. The number of matched subdomains has
+to be equal to the number of glob patterns (`*.`); for example:
+
+- `*.kubernetes.io` will *not* match `kubernetes.io`, but `abc.kubernetes.io`
+- `*.*.kubernetes.io` will *not* match `abc.kubernetes.io`, but `abc.def.kubernetes.io`
+- `prefix.*.io` will match `prefix.kubernetes.io`
+- `*-good.kubernetes.io` will match `prefix-good.kubernetes.io`
+
+This means that a `config.json` like this is valid:
```json
{
    "auths": {
-        "*my-registry.io/images": {
-            "auth": "…"
-        }
+        "my-registry.io/images": { "auth": "…" },
+        "*.my-registry.io/images": { "auth": "…" }
    }
}
```
-The root URL (`*my-registry.io`) is matched by using the following syntax:
-
-```
-pattern:
-    { term }
-
-term:
-    '*'         matches any sequence of non-Separator characters
-    '?'         matches any single non-Separator character
-    '[' [ '^' ] { character-range } ']'
-                character class (must be non-empty)
-    c           matches character c (c != '*', '?', '\\', '[')
-    '\\' c      matches character c
-
-character-range:
-    c           matches character c (c != '\\', '-', ']')
-    '\\' c      matches character c
-    lo '-' hi   matches character c for lo <= c <= hi
-```
-
Image pull operations would now pass the credentials to the CRI container
runtime for every valid pattern. For example the following container image names
would match successfully:
@@ -305,10 +304,14 @@ would match successfully:
- `my-registry.io/images/my-image`
- `my-registry.io/images/another-image`
- `sub.my-registry.io/images/my-image`
+
+But not:
+
- `a.sub.my-registry.io/images/my-image`
+- `a.b.sub.my-registry.io/images/my-image`
The kubelet performs image pulls sequentially for every found credential.
This
-means, that multiple entries in `config.json` are possible, too:
+means that multiple entries in `config.json` for different paths are possible, too:
```json
{
diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
index 4770e2c3be156..8dd955cdad965 100644
--- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
+++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md
@@ -159,8 +159,8 @@ The general workflow of a device plugin includes the following steps:
  {{< note >}}
  The processing of the fully-qualified CDI device names by the Device Manager requires
  that the `DevicePluginCDIDevices` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
-  is enabled for the kubelet and the kube-apiserver. This was added as an alpha feature in Kubernetes
-  v1.28.
+  is enabled for both the kubelet and the kube-apiserver. This was added as an alpha feature in Kubernetes
+  v1.28 and graduated to beta in v1.29.
  {{< /note >}}
### Handling kubelet restarts
diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
index 5c6cfa7fc5842..0487ca61ca29d 100644
--- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
+++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md
@@ -172,3 +172,7 @@ metadata:
## {{% heading "whatsnext" %}}
+- Learn more about [Cluster Networking](/docs/concepts/cluster-administration/networking/)
+- Learn more about [Network Policies](/docs/concepts/services-networking/network-policies/)
+- Learn about [Troubleshooting CNI plugin-related errors](/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors/)
+
diff --git a/content/en/docs/concepts/extend-kubernetes/flowchart.svg b/content/en/docs/concepts/extend-kubernetes/flowchart.svg
index b6044c4f49e91..5ee7b6b559034 100644
--- a/content/en/docs/concepts/extend-kubernetes/flowchart.svg
+++ b/content/en/docs/concepts/extend-kubernetes/flowchart.svg
@@ -1,4 +1,4 @@
-
YES
YES
Go to "API Extensions"
Go to "API Extensions"
Do you want to add entirely new types to the Kubernetes API?
Do you want to add...
NO
NO
Do you want to restrict or automatically edit fields in some or all API types?
Do you want to restrict or...
YES
YES
Go to "API Access Extensions"
Go to "API Access Extensions"
NO
NO
Do you want to change the underlying implementation of the built-in API types?
Do you want to change the unde...
YES
YES
NO
NO
NO
NO
YES
YES
Do you want ot change Volumes, Services, Ingresses, PersistentVolumes?
Do you want ot change Volumes, S...
Go to "Infrastructure"
Go to "Infrastructure"
Go to "Automation"
Go to "Automation"
Text is not SVG - cannot display
\ No newline at end of file +
YES
YES
Go to "API Extensions"
Go to "API Extensions"
Do you want to add entirely new types to the Kubernetes API?
Do you want to add...
NO
NO
Do you want to restrict or automatically edit fields in some or all API types?
Do you want to restrict or...
YES
YES
Go to "API Access Extensions"
Go to "API Access Extensions"
NO
NO
Do you want to change the underlying implementation of the built-in API types?
Do you want to change the unde...
YES
YES
NO
NO
NO
NO
YES
YES
Do you want to change Volumes, Services, Ingresses, PersistentVolumes?
Do you want to change Volumes, S...
Go to "Infrastructure"
Go to "Infrastructure"
Go to "Automation"
Go to "Automation"
Text is not SVG - cannot display
diff --git a/content/en/docs/concepts/extend-kubernetes/operator.md b/content/en/docs/concepts/extend-kubernetes/operator.md index 69b13915603a3..16cbf37ecf275 100644 --- a/content/en/docs/concepts/extend-kubernetes/operator.md +++ b/content/en/docs/concepts/extend-kubernetes/operator.md @@ -129,7 +129,7 @@ operator. * Read the {{< glossary_tooltip text="CNCF" term_id="cncf" >}} - [Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md). + [Operator White Paper](https://github.com/cncf/tag-app-delivery/blob/163962c4b1cd70d085107fc579e3e04c2e14d59c/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md). * Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Find ready-made operators on [OperatorHub.io](https://operatorhub.io/) to suit your use case * [Publish](https://operatorhub.io/) your operator for other people to use diff --git a/content/en/docs/concepts/overview/_index.md b/content/en/docs/concepts/overview/_index.md index 200b3e2ea337e..12c150c6cafa8 100644 --- a/content/en/docs/concepts/overview/_index.md +++ b/content/en/docs/concepts/overview/_index.md @@ -129,6 +129,14 @@ Kubernetes provides you with: Kubernetes lets you store and manage sensitive information, such as passwords, OAuth tokens, and SSH keys. You can deploy and update secrets and application configuration without rebuilding your container images, and without exposing secrets in your stack configuration. +* **Batch execution** + In addition to services, Kubernetes can manage your batch and CI workloads, replacing containers that fail, if desired. +* **Horizontal scaling** + Scale your application up and down with a simple command, with a UI, or automatically based on CPU usage. +* **IPv4/IPv6 dual-stack** + Allocation of IPv4 and IPv6 addresses to Pods and Services +* **Designed for extensibility** + Add features to your Kubernetes cluster without changing upstream source code. ## What Kubernetes is not diff --git a/content/en/docs/concepts/overview/working-with-objects/annotations.md b/content/en/docs/concepts/overview/working-with-objects/annotations.md index e58db8139dba9..0c5058693f979 100644 --- a/content/en/docs/concepts/overview/working-with-objects/annotations.md +++ b/content/en/docs/concepts/overview/working-with-objects/annotations.md @@ -17,7 +17,8 @@ objects. Labels can be used to select objects and to find collections of objects that satisfy certain conditions. In contrast, annotations are not used to identify and select objects. The metadata in an annotation can be small or large, structured or unstructured, and can -include characters not permitted by labels. +include characters not permitted by labels. It is possible to use labels as +well as annotations in the metadata of the same object. 
Annotations, like labels, are key/value maps: diff --git a/content/en/docs/concepts/overview/working-with-objects/names.md b/content/en/docs/concepts/overview/working-with-objects/names.md index 48655caf46c2e..f8bcf56335667 100644 --- a/content/en/docs/concepts/overview/working-with-objects/names.md +++ b/content/en/docs/concepts/overview/working-with-objects/names.md @@ -67,6 +67,13 @@ This means the name must: - start with an alphabetic character - end with an alphanumeric character +{{< note >}} +The only difference between the RFC 1035 and RFC 1123 +label standards is that RFC 1123 labels are allowed to +start with a digit, whereas RFC 1035 labels can start +with a lowercase alphabetic character only. +{{< /note >}} + ### Path Segment Names Some resource types require their names to be able to be safely encoded as a diff --git a/content/en/docs/concepts/policy/pid-limiting.md b/content/en/docs/concepts/policy/pid-limiting.md index 54e1b324f9d9b..a73c42c5b6098 100644 --- a/content/en/docs/concepts/policy/pid-limiting.md +++ b/content/en/docs/concepts/policy/pid-limiting.md @@ -97,7 +97,7 @@ Eviction signal value is calculated periodically and does NOT enforce the limit. PID limiting - per Pod and per Node sets the hard limit. Once the limit is hit, workload will start experiencing failures when trying to get a new PID. It may or may not lead to rescheduling of a Pod, -depending on how workload reacts on these failures and how liveleness and readiness +depending on how the workload reacts to these failures and how liveness and readiness probes are configured for the Pod. However, if limits were set correctly, you can guarantee that other Pods workload and system processes will not run out of PIDs when one Pod is misbehaving. diff --git a/content/en/docs/concepts/policy/resource-quotas.md b/content/en/docs/concepts/policy/resource-quotas.md index 11919ec3ebed3..d3a3a3966b691 100644 --- a/content/en/docs/concepts/policy/resource-quotas.md +++ b/content/en/docs/concepts/policy/resource-quotas.md @@ -465,7 +465,7 @@ from getting scheduled in a failure domain. Using this scope operators can prevent certain namespaces (`foo-ns` in the example below) from having pods that use cross-namespace pod affinity by creating a resource quota object in -that namespace with `CrossNamespaceAffinity` scope and hard limit of 0: +that namespace with `CrossNamespacePodAffinity` scope and a hard limit of 0: ```yaml apiVersion: v1 @@ -478,11 +478,12 @@ spec: pods: "0" scopeSelector: matchExpressions: - - scopeName: CrossNamespaceAffinity + - scopeName: CrossNamespacePodAffinity + operator: Exists ``` If operators want to disallow using `namespaces` and `namespaceSelector` by default, and -only allow it for specific namespaces, they could configure `CrossNamespaceAffinity` +only allow it for specific namespaces, they could configure `CrossNamespacePodAffinity` as a limited resource by setting the kube-apiserver flag --admission-control-config-file to the path of the following configuration file: @@ -497,12 +498,13 @@ plugins: limitedResources: - resource: pods matchScopes: - - scopeName: CrossNamespaceAffinity + - scopeName: CrossNamespacePodAffinity + operator: Exists ``` With the above configuration, pods can use `namespaces` and `namespaceSelector` in pod affinity only if the namespace where they are created have a resource quota object with
+`CrossNamespacePodAffinity` scope and a hard limit greater than or equal to the number of pods using those fields. ## Requests compared to Limits {#requests-vs-limits} diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 1f9cd85e9e2a5..3e8a6a14073a9 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -35,8 +35,10 @@ specific Pods: ## Node labels {#built-in-node-labels} Like many other Kubernetes objects, nodes have -[labels](/docs/concepts/overview/working-with-objects/labels/). You can [attach labels manually](/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node). -Kubernetes also populates a [standard set of labels](/docs/reference/node/node-labels/) on all nodes in a cluster. +[labels](/docs/concepts/overview/working-with-objects/labels/). You can +[attach labels manually](/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node). +Kubernetes also populates a [standard set of labels](/docs/reference/node/node-labels/) +on all nodes in a cluster. {{< note >}} The value of these labels is cloud provider specific and is not guaranteed to be reliable. @@ -303,17 +305,23 @@ Pod affinity rule uses the "hard" `requiredDuringSchedulingIgnoredDuringExecution`, while the anti-affinity rule uses the "soft" `preferredDuringSchedulingIgnoredDuringExecution`. -The affinity rule says that the scheduler can only schedule a Pod onto a node if -the node is in the same zone as one or more existing Pods with the label -`security=S1`. More precisely, the scheduler must place the Pod on a node that has the -`topology.kubernetes.io/zone=V` label, as long as there is at least one node in -that zone that currently has one or more Pods with the Pod label `security=S1`. - -The anti-affinity rule says that the scheduler should try to avoid scheduling -the Pod onto a node that is in the same zone as one or more Pods with the label -`security=S2`. More precisely, the scheduler should try to avoid placing the Pod on a node that has the -`topology.kubernetes.io/zone=R` label if there are other nodes in the -same zone currently running Pods with the `Security=S2` Pod label. +The affinity rule specifies that the scheduler is allowed to place the example Pod +on a node only if that node belongs to a specific [zone](/docs/concepts/scheduling-eviction/topology-spread-constraints/) +where other Pods have been labeled with `security=S1`. +For instance, if we have a cluster with a designated zone, let's call it "Zone V," +consisting of nodes labeled with `topology.kubernetes.io/zone=V`, the scheduler can +assign the Pod to any node within Zone V, as long as there is at least one Pod within +Zone V already labeled with `security=S1`. Conversely, if there are no Pods with `security=S1` +labels in Zone V, the scheduler will not assign the example Pod to any node in that zone. + +The anti-affinity rule specifies that the scheduler should try to avoid scheduling the Pod +on a node if that node belongs to a specific [zone](/docs/concepts/scheduling-eviction/topology-spread-constraints/) +where other Pods have been labeled with `security=S2`.
+For instance, if we have a cluster with a designated zone, let's call it "Zone R," +consisting of nodes labeled with `topology.kubernetes.io/zone=R`, the scheduler should avoid +assigning the Pod to any node within Zone R, as long as there is at least one Pod within +Zone R already labeled with `security=S2`. Conversely, the anti-affinity rule does not impact +scheduling into Zone R if there are no Pods with `security=S2` labels. To get yourself more familiar with the examples of Pod affinity and anti-affinity, refer to the [design proposal](https://git.k8s.io/design-proposals-archive/scheduling/podaffinity.md). @@ -327,7 +335,8 @@ to learn more about how these work. In principle, the `topologyKey` can be any allowed label key with the following exceptions for performance and security reasons: -- For Pod affinity and anti-affinity, an empty `topologyKey` field is not allowed in both `requiredDuringSchedulingIgnoredDuringExecution` +- For Pod affinity and anti-affinity, an empty `topologyKey` field is not allowed in both + `requiredDuringSchedulingIgnoredDuringExecution` and `preferredDuringSchedulingIgnoredDuringExecution`. - For `requiredDuringSchedulingIgnoredDuringExecution` Pod anti-affinity rules, the admission controller `LimitPodHardAntiAffinityTopology` limits @@ -349,6 +358,108 @@ The affinity term is applied to namespaces selected by both `namespaceSelector` Note that an empty `namespaceSelector` ({}) matches all namespaces, while a null or empty `namespaces` list and null `namespaceSelector` matches the namespace of the Pod where the rule is defined. +#### matchLabelKeys + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + +{{< note >}} + +The `matchLabelKeys` field is an alpha-level field and is disabled by default in +Kubernetes {{< skew currentVersion >}}. +To use it, enable it via the +`MatchLabelKeysInPodAffinity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). +{{< /note >}} + +Kubernetes includes an optional `matchLabelKeys` field for Pod affinity +or anti-affinity. The field specifies keys for the labels that should match with the incoming Pod's labels, +when satisfying the Pod (anti)affinity. + +The keys are used to look up values from the pod labels; those key-value labels are combined +(using `AND`) with the match restrictions defined using the `labelSelector` field. The combined +filtering selects the set of existing pods that are taken into account in the Pod (anti)affinity calculation. + +A common use case is to use `matchLabelKeys` with `pod-template-hash` (set on Pods +managed as part of a Deployment, where the value is unique for each revision). +Using `pod-template-hash` in `matchLabelKeys` allows you to target the Pods that belong +to the same revision as the incoming Pod, so that a rolling upgrade won't break affinity. + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: application-server +... +spec: + template: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - database + topologyKey: topology.kubernetes.io/zone + # Only Pods from a given rollout are taken into consideration when calculating pod affinity.
+ # If you update the Deployment, the replacement Pods follow their own affinity rules + # (if there are any defined in the new Pod template) + matchLabelKeys: + - pod-template-hash +``` + +#### mismatchLabelKeys + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + +{{< note >}} + +The `mismatchLabelKeys` field is an alpha-level field and is disabled by default in +Kubernetes {{< skew currentVersion >}}. +To use it, enable it via the +`MatchLabelKeysInPodAffinity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). +{{< /note >}} + +Kubernetes includes an optional `mismatchLabelKeys` field for Pod affinity +or anti-affinity. The field specifies keys for the labels that should **not** match with the incoming Pod's labels, +when satisfying the Pod (anti)affinity. + +One example use case is to ensure Pods go to a topology domain (node, zone, etc.) where only Pods from the same tenant or team are scheduled. +In other words, you want to avoid running Pods from two different tenants on the same topology domain at the same time. + +```yaml +apiVersion: v1 +kind: Pod +metadata: + labels: + # Assume that all relevant Pods have a "tenant" label set + tenant: tenant-a +... +spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + # ensure that pods associated with this tenant land on the correct node pool + - matchLabelKeys: + - tenant + topologyKey: node-pool + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + # ensure that pods associated with this tenant can't schedule to nodes used for another tenant + - mismatchLabelKeys: + - tenant # whatever the value of the "tenant" label for this Pod, prevent + # scheduling to nodes in any pool where any Pod from a different + # tenant is running. + labelSelector: + # We have to have the labelSelector which selects only Pods with the tenant label, + # otherwise this Pod would hate Pods from daemonsets as well, for example, + # which aren't supposed to have the tenant label. + matchExpressions: + - key: tenant + operator: Exists + topologyKey: node-pool +``` + #### More practical use-cases Inter-pod affinity and anti-affinity can be even more useful when they are used with higher diff --git a/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md b/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md index f34f7a2c5adbc..47420240d94df 100644 --- a/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md +++ b/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md @@ -162,6 +162,17 @@ gets scheduled onto one node and then cannot run there, which is bad because such a pending Pod also blocks all other resources like RAM or CPU that were set aside for it. +{{< note >}} + +Scheduling of pods which use ResourceClaims is going to be slower because of +the additional communication that is required. Beware that this may also impact +pods that don't use ResourceClaims: only one pod at a time gets scheduled, +blocking API calls are made while handling a pod with ResourceClaims, and +thus scheduling the next pod gets delayed.
+ +{{< /note >}} + + ## Monitoring resources The kubelet provides a gRPC service to enable discovery of dynamic resources of diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index 6c78f52b8c851..4dc27d80b8f42 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -86,7 +86,7 @@ of the scheduler: * Read about [scheduler performance tuning](/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Read about [Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/) * Read the [reference documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for kube-scheduler -* Read the [kube-scheduler config (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) reference +* Read the [kube-scheduler config (v1)](/docs/reference/config-api/kube-scheduler-config.v1/) reference * Learn about [configuring multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) * Learn about [topology management policies](/docs/tasks/administer-cluster/topology-manager/) * Learn about [Pod Overhead](/docs/concepts/scheduling-eviction/pod-overhead/) diff --git a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md index 80367800153a6..1f3e84fb5e5f2 100644 --- a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md +++ b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md @@ -86,7 +86,8 @@ like `free -m`. This is important because `free -m` does not work in a container, and if users use the [node allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable) feature, out of resource decisions are made local to the end user Pod part of the cgroup hierarchy as well as the -root node. This [script](/examples/admin/resource/memory-available.sh) +root node. This [script](/examples/admin/resource/memory-available.sh) or +[cgroupv2 script](/examples/admin/resource/memory-available-cgroupv2.sh) reproduces the same set of steps that the kubelet performs to calculate `memory.available`. The kubelet excludes inactive_file (the number of bytes of file-backed memory on the inactive LRU list) from its calculation, as it assumes that @@ -105,13 +106,11 @@ does not support other configurations. 
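The eviction mechanics described above can be made concrete with a small configuration sketch. The following kubelet configuration sets a hard and a soft threshold on the `memory.available` signal; the threshold values and grace period are illustrative assumptions, not recommendations:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Hard thresholds: once crossed, the kubelet evicts pods immediately.
evictionHard:
  memory.available: "100Mi"
  nodefs.available: "10%"
# Soft thresholds: the kubelet waits out the grace period before evicting.
evictionSoft:
  memory.available: "300Mi"
evictionSoftGracePeriod:
  memory.available: "1m30s"
```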
Some kubelet garbage collection features are deprecated in favor of eviction: -| Existing Flag | New Flag | Rationale | -| ------------- | -------- | --------- | -| `--image-gc-high-threshold` | `--eviction-hard` or `--eviction-soft` | existing eviction signals can trigger image garbage collection | -| `--image-gc-low-threshold` | `--eviction-minimum-reclaim` | eviction reclaims achieve the same behavior | -| `--maximum-dead-containers` | - | deprecated once old logs are stored outside of container's context | -| `--maximum-dead-containers-per-container` | - | deprecated once old logs are stored outside of container's context | -| `--minimum-container-ttl-duration` | - | deprecated once old logs are stored outside of container's context | +| Existing Flag | Rationale | +| ------------- | --------- | +| `--maximum-dead-containers` | deprecated once old logs are stored outside of container's context | +| `--maximum-dead-containers-per-container` | deprecated once old logs are stored outside of container's context | +| `--minimum-container-ttl-duration` | deprecated once old logs are stored outside of container's context | ### Eviction thresholds diff --git a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md index 24b5032c07f5b..0b671ecbfcbe7 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md @@ -6,7 +6,7 @@ weight: 40 -{{< feature-state for_k8s_version="v1.26" state="alpha" >}} +{{< feature-state for_k8s_version="v1.27" state="beta" >}} Pods were considered ready for scheduling once created. Kubernetes scheduler does its due diligence to find nodes to place all pending Pods. However, in a diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index e275848b3486a..c666c14f215d3 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -23,7 +23,7 @@ To set the `MostAllocated` strategy for the `NodeResourcesFit` plugin, use a [scheduler configuration](/docs/reference/scheduling/config) similar to the following: ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta3 +apiVersion: kubescheduler.config.k8s.io/v1 kind: KubeSchedulerConfiguration profiles: - pluginConfig: @@ -43,7 +43,7 @@ profiles: ``` To learn more about other parameters and their default configuration, see the API documentation for -[`NodeResourcesFitArgs`](/docs/reference/config-api/kube-scheduler-config.v1beta3/#kubescheduler-config-k8s-io-v1beta3-NodeResourcesFitArgs). +[`NodeResourcesFitArgs`](/docs/reference/config-api/kube-scheduler-config.v1/#kubescheduler-config-k8s-io-v1-NodeResourcesFitArgs). ## Enabling bin packing using RequestedToCapacityRatio @@ -53,7 +53,7 @@ allows users to bin pack extended resources by using appropriate parameters to improve the utilization of scarce resources in large clusters. It favors nodes according to a configured function of the allocated resources. The behavior of the `RequestedToCapacityRatio` in the `NodeResourcesFit` score function can be controlled by the -[scoringStrategy](/docs/reference/config-api/kube-scheduler-config.v1beta3/#kubescheduler-config-k8s-io-v1beta3-ScoringStrategy) field. 
+[scoringStrategy](/docs/reference/config-api/kube-scheduler-config.v1/#kubescheduler-config-k8s-io-v1-ScoringStrategy) field. Within the `scoringStrategy` field, you can configure two parameters: `requestedToCapacityRatio` and `resources`. The `shape` in the `requestedToCapacityRatio` parameter allows the user to tune the function as least requested or most @@ -66,7 +66,7 @@ the bin packing behavior for extended resources `intel.com/foo` and `intel.com/b using the `requestedToCapacityRatio` field. ```yaml -apiVersion: kubescheduler.config.k8s.io/v1beta3 +apiVersion: kubescheduler.config.k8s.io/v1 kind: KubeSchedulerConfiguration profiles: - pluginConfig: @@ -92,7 +92,7 @@ flag `--config=/path/to/config/file` will pass the configuration to the scheduler. To learn more about other parameters and their default configuration, see the API documentation for -[`NodeResourcesFitArgs`](/docs/reference/config-api/kube-scheduler-config.v1beta3/#kubescheduler-config-k8s-io-v1beta3-NodeResourcesFitArgs). +[`NodeResourcesFitArgs`](/docs/reference/config-api/kube-scheduler-config.v1/#kubescheduler-config-k8s-io-v1-NodeResourcesFitArgs). ### Tuning the score function diff --git a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index bda0ec04aadd3..786b6ed984190 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -43,7 +43,7 @@ If you set `percentageOfNodesToScore` above 100, kube-scheduler acts as if you had set a value of 100. To change the value, edit the -[kube-scheduler configuration file](/docs/reference/config-api/kube-scheduler-config.v1beta3/) +[kube-scheduler configuration file](/docs/reference/config-api/kube-scheduler-config.v1/) and then restart the scheduler. In many cases, the configuration file can be found at `/etc/kubernetes/config/kube-scheduler.yaml`. @@ -161,5 +161,5 @@ After going over all the Nodes, it goes back to Node 1. ## {{% heading "whatsnext" %}} -* Check the [kube-scheduler configuration reference (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) +* Check the [kube-scheduler configuration reference (v1)](/docs/reference/config-api/kube-scheduler-config.v1/) diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index ea0d981055bd7..618f5586f68e5 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -10,28 +10,27 @@ weight: 60 {{< feature-state for_k8s_version="v1.19" state="stable" >}} -The scheduling framework is a pluggable architecture for the Kubernetes scheduler. -It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. The APIs allow most scheduling features to be implemented as plugins, while keeping the -scheduling "core" lightweight and maintainable. Refer to the [design proposal of the -scheduling framework][kep] for more technical information on the design of the -framework. +The _scheduling framework_ is a pluggable architecture for the Kubernetes scheduler. +It consists of a set of "plugin" APIs that are compiled directly into the scheduler. +These APIs allow most scheduling features to be implemented as plugins, +while keeping the scheduling "core" lightweight and maintainable. 
Refer to the +[design proposal of the scheduling framework][kep] for more technical information on +the design of the framework. [kep]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/624-scheduling-framework/README.md - - -# Framework workflow +## Framework workflow The Scheduling Framework defines a few extension points. Scheduler plugins register to be invoked at one or more extension points. Some of these plugins can change the scheduling decisions and some are informational only. -Each attempt to schedule one Pod is split into two phases, the **scheduling -cycle** and the **binding cycle**. +Each attempt to schedule one Pod is split into two phases, the +**scheduling cycle** and the **binding cycle**. -## Scheduling Cycle & Binding Cycle +### Scheduling cycle & binding cycle The scheduling cycle selects a node for the Pod, and the binding cycle applies that decision to the cluster. Together, a scheduling cycle and binding cycle are @@ -43,15 +42,17 @@ A scheduling or binding cycle can be aborted if the Pod is determined to be unschedulable or if there is an internal error. The Pod will be returned to the queue and retried. -## Extension points +## Interfaces -The following picture shows the scheduling context of a Pod and the extension -points that the scheduling framework exposes. In this picture "Filter" is -equivalent to "Predicate" and "Scoring" is equivalent to "Priority function". +The following picture shows the scheduling context of a Pod and the interfaces +that the scheduling framework exposes. -One plugin may register at multiple extension points to perform more complex or +One plugin may implement multiple interfaces to perform more complex or stateful tasks. +Some interfaces match the scheduler extension points which can be configured through +[Scheduler Configuration](/docs/reference/scheduling/config/#extension-points). + {{< figure src="/images/docs/scheduling-framework-extensions.png" title="Scheduling framework extension points" class="diagram-large">}} ### PreEnqueue {#pre-enqueue} @@ -65,6 +66,32 @@ Otherwise, it's placed in the internal unschedulable Pods list, and doesn't get For more details about how internal scheduler queues work, read [Scheduling queue in kube-scheduler](https://github.com/kubernetes/community/blob/f03b6d5692bd979f07dd472e7b6836b2dad0fd9b/contributors/devel/sig-scheduling/scheduler_queues.md). +### EnqueueExtension + +EnqueueExtension is the interface where the plugin can control +whether to retry scheduling of Pods rejected by this plugin, based on changes in the cluster. +Plugins that implement PreEnqueue, PreFilter, Filter, Reserve or Permit should implement this interface. + +### QueueingHint + +{{< feature-state for_k8s_version="v1.28" state="beta" >}} + +QueueingHint is a callback function for deciding whether a Pod can be requeued to the active queue or backoff queue. +It's executed every time a certain kind of event or change happens in the cluster. +When the QueueingHint finds that the event might make the Pod schedulable, +the Pod is put into the active queue or the backoff queue +so that the scheduler will retry the scheduling of the Pod. + +{{< note >}} +QueueingHint evaluation during scheduling is a beta-level feature. +The v1.28 release series initially enabled the associated feature gate; however, after the +discovery of an excessive memory footprint, the Kubernetes project set that feature gate +to be disabled by default. 
In Kubernetes {{< skew currentVersion >}}, this feature gate is +disabled and you need to enable it manually. +You can enable it via the +`SchedulerQueueingHints` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). +{{< /note >}} + ### QueueSort {#queue-sort} These plugins are used to sort Pods in the scheduling queue. A queue sort plugin @@ -148,7 +175,7 @@ NormalizeScore extension point. ### Reserve {#reserve} -A plugin that implements the Reserve extension has two methods, namely `Reserve` +A plugin that implements the Reserve interface has two methods, namely `Reserve` and `Unreserve`, that back two informational scheduling phases called Reserve and Unreserve, respectively. Plugins which maintain runtime state (aka "stateful plugins") should use these phases to be notified by the scheduler when resources @@ -194,9 +221,9 @@ the three things: {{< note >}} While any plugin can access the list of "waiting" Pods and approve them -(see [`FrameworkHandle`](https://git.k8s.io/enhancements/keps/sig-scheduling/624-scheduling-framework#frameworkhandle)), we expect only the permit -plugins to approve binding of reserved Pods that are in "waiting" state. Once a Pod -is approved, it is sent to the [PreBind](#pre-bind) phase. +(see [`FrameworkHandle`](https://git.k8s.io/enhancements/keps/sig-scheduling/624-scheduling-framework#frameworkhandle)), +we expect only the permit plugins to approve binding of reserved Pods that are in "waiting" state. +Once a Pod is approved, it is sent to the [PreBind](#pre-bind) phase. {{< /note >}} ### PreBind {#pre-bind} @@ -218,7 +245,7 @@ skipped**. ### PostBind {#post-bind} -This is an informational extension point. Post-bind plugins are called after a +This is an informational interface. Post-bind plugins are called after a Pod is successfully bound. This is the end of a binding cycle, and can be used to clean up associated resources. @@ -260,4 +287,3 @@ plugins and get them configured along with default plugins. You can visit If you are using Kubernetes v1.18 or later, you can configure a set of plugins as a scheduler profile and then define multiple profiles to fit various kinds of workload. Learn more at [multiple profiles](/docs/reference/scheduling/config/#multiple-profiles). - diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md index 1b44a1fab4ccb..c9afb795a11c2 100644 --- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -85,9 +85,27 @@ An empty `effect` matches all effects with key `key1`. {{< /note >}} The above example used `effect` of `NoSchedule`. Alternatively, you can use `effect` of `PreferNoSchedule`. -This is a "preference" or "soft" version of `NoSchedule` -- the system will *try* to avoid placing a -pod that does not tolerate the taint on the node, but it is not required. The third kind of `effect` is -`NoExecute`, described later. + + +The allowed values for the `effect` field are: + +`NoExecute` +: This affects pods that are already running on the node as follows: + * Pods that do not tolerate the taint are evicted immediately + * Pods that tolerate the taint without specifying `tolerationSeconds` in + their toleration specification remain bound forever + * Pods that tolerate the taint with a specified `tolerationSeconds` remain + bound for the specified amount of time. 
After that time elapses, the node + lifecycle controller evicts the Pods from the node. + +`NoSchedule` +: No new Pods will be scheduled on the tainted node unless they have a matching + toleration. Pods currently running on the node are **not** evicted. + +`PreferNoSchedule` +: `PreferNoSchedule` is a "preference" or "soft" version of `NoSchedule`. + The control plane will *try* to avoid placing a Pod that does not tolerate + the taint on the node, but it is not guaranteed. You can put multiple taints on the same node and multiple tolerations on the same pod. The way Kubernetes processes multiple taints and tolerations is like a filter: start @@ -194,14 +212,7 @@ when there are node problems, which is described in the next section. {{< feature-state for_k8s_version="v1.18" state="stable" >}} -The `NoExecute` taint effect, mentioned above, affects pods that are already -running on the node as follows - * pods that do not tolerate the taint are evicted immediately - * pods that tolerate the taint without specifying `tolerationSeconds` in - their toleration specification remain bound forever - * pods that tolerate the taint with a specified `tolerationSeconds` remain - bound for the specified amount of time The node controller automatically taints a Node when certain conditions are true. The following taints are built in: @@ -221,7 +232,9 @@ are true. The following taints are built in: this node, the kubelet removes this taint. In case a node is to be drained, the node controller or the kubelet adds relevant taints -with `NoExecute` effect. If the fault condition returns to normal the kubelet or node +with `NoExecute` effect. This effect is added by default for the +`node.kubernetes.io/not-ready` and `node.kubernetes.io/unreachable` taints. +If the fault condition returns to normal, the kubelet or node controller can remove the relevant taint(s). In some cases when the node is unreachable, the API server is unable to communicate diff --git a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md index 1c497d0fb565d..3cbc5b01acd8d 100644 --- a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md +++ b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md @@ -97,8 +97,11 @@ your cluster. Those fields are: nodes match the node selector. {{< note >}} - The `minDomains` field is a beta field and disabled by default in 1.25. You can enable it by enabling the - `MinDomainsInPodTopologySpread` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). + The `MinDomainsInPodTopologySpread` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) + enables `minDomains` for pod topology spread. Starting from v1.28, + the `MinDomainsInPodTopologySpread` gate + is enabled by default. In older Kubernetes clusters it might be explicitly + disabled or the field might not be available. {{< /note >}} - The value of `minDomains` must be greater than 0, when specified. @@ -525,13 +528,6 @@ profiles: whenUnsatisfiable: ScheduleAnyway defaultingType: List ``` - -{{< note >}} -The [`SelectorSpread` plugin](/docs/reference/scheduling/config/#scheduling-plugins) -is disabled by default. The Kubernetes project recommends using `PodTopologySpread` -to achieve similar behavior. 
-{{< /note >}} - ### Built-in default constraints {#internal-default-constraints} {{< feature-state for_k8s_version="v1.24" state="stable" >}} diff --git a/content/en/docs/concepts/security/multi-tenancy.md b/content/en/docs/concepts/security/multi-tenancy.md index 49355d08a6ac4..d6ddd1c60a8ec 100755 --- a/content/en/docs/concepts/security/multi-tenancy.md +++ b/content/en/docs/concepts/security/multi-tenancy.md @@ -438,7 +438,7 @@ The two options are discussed in more detail in the following sections. As previously mentioned, you should consider isolating each workload in its own namespace, even if you are using dedicated clusters or virtualized control planes. This ensures that each workload -only has access to its own resources, such as Config Maps and Secrets, and allows you to tailor +only has access to its own resources, such as ConfigMaps and Secrets, and allows you to tailor dedicated security policies for each workload. In addition, it is a best practice to give each namespace names that are unique across your entire fleet (that is, even if they are in separate clusters), as this gives you the flexibility to switch between dedicated and shared clusters in diff --git a/content/en/docs/concepts/security/pod-security-standards.md b/content/en/docs/concepts/security/pod-security-standards.md index 35c4952b60ed2..15b64e001a0fb 100644 --- a/content/en/docs/concepts/security/pod-security-standards.md +++ b/content/en/docs/concepts/security/pod-security-standards.md @@ -271,6 +271,7 @@ fail validation.
  • net.ipv4.ip_unprivileged_port_start
  • net.ipv4.tcp_syncookies
  • net.ipv4.ping_group_range
+  • net.ipv4.ip_local_reserved_ports (since Kubernetes 1.27)
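For orientation, sysctls on the allowlist above are requested per Pod through `securityContext.sysctls`. A minimal sketch, assuming a hypothetical Pod name and arbitrary values:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-example            # hypothetical name, for illustration only
spec:
  securityContext:
    sysctls:
    # let containers bind low ports without extra privileges
    - name: net.ipv4.ip_unprivileged_port_start
      value: "0"
    # reserve specific local ports (allowed since Kubernetes 1.27)
    - name: net.ipv4.ip_local_reserved_ports
      value: "1080"
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9
```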
@@ -485,6 +486,12 @@ Restrictions on the following controls are only required if `.spec.os.name` is n - Seccomp - Linux Capabilities +## User namespaces + +User Namespaces are a Linux-only feature to run workloads with increased +isolation. How they work together with Pod Security Standards is described in +the [documentation](/docs/concepts/workloads/pods/user-namespaces#integration-with-pod-security-admission-checks) for Pods that use user namespaces. + ## FAQ ### Why isn't there a profile between privileged and baseline? diff --git a/content/en/docs/concepts/security/secrets-good-practices.md b/content/en/docs/concepts/security/secrets-good-practices.md index f864a0ca9d328..3e18929f90f3b 100644 --- a/content/en/docs/concepts/security/secrets-good-practices.md +++ b/content/en/docs/concepts/security/secrets-good-practices.md @@ -62,6 +62,12 @@ recommendations include: * Implement audit rules that alert on specific events, such as concurrent reading of multiple Secrets by a single user +#### Additional ServiceAccount annotations for Secret management + +You can also use the `kubernetes.io/enforce-mountable-secrets` annotation on +a ServiceAccount to enforce specific rules on how Secrets are used in a Pod. +For more details, see the [documentation on this annotation](/docs/reference/labels-annotations-taints/#enforce-mountable-secrets). + ### Improve etcd management policies Consider wiping or shredding the durable storage used by `etcd` once it is diff --git a/content/en/docs/concepts/security/service-accounts.md b/content/en/docs/concepts/security/service-accounts.md index 365074cba9770..a7b3d54d76d33 100644 --- a/content/en/docs/concepts/security/service-accounts.md +++ b/content/en/docs/concepts/security/service-accounts.md @@ -196,6 +196,36 @@ or using a custom mechanism such as an [authentication webhook](/docs/reference/ You can also use TokenRequest to obtain short-lived tokens for your external application. {{< /note >}} +### Restricting access to Secrets {#enforce-mountable-secrets} + +Kubernetes provides an annotation called `kubernetes.io/enforce-mountable-secrets` +that you can add to your ServiceAccounts. When this annotation is applied, +the ServiceAccount's secrets can only be mounted on specified types of resources, +enhancing the security posture of your cluster. + +You can add the annotation to a ServiceAccount using a manifest: + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + kubernetes.io/enforce-mountable-secrets: "true" + name: my-serviceaccount + namespace: my-namespace +``` +When this annotation is set to "true", the Kubernetes control plane ensures that +the Secrets from this ServiceAccount are subject to certain mounting restrictions. + +1. The name of each Secret that is mounted as a volume in a Pod must appear in the `secrets` field of the + Pod's ServiceAccount. +1. The name of each Secret referenced using `envFrom` in a Pod must also appear in the `secrets` + field of the Pod's ServiceAccount. +1. The name of each Secret referenced using `imagePullSecrets` in a Pod must also appear in the `secrets` + field of the Pod's ServiceAccount. + +By understanding and enforcing these restrictions, cluster administrators can maintain a tighter security profile and ensure that secrets are accessed only by the appropriate resources. + ## Authenticating service account credentials {#authenticating-credentials} ServiceAccounts use signed @@ -217,7 +247,8 @@ request.
The API server checks the validity of that bearer token as follows: The TokenRequest API produces _bound tokens_ for a ServiceAccount. This binding is linked to the lifetime of the client, such as a Pod, that is acting -as that ServiceAccount. +as that ServiceAccount. See [Token Volume Projection](/docs/tasks/configure-pod-container/configure-service-account/#serviceaccount-token-volume-projection) +for an example of a bound pod service account token's JWT schema and payload. For tokens issued using the `TokenRequest` API, the API server also checks that the specific object reference that is using the ServiceAccount still exists, @@ -239,7 +270,7 @@ account credentials, you can use the following methods: The Kubernetes project recommends that you use the TokenReview API, because this method invalidates tokens that are bound to API objects such as Secrets, -ServiceAccounts, and Pods when those objects are deleted. For example, if you +ServiceAccounts, Pods or Nodes when those objects are deleted. For example, if you delete the Pod that contains a projected ServiceAccount token, the cluster invalidates that token immediately and a TokenReview immediately fails. If you use OIDC validation instead, your clients continue to treat the token diff --git a/content/en/docs/concepts/services-networking/_index.md b/content/en/docs/concepts/services-networking/_index.md index 25a59d536a71c..1f30d33e6f98f 100644 --- a/content/en/docs/concepts/services-networking/_index.md +++ b/content/en/docs/concepts/services-networking/_index.md @@ -54,6 +54,8 @@ Kubernetes networking addresses four concerns: to be reachable from outside your cluster. - [Ingress](/docs/concepts/services-networking/ingress/) provides extra functionality specifically for exposing HTTP applications, websites and APIs. + - [Gateway API](/docs/concepts/services-networking/gateway/) is an {{}} + that provides an expressive, extensible, and role-oriented family of API kinds for modeling service networking. - You can also use Services to [publish services only for consumption inside your cluster](/docs/concepts/services-networking/service-traffic-policy/). diff --git a/content/en/docs/concepts/services-networking/dns-pod-service.md b/content/en/docs/concepts/services-networking/dns-pod-service.md index 3be3d6e345305..29eb675abdf37 100644 --- a/content/en/docs/concepts/services-networking/dns-pod-service.md +++ b/content/en/docs/concepts/services-networking/dns-pod-service.md @@ -98,9 +98,9 @@ of the form `hostname.my-svc.my-namespace.svc.cluster-domain.example`. ### A/AAAA records -In general a Pod has the following DNS resolution: +Kube-DNS versions, prior to the implementation of the [DNS specification](https://github.com/kubernetes/dns/blob/master/docs/specification.md), had the following DNS resolution: -`pod-ip-address.my-namespace.pod.cluster-domain.example`. +`pod-ipv4-address.my-namespace.pod.cluster-domain.example`. For example, if a Pod in the `default` namespace has the IP address 172.17.0.3, and the domain name for your cluster is `cluster.local`, then the Pod has a DNS name: @@ -109,7 +109,7 @@ and the domain name for your cluster is `cluster.local`, then the Pod has a DNS Any Pods exposed by a Service have the following DNS resolution available: -`pod-ip-address.service-name.my-namespace.svc.cluster-domain.example`. +`pod-ipv4-address.service-name.my-namespace.svc.cluster-domain.example`. 
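As a sketch of the Service-scoped Pod records described above, consider a headless Service; the Service name, namespace, selector, and Pod IP here are illustrative assumptions:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-svc
  namespace: my-namespace
spec:
  clusterIP: None        # headless: DNS resolves directly to the Pod IPs
  selector:
    app: my-app
  ports:
  - port: 80
# A selected Pod with IP 172.17.0.3 would then be reachable as:
#   172-17-0-3.my-svc.my-namespace.svc.cluster.local
```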
### Pod's hostname and subdomain fields diff --git a/content/en/docs/concepts/services-networking/dual-stack.md b/content/en/docs/concepts/services-networking/dual-stack.md index bf3ccbe83207c..292204b9b53dc 100644 --- a/content/en/docs/concepts/services-networking/dual-stack.md +++ b/content/en/docs/concepts/services-networking/dual-stack.md @@ -65,12 +65,12 @@ To configure IPv4/IPv6 dual-stack, set dual-stack cluster network assignments: * kube-proxy: * `--cluster-cidr=,` * kubelet: - * when there is no `--cloud-provider` the administrator can pass a comma-separated pair of IP - addresses via `--node-ip` to manually configure dual-stack `.status.addresses` for that Node. - If a Pod runs on that node in HostNetwork mode, the Pod reports these IP addresses in its - `.status.podIPs` field. - All `podIPs` in a node match the IP family preference defined by the `.status.addresses` - field for that Node. + * `--node-ip=,` + * This option is required for bare metal dual-stack nodes (nodes that do not define a + cloud provider with the `--cloud-provider` flag). If you are using a cloud provider + and choose to override the node IPs chosen by the cloud provider, set the + `--node-ip` option. + * (The legacy built-in cloud providers do not support dual-stack `--node-ip`.) {{< note >}} An example of an IPv4 CIDR: `10.244.0.0/16` (though you would supply your own address range) @@ -79,13 +79,6 @@ An example of an IPv6 CIDR: `fdXY:IJKL:MNOP:15::/64` (this shows the format but address - see [RFC 4193](https://tools.ietf.org/html/rfc4193)) {{< /note >}} -{{< feature-state for_k8s_version="v1.27" state="alpha" >}} - -When using an external cloud provider, you can pass a dual-stack `--node-ip` value to -kubelet if you enable the `CloudDualStackNodeIPs` feature gate in both kubelet and the -external cloud provider. This is only supported for cloud providers that support dual -stack clusters. - ## Services You can create {{< glossary_tooltip text="Services" term_id="service" >}} which can use IPv4, IPv6, or both. diff --git a/content/en/docs/concepts/services-networking/gateway.md b/content/en/docs/concepts/services-networking/gateway.md new file mode 100644 index 0000000000000..dd54398659a40 --- /dev/null +++ b/content/en/docs/concepts/services-networking/gateway.md @@ -0,0 +1,206 @@ +--- +title: Gateway API +content_type: concept +description: >- + Gateway API is a family of API kinds that provide dynamic infrastructure provisioning + and advanced traffic routing. +weight: 55 +--- + + + +Make network services available by using an extensible, role-oriented, protocol-aware configuration +mechanism. [Gateway API](https://gateway-api.sigs.k8s.io/) is an {{}} +containing API [kinds](https://gateway-api.sigs.k8s.io/references/spec/) that provide dynamic infrastructure +provisioning and advanced traffic routing. + + + +## Design principles + +The following principles shaped the design and architecture of Gateway API: + +* __Role-oriented:__ Gateway API kinds are modeled after organizational roles that are + responsible for managing Kubernetes service networking: + * __Infrastructure Provider:__ Manages infrastructure that allows multiple isolated clusters + to serve multiple tenants, e.g. a cloud provider. + * __Cluster Operator:__ Manages clusters and is typically concerned with policies, network + access, application permissions, etc. 
+ * __Application Developer:__ Manages an application running in a cluster and is typically + concerned with application-level configuration and [Service](/docs/concepts/services-networking/service/) + composition. +* __Portable:__ Gateway API specifications are defined as [custom resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources) + and are supported by many [implementations](https://gateway-api.sigs.k8s.io/implementations/). +* __Expressive:__ Gateway API kinds support functionality for common traffic routing use cases + such as header-based matching, traffic weighting, and others that were only possible in + [Ingress](/docs/concepts/services-networking/ingress/) by using custom annotations. +* __Extensible:__ Gateway allows for custom resources to be linked at various layers of the API. + This makes granular customization possible at the appropriate places within the API structure. + +## Resource model + +Gateway API has three stable API kinds: + +* __GatewayClass:__ Defines a set of gateways with common configuration and managed by a controller + that implements the class. + +* __Gateway:__ Defines an instance of traffic handling infrastructure, such as cloud load balancer. + +* __HTTPRoute:__ Defines HTTP-specific rules for mapping traffic from a Gateway listener to a + representation of backend network endpoints. These endpoints are often represented as a + {{}}. + +Gateway API is organized into different API kinds that have interdependent relationships to support +the role-oriented nature of organizations. A Gateway object is associated with exactly one GatewayClass; +the GatewayClass describes the gateway controller responsible for managing Gateways of this class. +One or more route kinds such as HTTPRoute, are then associated to Gateways. A Gateway can filter the routes +that may be attached to its `listeners`, forming a bidirectional trust model with routes. + +The following figure illustrates the relationships of the three stable Gateway API kinds: + +{{< figure src="/docs/images/gateway-kind-relationships.svg" alt="A figure illustrating the relationships of the three stable Gateway API kinds" class="diagram-medium" >}} + +### GatewayClass {#api-kind-gateway-class} + +Gateways can be implemented by different controllers, often with different configurations. A Gateway +must reference a GatewayClass that contains the name of the controller that implements the +class. + +A minimal GatewayClass example: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: example-class +spec: + controllerName: example.com/gateway-controller +``` + +In this example, a controller that has implemented Gateway API is configured to manage GatewayClasses +with the controller name `example.com/gateway-controller`. Gateways of this class will be managed by +the implementation's controller. + +See the [GatewayClass](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.GatewayClass) +reference for a full definition of this API kind. + +### Gateway {#api-kind-gateway} + +A Gateway describes an instance of traffic handling infrastructure. It defines a network endpoint +that can be used for processing traffic, i.e. filtering, balancing, splitting, etc. for backends +such as a Service. For example, a Gateway may represent a cloud load balancer or an in-cluster proxy +server that is configured to accept HTTP traffic. 
+ +A minimal Gateway resource example: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: example-gateway +spec: + gatewayClassName: example-class + listeners: + - name: http + protocol: HTTP + port: 80 +``` + +In this example, an instance of traffic handling infrastructure is programmed to listen for HTTP +traffic on port 80. Since the `addresses` field is unspecified, an address or hostname is assigned +to the Gateway by the implementation's controller. This address is used as a network endpoint for +processing traffic of backend network endpoints defined in routes. + +See the [Gateway](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.Gateway) +reference for a full definition of this API kind. + +### HTTPRoute {#api-kind-httproute} + +The HTTPRoute kind specifies routing behavior of HTTP requests from a Gateway listener to backend network +endpoints. For a Service backend, an implementation may represent the backend network endpoint as a Service +IP or the backing Endpoints of the Service. An HTTPRoute represents configuration that is applied to the +underlying Gateway implementation. For example, defining a new HTTPRoute may result in configuring additional +traffic routes in a cloud load balancer or in-cluster proxy server. + +A minimal HTTPRoute example: + +```yaml +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: example-httproute +spec: + parentRefs: + - name: example-gateway + hostnames: + - "www.example.com" + rules: + - matches: + - path: + type: PathPrefix + value: /login + backendRefs: + - name: example-svc + port: 8080 +``` + +In this example, HTTP traffic from Gateway `example-gateway` with the Host: header set to `www.example.com` +and the request path specified as `/login` will be routed to Service `example-svc` on port `8080`. + +See the [HTTPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1.HTTPRoute) +reference for a full definition of this API kind. + +## Request flow + +Here is a simple example of HTTP traffic being routed to a Service by using a Gateway and an HTTPRoute: + +{{< figure src="/docs/images/gateway-request-flow.svg" alt="A diagram that provides an example of HTTP traffic being routed to a Service by using a Gateway and an HTTPRoute" class="diagram-medium" >}} + +In this example, the request flow for a Gateway implemented as a reverse proxy is: + +1. The client starts to prepare an HTTP request for the URL `http://www.example.com` +2. The client's DNS resolver queries for the destination name and learns a mapping to + one or more IP addresses associated with the Gateway. +3. The client sends a request to the Gateway IP address; the reverse proxy receives the HTTP + request and uses the Host: header to match a configuration that was derived from the Gateway + and attached HTTPRoute. +4. Optionally, the reverse proxy can perform request header and/or path matching based + on match rules of the HTTPRoute. +5. Optionally, the reverse proxy can modify the request; for example, to add or remove headers, + based on filter rules of the HTTPRoute. +6. Lastly, the reverse proxy forwards the request to one or more backends. + +## Conformance + +Gateway API covers a broad set of features and is widely implemented. This combination requires +clear conformance definitions and tests to ensure that the API provides a consistent experience +wherever it is used. 
+ +See the [conformance](https://gateway-api.sigs.k8s.io/concepts/conformance/) documentation to +understand details such as release channels, support levels, and running conformance tests. + +## Migrating from Ingress + +Gateway API is the successor to the [Ingress](/docs/concepts/services-networking/ingress/) API. +However, it does not include the Ingress kind. As a result, a one-time conversion from your existing +Ingress resources to Gateway API resources is necessary. + +Refer to the [ingress migration](https://gateway-api.sigs.k8s.io/guides/migrating-from-ingress/#migrating-from-ingress) +guide for details on migrating Ingress resources to Gateway API resources. + +## {{% heading "whatsnext" %}} + +Gateway API resources are not natively implemented by Kubernetes; instead, the specifications +are defined as [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources) +supported by a wide range of [implementations](https://gateway-api.sigs.k8s.io/implementations/). +[Install](https://gateway-api.sigs.k8s.io/guides/#installing-gateway-api) the Gateway API CRDs or +follow the installation instructions of your selected implementation. After installing an +implementation, use the [Getting Started](https://gateway-api.sigs.k8s.io/guides/) guide to help +you quickly start working with Gateway API. + +{{< note >}} +Make sure to review the documentation of your selected implementation to understand any caveats. +{{< /note >}} + +Refer to the [API specification](https://gateway-api.sigs.k8s.io/reference/spec/) for additional +details of all Gateway API kinds. diff --git a/content/en/docs/concepts/services-networking/ingress-controllers.md b/content/en/docs/concepts/services-networking/ingress-controllers.md index 924604e5dd8a7..1546e5e1a59a7 100644 --- a/content/en/docs/concepts/services-networking/ingress-controllers.md +++ b/content/en/docs/concepts/services-networking/ingress-controllers.md @@ -28,6 +28,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet {{% thirdparty-content %}} * [AKS Application Gateway Ingress Controller](https://docs.microsoft.com/azure/application-gateway/tutorial-ingress-controller-add-on-existing?toc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Faks%2Ftoc.json&bc=https%3A%2F%2Fdocs.microsoft.com%2Fen-us%2Fazure%2Fbread%2Ftoc.json) is an ingress controller that configures the [Azure Application Gateway](https://docs.microsoft.com/azure/application-gateway/overview). +* [Alibaba Cloud MSE Ingress](https://www.alibabacloud.com/help/en/mse/user-guide/overview-of-mse-ingress-gateways) is an ingress controller that configures the [Alibaba Cloud Native Gateway](https://www.alibabacloud.com/help/en/mse/product-overview/cloud-native-gateway-overview?spm=a2c63.p38356.0.0.20563003HJK9is), which is also the commercial version of [Higress](https://github.com/alibaba/higress). * [Apache APISIX ingress controller](https://github.com/apache/apisix-ingress-controller) is an [Apache APISIX](https://github.com/apache/apisix)-based ingress controller. * [Avi Kubernetes Operator](https://github.com/vmware/load-balancer-and-ingress-services-for-kubernetes) provides L4-L7 load-balancing using [VMware NSX Advanced Load Balancer](https://avinetworks.com/). * [BFE Ingress Controller](https://github.com/bfenetworks/ingress-bfe) is a [BFE](https://www.bfe-networks.net)-based ingress controller. @@ -46,6 +47,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet which offers API gateway functionality.
* [HAProxy Ingress](https://haproxy-ingress.github.io/) is an ingress controller for [HAProxy](https://www.haproxy.org/#desc). +* [Higress](https://github.com/alibaba/higress) is an [Envoy](https://www.envoyproxy.io) based API gateway that can run as an ingress controller. * The [HAProxy Ingress Controller for Kubernetes](https://github.com/haproxytech/kubernetes-ingress#readme) is also an ingress controller for [HAProxy](https://www.haproxy.org/#desc). * [Istio Ingress](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress/) @@ -62,7 +64,7 @@ Kubernetes as a project supports and maintains [AWS](https://github.com/kubernet * The [Traefik Kubernetes Ingress provider](https://doc.traefik.io/traefik/providers/kubernetes-ingress/) is an ingress controller for the [Traefik](https://traefik.io/traefik/) proxy. * [Tyk Operator](https://github.com/TykTechnologies/tyk-operator) extends Ingress with Custom Resources to bring API Management capabilities to Ingress. Tyk Operator works with the Open Source Tyk Gateway & Tyk Cloud control plane. -* [Voyager](https://appscode.com/products/voyager) is an ingress controller for +* [Voyager](https://voyagermesh.com) is an ingress controller for [HAProxy](https://www.haproxy.org/#desc). * [Wallarm Ingress Controller](https://www.wallarm.com/solutions/waf-for-kubernetes) is an Ingress Controller that provides WAAP (WAF) and API Security capabilities. diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 836373b3b8898..89888cb5d32af 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -15,6 +15,10 @@ weight: 30 {{< feature-state for_k8s_version="v1.19" state="stable" >}} {{< glossary_definition term_id="ingress" length="all" >}} +{{< note >}} +Ingress is frozen. New features are being added to the [Gateway API](/docs/concepts/services-networking/gateway/). +{{< /note >}} + ## Terminology @@ -84,7 +88,7 @@ is the [rewrite-target annotation](https://github.com/kubernetes/ingress-nginx/b Different [Ingress controllers](/docs/concepts/services-networking/ingress-controllers) support different annotations. Review the documentation for your choice of Ingress controller to learn which annotations are supported. -The Ingress [spec](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status) +The [Ingress spec](/docs/reference/kubernetes-api/service-resources/ingress-v1/#IngressSpec) has all the information needed to configure a load balancer or proxy server. Most importantly, it contains a list of rules matched against all incoming requests. Ingress resource only supports rules for directing HTTP(S) traffic. @@ -94,8 +98,8 @@ should be defined. There are some ingress controllers, that work without the definition of a default `IngressClass`. For example, the Ingress-NGINX controller can be -configured with a [flag](https://kubernetes.github.io/ingress-nginx/#what-is-the-flag-watch-ingress-without-class) -`--watch-ingress-without-class`. It is [recommended](https://kubernetes.github.io/ingress-nginx/#i-have-only-one-instance-of-the-ingresss-nginx-controller-in-my-cluster-what-should-i-do) though, to specify the +configured with a [flag](https://kubernetes.github.io/ingress-nginx/user-guide/k8s-122-migration/#what-is-the-flag-watch-ingress-without-class) +`--watch-ingress-without-class`. 
It is [recommended](https://kubernetes.github.io/ingress-nginx/user-guide/k8s-122-migration/#i-have-only-one-ingress-controller-in-my-cluster-what-should-i-do) though, to specify the default `IngressClass` as shown [below](#default-ingress-class). ### Ingress rules diff --git a/content/en/docs/concepts/services-networking/network-policies.md b/content/en/docs/concepts/services-networking/network-policies.md index 2eaad9b6a6649..90c0351388fe4 100644 --- a/content/en/docs/concepts/services-networking/network-policies.md +++ b/content/en/docs/concepts/services-networking/network-policies.md @@ -16,8 +16,8 @@ description: >- -If you want to control traffic flow at the IP address or port level (OSI layer 3 or 4), then you -might consider using Kubernetes NetworkPolicies for particular applications in your cluster. +If you want to control traffic flow at the IP address or port level for TCP, UDP, and SCTP protocols, +then you might consider using Kubernetes NetworkPolicies for particular applications in your cluster. NetworkPolicies are an application-centric construct which allow you to specify how a {{< glossary_tooltip text="pod" term_id="pod">}} is allowed to communicate with various network "entities" (we use the word "entity" here to avoid overloading the more common terms such as @@ -257,21 +257,23 @@ creating the following NetworkPolicy in that namespace. This ensures that even pods that aren't selected by any other NetworkPolicy will not be allowed ingress or egress traffic. -## SCTP support +## Network traffic filtering -{{< feature-state for_k8s_version="v1.20" state="stable" >}} - -As a stable feature, this is enabled by default. To disable SCTP at a cluster level, you (or your -cluster administrator) will need to disable the `SCTPSupport` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -for the API server with `--feature-gates=SCTPSupport=false,…`. -When the feature gate is enabled, you can set the `protocol` field of a NetworkPolicy to `SCTP`. +NetworkPolicy is defined for [layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_layer) +connections (TCP, UDP, and optionally SCTP). For all the other protocols, the behaviour may vary +across network plugins. {{< note >}} You must be using a {{< glossary_tooltip text="CNI" term_id="cni" >}} plugin that supports SCTP protocol NetworkPolicies. {{< /note >}} +When a `deny all` network policy is defined, it is only guaranteed to deny TCP, UDP and SCTP +connections. For other protocols, such as ARP or ICMP, the behaviour is undefined. +The same applies to allow rules: when a specific pod is allowed as ingress source or egress destination, +it is undefined what happens with (for example) ICMP packets. Protocols such as ICMP may be allowed by some +network plugins and denied by others. + ## Targeting a range of ports {{< feature-state for_k8s_version="v1.25" state="stable" >}} @@ -346,6 +348,88 @@ namespaces, the value of the label is the namespace name. While NetworkPolicy cannot target a namespace by its name with some object field, you can use the standardized label to target a specific namespace. +## Pod lifecycle + +{{< note >}} +The following applies to clusters with a conformant networking plugin and a conformant implementation of +NetworkPolicy. +{{< /note >}} + +When a new NetworkPolicy object is created, it may take some time for a network plugin +to handle the new object. 
If a pod that is affected by a NetworkPolicy +is created before the network plugin has completed NetworkPolicy handling, +that pod may be started unprotected, and isolation rules will be applied when +the NetworkPolicy handling is completed. + +Once the NetworkPolicy is handled by a network plugin, + +1. All newly created pods affected by a given NetworkPolicy will be isolated before +they are started. +Implementations of NetworkPolicy must ensure that filtering is effective throughout +the Pod lifecycle, even from the very first instant that any container in that Pod is started. +Because they are applied at Pod level, NetworkPolicies apply equally to init containers, +sidecar containers, and regular containers. + +2. Allow rules will be applied eventually after the isolation rules (or may be applied at the same time). +In the worst case, a newly created pod may have no network connectivity at all when it is first started, if +isolation rules were already applied, but no allow rules were applied yet. + +Every created NetworkPolicy will be handled by a network plugin eventually, but there is no +way to tell from the Kubernetes API when exactly that happens. + +Therefore, pods must be resilient against being started up with different network +connectivity than expected. If you need to make sure the pod can reach certain destinations +before being started, you can use an [init container](/docs/concepts/workloads/pods/init-containers/) +to wait for those destinations to be reachable before kubelet starts the app containers. + +Every NetworkPolicy will be applied to all selected pods eventually. +Because the network plugin may implement NetworkPolicy in a distributed manner, +it is possible that pods may see a slightly inconsistent view of network policies +when the pod is first created, or when pods or policies change. +For example, a newly-created pod that is supposed to be able to reach both Pod A +on Node 1 and Pod B on Node 2 may find that it can reach Pod A immediately, +but cannot reach Pod B until a few seconds later. + +## NetworkPolicy and `hostNetwork` pods + +NetworkPolicy behaviour for `hostNetwork` pods is undefined, but it should be limited to 2 possibilities: +- The network plugin can distinguish `hostNetwork` pod traffic from all other traffic + (including being able to distinguish traffic from different `hostNetwork` pods on + the same node), and will apply NetworkPolicy to `hostNetwork` pods just like it does + to pod-network pods. +- The network plugin cannot properly distinguish `hostNetwork` pod traffic, + and so it ignores `hostNetwork` pods when matching `podSelector` and `namespaceSelector`. + Traffic to/from `hostNetwork` pods is treated the same as all other traffic to/from the node IP. + (This is the most common implementation.) + +This applies when +1. a `hostNetwork` pod is selected by `spec.podSelector`. + + ```yaml + ... + spec: + podSelector: + matchLabels: + role: client + ... + ``` + +2. a `hostNetwork` pod is selected by a `podSelector` or `namespaceSelector` in an `ingress` or `egress` rule. + + ```yaml + ... + ingress: + - from: + - podSelector: + matchLabels: + role: client + ... + ``` + +At the same time, since `hostNetwork` pods have the same IP addresses as the nodes they reside on, +their connections will be treated as node connections. For example, you can allow traffic +from a `hostNetwork` Pod using an `ipBlock` rule. 
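+
+As a minimal sketch (the node address `10.0.0.5/32` below is a made-up example), an
+`ipBlock` rule like the following would allow ingress from that node IP, which also
+covers connections from `hostNetwork` Pods on that node, since they share the node's
+address:
+
+```yaml
+  ingress:
+  - from:
+    - ipBlock:
+        # Hypothetical node address; hostNetwork Pods on this node share it.
+        cidr: 10.0.0.5/32
+```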
+ ## What you can't do with network policies (at least, not yet) As of Kubernetes {{< skew currentVersion >}}, the following functionality does not exist in the diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index 1cb64bb99e3ba..fd992995288da 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -175,7 +175,6 @@ spec: targetPort: http-web-svc ``` - This works even if there is a mixture of Pods in the Service using a single configured name, with the same network protocol available via different port numbers. This offers a lot of flexibility for deploying and evolving @@ -269,7 +268,8 @@ as a destination. {{< /note >}} For an EndpointSlice that you create yourself, or in your own code, -you should also pick a value to use for the [`endpointslice.kubernetes.io/managed-by`](/docs/reference/labels-annotations-taints/#endpointslicekubernetesiomanaged-by) label. +you should also pick a value to use for the label +[`endpointslice.kubernetes.io/managed-by`](/docs/reference/labels-annotations-taints/#endpointslicekubernetesiomanaged-by). If you create your own controller code to manage EndpointSlices, consider using a value similar to `"my-domain.example/name-of-controller"`. If you are using a third party tool, use the name of the tool in all-lowercase and change spaces and other @@ -283,7 +283,8 @@ managed by Kubernetes' own control plane. #### Accessing a Service without a selector {#service-no-selector-access} Accessing a Service without a selector works the same as if it had a selector. -In the [example](#services-without-selectors) for a Service without a selector, traffic is routed to one of the two endpoints defined in +In the [example](#services-without-selectors) for a Service without a selector, +traffic is routed to one of the two endpoints defined in the EndpointSlice manifest: a TCP connection to 10.1.2.3 or 10.4.5.6, on port 9376. {{< note >}} @@ -334,8 +335,7 @@ affects the legacy Endpoints API. In that case, Kubernetes selects at most 1000 possible backend endpoints to store into the Endpoints object, and sets an -{{< glossary_tooltip text="annotation" term_id="annotation" >}} on the -Endpoints: +{{< glossary_tooltip text="annotation" term_id="annotation" >}} on the Endpoints: [`endpoints.kubernetes.io/over-capacity: truncated`](/docs/reference/labels-annotations-taints/#endpoints-kubernetes-io-over-capacity). The control plane also removes that annotation if the number of backend Pods drops below 1000. @@ -349,7 +349,8 @@ The same API limit means that you cannot manually update an Endpoints to have mo {{< feature-state for_k8s_version="v1.20" state="stable" >}} The `appProtocol` field provides a way to specify an application protocol for -each Service port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. +each Service port. This is used as a hint for implementations to offer +richer behavior for protocols that they understand. The value of this field is mirrored by the corresponding Endpoints and EndpointSlice objects. @@ -365,8 +366,6 @@ This field follows standard Kubernetes label syntax. Valid values are one of: |----------|-------------| | `kubernetes.io/h2c` | HTTP/2 over cleartext as described in [RFC 7540](https://www.rfc-editor.org/rfc/rfc7540) | - - ### Multi-port Services For some Services, you need to expose more than one port. 
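
As a minimal sketch (the port names and numbers here are illustrative), each entry
under `ports` gets a unique `name` so that the ports can be disambiguated:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  selector:
    app.kubernetes.io/name: MyApp
  ports:
    # Each port in a multi-port Service must have an unambiguous name.
  - name: http
    protocol: TCP
    port: 80
    targetPort: 9376
  - name: https
    protocol: TCP
    port: 443
    targetPort: 9377
```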
@@ -402,7 +401,6 @@ also start and end with an alphanumeric character.
For example, the names `123-abc` and `web` are valid, but `123_abc` and `-web` are not.
{{< /note >}}

-
## Service type {#publishing-services-service-types}

For some parts of your application (for example, frontends) you may want to expose a
@@ -417,7 +415,8 @@ The available `type` values and their behaviors are:
[`ClusterIP`](#type-clusterip)
: Exposes the Service on a cluster-internal IP. Choosing this value
  makes the Service only reachable from within the cluster. This is the
  default that is used if you don't explicitly specify a `type` for a Service.
-  You can expose the Service to the public internet using an [Ingress](/docs/concepts/services-networking/ingress/) or a
+  You can expose the Service to the public internet using an
+  [Ingress](/docs/concepts/services-networking/ingress/) or a
  [Gateway](https://gateway-api.sigs.k8s.io/).

[`NodePort`](#type-nodeport)
@@ -437,8 +436,9 @@ The available `type` values and their behaviors are:
  No proxying of any kind is set up.

The `type` field in the Service API is designed as nested functionality - each level
-adds to the previous. This is not strictly required on all cloud providers, but
-the Kubernetes API design for Service requires it anyway.
+adds to the previous. However, there is an exception to this nested design. You can
+define a `LoadBalancer` Service by
+[disabling the load balancer `NodePort` allocation](/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation).

### `type: ClusterIP` {#type-clusterip}

@@ -508,27 +508,27 @@ spec:
  selector:
    app.kubernetes.io/name: MyApp
  ports:
-      # By default and for convenience, the `targetPort` is set to the same value as the `port` field.
    - port: 80
+      # By default and for convenience, the `targetPort` is set to
+      # the same value as the `port` field.
      targetPort: 80
      # Optional field
-      # By default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767)
+      # By default and for convenience, the Kubernetes control plane
+      # will allocate a port from a range (default: 30000-32767)
      nodePort: 30007
```

-#### Reserve Nodeport Ranges to avoid collisions when port assigning
+#### Reserve NodePort ranges to avoid collisions {#avoid-nodeport-collisions}

-{{< feature-state for_k8s_version="v1.28" state="beta" >}}
+{{< feature-state for_k8s_version="v1.29" state="stable" >}}

The policy for assigning ports to NodePort services applies to both the
auto-assignment and the manual assignment scenarios. When a user wants to
create a NodePort service that uses a specific port, the target port may
conflict with another port that has already been assigned.

-In this case, you can enable the feature gate `ServiceNodePortStaticSubrange`, which allows you
-to use a different port allocation strategy for NodePort Services. The port range for NodePort services
-is divided into two bands. Dynamic port assignment uses the upper band by default, and it may use
-the lower band once the upper band has been exhausted. Users can then allocate from the lower band
-with a lower risk of port collision.
+To avoid this problem, the port range for NodePort services is divided into two bands.
+Dynamic port assignment uses the upper band by default, and it may use the lower band once the
+upper band has been exhausted. Users can then allocate from the lower band with a lower risk of port collision.
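+
+As an illustrative sketch (the exact split between the two bands is an internal detail
+that depends on the size of the configured port range), requesting a port near the
+bottom of the default range keeps you in the band that dynamic assignment touches last:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+spec:
+  type: NodePort
+  selector:
+    app.kubernetes.io/name: MyApp
+  ports:
+  - port: 80
+    targetPort: 80
+    # 30010 is near the start of the default 30000-32767 range; dynamic
+    # allocation prefers the upper band, so a collision here is less likely.
+    nodePort: 30010
+```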
#### Custom IP address configuration for `type: NodePort` Services {#service-nodeport-custom-listen-address}

@@ -539,8 +539,7 @@ control plane).

If you want to specify particular IP address(es) to proxy the port, you can set the
`--nodeport-addresses` flag for kube-proxy or the equivalent `nodePortAddresses`
-field of the
-[kube-proxy configuration file](/docs/reference/config-api/kube-proxy-config.v1alpha1/)
+field of the [kube-proxy configuration file](/docs/reference/config-api/kube-proxy-config.v1alpha1/)
to particular IP block(s).

This flag takes a comma-delimited list of IP blocks (e.g. `10.0.0.0/8`, `192.0.2.0/25`)
@@ -554,7 +553,8 @@ This means that kube-proxy should consider all available network interfaces for
{{< note >}}
This Service is visible as `<NodeIP>:spec.ports[*].nodePort` and `.spec.clusterIP:spec.ports[*].port`.
If the `--nodeport-addresses` flag for kube-proxy or the equivalent field
-in the kube-proxy configuration file is set, `<NodeIP>` would be a filtered node IP address (or possibly IP addresses).
+in the kube-proxy configuration file is set, `<NodeIP>` would be a filtered
+node IP address (or possibly IP addresses).
{{< /note >}}

### `type: LoadBalancer` {#loadbalancer}

@@ -608,7 +608,8 @@ set is ignored.

{{< note >}}
The `.spec.loadBalancerIP` field for a Service was deprecated in Kubernetes v1.24.
-This field was under-specified and its meaning varies across implementations. It also cannot support dual-stack networking. This field may be removed in a future API version.
+This field was under-specified and its meaning varies across implementations.
+It also cannot support dual-stack networking. This field may be removed in a future API version.

If you're integrating with a provider that supports specifying the load balancer IP address(es)
for a Service via a (provider specific) annotation, you should switch to doing that.
@@ -667,6 +668,28 @@ The value of `spec.loadBalancerClass` must be a label-style identifier,
with an optional prefix such as "`internal-vip`" or "`example.com/internal-vip`".
Unprefixed names are reserved for end-users.

+#### Specifying IPMode of load balancer status {#load-balancer-ip-mode}
+
+{{< feature-state for_k8s_version="v1.29" state="alpha" >}}
+
+Starting as Alpha in Kubernetes 1.29,
+a [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+named `LoadBalancerIPMode` allows you to set the `.status.loadBalancer.ingress.ipMode`
+for a Service with `type` set to `LoadBalancer`.
+The `.status.loadBalancer.ingress.ipMode` specifies how the load-balancer IP behaves.
+It may be specified only when the `.status.loadBalancer.ingress.ip` field is also specified.
+
+There are two possible values for `.status.loadBalancer.ingress.ipMode`: "VIP" and "Proxy".
+The default value is "VIP", meaning that traffic is delivered to the node
+with the destination set to the load-balancer's IP and port.
+There are two cases when setting this to "Proxy", depending on how the load-balancer
+from the cloud provider delivers the traffic:
+
+- If the traffic is delivered to the node then DNATed to the pod, the destination would be set to the node's IP and node port;
+- If the traffic is delivered directly to the pod, the destination would be set to the pod's IP and port.
+
+Service implementations may use this information to adjust traffic routing.
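+
+As a sketch of what this can look like (the address is illustrative, and in practice
+the status is written by the cloud provider's controller rather than by hand):
+
+```yaml
+status:
+  loadBalancer:
+    ingress:
+    - ip: 192.0.2.127
+      # "Proxy" indicates the load balancer rewrites the destination, so
+      # traffic does not arrive with the load balancer's VIP as its destination.
+      ipMode: Proxy
+```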
+ #### Internal load balancer In a mixed environment it is sometimes necessary to route traffic from Services inside the same @@ -682,117 +705,97 @@ depending on the cloud service provider you're using: {{% tab name="Default" %}} Select one of the tabs. {{% /tab %}} + {{% tab name="GCP" %}} ```yaml -[...] metadata: - name: my-service - annotations: - networking.gke.io/load-balancer-type: "Internal" -[...] + name: my-service + annotations: + networking.gke.io/load-balancer-type: "Internal" ``` - {{% /tab %}} {{% tab name="AWS" %}} ```yaml -[...] metadata: name: my-service annotations: service.beta.kubernetes.io/aws-load-balancer-internal: "true" -[...] ``` {{% /tab %}} {{% tab name="Azure" %}} ```yaml -[...] metadata: - name: my-service - annotations: - service.beta.kubernetes.io/azure-load-balancer-internal: "true" -[...] + name: my-service + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" ``` {{% /tab %}} {{% tab name="IBM Cloud" %}} ```yaml -[...] metadata: - name: my-service - annotations: - service.kubernetes.io/ibm-load-balancer-cloud-provider-ip-type: "private" -[...] + name: my-service + annotations: + service.kubernetes.io/ibm-load-balancer-cloud-provider-ip-type: "private" ``` {{% /tab %}} {{% tab name="OpenStack" %}} ```yaml -[...] metadata: - name: my-service - annotations: - service.beta.kubernetes.io/openstack-internal-load-balancer: "true" -[...] + name: my-service + annotations: + service.beta.kubernetes.io/openstack-internal-load-balancer: "true" ``` {{% /tab %}} {{% tab name="Baidu Cloud" %}} ```yaml -[...] metadata: - name: my-service - annotations: - service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" -[...] + name: my-service + annotations: + service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true" ``` {{% /tab %}} {{% tab name="Tencent Cloud" %}} ```yaml -[...] metadata: annotations: service.kubernetes.io/qcloud-loadbalancer-internal-subnetid: subnet-xxxxx -[...] ``` {{% /tab %}} {{% tab name="Alibaba Cloud" %}} ```yaml -[...] metadata: annotations: service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet" -[...] ``` {{% /tab %}} {{% tab name="OCI" %}} ```yaml -[...] metadata: - name: my-service - annotations: - service.beta.kubernetes.io/oci-load-balancer-internal: true -[...] + name: my-service + annotations: + service.beta.kubernetes.io/oci-load-balancer-internal: true ``` {{% /tab %}} {{< /tabs >}} ### `type: ExternalName` {#externalname} - - Services of type ExternalName map a Service to a DNS name, not to a typical selector such as `my-service` or `cassandra`. You specify these Services with the `spec.externalName` parameter. @@ -811,11 +814,14 @@ spec: ``` {{< note >}} -A Service of `type: ExternalName` accepts an IPv4 address string, but treats that string as a DNS name comprised of digits, -not as an IP address (the internet does not however allow such names in DNS). Services with external names that resemble IPv4 +A Service of `type: ExternalName` accepts an IPv4 address string, +but treats that string as a DNS name comprised of digits, +not as an IP address (the internet does not however allow such names in DNS). +Services with external names that resemble IPv4 addresses are not resolved by DNS servers. -If you want to map a Service directly to a specific IP address, consider using [headless Services](#headless-services). +If you want to map a Service directly to a specific IP address, consider using +[headless Services](#headless-services). 
{{< /note >}} When looking up the host `my-service.prod.svc.cluster.local`, the cluster DNS Service @@ -881,9 +887,7 @@ finding a Service: environment variables and DNS. When a Pod is run on a Node, the kubelet adds a set of environment variables for each active Service. It adds `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores. -It also supports variables (see [makeLinkVariables](https://github.com/kubernetes/kubernetes/blob/dd2d12f6dc0e654c15d5db57a5f9f6ba61192726/pkg/kubelet/envvars/envvars.go#L72)) -that are compatible with Docker Engine's -"_[legacy container links](https://docs.docker.com/network/links/)_" feature. + For example, the Service `redis-primary` which exposes TCP port 6379 and has been allocated cluster IP address 10.0.0.11, produces the following environment @@ -1013,14 +1017,17 @@ about the [Service API object](/docs/reference/generated/kubernetes-api/{{< para ## {{% heading "whatsnext" %}} Learn more about Services and how they fit into Kubernetes: -* Follow the [Connecting Applications with Services](/docs/tutorials/services/connect-applications-service/) tutorial. + +* Follow the [Connecting Applications with Services](/docs/tutorials/services/connect-applications-service/) + tutorial. * Read about [Ingress](/docs/concepts/services-networking/ingress/), which exposes HTTP and HTTPS routes from outside the cluster to Services within your cluster. -* Read about [Gateway](https://gateway-api.sigs.k8s.io/), an extension to +* Read about [Gateway](/docs/concepts/services-networking/gateway/), an extension to Kubernetes that provides more flexibility than Ingress. For more context, read the following: + * [Virtual IPs and Service Proxies](/docs/reference/networking/virtual-ips/) * [EndpointSlices](/docs/concepts/services-networking/endpoint-slices/) * [Service API reference](/docs/reference/kubernetes-api/service-resources/service-v1/) diff --git a/content/en/docs/concepts/storage/ephemeral-volumes.md b/content/en/docs/concepts/storage/ephemeral-volumes.md index 77844874348d7..f92f544768fd0 100644 --- a/content/en/docs/concepts/storage/ephemeral-volumes.md +++ b/content/en/docs/concepts/storage/ephemeral-volumes.md @@ -47,8 +47,7 @@ different purposes: [secret](/docs/concepts/storage/volumes/#secret): inject different kinds of Kubernetes data into a Pod - [CSI ephemeral volumes](#csi-ephemeral-volumes): - similar to the previous volume kinds, but provided by special - [CSI drivers](https://github.com/container-storage-interface/spec/blob/master/spec.md) + similar to the previous volume kinds, but provided by special {{< glossary_tooltip text="CSI" term_id="csi" >}} drivers which specifically [support this feature](https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html) - [generic ephemeral volumes](#generic-ephemeral-volumes), which can be provided by all storage drivers that also support persistent volumes diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 2d7c00b7b7135..0d5caa4ef2f13 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -17,7 +17,8 @@ weight: 20 This document describes _persistent volumes_ in Kubernetes. Familiarity with -[volumes](/docs/concepts/storage/volumes/) is suggested. 
+[volumes](/docs/concepts/storage/volumes/), [StorageClasses](/docs/concepts/storage/storage-classes/) +and [VolumeAttributesClasses](/docs/concepts/storage/volume-attributes-classes/) is suggested. @@ -39,8 +40,8 @@ NFS, iSCSI, or a cloud-provider-specific storage system. A _PersistentVolumeClaim_ (PVC) is a request for storage by a user. It is similar to a Pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific -size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany or -ReadWriteMany, see [AccessModes](#access-modes)). +size and access modes (e.g., they can be mounted ReadWriteOnce, ReadOnlyMany, +ReadWriteMany, or ReadWriteOncePod, see [AccessModes](#access-modes)). While PersistentVolumeClaims allow a user to consume abstract storage resources, it is common that users need PersistentVolumes with varying properties, such as @@ -185,7 +186,7 @@ another claim because the previous claimant's data remains on the volume. An administrator can manually reclaim the volume with the following steps. 1. Delete the PersistentVolume. The associated storage asset in external infrastructure - (such as an AWS EBS or GCE PD volume) still exists after the PV is deleted. + still exists after the PV is deleted. 1. Manually clean up the data on the associated storage asset accordingly. 1. Manually delete the associated storage asset. @@ -196,7 +197,7 @@ the same storage asset definition. For volume plugins that support the `Delete` reclaim policy, deletion removes both the PersistentVolume object from Kubernetes, as well as the associated -storage asset in the external infrastructure, such as an AWS EBS or GCE PD volume. Volumes that were dynamically provisioned +storage asset in the external infrastructure. Volumes that were dynamically provisioned inherit the [reclaim policy of their StorageClass](#reclaim-policy), which defaults to `Delete`. The administrator should configure the StorageClass according to users' expectations; otherwise, the PV must be edited or @@ -272,7 +273,7 @@ Access Modes: RWO VolumeMode: Filesystem Capacity: 1Gi Node Affinity: -Message: +Message: Source: Type: vSphereVolume (a Persistent Disk resource in vSphere) VolumePath: [vsanDatastore] d49c4a62-166f-ce12-c464-020077ba5d46/kubernetes-dynamic-pvc-74a498d6-3929-47e8-8c02-078c1ece4d78.vmdk @@ -297,7 +298,7 @@ Access Modes: RWO VolumeMode: Filesystem Capacity: 200Mi Node Affinity: -Message: +Message: Source: Type: CSI (a Container Storage Interface (CSI) volume source) Driver: csi.vsphere.vmware.com @@ -370,7 +371,6 @@ the following types of volumes: * azureFile (deprecated) * {{< glossary_tooltip text="csi" term_id="csi" >}} * flexVolume (deprecated) -* gcePersistentDisk (deprecated) * rbd (deprecated) * portworxVolume (deprecated) @@ -438,11 +438,6 @@ Similar to other volume types - FlexVolume volumes can also be expanded when in- FlexVolume resize is possible only when the underlying driver supports resize. {{< /note >}} -{{< note >}} -Expanding EBS volumes is a time-consuming operation. -Also, there is a per-volume quota of one modification every 6 hours. 
-{{< /note >}} - #### Recovering from Failure when Expanding Volumes If a user specifies a new size that is too big to be satisfied by underlying @@ -518,8 +513,6 @@ This means that support is still available but will be removed in a future Kuber (**deprecated** in v1.21) * [`flexVolume`](/docs/concepts/storage/volumes/#flexvolume) - FlexVolume (**deprecated** in v1.23) -* [`gcePersistentDisk`](/docs/concepts/storage/volumes/#gcepersistentdisk) - GCE Persistent Disk - (**deprecated** in v1.17) * [`portworxVolume`](/docs/concepts/storage/volumes/#portworxvolume) - Portworx volume (**deprecated** in v1.25) * [`vsphereVolume`](/docs/concepts/storage/volumes/#vspherevolume) - vSphere VMDK volume @@ -626,7 +619,8 @@ The access modes are: `ReadWriteOnce` : the volume can be mounted as read-write by a single node. ReadWriteOnce access - mode still can allow multiple pods to access the volume when the pods are running on the same node. + mode still can allow multiple pods to access the volume when the pods are + running on the same node. For single pod access, please see ReadWriteOncePod. `ReadOnlyMany` : the volume can be mounted as read-only by many nodes. @@ -635,15 +629,22 @@ The access modes are: : the volume can be mounted as read-write by many nodes. `ReadWriteOncePod` -: {{< feature-state for_k8s_version="v1.27" state="beta" >}} +: {{< feature-state for_k8s_version="v1.29" state="stable" >}} the volume can be mounted as read-write by a single Pod. Use ReadWriteOncePod access mode if you want to ensure that only one pod across the whole cluster can - read that PVC or write to it. This is only supported for CSI volumes and - Kubernetes version 1.22+. + read that PVC or write to it. -The blog article -[Introducing Single Pod Access Mode for PersistentVolumes](/blog/2021/09/13/read-write-once-pod-access-mode-alpha/) -covers this in more detail. +{{< note >}} +The `ReadWriteOncePod` access mode is only supported for +{{< glossary_tooltip text="CSI" term_id="csi" >}} volumes and Kubernetes version +1.22+. To use this feature you will need to update the following +[CSI sidecars](https://kubernetes-csi.github.io/docs/sidecar-containers.html) +to these versions or greater: + +* [csi-provisioner:v3.0.0+](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v3.0.0) +* [csi-attacher:v3.3.0+](https://github.com/kubernetes-csi/external-attacher/releases/tag/v3.3.0) +* [csi-resizer:v1.3.0+](https://github.com/kubernetes-csi/external-resizer/releases/tag/v1.3.0) +{{< /note >}} In the CLI, the access modes are abbreviated to: @@ -663,8 +664,7 @@ are specified as ReadWriteOncePod, the volume is constrained and can be mounted {{< /note >}} > __Important!__ A volume can only be mounted using one access mode at a time, -> even if it supports many. For example, a GCEPersistentDisk can be mounted as -> ReadWriteOnce by a single node or ReadOnlyMany by many nodes, but not at the same time. +> even if it supports many. 
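
For example (a minimal sketch), a claim that must only ever be used by one Pod at a
time can request the single-Pod access mode:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: single-writer-pvc
spec:
  accessModes:
  - ReadWriteOncePod # at most one Pod across the cluster may use this claim
  resources:
    requests:
      storage: 1Gi
```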
| Volume Plugin | ReadWriteOnce | ReadOnlyMany | ReadWriteMany | ReadWriteOncePod | | :--- | :---: | :---: | :---: | - | @@ -673,8 +673,6 @@ are specified as ReadWriteOncePod, the volume is constrained and can be mounted | CSI | depends on the driver | depends on the driver | depends on the driver | depends on the driver | | FC | ✓ | ✓ | - | - | | FlexVolume | ✓ | ✓ | depends on the driver | - | -| GCEPersistentDisk | ✓ | ✓ | - | - | -| Glusterfs | ✓ | ✓ | ✓ | - | | HostPath | ✓ | - | - | - | | iSCSI | ✓ | ✓ | - | - | | NFS | ✓ | ✓ | ✓ | - | @@ -701,9 +699,9 @@ Current reclaim policies are: * Retain -- manual reclamation * Recycle -- basic scrub (`rm -rf /thevolume/*`) -* Delete -- associated storage asset such as AWS EBS or GCE PD volume is deleted +* Delete -- delete the volume -Currently, only NFS and HostPath support recycling. AWS EBS and GCE PD volumes support deletion. +For Kubernetes {{< skew currentVersion >}}, only `nfs` and `hostPath` volume types support recycling. ### Mount Options @@ -719,7 +717,6 @@ The following volume types support mount options: * `azureFile` * `cephfs` (**deprecated** in v1.28) * `cinder` (**deprecated** in v1.18) -* `gcePersistentDisk` (**deprecated** in v1.28) * `iscsi` * `nfs` * `rbd` (**deprecated** in v1.28) @@ -734,8 +731,7 @@ it will become fully deprecated in a future Kubernetes release. ### Node Affinity {{< note >}} -For most volume types, you do not need to set this field. It is automatically -populated for [GCE PD](/docs/concepts/storage/volumes/#gcepersistentdisk) volume block types. +For most volume types, you do not need to set this field. You need to explicitly set this for [local](/docs/concepts/storage/volumes/#local) volumes. {{< /note >}} @@ -766,7 +762,7 @@ You can see the name of the PVC bound to the PV using `kubectl describe persiste #### Phase transition timestamp -{{< feature-state for_k8s_version="v1.28" state="alpha" >}} +{{< feature-state for_k8s_version="v1.29" state="beta" >}} The `.status` field for a PersistentVolume can include an alpha `lastPhaseTransitionTime` field. This field records the timestamp of when the volume last transitioned its phase. For newly created @@ -956,7 +952,6 @@ applicable: * CSI * FC (Fibre Channel) -* GCEPersistentDisk (deprecated) * iSCSI * Local volume * OpenStack Cinder @@ -1166,7 +1161,7 @@ users should be aware of: When the `CrossNamespaceVolumeDataSource` feature is enabled, there are additional differences: * The `dataSource` field only allows local objects, while the `dataSourceRef` field allows - objects in any namespaces. + objects in any namespaces. * When namespace is specified, `dataSource` and `dataSourceRef` are not synced. Users should always use `dataSourceRef` on clusters that have the feature gate enabled, and diff --git a/content/en/docs/concepts/storage/projected-volumes.md b/content/en/docs/concepts/storage/projected-volumes.md index ac64fa4d7daf8..8d59b8026482f 100644 --- a/content/en/docs/concepts/storage/projected-volumes.md +++ b/content/en/docs/concepts/storage/projected-volumes.md @@ -24,6 +24,7 @@ Currently, the following types of volume sources can be projected: * [`downwardAPI`](/docs/concepts/storage/volumes/#downwardapi) * [`configMap`](/docs/concepts/storage/volumes/#configmap) * [`serviceAccountToken`](#serviceaccounttoken) +* [`clusterTrustBundle`](#clustertrustbundle) All sources are required to be in the same namespace as the Pod. 
For more details, see the [all-in-one volume](https://git.k8s.io/design-proposals-archive/node/all-in-one-volume.md) design document.

@@ -70,6 +71,31 @@ A container using a projected volume source as a [`subPath`](/docs/concepts/stor
volume mount will not receive updates for those volume sources.
{{< /note >}}

+## clusterTrustBundle projected volumes {#clustertrustbundle}
+
+{{< feature-state for_k8s_version="v1.29" state="alpha" >}}
+
+{{< note >}}
+To use this feature in Kubernetes {{< skew currentVersion >}}, you must enable support for ClusterTrustBundle objects with the `ClusterTrustBundle` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and `--runtime-config=certificates.k8s.io/v1alpha1/clustertrustbundles=true` kube-apiserver flag, then enable the `ClusterTrustBundleProjection` feature gate.
+{{< /note >}}
+
+The `clusterTrustBundle` projected volume source injects the contents of one or more [ClusterTrustBundle](/docs/reference/access-authn-authz/certificate-signing-requests#cluster-trust-bundles) objects as an automatically-updating file in the container filesystem.
+
+ClusterTrustBundles can be selected either by [name](/docs/reference/access-authn-authz/certificate-signing-requests#ctb-signer-unlinked) or by [signer name](/docs/reference/access-authn-authz/certificate-signing-requests#ctb-signer-linked).
+
+To select by name, use the `name` field to designate a single ClusterTrustBundle object.
+
+To select by signer name, use the `signerName` field (and optionally the
+`labelSelector` field) to designate a set of ClusterTrustBundle objects that use
+the given signer name. If `labelSelector` is not present, then all
+ClusterTrustBundles for that signer are selected.
+
+The kubelet deduplicates the certificates in the selected ClusterTrustBundle objects, normalizes the PEM representations (discarding comments and headers), reorders the certificates, and writes them into the file named by `path`. As the set of selected ClusterTrustBundles or their content changes, kubelet keeps the file up-to-date.
+
+By default, the kubelet will prevent the pod from starting if the named ClusterTrustBundle is not found, or if `signerName` / `labelSelector` do not match any ClusterTrustBundles. If this behavior is not what you want, then set the `optional` field to `true`, and the pod will start up with an empty file at `path`.
+
+{{% code_sample file="pods/storage/projected-clustertrustbundle.yaml" %}}
+
## SecurityContext interactions

The [proposal](https://git.k8s.io/enhancements/keps/sig-storage/2451-service-account-token-volumes#proposal) for file permission handling in projected service account volume enhancement introduced the projected files having the correct owner permissions set.
diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md
index 393d72a77bfc0..1227950ad606b 100644
--- a/content/en/docs/concepts/storage/storage-classes.md
+++ b/content/en/docs/concepts/storage/storage-classes.md
@@ -15,59 +15,78 @@
This document describes the concept of a StorageClass in Kubernetes. Familiarity
with [volumes](/docs/concepts/storage/volumes/) and
[persistent volumes](/docs/concepts/storage/persistent-volumes) is suggested.

-
-
-## Introduction
-
-A StorageClass provides a way for administrators to describe the "classes" of
+A StorageClass provides a way for administrators to describe the _classes_ of
storage they offer.
Different classes might map to quality-of-service levels,
or to backup policies, or to arbitrary policies determined by the cluster
administrators. Kubernetes itself is unopinionated about what classes
-represent. This concept is sometimes called "profiles" in other storage
-systems.
+represent.

-## The StorageClass Resource
+The Kubernetes concept of a storage class is similar to “profiles” in some other
+storage system designs.
+
+
+
+## StorageClass objects

Each StorageClass contains the fields `provisioner`, `parameters`, and
`reclaimPolicy`, which are used when a PersistentVolume belonging to the
-class needs to be dynamically provisioned.
+class needs to be dynamically provisioned to satisfy a PersistentVolumeClaim (PVC).

The name of a StorageClass object is significant, and is how users can
request a particular class. Administrators set the name and other parameters
of a class when first creating StorageClass objects.

-Administrators can specify a default StorageClass only for PVCs that don't
-request any particular class to bind to: see the
-[PersistentVolumeClaim section](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)
-for details.
+As an administrator, you can specify a default StorageClass that applies to any PVCs that
+don't request a specific class. For more details, see the
+[PersistentVolumeClaim concept](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims).

-```yaml
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: standard
-provisioner: kubernetes.io/aws-ebs
-parameters:
-  type: gp2
-reclaimPolicy: Retain
-allowVolumeExpansion: true
-mountOptions:
-  - debug
-volumeBindingMode: Immediate
-```
+Here's an example of a StorageClass:

-### Default StorageClass
+{{% code_sample file="storage/storageclass-low-latency.yaml" %}}

-When a PVC does not specify a `storageClassName`, the default StorageClass is
-used. The cluster can only have one default StorageClass. If more than one
-default StorageClass is accidentally set, the newest default is used when the
-PVC is dynamically provisioned.
+## Default StorageClass

+You can mark a StorageClass as the default for your cluster. For instructions on setting the default StorageClass, see [Change the default StorageClass](/docs/tasks/administer-cluster/change-default-storage-class/).

-Note that certain cloud providers may already define a default StorageClass.
-### Provisioner
+When a PVC does not specify a `storageClassName`, the default StorageClass is
+used.
+
+If you set the
+[`storageclass.kubernetes.io/is-default-class`](/docs/reference/labels-annotations-taints/#storageclass-kubernetes-io-is-default-class)
+annotation to true on more than one StorageClass in your cluster, and you then
+create a PersistentVolumeClaim with no `storageClassName` set, Kubernetes
+uses the most recently created default StorageClass.
+
+{{< note >}}
+You should try to only have one StorageClass in your cluster that is
+marked as the default. The reason that Kubernetes allows you to have
+multiple default StorageClasses is to allow for seamless migration.
+{{< /note >}}
+
+You can create a PersistentVolumeClaim without specifying a `storageClassName`
+for the new PVC, and you can do so even when no default StorageClass exists
+in your cluster. In this case, the new PVC is created as you defined it, and the
+`storageClassName` of that PVC remains unset until a default becomes available.
+
+You can have a cluster without any default StorageClass.
If you don't mark any +StorageClass as default (and one hasn't been set for you by, for example, a cloud provider), +then Kubernetes cannot apply that defaulting for PersistentVolumeClaims that need +it. + +If or when a default StorageClass becomes available, the control plane identifies any +existing PVCs without `storageClassName`. For the PVCs that either have an empty +value for `storageClassName` or do not have this key, the control plane then +updates those PVCs to set `storageClassName` to match the new default StorageClass. +If you have an existing PVC where the `storageClassName` is `""`, and you configure +a default StorageClass, then this PVC will not get updated. + +In order to keep binding to PVs with `storageClassName` set to `""` +(while a default StorageClass is present), you need to set the `storageClassName` +of the associated PVC to `""`. + +## Provisioner Each StorageClass has a provisioner that determines what volume plugin is used for provisioning PVs. This field must be specified. @@ -78,13 +97,12 @@ for provisioning PVs. This field must be specified. | CephFS | - | - | | FC | - | - | | FlexVolume | - | - | -| GCEPersistentDisk | ✓ | [GCE PD](#gce-pd) | | iSCSI | - | - | +| Local | - | [Local](#local) | | NFS | - | [NFS](#nfs) | -| RBD | ✓ | [Ceph RBD](#ceph-rbd) | -| VsphereVolume | ✓ | [vSphere](#vsphere) | | PortworxVolume | ✓ | [Portworx Volume](#portworx-volume) | -| Local | - | [Local](#local) | +| RBD | - | [Ceph RBD](#ceph-rbd) | +| VsphereVolume | ✓ | [vSphere](#vsphere) | You are not restricted to specifying the "internal" provisioners listed here (whose names are prefixed with "kubernetes.io" and shipped @@ -102,7 +120,7 @@ For example, NFS doesn't provide an internal provisioner, but an external provisioner can be used. There are also cases when 3rd party storage vendors provide their own external provisioner. -### Reclaim Policy +## Reclaim policy PersistentVolumes that are dynamically created by a StorageClass will have the [reclaim policy](/docs/concepts/storage/persistent-volumes/#reclaiming) @@ -113,24 +131,24 @@ StorageClass object is created, it will default to `Delete`. PersistentVolumes that are created manually and managed via a StorageClass will have whatever reclaim policy they were assigned at creation. -### Allow Volume Expansion +## Volume expansion {#allow-volume-expansion} -PersistentVolumes can be configured to be expandable. This feature when set to `true`, -allows the users to resize the volume by editing the corresponding PVC object. +PersistentVolumes can be configured to be expandable. This allows you to resize the +volume by editing the corresponding PVC object, requesting a new larger amount of +storage. The following types of volumes support volume expansion, when the underlying StorageClass has the field `allowVolumeExpansion` set to true. {{< table caption = "Table of Volume types and the version of Kubernetes they require" >}} -| Volume type | Required Kubernetes version | -| :------------------- | :-------------------------- | -| gcePersistentDisk | 1.11 | -| rbd | 1.11 | -| Azure File | 1.11 | -| Portworx | 1.11 | -| FlexVolume | 1.13 | -| CSI | 1.14 (alpha), 1.16 (beta) | +| Volume type | Required Kubernetes version for volume expansion | +| :------------------- | :----------------------------------------------- | +| Azure File | 1.11 | +| CSI | 1.24 | +| FlexVolume | 1.13 | +| Portworx | 1.11 | +| rbd | 1.11 | {{< /table >}} @@ -138,20 +156,20 @@ StorageClass has the field `allowVolumeExpansion` set to true. 
You can only use the volume expansion feature to grow a Volume, not to shrink it. {{< /note >}} -### Mount Options +## Mount options PersistentVolumes that are dynamically created by a StorageClass will have the mount options specified in the `mountOptions` field of the class. If the volume plugin does not support mount options but mount options are -specified, provisioning will fail. Mount options are not validated on either +specified, provisioning will fail. Mount options are **not** validated on either the class or PV. If a mount option is invalid, the PV mount fails. -### Volume Binding Mode +## Volume binding mode The `volumeBindingMode` field controls when [volume binding and dynamic provisioning](/docs/concepts/storage/persistent-volumes/#provisioning) -should occur. When unset, "Immediate" mode is used by default. +should occur. When unset, `Immediate` mode is used by default. The `Immediate` mode indicates that volume binding and dynamic provisioning occurs once the PersistentVolumeClaim is created. For storage @@ -171,23 +189,19 @@ and [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-tolera The following plugins support `WaitForFirstConsumer` with dynamic provisioning: -- [GCEPersistentDisk](#gce-pd) +- CSI volumes, provided that the specific CSI driver supports this The following plugins support `WaitForFirstConsumer` with pre-created PersistentVolume binding: -- All of the above -- [Local](#local) - -[CSI volumes](/docs/concepts/storage/volumes/#csi) are also supported with dynamic provisioning -and pre-created PVs, but you'll need to look at the documentation for a specific CSI driver -to see its supported topology keys and examples. +- CSI volumes, provided that the specific CSI driver supports this +- [`local`](#local) {{< note >}} If you choose to use `WaitForFirstConsumer`, do not use `nodeName` in the Pod spec to specify node affinity. If `nodeName` is used in this case, the scheduler will be bypassed and PVC will remain in `pending` state. -Instead, you can use node selector for hostname in this case as shown below. +Instead, you can use node selector for `kubernetes.io/hostname`: {{< /note >}} ```yaml @@ -213,7 +227,7 @@ spec: name: task-pv-storage ``` -### Allowed Topologies +## Allowed topologies When a cluster operator specifies the `WaitForFirstConsumer` volume binding mode, it is no longer necessary to restrict provisioning to specific topologies in most situations. However, @@ -228,7 +242,7 @@ apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: standard -provisioner: kubernetes.io/gce-pd +provisioner: kubernetes.io/example parameters: type: pd-standard volumeBindingMode: WaitForFirstConsumer @@ -242,11 +256,9 @@ allowedTopologies: ## Parameters -Storage Classes have parameters that describe volumes belonging to the storage -class. Different parameters may be accepted depending on the `provisioner`. For -example, the value `io1`, for the parameter `type`, and the parameter -`iopsPerGB` are specific to EBS. When a parameter is omitted, some default is -used. +StorageClasses have parameters that describe volumes belonging to the storage +class. Different parameters may be accepted depending on the `provisioner`. +When a parameter is omitted, some default is used. There can be at most 512 parameters defined for a StorageClass. The total length of the parameters object including its keys and values cannot @@ -254,97 +266,43 @@ exceed 256 KiB. 
### AWS EBS -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: slow -provisioner: kubernetes.io/aws-ebs -parameters: - type: io1 - iopsPerGB: "10" - fsType: ext4 -``` + -- `type`: `io1`, `gp2`, `sc1`, `st1`. See - [AWS docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) - for details. Default: `gp2`. -- `zone` (Deprecated): AWS zone. If neither `zone` nor `zones` is specified, volumes are - generally round-robin-ed across all active zones where Kubernetes cluster - has a node. `zone` and `zones` parameters must not be used at the same time. -- `zones` (Deprecated): A comma separated list of AWS zone(s). If neither `zone` nor `zones` - is specified, volumes are generally round-robin-ed across all active zones - where Kubernetes cluster has a node. `zone` and `zones` parameters must not - be used at the same time. -- `iopsPerGB`: only for `io1` volumes. I/O operations per second per GiB. AWS - volume plugin multiplies this with size of requested volume to compute IOPS - of the volume and caps it at 20 000 IOPS (maximum supported by AWS, see - [AWS docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html)). - A string is expected here, i.e. `"10"`, not `10`. -- `fsType`: fsType that is supported by kubernetes. Default: `"ext4"`. -- `encrypted`: denotes whether the EBS volume should be encrypted or not. - Valid values are `"true"` or `"false"`. A string is expected here, - i.e. `"true"`, not `true`. -- `kmsKeyId`: optional. The full Amazon Resource Name of the key to use when - encrypting the volume. If none is supplied but `encrypted` is true, a key is - generated by AWS. See AWS docs for valid ARN value. +Kubernetes {{< skew currentVersion >}} does not include a `awsElasticBlockStore` volume type. -{{< note >}} -`zone` and `zones` parameters are deprecated and replaced with -[allowedTopologies](#allowed-topologies) -{{< /note >}} +The AWSElasticBlockStore in-tree storage driver was deprecated in the Kubernetes v1.19 release +and then removed entirely in the v1.27 release. -### GCE PD +The Kubernetes project suggests that you use the [AWS EBS](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) +out-of-tree storage driver instead. +Here is an example StorageClass for the AWS EBS CSI driver: ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: - name: slow -provisioner: kubernetes.io/gce-pd + name: ebs-sc +provisioner: ebs.csi.aws.com +volumeBindingMode: WaitForFirstConsumer parameters: - type: pd-standard - fstype: ext4 - replication-type: none + csi.storage.k8s.io/fstype: xfs + type: io1 + iopsPerGB: "50" + encrypted: "true" +allowedTopologies: +- matchLabelExpressions: + - key: topology.ebs.csi.aws.com/zone + values: + - us-east-2c ``` -- `type`: `pd-standard` or `pd-ssd`. Default: `pd-standard` -- `zone` (Deprecated): GCE zone. If neither `zone` nor `zones` is specified, volumes are - generally round-robin-ed across all active zones where Kubernetes cluster has - a node. `zone` and `zones` parameters must not be used at the same time. -- `zones` (Deprecated): A comma separated list of GCE zone(s). If neither `zone` nor `zones` - is specified, volumes are generally round-robin-ed across all active zones - where Kubernetes cluster has a node. `zone` and `zones` parameters must not - be used at the same time. -- `fstype`: `ext4` or `xfs`. Default: `ext4`. The defined filesystem type must be supported by the host operating system. - -- `replication-type`: `none` or `regional-pd`. Default: `none`. 
- -If `replication-type` is set to `none`, a regular (zonal) PD will be provisioned. - -If `replication-type` is set to `regional-pd`, a -[Regional Persistent Disk](https://cloud.google.com/compute/docs/disks/#repds) -will be provisioned. It's highly recommended to have -`volumeBindingMode: WaitForFirstConsumer` set, in which case when you create -a Pod that consumes a PersistentVolumeClaim which uses this StorageClass, a -Regional Persistent Disk is provisioned with two zones. One zone is the same -as the zone that the Pod is scheduled in. The other zone is randomly picked -from the zones available to the cluster. Disk zones can be further constrained -using `allowedTopologies`. - -{{< note >}} -`zone` and `zones` parameters are deprecated and replaced with -[allowedTopologies](#allowed-topologies). When -[GCE CSI Migration](/docs/concepts/storage/volumes/#gce-csi-migration) is -enabled, a GCE PD volume can be provisioned in a topology that does not match -any nodes, but any pod trying to use that volume will fail to schedule. With -legacy pre-migration GCE PD, in this case an error will be produced -instead at provisioning time. GCE CSI Migration is enabled by default beginning -from the Kubernetes 1.23 release. -{{< /note >}} - ### NFS +To configure NFS storage, you can use the in-tree driver or the +[NFS CSI driver for Kubernetes](https://github.com/kubernetes-csi/csi-driver-nfs#readme) +(recommended). + ```yaml apiVersion: storage.k8s.io/v1 kind: StorageClass @@ -457,7 +415,8 @@ There are few [vSphere examples](https://github.com/kubernetes/examples/tree/master/staging/volumes/vsphere) which you try out for persistent volume management inside Kubernetes for vSphere. -### Ceph RBD +### Ceph RBD (deprecated) {#ceph-rbd} + {{< note >}} {{< feature-state state="deprecated" for_k8s_version="v1.28" >}} This internal provisioner of Ceph RBD is deprecated. Please use @@ -513,58 +472,18 @@ parameters: ### Azure Disk -#### Azure Unmanaged Disk storage class {#azure-unmanaged-disk-storage-class} + -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: slow -provisioner: kubernetes.io/azure-disk -parameters: - skuName: Standard_LRS - location: eastus - storageAccount: azure_storage_account_name -``` +Kubernetes {{< skew currentVersion >}} does not include a `azureDisk` volume type. -- `skuName`: Azure storage account Sku tier. Default is empty. -- `location`: Azure storage account location. Default is empty. -- `storageAccount`: Azure storage account name. If a storage account is provided, - it must reside in the same resource group as the cluster, and `location` is - ignored. If a storage account is not provided, a new storage account will be - created in the same resource group as the cluster. - -#### Azure Disk storage class (starting from v1.7.2) {#azure-disk-storage-class} +The `azureDisk` in-tree storage driver was deprecated in the Kubernetes v1.19 release +and then removed entirely in the v1.27 release. -```yaml -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: slow -provisioner: kubernetes.io/azure-disk -parameters: - storageaccounttype: Standard_LRS - kind: managed -``` +The Kubernetes project suggests that you use the [Azure Disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver) third party +storage driver instead. -- `storageaccounttype`: Azure storage account Sku tier. Default is empty. -- `kind`: Possible values are `shared`, `dedicated`, and `managed` (default). 
- When `kind` is `shared`, all unmanaged disks are created in a few shared - storage accounts in the same resource group as the cluster. When `kind` is - `dedicated`, a new dedicated storage account will be created for the new - unmanaged disk in the same resource group as the cluster. When `kind` is - `managed`, all managed disks are created in the same resource group as - the cluster. -- `resourceGroup`: Specify the resource group in which the Azure disk will be created. - It must be an existing resource group name. If it is unspecified, the disk will be - placed in the same resource group as the current Kubernetes cluster. - -* Premium VM can attach both Standard_LRS and Premium_LRS disks, while Standard - VM can only attach Standard_LRS disks. -* Managed VM can only attach managed disks and unmanaged VM can only attach - unmanaged disks. - -### Azure File +### Azure File (deprecated) {#azure-file} ```yaml apiVersion: storage.k8s.io/v1 @@ -578,7 +497,7 @@ parameters: storageAccount: azure_storage_account_name ``` -- `skuName`: Azure storage account Sku tier. Default is empty. +- `skuName`: Azure storage account SKU tier. Default is empty. - `location`: Azure storage account location. Default is empty. - `storageAccount`: Azure storage account name. Default is empty. If a storage account is not provided, all storage accounts associated with the resource @@ -604,7 +523,7 @@ In a multi-tenancy context, it is strongly recommended to set the value for `secretNamespace` explicitly, otherwise the storage account credentials may be read by other users. -### Portworx Volume +### Portworx volume (deprecated) {#portworx-volume} ```yaml apiVersion: storage.k8s.io/v1 @@ -649,9 +568,10 @@ provisioner: kubernetes.io/no-provisioner volumeBindingMode: WaitForFirstConsumer ``` -Local volumes do not currently support dynamic provisioning, however a StorageClass -should still be created to delay volume binding until Pod scheduling. This is -specified by the `WaitForFirstConsumer` volume binding mode. +Local volumes do not support dynamic provisioning in Kubernetes {{< skew currentVersion >}}; +however a StorageClass should still be created to delay volume binding until a Pod is actually +scheduled to the appropriate node. This is specified by the `WaitForFirstConsumer` volume +binding mode. Delaying volume binding allows the scheduler to consider all of a Pod's scheduling constraints when choosing an appropriate PersistentVolume for a diff --git a/content/en/docs/concepts/storage/volume-attributes-classes.md b/content/en/docs/concepts/storage/volume-attributes-classes.md new file mode 100644 index 0000000000000..69b4e41289237 --- /dev/null +++ b/content/en/docs/concepts/storage/volume-attributes-classes.md @@ -0,0 +1,131 @@ +--- +reviewers: +- msau42 +- xing-yang +title: Volume Attributes Classes +content_type: concept +weight: 40 +--- + + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + +This page assumes that you are familiar with [StorageClasses](/docs/concepts/storage/storage-classes/), +[volumes](/docs/concepts/storage/volumes/) and [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) +in Kubernetes. + + + +A VolumeAttributesClass provides a way for administrators to describe the mutable +"classes" of storage they offer. Different classes might map to different quality-of-service levels. +Kubernetes itself is unopinionated about what these classes represent. + +This is an alpha feature and disabled by default. 
+
+If you want to test the feature whilst it's alpha, you need to enable the `VolumeAttributesClass`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) for the kube-controller-manager and the kube-apiserver. You use the `--feature-gates` command line argument:
+
+```
+--feature-gates="...,VolumeAttributesClass=true"
+```
+
+Also, you can only use VolumeAttributesClasses with storage backed by
+{{< glossary_tooltip text="Container Storage Interface" term_id="csi" >}}, and only where the
+relevant CSI driver implements the `ModifyVolume` API.
+
+## The VolumeAttributesClass API
+
+Each VolumeAttributesClass contains the `driverName` and `parameters`, which are
+used when a PersistentVolume (PV) belonging to the class needs to be dynamically provisioned
+or modified.
+
+The name of a VolumeAttributesClass object is significant and is how users can request a particular class.
+Administrators set the name and other parameters of a class when first creating VolumeAttributesClass objects.
+While the name of a VolumeAttributesClass object in a `PersistentVolumeClaim` is mutable, the parameters in an existing class are immutable.
+
+
+```yaml
+apiVersion: storage.k8s.io/v1alpha1
+kind: VolumeAttributesClass
+metadata:
+  name: silver
+driverName: pd.csi.storage.gke.io
+parameters:
+  provisioned-iops: "3000"
+  provisioned-throughput: "50"
+```
+
+
+### Provisioner
+
+Each VolumeAttributesClass has a provisioner that determines what volume plugin is used for provisioning PVs. The field `driverName` must be specified.
+
+The feature support for VolumeAttributesClass is implemented in [kubernetes-csi/external-provisioner](https://github.com/kubernetes-csi/external-provisioner).
+
+You are not restricted to specifying the [kubernetes-csi/external-provisioner](https://github.com/kubernetes-csi/external-provisioner). You can also run and specify external provisioners,
+which are independent programs that follow a specification defined by Kubernetes.
+Authors of external provisioners have full discretion over where their code lives, how
+the provisioner is shipped, how it needs to be run, what volume plugin it uses, etc.
+
+
+### Resizer
+
+Each VolumeAttributesClass has a resizer that determines what volume plugin is used for modifying PVs. The field `driverName` must be specified.
+
+The modifying volume feature support for VolumeAttributesClass is implemented in [kubernetes-csi/external-resizer](https://github.com/kubernetes-csi/external-resizer).
+
+For example, an existing PersistentVolumeClaim uses a VolumeAttributesClass named silver:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-pv-claim
+spec:
+  …
+  volumeAttributesClassName: silver
+  …
+```
+
+A new VolumeAttributesClass gold is available in the cluster:
+
+
+```yaml
+apiVersion: storage.k8s.io/v1alpha1
+kind: VolumeAttributesClass
+metadata:
+  name: gold
+driverName: pd.csi.storage.gke.io
+parameters:
+  iops: "4000"
+  throughput: "60"
+```
+
+
+The end user can update the PVC to use the new VolumeAttributesClass gold and apply the change:
+
+
+```yaml
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-pv-claim
+spec:
+  …
+  volumeAttributesClassName: gold
+  …
+```
+
+
+## Parameters
+
+VolumeAttributeClasses have parameters that describe volumes belonging to them. Different parameters may be accepted
+depending on the provisioner or the resizer. For example, the value `4000`, for the parameter `iops`,
+and the parameter `throughput` are specific to GCE PD.
+When a parameter is omitted, the default is used at volume provisioning.
+If a user applies a PVC with a different VolumeAttributesClass that omits parameters, the default values of
+those parameters may be used, depending on the CSI driver implementation.
+Please refer to the related CSI driver documentation for more details.
+
+There can be at most 512 parameters defined for a VolumeAttributesClass.
+The total length of the parameters object including its keys and values cannot exceed 256 KiB.
\ No newline at end of file
diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md
index 59f98cc2c1c9d..08be7f60e3923 100644
--- a/content/en/docs/concepts/storage/volumes.md
+++ b/content/en/docs/concepts/storage/volumes.md
@@ -222,9 +222,8 @@ to learn more.

### emptyDir {#emptydir}

-An `emptyDir` volume is first created when a Pod is assigned to a node, and
-exists as long as that Pod is running on that node. As the name says, the
-`emptyDir` volume is initially empty. All containers in the Pod can read and write the same
+For a Pod that defines an `emptyDir` volume, the volume is created when the Pod is assigned to a node.
+As the name says, the `emptyDir` volume is initially empty. All containers in the Pod can read and write the same
files in the `emptyDir` volume, though that volume can be mounted at the same
or different paths in each container. When a Pod is removed from a node for any
reason, the data in the `emptyDir` is deleted permanently.
@@ -245,9 +244,8 @@ The `emptyDir.medium` field controls where `emptyDir` volumes are stored. By
default `emptyDir` volumes are stored on whatever medium that backs the node
such as disk, SSD, or network storage, depending on your environment. If you set
the `emptyDir.medium` field to `"Memory"`, Kubernetes mounts a tmpfs (RAM-backed
-filesystem) for you instead. While tmpfs is very fast, be aware that unlike
-disks, tmpfs is cleared on node reboot and any files you write count against
-your container's memory limit.
+filesystem) for you instead. While tmpfs is very fast, be aware that, unlike
+disks, files you write count against the memory limit of the container that wrote them.

A size limit can be specified for the default medium, which limits the capacity
@@ -297,127 +295,15 @@ beforehand so that Kubernetes hosts can access them.

See the [fibre channel example](https://github.com/kubernetes/examples/tree/master/staging/volumes/fibre_channel)
for more details.

-### gcePersistentDisk (deprecated) {#gcepersistentdisk}
+### gcePersistentDisk (removed) {#gcepersistentdisk}

-{{< feature-state for_k8s_version="v1.17" state="deprecated" >}}
+Kubernetes {{< skew currentVersion >}} does not include a `gcePersistentDisk` volume type.

-A `gcePersistentDisk` volume mounts a Google Compute Engine (GCE)
-[persistent disk](https://cloud.google.com/compute/docs/disks) (PD) into your Pod.
-Unlike `emptyDir`, which is erased when a pod is removed, the contents of a PD are
-preserved and the volume is merely unmounted. This means that a PD can be
-pre-populated with data, and that data can be shared between pods.
+The `gcePersistentDisk` in-tree storage driver was deprecated in the Kubernetes v1.17 release
+and then removed entirely in the v1.28 release.

-{{< note >}}
-You must create a PD using `gcloud` or the GCE API or UI before you can use it.
-{{< /note >}} - -There are some restrictions when using a `gcePersistentDisk`: - -* the nodes on which Pods are running must be GCE VMs -* those VMs need to be in the same GCE project and zone as the persistent disk - -One feature of GCE persistent disk is concurrent read-only access to a persistent disk. -A `gcePersistentDisk` volume permits multiple consumers to simultaneously -mount a persistent disk as read-only. This means that you can pre-populate a PD with your dataset -and then serve it in parallel from as many Pods as you need. Unfortunately, -PDs can only be mounted by a single consumer in read-write mode. Simultaneous -writers are not allowed. - -Using a GCE persistent disk with a Pod controlled by a ReplicaSet will fail unless -the PD is read-only or the replica count is 0 or 1. - -#### Creating a GCE persistent disk {#gce-create-persistent-disk} - -Before you can use a GCE persistent disk with a Pod, you need to create it. - -```shell -gcloud compute disks create --size=500GB --zone=us-central1-a my-data-disk -``` - -#### GCE persistent disk configuration example - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: test-pd -spec: - containers: - - image: registry.k8s.io/test-webserver - name: test-container - volumeMounts: - - mountPath: /test-pd - name: test-volume - volumes: - - name: test-volume - # This GCE PD must already exist. - gcePersistentDisk: - pdName: my-data-disk - fsType: ext4 -``` - -#### Regional persistent disks - -The [Regional persistent disks](https://cloud.google.com/compute/docs/disks/#repds) -feature allows the creation of persistent disks that are available in two zones -within the same region. In order to use this feature, the volume must be provisioned -as a PersistentVolume; referencing the volume directly from a pod is not supported. - -#### Manually provisioning a Regional PD PersistentVolume - -Dynamic provisioning is possible using a -[StorageClass for GCE PD](/docs/concepts/storage/storage-classes/#gce-pd). -Before creating a PersistentVolume, you must create the persistent disk: - -```shell -gcloud compute disks create --size=500GB my-data-disk - --region us-central1 - --replica-zones us-central1-a,us-central1-b -``` - -#### Regional persistent disk configuration example - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: test-volume -spec: - capacity: - storage: 400Gi - accessModes: - - ReadWriteOnce - gcePersistentDisk: - pdName: my-data-disk - fsType: ext4 - nodeAffinity: - required: - nodeSelectorTerms: - - matchExpressions: - # failure-domain.beta.kubernetes.io/zone should be used prior to 1.21 - - key: topology.kubernetes.io/zone - operator: In - values: - - us-central1-a - - us-central1-b -``` - -#### GCE CSI migration - -{{< feature-state for_k8s_version="v1.25" state="stable" >}} - -The `CSIMigration` feature for GCE PD, when enabled, redirects all plugin operations -from the existing in-tree plugin to the `pd.csi.storage.gke.io` Container -Storage Interface (CSI) Driver. In order to use this feature, the [GCE PD CSI -Driver](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) -must be installed on the cluster. - -#### GCE CSI migration complete - -{{< feature-state for_k8s_version="v1.21" state="alpha" >}} - -To disable the `gcePersistentDisk` storage plugin from being loaded by the controller manager -and the kubelet, set the `InTreePluginGCEUnregister` flag to `true`. 
+The Kubernetes project suggests that you use the [Google Compute Engine Persistent Disk CSI](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) +third party storage driver instead. ### gitRepo (deprecated) {#gitrepo} @@ -706,8 +592,8 @@ for an example of mounting NFS volumes with PersistentVolumes. A `persistentVolumeClaim` volume is used to mount a [PersistentVolume](/docs/concepts/storage/persistent-volumes/) into a Pod. PersistentVolumeClaims -are a way for users to "claim" durable storage (such as a GCE PersistentDisk or an -iSCSI volume) without knowing the details of the particular cloud environment. +are a way for users to "claim" durable storage (such as an iSCSI volume) +without knowing the details of the particular cloud environment. See the information about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) for more details. @@ -1236,24 +1122,7 @@ in `Container.volumeMounts`. Its values are: (unmounted) by the containers on termination. {{< /warning >}} -### Configuration - -Before mount propagation can work properly on some deployments (CoreOS, -RedHat/Centos, Ubuntu) mount share must be configured correctly in -Docker as shown below. - -Edit your Docker's `systemd` service file. Set `MountFlags` as follows: -```shell -MountFlags=shared -``` - -Or, remove `MountFlags=slave` if present. Then restart the Docker daemon: - -```shell -sudo systemctl daemon-reload -sudo systemctl restart docker -``` ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/concepts/storage/windows-storage.md b/content/en/docs/concepts/storage/windows-storage.md index 1aa3941a1f208..b55ec7335ecdc 100644 --- a/content/en/docs/concepts/storage/windows-storage.md +++ b/content/en/docs/concepts/storage/windows-storage.md @@ -41,7 +41,7 @@ As a result, the following storage functionality is not supported on Windows nod * Block device mapping * Memory as the storage medium (for example, `emptyDir.medium` set to `Memory`) * File system features like uid/gid; per-user Linux filesystem permissions -* Setting [secret permissions with DefaultMode](/docs/concepts/configuration/secret/#secret-files-permissions) (due to UID/GID dependency) +* Setting [secret permissions with DefaultMode](/docs/tasks/inject-data-application/distribute-credentials-secure/#set-posix-permissions-for-secret-keys) (due to UID/GID dependency) * NFS based storage/volume support * Expanding the mounted volume (resizefs) @@ -66,5 +66,4 @@ The following broad classes of Kubernetes volume plugins are supported on Window The following in-tree plugins support persistent storage on Windows nodes: * [`azureFile`](/docs/concepts/storage/volumes/#azurefile) -* [`gcePersistentDisk`](/docs/concepts/storage/volumes/#gcepersistentdisk) * [`vsphereVolume`](/docs/concepts/storage/volumes/#vspherevolume) diff --git a/content/en/docs/concepts/windows/intro.md b/content/en/docs/concepts/windows/intro.md index 01f4e304de2ff..3e22aa7d624f0 100644 --- a/content/en/docs/concepts/windows/intro.md +++ b/content/en/docs/concepts/windows/intro.md @@ -37,7 +37,7 @@ you can deploy worker nodes running either Windows or Linux. Windows {{< glossary_tooltip text="nodes" term_id="node" >}} are [supported](#windows-os-version-support) provided that the operating system is -Windows Server 2019. +Windows Server 2019 or Windows Server 2022. This document uses the term *Windows containers* to mean Windows containers with process isolation. 
Kubernetes does not support running Windows containers with
@@ -320,8 +320,7 @@ The following container runtimes work with Windows:
You can use {{< glossary_tooltip term_id="containerd" text="ContainerD" >}}
1.4.0+ as the container runtime for Kubernetes nodes that run Windows.

-Learn how to [install ContainerD on a Windows node](/docs/setup/production-environment/container-runtimes/#install-containerd).
-
+Learn how to [install ContainerD on a Windows node](/docs/setup/production-environment/container-runtimes/#containerd).
{{< note >}}
There is a [known limitation](/docs/tasks/configure-pod-container/configure-gmsa/#gmsa-limitations)
when using GMSA with containerd to access Windows network shares, which requires a
diff --git a/content/en/docs/concepts/workloads/controllers/cron-jobs.md b/content/en/docs/concepts/workloads/controllers/cron-jobs.md
index 33f914716467d..d2ce56bece06e 100644
--- a/content/en/docs/concepts/workloads/controllers/cron-jobs.md
+++ b/content/en/docs/concepts/workloads/controllers/cron-jobs.md
@@ -181,15 +181,14 @@ A time zone database from the Go standard library is included in the binaries an

### Unsupported TimeZone specification

-The implementation of the CronJob API in Kubernetes {{< skew currentVersion >}} lets you set
-the `.spec.schedule` field to include a timezone; for example: `CRON_TZ=UTC * * * * *`
-or `TZ=UTC * * * * *`.
-
-Specifying a timezone that way is **not officially supported** (and never has been).
-
-If you try to set a schedule that includes `TZ` or `CRON_TZ` timezone specification,
-Kubernetes reports a [warning](/blog/2020/09/03/warnings/) to the client.
-Future versions of Kubernetes will prevent setting the unofficial timezone mechanism entirely.
+Specifying a timezone using `CRON_TZ` or `TZ` variables inside `.spec.schedule`
+is **not officially supported** (and never has been).
+
+Starting with Kubernetes 1.29, if you try to set a schedule that includes a `TZ` or `CRON_TZ`
+timezone specification, Kubernetes will fail to create the resource with a validation
+error.
+Updates to CronJobs already using `TZ` or `CRON_TZ` will continue to report a
+[warning](/blog/2020/09/03/warnings/) to the client.

### Modifying a CronJob

diff --git a/content/en/docs/concepts/workloads/controllers/daemonset.md b/content/en/docs/concepts/workloads/controllers/daemonset.md
index c033702068ca5..bd7cca541b339 100644
--- a/content/en/docs/concepts/workloads/controllers/daemonset.md
+++ b/content/en/docs/concepts/workloads/controllers/daemonset.md
@@ -108,8 +108,8 @@ If you do not specify either, then the DaemonSet controller will create Pods on

## How Daemon Pods are scheduled

-A DaemonSet ensures that all eligible nodes run a copy of a Pod. The DaemonSet
-controller creates a Pod for each eligible node and adds the
+A DaemonSet can be used to ensure that all eligible nodes run a copy of a Pod.
+The DaemonSet controller creates a Pod for each eligible node and adds the
`spec.affinity.nodeAffinity` field of the Pod to match the target host. After
the Pod is created, the default scheduler typically takes over and then binds
the Pod to the target host by setting the `.spec.nodeName` field. If the new
@@ -118,6 +118,13 @@ the existing Pods based on the
[priority](/docs/concepts/scheduling-eviction/pod-priority-preemption/#pod-priority)
of the new Pod.
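+For illustration, the node affinity that the DaemonSet controller adds to each Pod
+looks like the following sketch; `target-host-name` is a placeholder for the name of
+the node that the Pod is bound to:
+
+```yaml
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+      - matchFields:
+        # The controller fills in the name of the target node.
+        - key: metadata.name
+          operator: In
+          values:
+          - target-host-name
+```
+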
+{{< note >}} +If it's important that the DaemonSet pod run on each node, it's often desirable +to set the `.spec.template.spec.priorityClassName` of the DaemonSet to a +[PriorityClass](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) +with a higher priority to ensure that this eviction occurs. +{{< /note >}} + The user can specify a different scheduler for the Pods of the DaemonSet, by setting the `.spec.template.spec.schedulerName` field of the DaemonSet. diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index 17b8b9f221b25..9b1e5f065bd06 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -1197,6 +1197,105 @@ rolling update starts, such that the total number of old and new Pods does not e Pods. Once old Pods have been killed, the new ReplicaSet can be scaled up further, ensuring that the total number of Pods running at any time during the update is at most 130% of desired Pods. +Here are some Rolling Update Deployment examples that use the `maxUnavailable` and `maxSurge`: + +{{< tabs name="tab_with_md" >}} +{{% tab name="Max Unavailable" %}} + + ```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + ``` + +{{% /tab %}} +{{% tab name="Max Surge" %}} + + ```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + ``` + +{{% /tab %}} +{{% tab name="Hybrid" %}} + + ```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment + labels: + app: nginx +spec: + replicas: 3 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + ``` + +{{% /tab %}} +{{< /tabs >}} + ### Progress Deadline Seconds `.spec.progressDeadlineSeconds` is an optional field that specifies the number of seconds you want diff --git a/content/en/docs/concepts/workloads/controllers/job.md b/content/en/docs/concepts/workloads/controllers/job.md index 09914197f09a3..cc51f6f84a7a0 100644 --- a/content/en/docs/concepts/workloads/controllers/job.md +++ b/content/en/docs/concepts/workloads/controllers/job.md @@ -382,7 +382,7 @@ from failed Jobs is not lost inadvertently. ### Backoff limit per index {#backoff-limit-per-index} -{{< feature-state for_k8s_version="v1.28" state="alpha" >}} +{{< feature-state for_k8s_version="v1.29" state="beta" >}} {{< note >}} You can only configure the backoff limit per index for an [Indexed](#completion-mode) Job, if you @@ -938,6 +938,11 @@ creates Pods with the finalizer `batch.kubernetes.io/job-tracking`. 
The controller removes the finalizer only after the Pod has been accounted for
in the Job status, allowing the Pod to be removed by other controllers or users.

+{{< note >}}
+See [My pod stays terminating](/docs/tasks/debug/debug-application/debug-pods/) if you
+observe that pods from a Job are stuck with the tracking finalizer.
+{{< /note >}}
+
### Elastic Indexed Jobs

{{< feature-state for_k8s_version="v1.27" state="beta" >}}
@@ -953,11 +958,12 @@ scaling an indexed Job, such as MPI, Horovord, Ray, and PyTorch training jobs.

### Delayed creation of replacement pods {#pod-replacement-policy}

-{{< feature-state for_k8s_version="v1.28" state="alpha" >}}
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}

{{< note >}}
You can only set `podReplacementPolicy` on Jobs if you enable the `JobPodReplacementPolicy`
-[feature gate](/docs/reference/command-line-tools-reference/feature-gates/).
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+(enabled by default).
{{< /note >}}

By default, the Job controller recreates Pods as soon they either fail or are terminating (have a deletion timestamp).
diff --git a/content/en/docs/concepts/workloads/controllers/statefulset.md b/content/en/docs/concepts/workloads/controllers/statefulset.md
index 87415e8df2fba..b0176108cea75 100644
--- a/content/en/docs/concepts/workloads/controllers/statefulset.md
+++ b/content/en/docs/concepts/workloads/controllers/statefulset.md
@@ -116,6 +116,12 @@ spec:
        storage: 1Gi
```

+{{< note >}}
+This example uses the `ReadWriteOnce` access mode, for simplicity. For
+production use, the Kubernetes project recommends using the `ReadWriteOncePod`
+access mode instead.
+{{< /note >}}
+
In the above example:

* A Headless Service, named `nginx`, is used to control the network domain.
@@ -225,7 +231,7 @@ Cluster Domain will be set to `cluster.local` unless

For each VolumeClaimTemplate entry defined in a StatefulSet, each Pod receives one
PersistentVolumeClaim. In the nginx example above, each Pod receives a single PersistentVolume
-with a StorageClass of `my-storage-class` and 1 Gib of provisioned storage. If no StorageClass
+with a StorageClass of `my-storage-class` and 1 GiB of provisioned storage. If no StorageClass
is specified, then the default StorageClass will be used. When a Pod is (re)scheduled
onto a node, its `volumeMounts` mount the PersistentVolumes associated with its
PersistentVolume Claims. Note that, the PersistentVolumes associated with the
diff --git a/content/en/docs/concepts/workloads/pods/_index.md b/content/en/docs/concepts/workloads/pods/_index.md
index febf062c2ebdc..1132c38793c5a 100644
--- a/content/en/docs/concepts/workloads/pods/_index.md
+++ b/content/en/docs/concepts/workloads/pods/_index.md
@@ -111,9 +111,9 @@ Some Pods have {{< glossary_tooltip text="init containers" term_id="init-contain
as well as {{< glossary_tooltip text="app containers" term_id="app-container" >}}.
By default, init containers run and complete before the app containers are started.

-{{< feature-state for_k8s_version="v1.28" state="alpha" >}}
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}

-Enabling the `SidecarContainers` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
+Enabled by default, the `SidecarContainers` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
allows you to specify `restartPolicy: Always` for init containers.
Setting the `Always` restart policy ensures that the init containers where you set it are
kept running during the entire lifetime of the Pod.
diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md
index 2533c286d7907..480d4cee80b9e 100644
--- a/content/en/docs/concepts/workloads/pods/init-containers.md
+++ b/content/en/docs/concepts/workloads/pods/init-containers.md
@@ -14,6 +14,9 @@ Init containers can contain utilities or setup scripts not present in an app ima
You can specify init containers in the Pod specification alongside the `containers`
array (which describes app containers).

+In Kubernetes, a [sidecar container](/docs/concepts/workloads/pods/sidecar-containers/) is a container that
+starts before the main application container and _continues to run_. This document is about init containers:
+containers that run to completion during Pod initialization.

@@ -48,14 +51,33 @@ including resource limits, [volumes](/docs/concepts/storage/volumes/), and secur
resource requests and limits for an init container are handled differently,
as documented in [Resource sharing within containers](#resource-sharing-within-containers).

-Also, init containers do not support `lifecycle`, `livenessProbe`, `readinessProbe`, or
-`startupProbe` because they must run to completion before the Pod can be ready.
+Regular init containers (in other words: excluding sidecar containers) do not support the
+`lifecycle`, `livenessProbe`, `readinessProbe`, or `startupProbe` fields. Init containers
+must run to completion before the Pod can be ready; sidecar containers continue running
+during a Pod's lifetime, and _do_ support some probes. See [sidecar container](/docs/concepts/workloads/pods/sidecar-containers/)
+for further details about sidecar containers.

If you specify multiple init containers for a Pod, kubelet runs each init
container sequentially. Each init container must succeed before the next can run.
When all of the init containers have run to completion, kubelet initializes
the application containers for the Pod and runs them as usual.

+### Differences from sidecar containers
+
+Init containers run and complete their tasks before the main application container starts.
+Unlike [sidecar containers](/docs/concepts/workloads/pods/sidecar-containers),
+init containers are not continuously running alongside the main containers.
+
+Init containers run to completion sequentially, and the main container does not start
+until all the init containers have successfully completed.
+
+Init containers do not support `lifecycle`, `livenessProbe`, `readinessProbe`, or
+`startupProbe`, whereas sidecar containers support all these [probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe) to control their lifecycle.
+
+Init containers share the same resources (CPU, memory, network) with the main application
+containers but do not interact directly with them. They can, however, use shared volumes
+for data exchange.
+
## Using init containers

Because init containers have separate images from app containers, they
@@ -289,51 +311,9 @@ The Pod which is already running correctly would be killed by `activeDeadlineSec
The name of each app and init container in a Pod must be unique; a
validation error is thrown for any container sharing a name with another.
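+For illustration, here is a minimal sketch of a Pod whose two init containers run to
+completion, in order, before the app container starts; the image, Service, and file
+names are hypothetical:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: init-demo
+spec:
+  initContainers:
+  - name: wait-for-service
+    image: busybox:1.36
+    # Blocks until the (hypothetical) Service name resolves.
+    command: ['sh', '-c', 'until nslookup myservice; do sleep 2; done']
+  - name: prepare-data
+    image: busybox:1.36
+    # Runs only after wait-for-service has succeeded.
+    command: ['sh', '-c', 'echo ready > /work/status']
+    volumeMounts:
+    - name: workdir
+      mountPath: /work
+  containers:
+  - name: app
+    image: busybox:1.36
+    # Starts only after every init container has completed.
+    command: ['sh', '-c', 'cat /work/status && sleep 3600']
+    volumeMounts:
+    - name: workdir
+      mountPath: /work
+  volumes:
+  - name: workdir
+    emptyDir: {}
+```
+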
-#### API for sidecar containers
-
-{{< feature-state for_k8s_version="v1.28" state="alpha" >}}
-
-Starting with Kubernetes 1.28 in alpha, a feature gate named `SidecarContainers`
-allows you to specify a `restartPolicy` for init containers which is independent of
-the Pod and other init containers. Container [probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe)
-can also be added to control their lifecycle.
-
-If an init container is created with its `restartPolicy` set to `Always`, it will
-start and remain running during the entire life of the Pod, which is useful for
-running supporting services separated from the main application containers.
-
-If a `readinessProbe` is specified for this init container, its result will be used
-to determine the `ready` state of the Pod.
-
-Since these containers are defined as init containers, they benefit from the same
-ordering and sequential guarantees as other init containers, allowing them to
-be mixed with other init containers into complex Pod initialization flows.
-
-Compared to regular init containers, sidecar-style init containers continue to
-run and the next init container can begin starting once the kubelet has set
-the `started` container status for the sidecar-style init container to true.
-That status either becomes true because there is a process running in the
-container and no startup probe defined, or
-as a result of its `startupProbe` succeeding.
-
-This feature can be used to implement the sidecar container pattern in a more
-robust way, as the kubelet always restarts a sidecar container if it fails.
-
-Here's an example of a Deployment with two containers, one of which is a sidecar:
-
-{{% code_sample language="yaml" file="application/deployment-sidecar.yaml" %}}
-
-This feature is also useful for running Jobs with sidecars, as the sidecar
-container will not prevent the Job from completing after the main container
-has finished.
-
-Here's an example of a Job with two containers, one of which is a sidecar:
-
-{{% code_sample language="yaml" file="application/job/job-sidecar.yaml" %}}
-
-#### Resource sharing within containers
+### Resource sharing within containers

-Given the ordering and execution for init containers, the following rules
+Given the order of execution for init, sidecar and app containers, the following rules
for resource usage apply:

* The highest of any particular resource request or limit defined on all init
@@ -354,6 +334,10 @@ limit.

Pod level control groups (cgroups) are based on the effective Pod request and
limit, the same as the scheduler.

+{{< comment >}}
+This section is also present in the [sidecar containers](/docs/concepts/workloads/pods/sidecar-containers/) page.
+If you're editing this section, change both places.
+{{< /comment >}}

### Pod restart reasons

@@ -373,7 +357,9 @@ Kubernetes, consult the documentation for the version you are using.

## {{% heading "whatsnext" %}}

-* Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container)
-* Learn how to [debug init containers](/docs/tasks/debug/debug-application/debug-init-containers/)
-* Read about an overview of [kubelet](/docs/reference/command-line-tools-reference/kubelet/) and [kubectl](/docs/reference/kubectl/)
-* Learn about the [types of probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe): liveness, readiness, startup probe.
+Learn more about the following:
+* [Creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container).
+* [Debug init containers](/docs/tasks/debug/debug-application/debug-init-containers/).
+* Overview of [kubelet](/docs/reference/command-line-tools-reference/kubelet/) and [kubectl](/docs/reference/kubectl/).
+* [Types of probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe): liveness, readiness, startup probe.
+* [Sidecar containers](/docs/concepts/workloads/pods/sidecar-containers).
diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md
index 1f73ccbe3ff99..ff73d7bc2310a 100644
--- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md
+++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md
@@ -150,11 +150,22 @@ the `Terminated` state.

The `spec` of a Pod has a `restartPolicy` field with possible values Always, OnFailure,
and Never. The default value is Always.

-The `restartPolicy` applies to all containers in the Pod. `restartPolicy` only
-refers to restarts of the containers by the kubelet on the same node. After containers
-in a Pod exit, the kubelet restarts them with an exponential back-off delay (10s, 20s,
-40s, …), that is capped at five minutes. Once a container has executed for 10 minutes
-without any problems, the kubelet resets the restart backoff timer for that container.
+The `restartPolicy` for a Pod applies to {{< glossary_tooltip text="app containers" term_id="app-container" >}}
+in the Pod and to regular [init containers](/docs/concepts/workloads/pods/init-containers/).
+[Sidecar containers](/docs/concepts/workloads/pods/sidecar-containers/)
+ignore the Pod-level `restartPolicy` field: in Kubernetes, a sidecar is defined as an
+entry inside `initContainers` that has its container-level `restartPolicy` set to `Always`.
+For init containers that exit with an error, the kubelet restarts the init container if
+the Pod-level `restartPolicy` is either `OnFailure` or `Always`.
+
+When the kubelet handles container restarts according to the configured restart
+policy, that only applies to restarts that create replacement containers inside the
+same Pod, running on the same node. After containers in a Pod exit, the kubelet
+restarts them with an exponential back-off delay (10s, 20s, 40s, …), which is capped at
+five minutes. Once a container has executed for 10 minutes without any problems, the
+kubelet resets the restart backoff timer for that container.
+[Sidecar containers and Pod lifecycle](/docs/concepts/workloads/pods/sidecar-containers/#sidecar-containers-and-pod-lifecycle)
+explains the behaviour of init containers when you specify the `restartPolicy` field on them.

## Pod conditions

A Pod has a PodStatus, which has an array of
[PodConditions](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podcondition-v1-core)
through which the Pod has or has not passed. Kubelet manages the following
PodConditions:

* `PodScheduled`: the Pod has been scheduled to a node.
-* `PodReadyToStartContainers`: (alpha feature; must be [enabled explicitly](#pod-has-network)) the
+* `PodReadyToStartContainers`: (beta feature; enabled by [default](#pod-has-network)) the
  Pod sandbox has been successfully created and networking configured.
* `ContainersReady`: all containers in the Pod are ready.
* `Initialized`: all [init containers](/docs/concepts/workloads/pods/init-containers/)
@@ -242,19 +253,21 @@ When a Pod's containers are Ready but at least one custom condition is missing o

### Pod network readiness {#pod-has-network}

-{{< feature-state for_k8s_version="v1.25" state="alpha" >}}
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}

{{< note >}}
-This condition was renamed from PodHasNetwork to PodReadyToStartContainers.
+During its early development, this condition was named `PodHasNetwork`.
{{< /note >}}

-After a Pod gets scheduled on a node, it needs to be admitted by the Kubelet and
-have any volumes mounted. Once these phases are complete, the Kubelet works with
+After a Pod gets scheduled on a node, it needs to be admitted by the kubelet and
+to have any required storage volumes mounted. Once these phases are complete,
+the kubelet works with
a container runtime (using {{< glossary_tooltip term_id="cri" >}}) to set up a
runtime sandbox and configure networking for the Pod. If the
-`PodReadyToStartContainersCondition` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled,
-Kubelet reports whether a pod has reached this initialization milestone through
-the `PodReadyToStartContainers` condition in the `status.conditions` field of a Pod.
+`PodReadyToStartContainersCondition`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled
+(it is enabled by default for Kubernetes {{< skew currentVersion >}}), the
+`PodReadyToStartContainers` condition will be added to the `status.conditions` field of a Pod.

The `PodReadyToStartContainers` condition is set to `False` by the Kubelet when it detects a
Pod does not have a runtime sandbox with networking configured. This occurs in
@@ -504,6 +517,22 @@ termination grace period _begins_.
The behavior above is described when the feature gate
`EndpointSliceTerminatingCondition` is enabled.
{{< /note >}}

+{{< note >}}
+Beginning with Kubernetes 1.29, if your Pod includes one or more sidecar containers
+(init containers with an Always restart policy), the kubelet will delay sending
+the TERM signal to these sidecar containers until the last main container has fully terminated.
+The sidecar containers will be terminated in the reverse order they are defined in the Pod spec.
+This ensures that sidecar containers continue serving the other containers in the Pod until they are no longer needed.
+
+Note that slow termination of a main container will also delay the termination of the sidecar containers.
+If the grace period expires before the termination process is complete, the Pod may enter emergency termination.
+In this case, all remaining containers in the Pod will be terminated simultaneously with a short grace period.
+
+Similarly, if the Pod has a preStop hook that exceeds the termination grace period, emergency termination may occur.
+In general, if you have used preStop hooks to control the termination order without sidecar containers, you can now
+remove them and allow the kubelet to manage sidecar termination automatically.
+{{< /note >}}
+
1. When the grace period expires, the kubelet triggers forcible shutdown. The container
   runtime sends `SIGKILL` to any processes still running in any container in the Pod.
   The kubelet also cleans up a hidden `pause` container if that container runtime uses one.
@@ -582,6 +611,8 @@ for more details.

* Learn more about [container lifecycle hooks](/docs/concepts/containers/container-lifecycle-hooks/).

+* Learn more about [sidecar containers](/docs/concepts/workloads/pods/sidecar-containers/).
+
* For detailed information about Pod and container status in the API, see
  the API reference documentation covering
- [`status`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodStatus) for Pod.
+ [`status`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodStatus) for Pod.
\ No newline at end of file
diff --git a/content/en/docs/concepts/workloads/pods/sidecar-containers.md b/content/en/docs/concepts/workloads/pods/sidecar-containers.md
new file mode 100644
index 0000000000000..23df85aeb7a32
--- /dev/null
+++ b/content/en/docs/concepts/workloads/pods/sidecar-containers.md
@@ -0,0 +1,123 @@
+---
+title: Sidecar Containers
+content_type: concept
+weight: 50
+---

+{{< feature-state for_k8s_version="v1.29" state="beta" >}}
+
+Sidecar containers are secondary containers that run along with the main
+application container within the same {{< glossary_tooltip text="Pod" term_id="pod" >}}.
+These containers are used to enhance or to extend the functionality of the main application
+container by providing additional services or functionality, such as logging, monitoring,
+security, or data synchronization, without directly altering the primary application code.
+
+
+
+## Enabling sidecar containers
+
+Enabled by default with Kubernetes 1.29, a
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) named
+`SidecarContainers` allows you to specify a `restartPolicy` for containers listed in a
+Pod's `initContainers` field. These restartable _sidecar_ containers are independent of
+other [init containers](/docs/concepts/workloads/pods/init-containers/) and of the main
+application container within the same Pod. They can be started, stopped, or restarted
+without affecting the main application container and other init containers.
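+
+For illustration, here is a minimal sketch of a Pod with one sidecar; the names are
+hypothetical, and the key detail is the container-level `restartPolicy: Always` inside
+`initContainers`:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-with-sidecar
+spec:
+  initContainers:
+  - name: logshipper
+    image: alpine:3.19
+    # The container-level restartPolicy marks this init container as a sidecar.
+    restartPolicy: Always
+    command: ['sh', '-c', 'tail -F /opt/logs.txt']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  containers:
+  - name: myapp
+    image: alpine:3.19
+    command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done']
+    volumeMounts:
+    - name: data
+      mountPath: /opt
+  volumes:
+  - name: data
+    emptyDir: {}
+```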
+
+## Sidecar containers and Pod lifecycle
+
+If an init container is created with its `restartPolicy` set to `Always`, it will
+start and remain running during the entire life of the Pod. This can be helpful for
+running supporting services separated from the main application containers.
+
+If a `readinessProbe` is specified for this init container, its result will be used
+to determine the `ready` state of the Pod.
+
+Since these containers are defined as init containers, they benefit from the same
+ordering and sequential guarantees as other init containers, allowing them to
+be mixed with other init containers into complex Pod initialization flows.
+
+Compared to regular init containers, sidecars defined within `initContainers` continue to
+run after they have started. This is important when there is more than one entry inside
+`.spec.initContainers` for a Pod. After a sidecar-style init container is running (the kubelet
+has set the `started` status for that init container to true), the kubelet then starts the
+next init container from the ordered `.spec.initContainers` list.
+That status either becomes true because there is a process running in the
+container and no startup probe defined, or as a result of its `startupProbe` succeeding.
+
+Here's an example of a Deployment with two containers, one of which is a sidecar:
+
+{{% code_sample language="yaml" file="application/deployment-sidecar.yaml" %}}
+
+This feature is also useful for running Jobs with sidecars, as the sidecar
+container will not prevent the Job from completing after the main container
+has finished.
+
+Here's an example of a Job with two containers, one of which is a sidecar:
+
+{{% code_sample language="yaml" file="application/job/job-sidecar.yaml" %}}
+
+## Differences from regular containers
+
+Sidecar containers run alongside regular containers in the same pod. However, they do not
+execute the primary application logic; instead, they provide supporting functionality to
+the main application.
+
+Sidecar containers have their own independent lifecycles. They can be started, stopped,
+and restarted independently of regular containers. This means you can update, scale, or
+maintain sidecar containers without affecting the primary application.
+
+Sidecar containers share the same network and storage namespaces with the primary
+container. This co-location allows them to interact closely and share resources.
+
+## Differences from init containers
+
+Sidecar containers work alongside the main container, extending its functionality and
+providing additional services.
+
+Sidecar containers run concurrently with the main application container. They are active
+throughout the lifecycle of the pod and can be started and stopped independently of the
+main container. Unlike [init containers](/docs/concepts/workloads/pods/init-containers/),
+sidecar containers support [probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe) to control their lifecycle.
+
+These containers can interact directly with the main application containers, sharing
+the same network namespace, filesystem, and environment variables. They work closely
+together to provide additional functionality.
+
+## Resource sharing within containers
+
+{{< comment >}}
+This section is also present in the [init containers](/docs/concepts/workloads/pods/init-containers/) page.
+If you're editing this section, change both places.
+{{< /comment >}}
+
+Given the order of execution for init, sidecar and app containers, the following rules
+for resource usage apply:
+
+* The highest of any particular resource request or limit defined on all init
+  containers is the *effective init request/limit*. If any resource has no
+  resource limit specified, this is considered the highest limit.
+* The Pod's *effective request/limit* for a resource is the sum of
+[pod overhead](/docs/concepts/scheduling-eviction/pod-overhead/) and the higher of:
+  * the sum of all non-init containers (app and sidecar containers) request/limit for a
+    resource
+  * the effective init request/limit for a resource
+* Scheduling is done based on effective requests/limits, which means
+  init containers can reserve resources for initialization that are not used
+  during the life of the Pod.
+* The Pod's *effective QoS (quality of service) tier* is the
+  QoS tier for all init, sidecar and app containers alike.
+
+Quota and limits are applied based on the effective Pod request and
+limit.
+
+Pod level control groups (cgroups) are based on the effective Pod request and
+limit, the same as the scheduler.
+
+## {{% heading "whatsnext" %}}
+
+* Read a blog post on [native sidecar containers](/blog/2023/08/25/native-sidecar-containers/).
+* Read about [creating a Pod that has an init container](/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container).
+* Learn about the [types of probes](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe): liveness, readiness, startup probe.
+* Learn about [pod overhead](/docs/concepts/scheduling-eviction/pod-overhead/).
diff --git a/content/en/docs/concepts/workloads/pods/user-namespaces.md b/content/en/docs/concepts/workloads/pods/user-namespaces.md
index fa51a47d305e8..410b3c90524d2 100644
--- a/content/en/docs/concepts/workloads/pods/user-namespaces.md
+++ b/content/en/docs/concepts/workloads/pods/user-namespaces.md
@@ -152,6 +152,35 @@ host's file owner/group.

[CVE-2021-25741]: https://github.com/kubernetes/kubernetes/issues/104980

+## Integration with Pod security admission checks
+
+{{< feature-state state="alpha" for_k8s_version="v1.29" >}}
+
+For Linux Pods that enable user namespaces, Kubernetes relaxes the application of
+[Pod Security Standards](/docs/concepts/security/pod-security-standards) in a controlled way.
+This behavior can be controlled by the [feature
+gate](/docs/reference/command-line-tools-reference/feature-gates/)
+`UserNamespacesPodSecurityStandards`, which allows an early opt-in for end
+users. Admins have to ensure that user namespaces are enabled on all nodes
+within the cluster if using the feature gate.
+
+If you enable the associated feature gate and create a Pod that uses user
+namespaces, the following fields won't be constrained even in contexts that enforce the
+_Baseline_ or _Restricted_ pod security standard. This behavior does not
+present a security concern because `root` inside a Pod with user namespaces
+actually refers to the user inside the container, which is never mapped to a
+privileged user on the host.
Here's the list of fields that are **not** checked for Pods in those
+circumstances:
+
+- `spec.securityContext.runAsNonRoot`
+- `spec.containers[*].securityContext.runAsNonRoot`
+- `spec.initContainers[*].securityContext.runAsNonRoot`
+- `spec.ephemeralContainers[*].securityContext.runAsNonRoot`
+- `spec.securityContext.runAsUser`
+- `spec.containers[*].securityContext.runAsUser`
+- `spec.initContainers[*].securityContext.runAsUser`
+- `spec.ephemeralContainers[*].securityContext.runAsUser`
+
## Limitations

When using a user namespace for the pod, it is disallowed to use other host
diff --git a/content/en/docs/contribute/_index.md b/content/en/docs/contribute/_index.md
index beb64464043e8..8bd6cfa17dac5 100644
--- a/content/en/docs/contribute/_index.md
+++ b/content/en/docs/contribute/_index.md
@@ -47,8 +47,8 @@ pull request (PR) to the
[`kubernetes/website` GitHub repository](https://github.com/kubernetes/website).
You need to be comfortable with
[git](https://git-scm.com/) and
-[GitHub](https://lab.github.com/)
-to work effectively in the Kubernetes community.
+[GitHub](https://skills.github.com/)
+to work effectively in the Kubernetes community.

To get involved with documentation:

@@ -76,7 +76,7 @@ end
subgraph second[Review]
direction TB
T[ ] -.-
-D[Look over the
    K8s/website
    repository] --- E[Check out the
    Hugo static site
    generator] + D[Look over the
    kubernetes/website
    repository] --- E[Check out the
    Hugo static site
    generator] E --- F[Understand basic
    GitHub commands] F --- G[Review open PR
    and change review
    processes] end @@ -123,7 +123,7 @@ flowchart LR direction TB S[ ] -.- G[Review PRs from other
    K8s members] --> - A[Check K8s/website
    issues list for
    good first PRs] --> B[Open a PR!!] + A[Check kubernetes/website
    issues list for
    good first PRs] --> B[Open a PR!!] end subgraph first[Suggested Prep] direction TB diff --git a/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md b/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md index 8e23cb69ed142..af0d97f7099da 100644 --- a/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md +++ b/content/en/docs/contribute/generate-ref-docs/contribute-upstream.md @@ -24,7 +24,7 @@ API or the `kube-*` components from the upstream code, see the following instruc - You need to have these tools installed: - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - - [Golang](https://golang.org/doc/install) version 1.13+ + - [Golang](https://go.dev/doc/install) version 1.13+ - [Docker](https://docs.docker.com/engine/installation/) - [etcd](https://github.com/coreos/etcd/) - [make](https://www.gnu.org/software/make/) diff --git a/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md b/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md index c7199208130be..e33239a5da429 100644 --- a/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md +++ b/content/en/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md @@ -5,9 +5,9 @@ - You need to have these tools installed: - - [Python](https://www.python.org/downloads/) v3.7.x + - [Python](https://www.python.org/downloads/) v3.7.x+ - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - - [Golang](https://golang.org/doc/install) version 1.13+ + - [Golang](https://go.dev/dl/) version 1.13+ - [Pip](https://pypi.org/project/pip/) used to install PyYAML - [PyYAML](https://pyyaml.org/) v5.1.2 - [make](https://www.gnu.org/software/make/) @@ -19,4 +19,3 @@ - You need to know how to create a pull request to a GitHub repository. This involves creating your own fork of the repository. For more information, see [Work from a local clone](/docs/contribute/new-content/open-a-pr/#fork-the-repo). - diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index ab00b2cd88134..14ee682f5ade6 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -8,7 +8,7 @@ weight: 50 card: name: contribute weight: 50 - title: Translating the docs + title: Localizing the docs --- @@ -23,9 +23,9 @@ the docs for a different language. You can help add or improve the content of an existing localization. In [Kubernetes Slack](https://slack.k8s.io/), you can find a channel for each -localization. There is also a general [SIG Docs Localizations Slack -channel](https://kubernetes.slack.com/messages/sig-docs-localizations) where you -can say hello. +localization. There is also a general +[SIG Docs Localizations Slack channel](https://kubernetes.slack.com/messages/sig-docs-localizations) +where you can say hello. {{< note >}} For extra details on how to contribute to a specific localization, @@ -34,9 +34,9 @@ look for a localized version of this page. ### Find your two-letter language code -First, consult the [ISO 639-1 -standard](https://www.loc.gov/standards/iso639-2/php/code_list.php) to find your -localization's two-letter language code. For example, the two-letter code for +First, consult the +[ISO 639-1 standard](https://www.loc.gov/standards/iso639-2/php/code_list.php) +to find your localization's two-letter language code. For example, the two-letter code for Korean is `ko`. 
Some languages use a lowercase version of the country code as defined by the @@ -45,8 +45,7 @@ language code is `pt-br`. ### Fork and clone the repo -First, [create your own -fork](/docs/contribute/new-content/open-a-pr/#fork-the-repo) of the +First, [create your own fork](/docs/contribute/new-content/open-a-pr/#fork-the-repo) of the [kubernetes/website](https://github.com/kubernetes/website) repository. Then, clone your fork and `cd` into it: @@ -62,7 +61,7 @@ localization you want to help out with is inside `content/`. ### Suggest changes Create or update your chosen localized page based on the English original. See -[translating content](#translating-content) for more details. +[localize content](#localize-content) for more details. If you notice a technical inaccuracy or other problem with the upstream (English) documentation, you should fix the upstream documentation first and @@ -120,8 +119,8 @@ localization teams to collaborate on defining and documenting the processes for creating localized contribution guides. In addition, the SIG Docs localization subgroup looks for opportunities to create and share common tools across localization teams and identify new requirements for the SIG Docs Leadership -team. If you have questions about this meeting, please inquire on the [SIG Docs -Localizations Slack channel](https://kubernetes.slack.com/messages/sig-docs-localizations). +team. If you have questions about this meeting, please inquire on the +[SIG Docs Localizations Slack channel](https://kubernetes.slack.com/messages/sig-docs-localizations). You can also create a Slack channel for your localization in the `kubernetes/community` repository. For an example of adding a Slack channel, see @@ -220,7 +219,7 @@ Open a PR against the [`cncf/foundation`](https://github.com/cncf/foundation/tree/main/code-of-conduct-languages) repository to add the code of conduct in your language. -### Setting up the OWNERS files +### Set up the OWNERS files To set the roles of each user contributing to the localization, create an `OWNERS` file inside the language-specific subdirectory with: @@ -313,7 +312,7 @@ a GitHub ID, email address, [Slack channel](https://slack.com/), or another method of contact. You must also provide a link to your localized Community Code of Conduct. -### Launching your new localization +### Launch your new localization When a localization meets the requirements for workflow and minimum output, SIG Docs does the following: @@ -323,7 +322,7 @@ Docs does the following: [Cloud Native Computing Foundation](https://www.cncf.io/about/)(CNCF) channels, including the [Kubernetes blog](/blog/). -## Translating content +## Localize content Localizing *all* the Kubernetes documentation is an enormous task. It's okay to start small and expand over time. @@ -361,7 +360,7 @@ extensive human review to meet minimum standards of quality. To ensure accuracy in grammar and meaning, members of your localization team should carefully review all machine-generated translations before publishing. -### Translating SVG images +### Localize SVG images The Kubernetes project recommends using vector (SVG) images where possible, as these are much easier for a localization team to edit. If you find a raster @@ -375,33 +374,33 @@ used in the Kubernetes documentation to illustrate concepts, workflows, and diagrams. 1. **Identifying translatable text**: Start by identifying the text elements -within the SVG image that need to be translated. 
These elements typically
-include labels, captions, annotations, or any text that conveys information.
-
-2. **Editing SVG files**: SVG files are XML-based, which means they can be
-edited using a text editor. However, it's important to note that most of the
-documentation images in Kubernetes already convert text to curves to avoid font
-compatibility issues. In such cases, it is recommended to use specialized SVG
-editing software, such as Inkscape, for editing, open the SVG file and locate
-the text elements that require translation.
-
-3. **Translating the text**: Replace the original text with the translated
-version in the desired language. Ensure the translated text accurately conveys
-the intended meaning and fits within the available space in the image. The Open
-Sans font family should be used when working with languages that use the Latin
-alphabet. You can download the Open Sans typeface from here:
-[Open Sans Typeface](https://fonts.google.com/specimen/Open+Sans).
-
-4. **Converting text to curves**: As already mentioned, to address font
-compatibility issues, it is recommended to convert the translated text to
-curves or paths. Converting text to curves ensures that the final image
-displays the translated text correctly, even if the user's system does not
-have the exact font used in the original SVG.
-
-5. **Reviewing and testing**: After making the necessary translations and
-converting text to curves, save and review the updated SVG image to ensure
-the text is properly displayed and aligned. Check
-[Preview your changes locally](/docs/contribute/new-content/open-a-pr/#preview-locally).
+   within the SVG image that need to be translated. These elements typically
+   include labels, captions, annotations, or any text that conveys information.
+
+1. **Editing SVG files**: SVG files are XML-based, which means they can be
+   edited using a text editor. However, it's important to note that most of the
+   documentation images in Kubernetes already convert text to curves to avoid font
+   compatibility issues. In such cases, it is recommended to use specialized SVG
+   editing software, such as Inkscape. Open the SVG file and locate
+   the text elements that require translation.
+
+1. **Translating the text**: Replace the original text with the translated
+   version in the desired language. Ensure the translated text accurately conveys
+   the intended meaning and fits within the available space in the image. The Open
+   Sans font family should be used when working with languages that use the Latin
+   alphabet. You can download the Open Sans typeface from here:
+   [Open Sans Typeface](https://fonts.google.com/specimen/Open+Sans).
+
+1. **Converting text to curves**: As already mentioned, to address font
+   compatibility issues, it is recommended to convert the translated text to
+   curves or paths. Converting text to curves ensures that the final image
+   displays the translated text correctly, even if the user's system does not
+   have the exact font used in the original SVG.
+
+1. **Reviewing and testing**: After making the necessary translations and
+   converting text to curves, save and review the updated SVG image to ensure
+   the text is properly displayed and aligned. Check
+   [Preview your changes locally](/docs/contribute/new-content/open-a-pr/#preview-locally).

### Source files

To find source files for your target version:

1. Navigate to the Kubernetes website repository at
   https://github.com/kubernetes/website.

-2. 
-2. Select a branch for your target version from the following table:
+1. Select a branch for your target version from the following table:

   Target version | Branch
   -----|-----

@@ -481,7 +480,7 @@ Per CNCF policy, the localization teams must upload their meetings to the
SIG Docs YouTube playlist. A SIG Docs Co-Chair or Tech Lead can help with the
process until SIG Docs automates it.

-## Branching strategy
+## Branch strategy

Because localization projects are highly collaborative efforts, we
encourage teams to work in shared localization branches - especially

@@ -506,15 +505,15 @@ To collaborate on a localization branch:
   branch `dev-1.12-de.1` directly against the
   `kubernetes/website` repository, based on the source branch for
   Kubernetes v1.12.

-2. Individual contributors open feature branches based on the localization
+1. Individual contributors open feature branches based on the localization
   branch.

   For example, a German contributor opens a pull request with changes to
   `kubernetes:dev-1.12-de.1` from `username:local-branch-name`.

-3. Approvers review and merge feature branches into the localization branch.
+1. Approvers review and merge feature branches into the localization branch.

-4. Periodically, an approver merges the localization branch with its source
+1. Periodically, an approver merges the localization branch with its source
   branch by opening and approving a new pull request. Be sure to squash the
   commits before approving the pull request.

diff --git a/content/en/docs/contribute/new-content/new-features.md b/content/en/docs/contribute/new-content/new-features.md
index 3b71e2d5316dd..2d670a3c56dbc 100644
--- a/content/en/docs/contribute/new-content/new-features.md
+++ b/content/en/docs/contribute/new-content/new-features.md
@@ -123,17 +123,42 @@ When you complete your content, the documentation person assigned to your featur
To ensure technical accuracy, the content may also require a technical review from
corresponding SIG(s). Use their suggestions to get the content to a release ready
state.

+If your feature needs documentation and the first draft
+content is not received, the feature may be removed from the milestone.
+
+#### Feature gates {#ready-for-review-feature-gates}
+
If your feature is an Alpha or Beta feature and is behind a feature gate,
make sure you add it to
[Alpha/Beta Feature gates](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features)
-table as part of your pull request. With new feature gates, a description of
-the feature gate is also required. If your feature is GA'ed or deprecated,
-make sure to move it from the
+table as part of your pull request. With brand-new feature gates, a separate
+description of the feature gate is also required; create a new Markdown file
+inside `content/en/docs/reference/command-line-tools-reference/feature-gates/`
+(use other files as a template).
+
+{{< note >}}
+Make sure to add a
+[`feature-gate-description` shortcode](/docs/contribute/style/hugo-shortcodes/#feature-gate-description)
+into the feature gates page. The list is sorted alphabetically.
+{{< /note >}}
+
+When you change a feature gate from disabled-by-default to enabled-by-default,
+you may also need to change other documentation (not just the list of
+feature gates). Watch out for language such as "The `exampleSetting` field
+is a beta field and disabled by default.
You can enable it by enabling the
+`ProcessExampleThings` feature gate."

+If your feature is GA'ed or deprecated, make sure to move it from the
[Feature gates for Alpha/Beta](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features)
table to
[Feature gates for graduated or deprecated features](/docs/reference/command-line-tools-reference/feature-gates-removed/#feature-gates-that-are-removed)
table with Alpha and Beta history intact.

-If your feature needs documentation and the first draft
-content is not received, the feature may be removed from the milestone.
+Eventually, Kubernetes will stop including the feature gate at all.
+In that case, move it from [Feature gates for graduated or deprecated features](/docs/reference/command-line-tools-reference/feature-gates-removed/#feature-gates-that-are-removed)
+into a separate page, [Feature Gates (removed)](/docs/reference/command-line-tools-reference/feature-gates-removed/).
+
+Also make sure to move the relevant list entry and
+[`feature-gate-description` shortcode](/docs/contribute/style/hugo-shortcodes/#feature-gate-description) into the
+removed feature gates page. The lists are sorted alphabetically.

### All PRs reviewed and ready to merge

diff --git a/content/en/docs/contribute/new-content/open-a-pr.md b/content/en/docs/contribute/new-content/open-a-pr.md
index 1690fdd5c46cc..382befe116941 100644
--- a/content/en/docs/contribute/new-content/open-a-pr.md
+++ b/content/en/docs/contribute/new-content/open-a-pr.md
@@ -37,7 +37,7 @@ opening a pull request. Figure 1 outlines the steps and the details follow.

{{< mermaid >}}
flowchart LR
-A([fa:fa-user New
    Contributor]) --- id1[(K8s/Website
    GitHub)] +A([fa:fa-user New
    Contributor]) --- id1[(kubernetes/website
    GitHub)] subgraph tasks[Changes using GitHub] direction TB 0[ ] -.- @@ -132,7 +132,7 @@ Figure 2 shows the steps to follow when you work from a local fork. The details {{< mermaid >}} flowchart LR -1[Fork the K8s/website
    repository] --> 2[Create local clone
    and set upstream] +1[Fork the kubernetes/website
    repository] --> 2[Create local clone
    and set upstream]
subgraph changes[Your changes]
direction TB
S[ ] -.-
@@ -359,7 +359,9 @@ Alternately, install and use the `hugo` command on your computer:

### Open a pull request from your fork to kubernetes/website {#open-a-pr}

-Figure 3 shows the steps to open a PR from your fork to the K8s/website. The details follow.
+Figure 3 shows the steps to open a PR from your fork to the [kubernetes/website](https://github.com/kubernetes/website) repository. The details follow.
+
+Note that contributors sometimes abbreviate `kubernetes/website` as `k/website`.

@@ -368,7 +370,7 @@ Figure 3 shows the steps to open a PR from your fork to the K8s/website. The det
flowchart LR
subgraph first[ ]
direction TB
-1[1. Go to K8s/website repository] --> 2[2. Select New Pull Request]
+1[1. Go to kubernetes/website repository] --> 2[2. Select New Pull Request]
2 --> 3[3. Select compare across forks]
3 --> 4[4. Select your fork from
    head repository drop-down menu] end @@ -387,7 +389,7 @@ class 1,2,3,4,5,6,7,8 grey class first,second white {{}} -Figure 3. Steps to open a PR from your fork to the K8s/website. +Figure 3. Steps to open a PR from your fork to the [kubernetes/website](https://github.com/kubernetes/website). 1. In a web browser, go to the [`kubernetes/website`](https://github.com/kubernetes/website/) repository. 1. Select **New Pull Request**. diff --git a/content/en/docs/contribute/participate/issue-wrangler.md b/content/en/docs/contribute/participate/issue-wrangler.md new file mode 100644 index 0000000000000..0f07a14f89152 --- /dev/null +++ b/content/en/docs/contribute/participate/issue-wrangler.md @@ -0,0 +1,91 @@ +--- +title: Issue Wranglers +content_type: concept +weight: 20 +--- + + + +Alongside the [PR Wrangler](/docs/contribute/participate/pr-wranglers), formal approvers, +reviewers and members of SIG Docs take week-long shifts +[triaging and categorising issues](/docs/contribute/review/for-approvers/#triage-and-categorize-issues) +for the repository. + + + +## Duties + +Each day in a week-long shift the Issue Wrangler will be responsible for: + +- Triaging and tagging incoming issues daily. See + [Triage and categorize issues](/docs/contribute/review/for-approvers/#triage-and-categorize-issues) + for guidelines on how SIG Docs uses metadata. +- Keeping an eye on stale & rotten issues within the kubernetes/website repository. +- Maintenance of the [Issues board](https://github.com/orgs/kubernetes/projects/72/views/1). + +## Requirements + +- Must be an active member of the Kubernetes organization. +- A minimum of 15 [non-trivial](https://www.kubernetes.dev/docs/guide/pull-requests/#trivial-edits) + contributions to Kubernetes (of which a certain amount should be directed towards kubernetes/website). +- Performing the role in an informal capacity already. + +## Helpful Prow commands for wranglers + +Below are some commonly used commands for Issue Wranglers: + +```bash +# reopen an issue +/reopen + +# transfer issues that don't fit in k/website to another repository +/transfer[-issue] + +# change the state of rotten issues +/remove-lifecycle rotten + +# change the state of stale issues +/remove-lifecycle stale + +# assign sig to an issue +/sig + +# add specific area +/area + +# for beginner friendly issues +/good-first-issue + +# issues that needs help +/help wanted + +# tagging issue as support specific +/kind support + +# to accept triaging for an issue +/triage accepted + +# closing an issue we won't be working on and haven't fixed yet +/close not-planned +``` + +To find more Prow commands, refer to the [Command Help](https://prow.k8s.io/command-help) documentation. + +## When to close Issues + +For an open source project to succeed, good issue management is crucial. +But it is also critical to resolve issues in order to maintain the repository +and communicate clearly with contributors and users. + +Close issues when: + +- A similar issue is reported more than once. You will first need to tag it as `/triage duplicate`; + link it to the main issue & then close it. It is also advisable to direct the users to the original issue. +- It is very difficult to understand and address the issue presented by the author with the information provided. + However, encourage the user to provide more details or reopen the issue if they can reproduce it later. +- The same functionality is implemented elsewhere. One can close this issue and direct user to the appropriate place. 
- The reported issue is not currently planned or aligned with the project's goals.
- The issue appears to be spam and is clearly unrelated.
- The issue is related to an external limitation or dependency and is beyond the control of the project.

To close an issue, leave a `/close` comment on the issue.

diff --git a/content/en/docs/contribute/style/diagram-guide.md b/content/en/docs/contribute/style/diagram-guide.md
index bb8d9c95373dd..6a0d44828f5af 100644
--- a/content/en/docs/contribute/style/diagram-guide.md
+++ b/content/en/docs/contribute/style/diagram-guide.md
@@ -260,7 +260,8 @@ You should use the [local](/docs/contribute/new-content/open-a-pr/#preview-local
and Netlify previews to verify the diagram is properly rendered.

{{< caution >}}
-The Mermaid live editor feature set may not support the K8s/website Mermaid feature set.
+The Mermaid live editor feature set may not support the [kubernetes/website](https://github.com/kubernetes/website) Mermaid feature set.
+Note that contributors sometimes abbreviate `kubernetes/website` as `k/website`.
You might see a syntax error or a blank screen after the Hugo build.
If that is the case, consider using the Mermaid+SVG method.
{{< /caution >}}

@@ -342,7 +343,7 @@ The following lists advantages of the Mermaid+SVG method:

* Live editor tool.
* Live editor tool supports the most current Mermaid feature set.
-* Employ existing K8s/website methods for handling `.svg` image files.
+* Employ existing [kubernetes/website](https://github.com/kubernetes/website) methods for handling `.svg` image files.
* Environment doesn't require Mermaid support.

Be sure to check that your diagram renders properly using the

diff --git a/content/en/docs/contribute/style/hugo-shortcodes/index.md b/content/en/docs/contribute/style/hugo-shortcodes/index.md
index 2c8c10309e8a9..6112080eb8552 100644
--- a/content/en/docs/contribute/style/hugo-shortcodes/index.md
+++ b/content/en/docs/contribute/style/hugo-shortcodes/index.md
@@ -49,6 +49,24 @@ Renders to:

{{< feature-state for_k8s_version="v1.10" state="beta" >}}

+## Feature gate description
+
+In a Markdown page (`.md` file) on this site, you can add a shortcode to
+display the description for a feature gate.
+
+### Feature gate description demo
+
+Below is a demo of the feature gate description shortcode, which displays the
+description of the named feature gate.
+
+```
+{{</* feature-gate-description name="DryRun" */>}}
+```
+
+Renders to:
+
+{{< feature-gate-description name="DryRun" >}}
+
## Glossary

There are two glossary shortcodes: `glossary_tooltip` and `glossary_definition`.
@@ -401,6 +419,7 @@ Renders to:

{{< latest-release-notes >}}

+
## {{% heading "whatsnext" %}}

* Learn about [Hugo](https://gohugo.io/).

diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md
index 63c87dc25f7d1..b6a8819879026 100644
--- a/content/en/docs/contribute/style/style-guide.md
+++ b/content/en/docs/contribute/style/style-guide.md
@@ -457,6 +457,15 @@ be two newlines. Second-level headings follow the first-level (or the title) wit
any preceding paragraphs or texts. A two line spacing helps visualize the
overall structure of content in a code editor better.

+Manually wrap paragraphs in the Markdown source when appropriate. Since the git
+tool and the GitHub website generate file diffs on a line-by-line basis,
+manually wrapping long lines helps the reviewers to easily see the changes
+made in a PR and provide feedback.
It also helps the downstream localization +teams where people track the upstream changes on a per-line basis. Line +wrapping can happen at the end of a sentence or a punctuation character, for +example. One exception to this is that a Markdown link or a shortcode is +expected to be in a single line. + ### Headings and titles {#headings} People accessing this documentation may use a screen reader or other assistive technology (AT). diff --git a/content/en/docs/images/gateway-kind-relationships.svg b/content/en/docs/images/gateway-kind-relationships.svg new file mode 100644 index 0000000000000..cca73e75ce74f --- /dev/null +++ b/content/en/docs/images/gateway-kind-relationships.svg @@ -0,0 +1 @@ +
    cluster
    GatewayClass
    Gateway
    HTTPRoute
    \ No newline at end of file diff --git a/content/en/docs/images/gateway-request-flow.svg b/content/en/docs/images/gateway-request-flow.svg new file mode 100644 index 0000000000000..a02852868ba2e --- /dev/null +++ b/content/en/docs/images/gateway-request-flow.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index f5c9f852f2e9c..5be7d4c4b8887 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -84,10 +84,8 @@ operator to use or manage a cluster. * [kubelet configuration (v1alpha1)](/docs/reference/config-api/kubelet-config.v1alpha1/) and [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) [kubelet configuration (v1)](/docs/reference/config-api/kubelet-config.v1/) -* [kubelet credential providers (v1alpha1)](/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/), - [kubelet credential providers (v1beta1)](/docs/reference/config-api/kubelet-credentialprovider.v1beta1/) and - [kubelet credential providers (v1)](/docs/reference/config-api/kubelet-credentialprovider.v1/) - [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) and +* [kubelet credential providers (v1)](/docs/reference/config-api/kubelet-credentialprovider.v1/) +* [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) and [kube-scheduler configuration (v1)](/docs/reference/config-api/kube-scheduler-config.v1/) * [kube-controller-manager configuration (v1alpha1)](/docs/reference/config-api/kube-controller-manager-config.v1alpha1/) * [kube-proxy configuration (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/) diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 7279339f597ba..2ce4f659da04c 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -24,7 +24,7 @@ Kubernetes API server prior to persistence of the object, but after the request is authenticated and authorized. Admission controllers may be _validating_, _mutating_, or both. Mutating -controllers may modify related objects to the requests they admit; validating controllers may not. +controllers may modify objects related to the requests they admit; validating controllers may not. Admission controllers limit requests to create, delete, modify objects. Admission controllers can also block custom verbs, such as a request connect to a Pod via @@ -845,6 +845,10 @@ The Kubernetes project strongly recommends enabling this admission controller. You should enable this admission controller if you intend to make any use of Kubernetes `ServiceAccount` objects. +Regarding the annotation `kubernetes.io/enforce-mountable-secrets`: While the annotation's name suggests it only concerns the mounting of Secrets, +its enforcement also extends to other ways Secrets are used in the context of a Pod. +Therefore, it is crucial to ensure that all the referenced secrets are correctly specified in the ServiceAccount. + ### StorageObjectInUseProtection **Type**: Mutating. 
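
For the `kubernetes.io/enforce-mountable-secrets` annotation discussed above, a minimal sketch of a
ServiceAccount that opts into this enforcement could look like the following; the ServiceAccount and
Secret names here are illustrative, not taken from the change itself:

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: build-robot            # illustrative name
  namespace: default
  annotations:
    # Enforcement applies to every way a Pod references Secrets through
    # this ServiceAccount, not only to volume mounts.
    kubernetes.io/enforce-mountable-secrets: "true"
secrets:
- name: build-robot-token      # illustrative; the only Secret Pods may reference
```

With the annotation set to `"true"`, a Pod running as this ServiceAccount that references any Secret
not listed under `secrets` is rejected by the admission controller.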
diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index 960fa736a408f..3b48363efb6a2 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -10,8 +10,7 @@ weight: 10 --- -This page provides an overview of authenticating. - +This page provides an overview of authentication. ## Users in Kubernetes @@ -59,7 +58,8 @@ with the request: * Username: a string which identifies the end user. Common values might be `kube-admin` or `jane@example.com`. * UID: a string which identifies the end user and attempts to be more consistent and unique than username. -* Groups: a set of strings, each of which indicates the user's membership in a named logical collection of users. Common values might be `system:masters` or `devops-team`. +* Groups: a set of strings, each of which indicates the user's membership in a named logical collection of users. + Common values might be `system:masters` or `devops-team`. * Extra fields: a map of strings to list of strings which holds additional information authorizers may find useful. All values are opaque to the authentication system and only hold significance @@ -80,7 +80,7 @@ Integrations with other authentication protocols (LDAP, SAML, Kerberos, alternat can be accomplished using an [authenticating proxy](#authenticating-proxy) or the [authentication webhook](#webhook-token-authentication). -### X509 Client Certs +### X509 client certificates Client certificate authentication is enabled by passing the `--client-ca-file=SOMEFILE` option to API server. The referenced file must contain one or more certificate authorities @@ -100,9 +100,10 @@ This would create a CSR for the username "jbeda", belonging to two groups, "app1 See [Managing Certificates](/docs/tasks/administer-cluster/certificates/) for how to generate a client cert. -### Static Token File +### Static token file -The API server reads bearer tokens from a file when given the `--token-auth-file=SOMEFILE` option on the command line. Currently, tokens last indefinitely, and the token list cannot be +The API server reads bearer tokens from a file when given the `--token-auth-file=SOMEFILE` option +on the command line. Currently, tokens last indefinitely, and the token list cannot be changed without restarting the API server. The token file is a csv file with a minimum of 3 columns: token, user name, user uid, @@ -116,7 +117,7 @@ token,user,uid,"group1,group2,group3" ``` {{< /note >}} -#### Putting a Bearer Token in a Request +#### Putting a bearer token in a request When using bearer token authentication from an http client, the API server expects an `Authorization` header with a value of `Bearer @@ -130,7 +131,7 @@ header as shown below. Authorization: Bearer 31ada4fd-adec-460c-809a-9e56ceb75269 ``` -### Bootstrap Tokens +### Bootstrap tokens {{< feature-state for_k8s_version="v1.18" state="stable" >}} @@ -165,15 +166,15 @@ Please see [Bootstrap Tokens](/docs/reference/access-authn-authz/bootstrap-token documentation on the Bootstrap Token authenticator and controllers along with how to manage these tokens with `kubeadm`. -### Service Account Tokens +### Service account tokens A service account is an automatically enabled authenticator that uses signed bearer tokens to verify requests. 
The plugin takes two optional flags: * `--service-account-key-file` File containing PEM-encoded x509 RSA or ECDSA -private or public keys, used to verify ServiceAccount tokens. The specified file -can contain multiple keys, and the flag can be specified multiple times with -different files. If unspecified, --tls-private-key-file is used. + private or public keys, used to verify ServiceAccount tokens. The specified file + can contain multiple keys, and the flag can be specified multiple times with + different files. If unspecified, --tls-private-key-file is used. * `--service-account-lookup` If enabled, tokens which are deleted from the API will be revoked. Service accounts are usually created automatically by the API server and @@ -241,7 +242,7 @@ and are assigned to the groups `system:serviceaccounts` and `system:serviceaccou {{< warning >}} Because service account tokens can also be stored in Secret API objects, any user with -write access to Secrets can request a token, and any user with read access to those +write access to Secrets can request a token, and any user with read access to those Secrets can authenticate as the service account. Be cautious when granting permissions to service accounts and read or write capabilities for Secrets. {{< /warning >}} @@ -257,7 +258,7 @@ email, signed by the server. To identify the user, the authenticator uses the `id_token` (not the `access_token`) from the OAuth2 [token response](https://openid.net/specs/openid-connect-core-1_0.html#TokenResponse) -as a bearer token. See [above](#putting-a-bearer-token-in-a-request) for how the token +as a bearer token. See [above](#putting-a-bearer-token-in-a-request) for how the token is included in a request. {{< mermaid >}} @@ -267,7 +268,7 @@ sequenceDiagram participant kube as Kubectl participant api as API Server - user ->> idp: 1. Login to IdP + user ->> idp: 1. Log in to IdP activate idp idp -->> user: 2. Provide access_token,
    id_token, and refresh_token deactivate idp @@ -288,31 +289,37 @@ sequenceDiagram deactivate kube {{< /mermaid >}} -1. Login to your identity provider -2. Your identity provider will provide you with an `access_token`, `id_token` and a `refresh_token` -3. When using `kubectl`, use your `id_token` with the `--token` flag or add it directly to your `kubeconfig` -4. `kubectl` sends your `id_token` in a header called Authorization to the API server -5. The API server will make sure the JWT signature is valid by checking against the certificate named in the configuration -6. Check to make sure the `id_token` hasn't expired -7. Make sure the user is authorized -8. Once authorized the API server returns a response to `kubectl` -9. `kubectl` provides feedback to the user - +1. Log in to your identity provider +1. Your identity provider will provide you with an `access_token`, `id_token` and a `refresh_token` +1. When using `kubectl`, use your `id_token` with the `--token` flag or add it directly to your `kubeconfig` +1. `kubectl` sends your `id_token` in a header called Authorization to the API server +1. The API server will make sure the JWT signature is valid +1. Check to make sure the `id_token` hasn't expired + 1. Perform claim and/or user validation if CEL expressions are configured with `AuthenticationConfiguration`. +1. Make sure the user is authorized +1. Once authorized the API server returns a response to `kubectl` +1. `kubectl` provides feedback to the user Since all of the data needed to validate who you are is in the `id_token`, Kubernetes doesn't need to -"phone home" to the identity provider. In a model where every request is stateless this provides a very scalable solution for authentication. It does offer a few challenges: +"phone home" to the identity provider. In a model where every request is stateless this provides a +very scalable solution for authentication. It does offer a few challenges: -1. Kubernetes has no "web interface" to trigger the authentication process. There is no browser or interface to collect credentials which is why you need to authenticate to your identity provider first. -2. The `id_token` can't be revoked, it's like a certificate so it should be short-lived (only a few minutes) so it can be very annoying to have to get a new token every few minutes. -3. To authenticate to the Kubernetes dashboard, you must use the `kubectl proxy` command or a reverse proxy that injects the `id_token`. +1. Kubernetes has no "web interface" to trigger the authentication process. There is no browser or + interface to collect credentials which is why you need to authenticate to your identity provider first. +1. The `id_token` can't be revoked, it's like a certificate so it should be short-lived (only a few minutes) + so it can be very annoying to have to get a new token every few minutes. +1. To authenticate to the Kubernetes dashboard, you must use the `kubectl proxy` command or a reverse proxy + that injects the `id_token`. #### Configuring the API Server +##### Using flags + To enable the plugin, configure the following flags on the API server: | Parameter | Description | Example | Required | | --------- | ----------- | ------- | ------- | -| `--oidc-issuer-url` | URL of the provider which allows the API server to discover public signing keys. Only URLs which use the `https://` scheme are accepted. This is typically the provider's discovery URL without a path, for example "https://accounts.google.com" or "https://login.salesforce.com". 
This URL should point to the level below .well-known/openid-configuration | If the discovery URL is `https://accounts.google.com/.well-known/openid-configuration`, the value should be `https://accounts.google.com` | Yes | +| `--oidc-issuer-url` | URL of the provider that allows the API server to discover public signing keys. Only URLs that use the `https://` scheme are accepted. This is typically the provider's discovery URL, changed to have an empty path | If the issuer's OIDC discovery URL is `https://accounts.provider.example/.well-known/openid-configuration`, the value should be `https://accounts.provider.example` | Yes | | `--oidc-client-id` | A client id that all tokens must be issued for. | kubernetes | Yes | | `--oidc-username-claim` | JWT claim to use as the user name. By default `sub`, which is expected to be a unique identifier of the end user. Admins can choose other claims, such as `email` or `name`, depending on their provider. However, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins. | sub | No | | `--oidc-username-prefix` | Prefix prepended to username claims to prevent clashes with existing names (such as `system:` users). For example, the value `oidc:` will create usernames like `oidc:jane.doe`. If this flag isn't provided and `--oidc-username-claim` is a value other than `email` the prefix defaults to `( Issuer URL )#` where `( Issuer URL )` is the value of `--oidc-issuer-url`. The value `-` can be used to disable all prefixing. | `oidc:` | No | @@ -322,6 +329,291 @@ To enable the plugin, configure the following flags on the API server: | `--oidc-ca-file` | The path to the certificate for the CA that signed your identity provider's web certificate. Defaults to the host's root CAs. | `/etc/kubernetes/ssl/kc-ca.pem` | No | | `--oidc-signing-algs` | The signing algorithms accepted. Default is "RS256". | `RS512` | No | +##### Using Authentication Configuration + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + +JWT Authenticator is an authenticator to authenticate Kubernetes users using JWT compliant tokens. The authenticator will attempt to +parse a raw ID token, verify it's been signed by the configured issuer. The public key to verify the signature is discovered from the issuer's public endpoint using OIDC discovery. + +The API server can be configured to use a JWT authenticator via the `--authentication-config` flag. This flag takes a path to a file containing the `AuthenticationConfiguration`. An example configuration is provided below. +To use this config, the `StructuredAuthenticationConfiguration` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +has to be enabled. + +{{< note >}} +When the feature is enabled, setting both `--authentication-config` and any of the `--oidc-*` flags will result in an error. If you want to use the feature, you have to remove the `--oidc-*` flags and use the configuration file instead. +{{< /note >}} + +```yaml +--- +# +# CAUTION: this is an example configuration. +# Do not use this for your own cluster! +# +apiVersion: apiserver.config.k8s.io/v1alpha1 +kind: AuthenticationConfiguration +# list of authenticators to authenticate Kubernetes users using JWT compliant tokens. +jwt: +- issuer: + url: https://example.com # Same as --oidc-issuer-url. + audiences: + - my-app # Same as --oidc-client-id. + # rules applied to validate token claims to authenticate users. + claimValidationRules: + # Same as --oidc-required-claim key=value. 
  - claim: hd
    requiredValue: example.com
    # Instead of claim and requiredValue, you can use expression to validate the claim.
    # expression is a CEL expression that evaluates to a boolean.
    # all the expressions must evaluate to true for validation to succeed.
  - expression: 'claims.hd == "example.com"'
    # Message customizes the error message seen in the API server logs when the validation fails.
    message: the hd claim must be set to example.com
  - expression: 'claims.exp - claims.nbf <= 86400'
    message: total token lifetime must not exceed 24 hours
  claimMappings:
    # username represents an option for the username attribute.
    # This is the only required attribute.
    username:
      # Same as --oidc-username-claim. Mutually exclusive with username.expression.
      claim: "sub"
      # Same as --oidc-username-prefix. Mutually exclusive with username.expression.
      # if username.claim is set, username.prefix is required.
      # Explicitly set it to "" if no prefix is desired.
      prefix: ""
      # Mutually exclusive with username.claim and username.prefix.
      # expression is a CEL expression that evaluates to a string.
      expression: 'claims.username + ":external-user"'
    # groups represents an option for the groups attribute.
    groups:
      # Same as --oidc-groups-claim. Mutually exclusive with groups.expression.
      claim: "sub"
      # Same as --oidc-groups-prefix. Mutually exclusive with groups.expression.
      # if groups.claim is set, groups.prefix is required.
      # Explicitly set it to "" if no prefix is desired.
      prefix: ""
      # Mutually exclusive with groups.claim and groups.prefix.
      # expression is a CEL expression that evaluates to a string or a list of strings.
      expression: 'claims.roles.split(",")'
    # uid represents an option for the uid attribute.
    uid:
      # Mutually exclusive with uid.expression.
      claim: 'sub'
      # Mutually exclusive with uid.claim.
      # expression is a CEL expression that evaluates to a string.
      expression: 'claims.sub'
    # extra attributes to be added to the UserInfo object. Keys must be domain-prefixed paths and must be unique.
    extra:
    - key: 'example.com/tenant'
      # valueExpression is a CEL expression that evaluates to a string or a list of strings.
      valueExpression: 'claims.tenant'
  # validation rules applied to the final user object.
  userValidationRules:
  # expression is a CEL expression that evaluates to a boolean.
  # all the expressions must evaluate to true for the user to be valid.
  - expression: "!user.username.startsWith('system:')"
    # Message customizes the error message seen in the API server logs when the validation fails.
    message: 'username cannot use reserved system: prefix'
  - expression: "user.groups.all(group, !group.startsWith('system:'))"
    message: 'groups cannot use reserved system: prefix'
```

* Claim validation rule expression

  `jwt.claimValidationRules[i].expression` represents the expression which will be evaluated by CEL.
  CEL expressions have access to the contents of the token payload, organized into the `claims` CEL variable.
  `claims` is a map of claim names (as strings) to claim values (of any type).
* User validation rule expression

  `jwt.userValidationRules[i].expression` represents the expression which will be evaluated by CEL.
  CEL expressions have access to the contents of `userInfo`, organized into the `user` CEL variable.
  Refer to the [UserInfo](/docs/reference/generated/kubernetes-api/v{{< skew currentVersion >}}/#userinfo-v1-authentication-k8s-io) API documentation for the schema of `user`.
+* Claim mapping expression + + `jwt.claimMappings.username.expression`, `jwt.claimMappings.groups.expression`, `jwt.claimMappings.uid.expression` + `jwt.claimMappings.extra[i].valueExpression` represents the expression which will be evaluated by CEL. + CEL expressions have access to the contents of the token payload, organized into `claims` CEL variable. + `claims` is a map of claim names (as strings) to claim values (of any type). + + To learn more, see the [Documentation on CEL](/docs/reference/using-api/cel/) + + Here are examples of the `AuthenticationConfiguration` with different token payloads. + + {{< tabs name="example_configuration" >}} + {{% tab name="Valid token" %}} + ```yaml + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: AuthenticationConfiguration + jwt: + - issuer: + url: https://example.com + audiences: + - my-app + claimMappings: + username: + expression: 'claims.username + ":external-user"' + groups: + expression: 'claims.roles.split(",")' + uid: + expression: 'claims.sub' + extra: + - key: 'example.com/tenant' + valueExpression: 'claims.tenant' + userValidationRules: + - expression: "!user.username.startsWith('system:')" # the expression will evaluate to true, so validation will succeed. + message: 'username cannot used reserved system: prefix' + ``` + + ```bash + TOKEN=eyJhbGciOiJSUzI1NiIsImtpZCI6ImY3dF9tOEROWmFTQk1oWGw5QXZTWGhBUC04Y0JmZ0JVbFVpTG5oQkgxdXMiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNzAzMjMyOTQ5LCJpYXQiOjE3MDExMDcyMzMsImlzcyI6Imh0dHBzOi8vZXhhbXBsZS5jb20iLCJqdGkiOiI3YzMzNzk0MjgwN2U3M2NhYTJjMzBjODY4YWMwY2U5MTBiY2UwMmRkY2JmZWJlOGMyM2I4YjVmMjdhZDYyODczIiwibmJmIjoxNzAxMTA3MjMzLCJyb2xlcyI6InVzZXIsYWRtaW4iLCJzdWIiOiJhdXRoIiwidGVuYW50IjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjRhIiwidXNlcm5hbWUiOiJmb28ifQ.TBWF2RkQHm4QQz85AYPcwLxSk-VLvQW-mNDHx7SEOSv9LVwcPYPuPajJpuQn9C_gKq1R94QKSQ5F6UgHMILz8OfmPKmX_00wpwwNVGeevJ79ieX2V-__W56iNR5gJ-i9nn6FYk5pwfVREB0l4HSlpTOmu80gbPWAXY5hLW0ZtcE1JTEEmefORHV2ge8e3jp1xGafNy6LdJWabYuKiw8d7Qga__HxtKB-t0kRMNzLRS7rka_SfQg0dSYektuxhLbiDkqhmRffGlQKXGVzUsuvFw7IGM5ZWnZgEMDzCI357obHeM3tRqpn5WRjtB8oM7JgnCymaJi-P3iCd88iu1xnzA + ``` + where the token payload is: + + ```json + { + "aud": "kubernetes", + "exp": 1703232949, + "iat": 1701107233, + "iss": "https://example.com", + "jti": "7c337942807e73caa2c30c868ac0ce910bce02ddcbfebe8c23b8b5f27ad62873", + "nbf": 1701107233, + "roles": "user,admin", + "sub": "auth", + "tenant": "72f988bf-86f1-41af-91ab-2d7cd011db4a", + "username": "foo" + } + ``` + + The token with the above `AuthenticationConfiguration` will produce the following `UserInfo` object and successfully authenticate the user. + + ```json + { + "username": "foo:external-user", + "uid": "auth", + "groups": [ + "user", + "admin" + ], + "extra": { + "example.com/tenant": "72f988bf-86f1-41af-91ab-2d7cd011db4a" + } + } + ``` + {{% /tab %}} + {{% tab name="Fails claim validation" %}} + ```yaml + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: AuthenticationConfiguration + jwt: + - issuer: + url: https://example.com + audiences: + - my-app + claimValidationRules: + - expression: 'claims.hd == "example.com"' # the token below does not have this claim, so validation will fail. 
+ message: the hd claim must be set to example.com + claimMappings: + username: + expression: 'claims.username + ":external-user"' + groups: + expression: 'claims.roles.split(",")' + uid: + expression: 'claims.sub' + extra: + - key: 'example.com/tenant' + valueExpression: 'claims.tenant' + userValidationRules: + - expression: "!user.username.startsWith('system:')" # the expression will evaluate to true, so validation will succeed. + message: 'username cannot used reserved system: prefix' + ``` + + ```bash + TOKEN=eyJhbGciOiJSUzI1NiIsImtpZCI6ImY3dF9tOEROWmFTQk1oWGw5QXZTWGhBUC04Y0JmZ0JVbFVpTG5oQkgxdXMiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNzAzMjMyOTQ5LCJpYXQiOjE3MDExMDcyMzMsImlzcyI6Imh0dHBzOi8vZXhhbXBsZS5jb20iLCJqdGkiOiI3YzMzNzk0MjgwN2U3M2NhYTJjMzBjODY4YWMwY2U5MTBiY2UwMmRkY2JmZWJlOGMyM2I4YjVmMjdhZDYyODczIiwibmJmIjoxNzAxMTA3MjMzLCJyb2xlcyI6InVzZXIsYWRtaW4iLCJzdWIiOiJhdXRoIiwidGVuYW50IjoiNzJmOTg4YmYtODZmMS00MWFmLTkxYWItMmQ3Y2QwMTFkYjRhIiwidXNlcm5hbWUiOiJmb28ifQ.TBWF2RkQHm4QQz85AYPcwLxSk-VLvQW-mNDHx7SEOSv9LVwcPYPuPajJpuQn9C_gKq1R94QKSQ5F6UgHMILz8OfmPKmX_00wpwwNVGeevJ79ieX2V-__W56iNR5gJ-i9nn6FYk5pwfVREB0l4HSlpTOmu80gbPWAXY5hLW0ZtcE1JTEEmefORHV2ge8e3jp1xGafNy6LdJWabYuKiw8d7Qga__HxtKB-t0kRMNzLRS7rka_SfQg0dSYektuxhLbiDkqhmRffGlQKXGVzUsuvFw7IGM5ZWnZgEMDzCI357obHeM3tRqpn5WRjtB8oM7JgnCymaJi-P3iCd88iu1xnzA + ``` + where the token payload is: + ```json + { + "aud": "kubernetes", + "exp": 1703232949, + "iat": 1701107233, + "iss": "https://example.com", + "jti": "7c337942807e73caa2c30c868ac0ce910bce02ddcbfebe8c23b8b5f27ad62873", + "nbf": 1701107233, + "roles": "user,admin", + "sub": "auth", + "tenant": "72f988bf-86f1-41af-91ab-2d7cd011db4a", + "username": "foo" + } + ``` + + The token with the above `AuthenticationConfiguration` will fail to authenticate because the `hd` claim is not set to `example.com`. The API server will return `401 Unauthorized` error. + {{% /tab %}} + {{% tab name="Fails user validation" %}} + ```yaml + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: AuthenticationConfiguration + jwt: + - issuer: + url: https://example.com + audiences: + - my-app + claimValidationRules: + - expression: 'claims.hd == "example.com"' + message: the hd claim must be set to example.com + claimMappings: + username: + expression: '"system:" + claims.username' # this will prefix the username with "system:" and will fail user validation. + groups: + expression: 'claims.roles.split(",")' + uid: + expression: 'claims.sub' + extra: + - key: 'example.com/tenant' + valueExpression: 'claims.tenant' + userValidationRules: + - expression: "!user.username.startsWith('system:')" # the username will be system:foo and expression will evaluate to false, so validation will fail. 
+ message: 'username cannot used reserved system: prefix' + ``` + ```bash + TOKEN=eyJhbGciOiJSUzI1NiIsImtpZCI6ImY3dF9tOEROWmFTQk1oWGw5QXZTWGhBUC04Y0JmZ0JVbFVpTG5oQkgxdXMiLCJ0eXAiOiJKV1QifQ.eyJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNzAzMjMyOTQ5LCJoZCI6ImV4YW1wbGUuY29tIiwiaWF0IjoxNzAxMTEzMTAxLCJpc3MiOiJodHRwczovL2V4YW1wbGUuY29tIiwianRpIjoiYjViMDY1MjM3MmNkMjBlMzQ1YjZmZGZmY2RjMjE4MWY0YWZkNmYyNTlhYWI0YjdlMzU4ODEyMzdkMjkyMjBiYyIsIm5iZiI6MTcwMTExMzEwMSwicm9sZXMiOiJ1c2VyLGFkbWluIiwic3ViIjoiYXV0aCIsInRlbmFudCI6IjcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0YSIsInVzZXJuYW1lIjoiZm9vIn0.FgPJBYLobo9jnbHreooBlvpgEcSPWnKfX6dc0IvdlRB-F0dCcgy91oCJeK_aBk-8zH5AKUXoFTlInfLCkPivMOJqMECA1YTrMUwt_IVqwb116AqihfByUYIIqzMjvUbthtbpIeHQm2fF0HbrUqa_Q0uaYwgy8mD807h7sBcUMjNd215ff_nFIHss-9zegH8GI1d9fiBf-g6zjkR1j987EP748khpQh9IxPjMJbSgG_uH5x80YFuqgEWwq-aYJPQxXX6FatP96a2EAn7wfPpGlPRt0HcBOvq5pCnudgCgfVgiOJiLr_7robQu4T1bis0W75VPEvwWtgFcLnvcQx0JWg + ``` + where the token payload is: + + ```json + { + "aud": "kubernetes", + "exp": 1703232949, + "hd": "example.com", + "iat": 1701113101, + "iss": "https://example.com", + "jti": "b5b0652372cd20e345b6fdffcdc2181f4afd6f259aab4b7e35881237d29220bc", + "nbf": 1701113101, + "roles": "user,admin", + "sub": "auth", + "tenant": "72f988bf-86f1-41af-91ab-2d7cd011db4a", + "username": "foo" + } + ``` + + The token with the above `AuthenticationConfiguration` will produce the following `UserInfo` object: + + ```json + { + "username": "system:foo", + "uid": "auth", + "groups": [ + "user", + "admin" + ], + "extra": { + "example.com/tenant": "72f988bf-86f1-41af-91ab-2d7cd011db4a" + } + } + ``` + which will fail user validation because the username starts with `system:`. The API server will return `401 Unauthorized` error. + {{% /tab %}} + {{< /tabs >}} + Importantly, the API server is not an OAuth2 client, rather it can only be configured to trust a single issuer. This allows the use of public providers, such as Google, without trusting credentials issued to third parties. Admins who @@ -339,12 +631,19 @@ Tremolo Security's [OpenUnison](https://openunison.github.io/). For an identity provider to work with Kubernetes it must: -1. Support [OpenID connect discovery](https://openid.net/specs/openid-connect-discovery-1_0.html); not all do. -2. Run in TLS with non-obsolete ciphers -3. Have a CA signed certificate (even if the CA is not a commercial CA or is self signed) +1. Support [OpenID connect discovery](https://openid.net/specs/openid-connect-discovery-1_0.html); not all do. +1. Run in TLS with non-obsolete ciphers +1. Have a CA signed certificate (even if the CA is not a commercial CA or is self signed) -A note about requirement #3 above, requiring a CA signed certificate. If you deploy your own identity provider (as opposed to one of the cloud providers like Google or Microsoft) you MUST have your identity provider's web server certificate signed by a certificate with the `CA` flag set to `TRUE`, even if it is self signed. This is due to GoLang's TLS client implementation being very strict to the standards around certificate validation. If you don't have a CA handy, you can use [this script](https://github.com/dexidp/dex/blob/master/examples/k8s/gencert.sh) from the Dex team to create a simple CA and a signed certificate and key pair. -Or you can use [this similar script](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/src/main/bash/makessl.sh) that generates SHA256 certs with a longer life and larger key size. 
+A note about requirement #3 above, requiring a CA signed certificate. If you deploy your own +identity provider (as opposed to one of the cloud providers like Google or Microsoft) you MUST +have your identity provider's web server certificate signed by a certificate with the `CA` flag +set to `TRUE`, even if it is self signed. This is due to GoLang's TLS client implementation +being very strict to the standards around certificate validation. If you don't have a CA handy, +you can use the [gencert script](https://github.com/dexidp/dex/blob/master/examples/k8s/gencert.sh) +from the Dex team to create a simple CA and a signed certificate and key pair. Or you can use +[this similar script](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/src/main/bash/makessl.sh) +that generates SHA256 certs with a longer life and larger key size. Setup instructions for specific systems: @@ -356,9 +655,12 @@ Setup instructions for specific systems: ##### Option 1 - OIDC Authenticator -The first option is to use the kubectl `oidc` authenticator, which sets the `id_token` as a bearer token for all requests and refreshes the token once it expires. After you've logged into your provider, use kubectl to add your `id_token`, `refresh_token`, `client_id`, and `client_secret` to configure the plugin. +The first option is to use the kubectl `oidc` authenticator, which sets the `id_token` as a bearer token +for all requests and refreshes the token once it expires. After you've logged into your provider, use +kubectl to add your `id_token`, `refresh_token`, `client_id`, and `client_secret` to configure the plugin. -Providers that don't return an `id_token` as part of their refresh token response aren't supported by this plugin and should use "Option 2" below. +Providers that don't return an `id_token` as part of their refresh token response aren't supported +by this plugin and should use "Option 2" below. ```bash kubectl config set-credentials USER_NAME \ @@ -401,7 +703,8 @@ users: name: oidc ``` -Once your `id_token` expires, `kubectl` will attempt to refresh your `id_token` using your `refresh_token` and `client_secret` storing the new values for the `refresh_token` and `id_token` in your `.kube/config`. +Once your `id_token` expires, `kubectl` will attempt to refresh your `id_token` using your `refresh_token` +and `client_secret` storing the new values for the `refresh_token` and `id_token` in your `.kube/config`. ##### Option 2 - Use the `--token` Option @@ -411,14 +714,13 @@ The `kubectl` command lets you pass in a token using the `--token` option. Copy kubectl --token=eyJhbGciOiJSUzI1NiJ9.eyJpc3MiOiJodHRwczovL21sYi50cmVtb2xvLmxhbjo4MDQzL2F1dGgvaWRwL29pZGMiLCJhdWQiOiJrdWJlcm5ldGVzIiwiZXhwIjoxNDc0NTk2NjY5LCJqdGkiOiI2RDUzNXoxUEpFNjJOR3QxaWVyYm9RIiwiaWF0IjoxNDc0NTk2MzY5LCJuYmYiOjE0NzQ1OTYyNDksInN1YiI6Im13aW5kdSIsInVzZXJfcm9sZSI6WyJ1c2VycyIsIm5ldy1uYW1lc3BhY2Utdmlld2VyIl0sImVtYWlsIjoibXdpbmR1QG5vbW9yZWplZGkuY29tIn0.f2As579n9VNoaKzoF-dOQGmXkFKf1FMyNV0-va_B63jn-_n9LGSCca_6IVMP8pO-Zb4KvRqGyTP0r3HkHxYy5c81AnIh8ijarruczl-TK_yF5akjSTHFZD-0gRzlevBDiH8Q79NAr-ky0P4iIXS8lY9Vnjch5MF74Zx0c3alKJHJUnnpjIACByfF2SCaYzbWFMUNat-K1PaUk5-ujMBG7yYnr95xD-63n8CO8teGUAAEMx6zRjzfhnhbzX-ajwZLGwGUBT4WqjMs70-6a7_8gZmLZb2az1cZynkFRj2BaCkVT3A2RrjeEwZEtGXlMqKJ1_I2ulrOVsYx01_yD35-rw get nodes ``` - ### Webhook Token Authentication Webhook authentication is a hook for verifying bearer tokens. * `--authentication-token-webhook-config-file` a configuration file describing how to access the remote webhook service. 
* `--authentication-token-webhook-cache-ttl` how long to cache authentication decisions. Defaults to two minutes. -* `--authentication-token-webhook-version` determines whether to use `authentication.k8s.io/v1beta1` or `authentication.k8s.io/v1` +* `--authentication-token-webhook-version` determines whether to use `authentication.k8s.io/v1beta1` or `authentication.k8s.io/v1` `TokenReview` objects to send/receive information from the webhook. Defaults to `v1beta1`. The configuration file uses the [kubeconfig](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) @@ -453,11 +755,12 @@ contexts: name: webhook ``` -When a client attempts to authenticate with the API server using a bearer token as discussed [above](#putting-a-bearer-token-in-a-request), -the authentication webhook POSTs a JSON-serialized `TokenReview` object containing the token to the remote service. +When a client attempts to authenticate with the API server using a bearer token as discussed +[above](#putting-a-bearer-token-in-a-request), the authentication webhook POSTs a JSON-serialized +`TokenReview` object containing the token to the remote service. -Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) as other Kubernetes API objects. -Implementers should check the `apiVersion` field of the request to ensure correct deserialization, +Note that webhook API objects are subject to the same [versioning compatibility rules](/docs/concepts/overview/kubernetes-api/) +as other Kubernetes API objects. Implementers should check the `apiVersion` field of the request to ensure correct deserialization, and **must** respond with a `TokenReview` object of the same version as the request. {{< tabs name="TokenReview_request" >}} @@ -474,9 +777,9 @@ To opt into receiving `authentication.k8s.io/v1` token reviews, the API server m "spec": { # Opaque bearer token sent to the API server "token": "014fbff9a07c...", - + # Optional list of the audience identifiers for the server the token was presented to. - # Audience-aware token authenticators (for example, OIDC token authenticators) + # Audience-aware token authenticators (for example, OIDC token authenticators) # should verify the token was intended for at least one of the audiences in this list, # and return the intersection of this list and the valid audiences for the token in the response status. # This ensures the token is valid to authenticate to the server it was presented to. @@ -494,9 +797,9 @@ To opt into receiving `authentication.k8s.io/v1` token reviews, the API server m "spec": { # Opaque bearer token sent to the API server "token": "014fbff9a07c...", - + # Optional list of the audience identifiers for the server the token was presented to. - # Audience-aware token authenticators (for example, OIDC token authenticators) + # Audience-aware token authenticators (for example, OIDC token authenticators) # should verify the token was intended for at least one of the audiences in this list, # and return the intersection of this list and the valid audiences for the token in the response status. # This ensures the token is valid to authenticate to the server it was presented to. @@ -620,12 +923,19 @@ An unsuccessful request would return: The API server can be configured to identify users from request header values, such as `X-Remote-User`. It is designed for use in combination with an authenticating proxy, which sets the request header value. 
-* `--requestheader-username-headers` Required, case-insensitive. Header names to check, in order, for the user identity. The first header containing a value is used as the username. -* `--requestheader-group-headers` 1.6+. Optional, case-insensitive. "X-Remote-Group" is suggested. Header names to check, in order, for the user's groups. All values in all specified headers are used as group names. -* `--requestheader-extra-headers-prefix` 1.6+. Optional, case-insensitive. "X-Remote-Extra-" is suggested. Header prefixes to look for to determine extra information about the user (typically used by the configured authorization plugin). Any headers beginning with any of the specified prefixes have the prefix removed. The remainder of the header name is lowercased and [percent-decoded](https://tools.ietf.org/html/rfc3986#section-2.1) and becomes the extra key, and the header value is the extra value. +* `--requestheader-username-headers` Required, case-insensitive. Header names to check, in order, + for the user identity. The first header containing a value is used as the username. +* `--requestheader-group-headers` 1.6+. Optional, case-insensitive. "X-Remote-Group" is suggested. + Header names to check, in order, for the user's groups. All values in all specified headers are used as group names. +* `--requestheader-extra-headers-prefix` 1.6+. Optional, case-insensitive. "X-Remote-Extra-" is suggested. + Header prefixes to look for to determine extra information about the user (typically used by the configured authorization plugin). + Any headers beginning with any of the specified prefixes have the prefix removed. + The remainder of the header name is lowercased and [percent-decoded](https://tools.ietf.org/html/rfc3986#section-2.1) + and becomes the extra key, and the header value is the extra value. {{< note >}} -Prior to 1.11.3 (and 1.10.7, 1.9.11), the extra key could only contain characters which were [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6). +Prior to 1.11.3 (and 1.10.7, 1.9.11), the extra key could only contain characters which +were [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6). {{< /note >}} For example, with this configuration: @@ -663,15 +973,17 @@ extra: - profile ``` - In order to prevent header spoofing, the authenticating proxy is required to present a valid client certificate to the API server for validation against the specified CA before the request headers are checked. WARNING: do **not** reuse a CA that is used in a different context unless you understand the risks and the mechanisms to protect the CA's usage. -* `--requestheader-client-ca-file` Required. PEM-encoded certificate bundle. A valid client certificate must be presented and validated against the certificate authorities in the specified file before the request headers are checked for user names. -* `--requestheader-allowed-names` Optional. List of Common Name values (CNs). If set, a valid client certificate with a CN in the specified list must be presented before the request headers are checked for user names. If empty, any CN is allowed. - +* `--requestheader-client-ca-file` Required. PEM-encoded certificate bundle. A valid client certificate + must be presented and validated against the certificate authorities in the specified file before the + request headers are checked for user names. +* `--requestheader-allowed-names` Optional. List of Common Name values (CNs). 
If set, a valid client + certificate with a CN in the specified list must be presented before the request headers are checked + for user names. If empty, any CN is allowed. ## Anonymous requests @@ -711,12 +1023,18 @@ to the impersonated user info. The following HTTP headers can be used to performing an impersonation request: * `Impersonate-User`: The username to act as. -* `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. Optional. Requires "Impersonate-User". -* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` must be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1). -* `Impersonate-Uid`: A unique identifier that represents the user being impersonated. Optional. Requires "Impersonate-User". Kubernetes does not impose any format requirements on this string. +* `Impersonate-Group`: A group name to act as. Can be provided multiple times to set multiple groups. + Optional. Requires "Impersonate-User". +* `Impersonate-Extra-( extra name )`: A dynamic header used to associate extra fields with the user. + Optional. Requires "Impersonate-User". In order to be preserved consistently, `( extra name )` + must be lower-case, and any characters which aren't [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6) + MUST be utf8 and [percent-encoded](https://tools.ietf.org/html/rfc3986#section-2.1). +* `Impersonate-Uid`: A unique identifier that represents the user being impersonated. Optional. + Requires "Impersonate-User". Kubernetes does not impose any format requirements on this string. {{< note >}} -Prior to 1.11.3 (and 1.10.7, 1.9.11), `( extra name )` could only contain characters which were [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6). +Prior to 1.11.3 (and 1.10.7, 1.9.11), `( extra name )` could only contain characters which +were [legal in HTTP header labels](https://tools.ietf.org/html/rfc7230#section-3.2.6). {{< /note >}} {{< note >}} @@ -724,6 +1042,7 @@ Prior to 1.11.3 (and 1.10.7, 1.9.11), `( extra name )` could only contain charac {{< /note >}} An example of the impersonation headers used when impersonating a user with groups: + ```http Impersonate-User: jane.doe@example.com Impersonate-Group: developers @@ -732,6 +1051,7 @@ Impersonate-Group: admins An example of the impersonation headers used when impersonating a user with a UID and extra fields: + ```http Impersonate-User: jane.doe@example.com Impersonate-Extra-dn: cn=jane,ou=engineers,dc=example,dc=com @@ -838,7 +1158,7 @@ rules: {{< note >}} Impersonating a user or group allows you to perform any action as if you were that user or group; for that reason, impersonation is not namespace scoped. -If you want to allow impersonation using Kubernetes RBAC, +If you want to allow impersonation using Kubernetes RBAC, this requires using a `ClusterRole` and a `ClusterRoleBinding`, not a `Role` and `RoleBinding`. {{< /note >}} @@ -856,6 +1176,10 @@ protocol specific logic, then returns opaque credentials to use. Almost all cred use cases require a server side component with support for the [webhook token authenticator](#webhook-token-authentication) to interpret the credential format produced by the client plugin. 
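
As a sketch of how a client-go credential plugin is typically wired into a kubeconfig, a user entry
can delegate credential acquisition to an external binary via `exec`; the plugin name, arguments, and
environment variable below are hypothetical:

```yaml
apiVersion: v1
kind: Config
users:
- name: ldap-user                  # illustrative user entry
  user:
    exec:
      # Exec plugin API version; v1beta1 is also available.
      apiVersion: client.authentication.k8s.io/v1
      # Hypothetical plugin binary on PATH; it must print an
      # ExecCredential object to stdout.
      command: example-ldap-credential-plugin
      args:
      - --with-groups
      env:
      - name: EXAMPLE_IDP_URL      # hypothetical setting read by the plugin
        value: https://idp.example.com
      # Whether the plugin may prompt the user: Never, IfAvailable, or Always.
      interactiveMode: Never
```

`kubectl` runs the plugin and uses the returned bearer token (or client certificate) on each request;
validating whatever the plugin produced remains the job of a server-side authenticator such as the
webhook token authenticator.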
+{{< note >}}
+Earlier versions of `kubectl` included built-in support for authenticating to AKS and GKE, but this is no longer present.
+{{< /note >}}
+
 ### Example use case
 
 In a hypothetical use case, an organization would run an external service that exchanges LDAP credentials
@@ -1222,13 +1546,16 @@ The following `ExecCredential` manifest describes a cluster information sample.
 
 {{< feature-state for_k8s_version="v1.28" state="stable" >}}
 
-If your cluster has the API enabled, you can use the `SelfSubjectReview` API to find out how your Kubernetes cluster maps your authentication
-information to identify you as a client. This works whether you are authenticating as a user (typically representing
+If your cluster has the API enabled, you can use the `SelfSubjectReview` API to find out
+how your Kubernetes cluster maps your authentication information to identify you as a client.
+This works whether you are authenticating as a user (typically representing
 a real person) or as a ServiceAccount.
 
-`SelfSubjectReview` objects do not have any configurable fields. On receiving a request, the Kubernetes API server fills the status with the user attributes and returns it to the user.
+`SelfSubjectReview` objects do not have any configurable fields. On receiving a request,
+the Kubernetes API server fills the status with the user attributes and returns it to the user.
 
 Request example (the body would be a `SelfSubjectReview`):
+
 ```
 POST /apis/authentication.k8s.io/v1/selfsubjectreviews
 ```
@@ -1261,24 +1588,28 @@ Response example:
 }
 ```
 
-For convenience, the `kubectl auth whoami` command is present. Executing this command will produce the following output (yet different user attributes will be shown):
+For convenience, Kubernetes provides the `kubectl auth whoami` command. Executing it
+produces output like the following (though the user attributes shown will differ):
 
 * Simple output example
-  ```
-  ATTRIBUTE         VALUE
-  Username          jane.doe
-  Groups            [system:authenticated]
-  ```
+
+  ```
+  ATTRIBUTE         VALUE
+  Username          jane.doe
+  Groups            [system:authenticated]
+  ```
 
 * Complex example including extra attributes
-  ```
-  ATTRIBUTE         VALUE
-  Username          jane.doe
-  UID               b79dbf30-0c6a-11ed-861d-0242ac120002
-  Groups            [students teachers system:authenticated]
-  Extra: skills     [reading learning]
-  Extra: subjects   [math sports]
-  ```
+
+  ```
+  ATTRIBUTE         VALUE
+  Username          jane.doe
+  UID               b79dbf30-0c6a-11ed-861d-0242ac120002
+  Groups            [students teachers system:authenticated]
+  Extra: skills     [reading learning]
+  Extra: subjects   [math sports]
+  ```
+
 By providing the output flag, it is also possible to print the JSON or YAML representation of the result:
 
 {{< tabs name="self_subject_attributes_review_Example_1" >}}
@@ -1335,8 +1666,9 @@ status:
 {{% /tab %}}
 {{< /tabs >}}
 
-This feature is extremely useful when a complicated authentication flow is used in a Kubernetes cluster,
-for example, if you use [webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) or [authenticating proxy](/docs/reference/access-authn-authz/authentication/#authenticating-proxy).
+This feature is extremely useful when a complicated authentication flow is used in a Kubernetes cluster,
+for example, if you use [webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
+or [authenticating proxy](/docs/reference/access-authn-authz/authentication/#authenticating-proxy).
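For example, if you also hold RBAC permission to impersonate, you can combine this command with
impersonation to inspect how the API server resolves an impersonated identity; the username and
group below are illustrative:

```shell
# Ask the API server which identity it attributes to the request,
# while impersonating a (hypothetical) user and group.
kubectl auth whoami --as=jane.doe@example.com --as-group=developers
```

This dovetails with the note that follows: the attributes returned describe the impersonated user,
not the underlying caller.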
{{< note >}}
 The Kubernetes API server fills the `userInfo` after all authentication mechanisms are applied,
@@ -1345,10 +1677,12 @@ If you, or an authentication proxy, make a SelfSubjectReview using impersonation
 you see the user details and properties for the user that was impersonated.
 {{< /note >}}
 
-By default, all authenticated users can create `SelfSubjectReview` objects when the `APISelfSubjectReview` feature is enabled. It is allowed by the `system:basic-user` cluster role.
+By default, all authenticated users can create `SelfSubjectReview` objects when the `APISelfSubjectReview`
+feature is enabled. It is allowed by the `system:basic-user` cluster role.
 
 {{< note >}}
 You can only make `SelfSubjectReview` requests if:
+
 * the `APISelfSubjectReview`
   [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
   is enabled for your cluster (not needed for Kubernetes {{< skew currentVersion >}}, but older
@@ -1359,10 +1693,7 @@ You can only make `SelfSubjectReview` requests if:
   enabled.
 {{< /note >}}
 
-
-
 ## {{% heading "whatsnext" %}}
 
 * Read the [client authentication reference (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/)
 * Read the [client authentication reference (v1)](/docs/reference/config-api/client-authentication.v1/)
-
diff --git a/content/en/docs/reference/access-authn-authz/authorization.md b/content/en/docs/reference/access-authn-authz/authorization.md
index 9a77c7ac21124..621cc9773b474 100644
--- a/content/en/docs/reference/access-authn-authz/authorization.md
+++ b/content/en/docs/reference/access-authn-authz/authorization.md
@@ -209,6 +209,143 @@ The following flags can be used:
 
 You can choose more than one authorization module. Modules are checked in order
 so an earlier module has higher priority to allow or deny a request.
 
+## Configuring the API Server using an Authorization Config File
+
+{{< feature-state state="alpha" for_k8s_version="v1.29" >}}
+
+The Kubernetes API server's authorizer chain can be configured using a
+configuration file.
+
+You specify the path to that authorization configuration using the
+`--authorization-config` command line argument. This feature enables the
+creation of authorization chains with multiple webhooks, with well-defined
+parameters that validate requests in a defined order, and allows fine-grained
+control, such as an explicit Deny on failures. An example configuration with
+all possible values is provided below.
+
+In order to customize the authorizer chain, you need to enable the
+`StructuredAuthorizationConfiguration` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/).
+
+Note: When the feature is enabled, setting both `--authorization-config` and
+configuring an authorization webhook using the `--authorization-mode` and
+`--authorization-webhook-*` command line flags is not allowed. If both are
+set, the API server reports an error and exits immediately.
+
+{{< caution >}}
+While the feature is Alpha or Beta, nothing changes if you keep using the
+command line flags. When the feature graduates to Beta, the feature flag will
+be turned on by default. The flag will be removed when the feature goes GA.
+
+When configuring the authorizer chain using a config file, make sure all the
+apiserver nodes have the file. Also, take note of the apiserver configuration
+when upgrading or downgrading a cluster. For example, if upgrading to v1.29+
+clusters and using the config file, you would need to make sure the config file
+exists before upgrading the cluster.
When downgrading to v1.28, you would need +to add the flags back to their bootstrap mechanism. +{{< /caution >}} + +```yaml +# +# DO NOT USE THE CONFIG AS IS. THIS IS AN EXAMPLE. +# +apiVersion: apiserver.config.k8s.io/v1alpha1 +kind: AuthorizationConfiguration +# authorizers are defined in order of precedence +authorizers: + - type: Webhook + # Name used to describe the authorizer + # This is explicitly used in monitoring machinery for metrics + # Note: + # - Validation for this field is similar to how K8s labels are validated today. + # Required, with no default + name: webhook + webhook: + # The duration to cache 'authorized' responses from the webhook + # authorizer. + # Same as setting `--authorization-webhook-cache-authorized-ttl` flag + # Default: 5m0s + authorizedTTL: 30s + # The duration to cache 'unauthorized' responses from the webhook + # authorizer. + # Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + # Default: 30s + unauthorizedTTL: 30s + # Timeout for the webhook request + # Maximum allowed is 30s. + # Required, with no default. + timeout: 3s + # The API version of the authorization.k8s.io SubjectAccessReview to + # send to and expect from the webhook. + # Same as setting `--authorization-webhook-version` flag + # Required, with no default + # Valid values: v1beta1, v1 + subjectAccessReviewVersion: v1 + # MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + # version the CEL expressions are evaluated against + # Valid values: v1 + # Required only if matchConditions are specified, no default value + matchConditionSubjectAccessReviewVersion: v1 + # Controls the authorization decision when a webhook request fails to + # complete or returns a malformed response or errors evaluating + # matchConditions. + # Valid values: + # - NoOpinion: continue to subsequent authorizers to see if one of + # them allows the request + # - Deny: reject the request without consulting subsequent authorizers + # Required, with no default. + failurePolicy: Deny + connectionInfo: + # Controls how the webhook should communicate with the server. + # Valid values: + # - KubeConfig: use the file specified in kubeConfigFile to locate the + # server. + # - InClusterConfig: use the in-cluster configuration to call the + # SubjectAccessReview API hosted by kube-apiserver. This mode is not + # allowed for kube-apiserver. + type: KubeConfig + # Path to KubeConfigFile for connection info + # Required, if connectionInfo.Type is KubeConfig + kubeConfigFile: /kube-system-authz-webhook.yaml + # matchConditions is a list of conditions that must be met for a request to be sent to this + # webhook. An empty list of matchConditions matches all requests. + # There are a maximum of 64 match conditions allowed. + # + # The exact matching logic is (in order): + # 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + # 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + # 3. If at least one matchCondition evaluates to an error (but none are FALSE): + # - If failurePolicy=Deny, then the webhook rejects the request + # - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + matchConditions: + # expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + # CEL expressions have access to the contents of the SubjectAccessReview in v1 version. 
+      # If version specified by subjectAccessReviewVersion in the request variable is v1beta1,
+      # the contents would be converted to the v1 version before evaluating the CEL expression.
+      #
+      # Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+      #
+      # only send resource requests to the webhook
+      - expression: has(request.resourceAttributes)
+      # only intercept requests to kube-system
+      - expression: request.resourceAttributes.namespace == 'kube-system'
+      # don't intercept requests from kube-system service accounts
+      - expression: !('system:serviceaccounts:kube-system' in request.user.groups)
+  - type: Node
+    name: node
+  - type: RBAC
+    name: rbac
+  - type: Webhook
+    name: in-cluster-authorizer
+    webhook:
+      authorizedTTL: 5m
+      unauthorizedTTL: 30s
+      timeout: 3s
+      subjectAccessReviewVersion: v1
+      failurePolicy: NoOpinion
+      connectionInfo:
+        type: InClusterConfig
+```
+
 ## Privilege escalation via workload creation or edits {#privilege-escalation-via-pod-creation}
 
 Users who can create/edit pods in a namespace, either directly or through a [controller](/docs/concepts/architecture/controller/)
@@ -241,4 +378,3 @@ This should be considered when deciding on your RBAC controls.
 
 * To learn more about Authentication, see **Authentication** in [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access/).
 * To learn more about Admission Control, see [Using Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/).
-
diff --git a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
index ae33b68c29e84..ec13b0badefca 100644
--- a/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
+++ b/content/en/docs/reference/access-authn-authz/certificate-signing-requests.md
@@ -371,7 +371,7 @@ you like. If you want to add a note for human consumption, use the
 {{< feature-state for_k8s_version="v1.27" state="alpha" >}}
 
 {{< note >}}
-In Kubernetes {{< skew currentVersion >}}, you must enable the `ClusterTrustBundles`
+In Kubernetes {{< skew currentVersion >}}, you must enable the `ClusterTrustBundle`
 [feature gate](/docs/reference/command-line-tools-reference/feature-gates/)
 _and_ the `certificates.k8s.io/v1alpha1`
 {{< glossary_tooltip text="API group" term_id="api-group" >}} in order to use
@@ -472,6 +472,12 @@ such as role-based access control.
 To distinguish them from signer-linked ClusterTrustBundles, the names of
 signer-unlinked ClusterTrustBundles **must not** contain a colon (`:`).
 
+### Accessing ClusterTrustBundles from pods {#ctb-projection}
+
+{{< feature-state for_k8s_version="v1.29" state="alpha" >}}
+
+The contents of ClusterTrustBundles can be injected into the container filesystem, similar to ConfigMaps and Secrets. See the [clusterTrustBundle projected volume source](/docs/concepts/storage/projected-volumes#clustertrustbundle) for more details.
+
 
 
 ## How to issue a certificate for a user {#normal-user}
 
@@ -488,7 +494,7 @@ O is the group that this user will belong to.
You can refer to
 
 ```shell
 openssl genrsa -out myuser.key 2048
-openssl req -new -key myuser.key -out myuser.csr
+openssl req -new -key myuser.key -out myuser.csr -subj "/CN=myuser"
 ```
 
 ### Create a CertificateSigningRequest {#create-certificatessigningrequest}
 
diff --git a/content/en/docs/reference/access-authn-authz/kubelet-tls-bootstrapping.md b/content/en/docs/reference/access-authn-authz/kubelet-tls-bootstrapping.md
index c1b33647407c1..c4393b261e205 100644
--- a/content/en/docs/reference/access-authn-authz/kubelet-tls-bootstrapping.md
+++ b/content/en/docs/reference/access-authn-authz/kubelet-tls-bootstrapping.md
@@ -11,31 +11,35 @@ weight: 120
 
 
 
-In a Kubernetes cluster, the components on the worker nodes - kubelet and kube-proxy - need to communicate with Kubernetes control plane components, specifically kube-apiserver.
-In order to ensure that communication is kept private, not interfered with, and ensure that each component of the cluster is talking to another trusted component, we strongly
+In a Kubernetes cluster, the components on the worker nodes - kubelet and kube-proxy - need
+to communicate with Kubernetes control plane components, specifically kube-apiserver.
+In order to ensure that communication is kept private, not interfered with, and ensure that
+each component of the cluster is talking to another trusted component, we strongly
 recommend using client TLS certificates on nodes.
 
-The normal process of bootstrapping these components, especially worker nodes that need certificates so they can communicate safely with kube-apiserver,
-can be a challenging process as it is often outside of the scope of Kubernetes and requires significant additional work.
+The normal process of bootstrapping these components, especially worker nodes that need certificates
+so they can communicate safely with kube-apiserver, can be a challenging process as it is often outside
+of the scope of Kubernetes and requires significant additional work.
 This, in turn, can make it challenging to initialize or scale a cluster.
 
-In order to simplify the process, beginning in version 1.4, Kubernetes introduced a certificate request and signing API. The proposal can be
-found [here](https://github.com/kubernetes/kubernetes/pull/20439).
+In order to simplify the process, beginning in version 1.4, Kubernetes introduced a certificate request
+and signing API. The proposal can be found [here](https://github.com/kubernetes/kubernetes/pull/20439).
 
 This document describes the process of node initialization, how to set up TLS client certificate
 bootstrapping for kubelets, and how it works.
 
-## Initialization Process
+## Initialization process
 
 When a worker node starts up, the kubelet does the following:
 
 1. Look for its `kubeconfig` file
-2. Retrieve the URL of the API server and credentials, normally a TLS key and signed certificate from the `kubeconfig` file
-3. Attempt to communicate with the API server using the credentials.
+1. Retrieve the URL of the API server and credentials, normally a TLS key and signed certificate from the `kubeconfig` file
+1. Attempt to communicate with the API server using the credentials.
 
-Assuming that the kube-apiserver successfully validates the kubelet's credentials, it will treat the kubelet as a valid node, and begin to assign pods to it.
+Assuming that the kube-apiserver successfully validates the kubelet's credentials,
+it will treat the kubelet as a valid node, and begin to assign pods to it.
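For orientation, the `kubeconfig` file consulted in steps 1 and 2 might look like the following
minimal sketch; the server URL, file paths, and names here are illustrative rather than prescriptive:

```yaml
# Hypothetical kubelet kubeconfig with pre-provisioned client credentials.
apiVersion: v1
kind: Config
clusters:
- name: my-cluster                    # illustrative cluster name
  cluster:
    certificate-authority: /var/lib/kubernetes/ca.pem
    server: https://my-apiserver.example.com:6443
users:
- name: system:node:node-1            # illustrative node identity
  user:
    client-certificate: /var/lib/kubelet/pki/kubelet-client.pem
    client-key: /var/lib/kubelet/pki/kubelet-client-key.pem
contexts:
- name: default
  context:
    cluster: my-cluster
    user: system:node:node-1
current-context: default
```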
Note that the above process depends upon: @@ -45,35 +49,36 @@ Note that the above process depends upon: All of the following are responsibilities of whoever sets up and manages the cluster: 1. Creating the CA key and certificate -2. Distributing the CA certificate to the control plane nodes, where kube-apiserver is running -3. Creating a key and certificate for each kubelet; strongly recommended to have a unique one, with a unique CN, for each kubelet -4. Signing the kubelet certificate using the CA key -5. Distributing the kubelet key and signed certificate to the specific node on which the kubelet is running +1. Distributing the CA certificate to the control plane nodes, where kube-apiserver is running +1. Creating a key and certificate for each kubelet; strongly recommended to have a unique one, with a unique CN, for each kubelet +1. Signing the kubelet certificate using the CA key +1. Distributing the kubelet key and signed certificate to the specific node on which the kubelet is running -The TLS Bootstrapping described in this document is intended to simplify, and partially or even completely automate, steps 3 onwards, as these are the most common when initializing or scaling +The TLS Bootstrapping described in this document is intended to simplify, and partially or even +completely automate, steps 3 onwards, as these are the most common when initializing or scaling a cluster. -### Bootstrap Initialization +### Bootstrap initialization In the bootstrap initialization process, the following occurs: 1. kubelet begins -2. kubelet sees that it does _not_ have a `kubeconfig` file -3. kubelet searches for and finds a `bootstrap-kubeconfig` file -4. kubelet reads its bootstrap file, retrieving the URL of the API server and a limited usage "token" -5. kubelet connects to the API server, authenticates using the token -6. kubelet now has limited credentials to create and retrieve a certificate signing request (CSR) -7. kubelet creates a CSR for itself with the signerName set to `kubernetes.io/kube-apiserver-client-kubelet` -8. CSR is approved in one of two ways: +1. kubelet sees that it does _not_ have a `kubeconfig` file +1. kubelet searches for and finds a `bootstrap-kubeconfig` file +1. kubelet reads its bootstrap file, retrieving the URL of the API server and a limited usage "token" +1. kubelet connects to the API server, authenticates using the token +1. kubelet now has limited credentials to create and retrieve a certificate signing request (CSR) +1. kubelet creates a CSR for itself with the signerName set to `kubernetes.io/kube-apiserver-client-kubelet` +1. CSR is approved in one of two ways: * If configured, kube-controller-manager automatically approves the CSR * If configured, an outside process, possibly a person, approves the CSR using the Kubernetes API or via `kubectl` -9. Certificate is created for the kubelet -10. Certificate is issued to the kubelet -11. kubelet retrieves the certificate -12. kubelet creates a proper `kubeconfig` with the key and signed certificate -13. kubelet begins normal operation -14. Optional: if configured, kubelet automatically requests renewal of the certificate when it is close to expiry -15. The renewed certificate is approved and issued, either automatically or manually, depending on configuration. +1. Certificate is created for the kubelet +1. Certificate is issued to the kubelet +1. kubelet retrieves the certificate +1. kubelet creates a proper `kubeconfig` with the key and signed certificate +1. kubelet begins normal operation +1. 
Optional: if configured, kubelet automatically requests renewal of the certificate when it is close to expiry +1. The renewed certificate is approved and issued, either automatically or manually, depending on configuration. The rest of this document describes the necessary steps to configure TLS Bootstrapping, and its limitations. @@ -90,13 +95,16 @@ In addition, you need your Kubernetes Certificate Authority (CA). ## Certificate Authority -As without bootstrapping, you will need a Certificate Authority (CA) key and certificate. As without bootstrapping, these will be used -to sign the kubelet certificate. As before, it is your responsibility to distribute them to control plane nodes. +As without bootstrapping, you will need a Certificate Authority (CA) key and certificate. +As without bootstrapping, these will be used to sign the kubelet certificate. As before, +it is your responsibility to distribute them to control plane nodes. -For the purposes of this document, we will assume these have been distributed to control plane nodes at `/var/lib/kubernetes/ca.pem` (certificate) and `/var/lib/kubernetes/ca-key.pem` (key). +For the purposes of this document, we will assume these have been distributed to control +plane nodes at `/var/lib/kubernetes/ca.pem` (certificate) and `/var/lib/kubernetes/ca-key.pem` (key). We will refer to these as "Kubernetes CA certificate and key". -All Kubernetes components that use these certificates - kubelet, kube-apiserver, kube-controller-manager - assume the key and certificate to be PEM-encoded. +All Kubernetes components that use these certificates - kubelet, kube-apiserver, +kube-controller-manager - assume the key and certificate to be PEM-encoded. ## kube-apiserver configuration @@ -116,24 +124,27 @@ containing the signing certificate, for example ### Initial bootstrap authentication -In order for the bootstrapping kubelet to connect to kube-apiserver and request a certificate, it must first authenticate to the server. -You can use any [authenticator](/docs/reference/access-authn-authz/authentication/) that can authenticate the kubelet. +In order for the bootstrapping kubelet to connect to kube-apiserver and request a certificate, +it must first authenticate to the server. You can use any +[authenticator](/docs/reference/access-authn-authz/authentication/) that can authenticate the kubelet. While any authentication strategy can be used for the kubelet's initial bootstrap credentials, the following two authenticators are recommended for ease of provisioning. 1. [Bootstrap Tokens](#bootstrap-tokens) -2. [Token authentication file](#token-authentication-file) +1. [Token authentication file](#token-authentication-file) -Using bootstrap tokens is a simpler and more easily managed method to authenticate kubelets, and does not require any additional flags when starting kube-apiserver. +Using bootstrap tokens is a simpler and more easily managed method to authenticate kubelets, +and does not require any additional flags when starting kube-apiserver. Whichever method you choose, the requirement is that the kubelet be able to authenticate as a user with the rights to: 1. create and retrieve CSRs -2. be automatically approved to request node client certificates, if automatic approval is enabled. +1. be automatically approved to request node client certificates, if automatic approval is enabled. -A kubelet authenticating using bootstrap tokens is authenticated as a user in the group `system:bootstrappers`, which is the standard method to use. 
+A kubelet authenticating using bootstrap tokens is authenticated as a user in the group +`system:bootstrappers`, which is the standard method to use. As this feature matures, you should ensure tokens are bound to a Role Based Access Control (RBAC) policy @@ -144,17 +155,20 @@ particular bootstrap group's access when you are done provisioning the nodes. #### Bootstrap tokens -Bootstrap tokens are described in detail [here](/docs/reference/access-authn-authz/bootstrap-tokens/). These are tokens that are stored as secrets in the Kubernetes cluster, -and then issued to the individual kubelet. You can use a single token for an entire cluster, or issue one per worker node. +Bootstrap tokens are described in detail [here](/docs/reference/access-authn-authz/bootstrap-tokens/). +These are tokens that are stored as secrets in the Kubernetes cluster, and then issued to the individual kubelet. +You can use a single token for an entire cluster, or issue one per worker node. The process is two-fold: 1. Create a Kubernetes secret with the token ID, secret and scope(s). -2. Issue the token to the kubelet +1. Issue the token to the kubelet From the kubelet's perspective, one token is like another and has no special meaning. -From the kube-apiserver's perspective, however, the bootstrap token is special. Due to its `type`, `namespace` and `name`, kube-apiserver recognizes it as a special token, -and grants anyone authenticating with that token special bootstrap rights, notably treating them as a member of the `system:bootstrappers` group. This fulfills a basic requirement +From the kube-apiserver's perspective, however, the bootstrap token is special. +Due to its `type`, `namespace` and `name`, kube-apiserver recognizes it as a special token, +and grants anyone authenticating with that token special bootstrap rights, notably treating +them as a member of the `system:bootstrappers` group. This fulfills a basic requirement for TLS bootstrapping. The details for creating the secret are available [here](/docs/reference/access-authn-authz/bootstrap-tokens/). @@ -198,7 +212,8 @@ certificate signing request (CSR) as well as retrieve it when done. Fortunately, Kubernetes ships with a `ClusterRole` with precisely these (and only these) permissions, `system:node-bootstrapper`. -To do this, you only need to create a `ClusterRoleBinding` that binds the `system:bootstrappers` group to the cluster role `system:node-bootstrapper`. +To do this, you only need to create a `ClusterRoleBinding` that binds the `system:bootstrappers` +group to the cluster role `system:node-bootstrapper`. ```yaml # enable bootstrapping nodes to create CSR @@ -237,9 +252,10 @@ In order for the controller-manager to sign certificates, it needs the following As described earlier, you need to create a Kubernetes CA key and certificate, and distribute it to the control plane nodes. These will be used by the controller-manager to sign the kubelet certificates. -Since these signed certificates will, in turn, be used by the kubelet to authenticate as a regular kubelet to kube-apiserver, it is important that the CA -provided to the controller-manager at this stage also be trusted by kube-apiserver for authentication. This is provided to kube-apiserver -with the flag `--client-ca-file=FILENAME` (for example, `--client-ca-file=/var/lib/kubernetes/ca.pem`), as described in the kube-apiserver configuration section. 
+Since these signed certificates will, in turn, be used by the kubelet to authenticate as a regular kubelet +to kube-apiserver, it is important that the CA provided to the controller-manager at this stage also be +trusted by kube-apiserver for authentication. This is provided to kube-apiserver with the flag `--client-ca-file=FILENAME` +(for example, `--client-ca-file=/var/lib/kubernetes/ca.pem`), as described in the kube-apiserver configuration section. To provide the Kubernetes CA key and certificate to kube-controller-manager, use the following flags: @@ -266,10 +282,14 @@ RBAC permissions to the correct group. There are two distinct sets of permissions: -* `nodeclient`: If a node is creating a new certificate for a node, then it does not have a certificate yet. It is authenticating using one of the tokens listed above, and thus is part of the group `system:bootstrappers`. -* `selfnodeclient`: If a node is renewing its certificate, then it already has a certificate (by definition), which it uses continuously to authenticate as part of the group `system:nodes`. +* `nodeclient`: If a node is creating a new certificate for a node, then it does not have a certificate yet. + It is authenticating using one of the tokens listed above, and thus is part of the group `system:bootstrappers`. +* `selfnodeclient`: If a node is renewing its certificate, then it already has a certificate (by definition), + which it uses continuously to authenticate as part of the group `system:nodes`. -To enable the kubelet to request and receive a new certificate, create a `ClusterRoleBinding` that binds the group in which the bootstrapping node is a member `system:bootstrappers` to the `ClusterRole` that grants it permission, `system:certificates.k8s.io:certificatesigningrequests:nodeclient`: +To enable the kubelet to request and receive a new certificate, create a `ClusterRoleBinding` that binds +the group in which the bootstrapping node is a member `system:bootstrappers` to the `ClusterRole` that +grants it permission, `system:certificates.k8s.io:certificatesigningrequests:nodeclient`: ```yaml # Approve all CSRs for the group "system:bootstrappers" @@ -287,7 +307,8 @@ roleRef: apiGroup: rbac.authorization.k8s.io ``` -To enable the kubelet to renew its own client certificate, create a `ClusterRoleBinding` that binds the group in which the fully functioning node is a member `system:nodes` to the `ClusterRole` that +To enable the kubelet to renew its own client certificate, create a `ClusterRoleBinding` that binds +the group in which the fully functioning node is a member `system:nodes` to the `ClusterRole` that grants it permission, `system:certificates.k8s.io:certificatesigningrequests:selfnodeclient`: ```yaml @@ -316,10 +337,10 @@ built-in approver doesn't explicitly deny CSRs. It only ignores unauthorized requests. The controller also prunes expired certificates as part of garbage collection. - ## kubelet configuration -Finally, with the control plane nodes properly set up and all of the necessary authentication and authorization in place, we can configure the kubelet. +Finally, with the control plane nodes properly set up and all of the necessary +authentication and authorization in place, we can configure the kubelet. The kubelet requires the following configuration to bootstrap: @@ -385,7 +406,7 @@ referencing the generated key and obtained certificate is written to the path specified by `--kubeconfig`. The certificate and key file will be placed in the directory specified by `--cert-dir`. 
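Putting the pieces above together, a kubelet set up for TLS bootstrapping might be started with
flags along these lines; the paths are illustrative, and a real invocation would carry many more
options:

```shell
# Sketch of the bootstrap-related kubelet flags only.
kubelet \
  --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig \
  --kubeconfig=/var/lib/kubelet/kubeconfig \
  --cert-dir=/var/lib/kubelet/pki
```

On first start the file named by `--kubeconfig` does not exist yet, so the kubelet falls back to
the bootstrap kubeconfig; once the CSR is approved, the generated credentials are written to the
paths shown above.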
-### Client and Serving Certificates +### Client and serving certificates All of the above relate to kubelet _client_ certificates, specifically, the certificates a kubelet uses to authenticate to kube-apiserver. @@ -402,7 +423,7 @@ be used as serving certificates, or `server auth`. However, you _can_ enable its server certificate, at least partially, via certificate rotation. -### Certificate Rotation +### Certificate rotation Kubernetes v1.8 and higher kubelet implements features for enabling rotation of its client and/or serving certificates. Note, rotation of serving @@ -420,7 +441,7 @@ or pass the following command line argument to the kubelet (deprecated): Enabling `RotateKubeletServerCertificate` causes the kubelet **both** to request a serving certificate after bootstrapping its client credentials **and** to rotate that -certificate. To enable this behavior, use the field `serverTLSBootstrap` of +certificate. To enable this behavior, use the field `serverTLSBootstrap` of the [kubelet configuration file](/docs/tasks/administer-cluster/kubelet-config-file/) or pass the following command line argument to the kubelet (deprecated): @@ -430,8 +451,8 @@ or pass the following command line argument to the kubelet (deprecated): {{< note >}} The CSR approving controllers implemented in core Kubernetes do not -approve node _serving_ certificates for [security -reasons](https://github.com/kubernetes/community/pull/1982). To use +approve node _serving_ certificates for +[security reasons](https://github.com/kubernetes/community/pull/1982). To use `RotateKubeletServerCertificate` operators need to run a custom approving controller, or manually approve the serving certificate requests. @@ -439,9 +460,9 @@ A deployment-specific approval process for kubelet serving certificates should t 1. are requested by nodes (ensure the `spec.username` field is of the form `system:node:` and `spec.groups` contains `system:nodes`) -2. request usages for a serving certificate (ensure `spec.usages` contains `server auth`, +1. request usages for a serving certificate (ensure `spec.usages` contains `server auth`, optionally contains `digital signature` and `key encipherment`, and contains no other usages) -3. only have IP and DNS subjectAltNames that belong to the requesting node, +1. only have IP and DNS subjectAltNames that belong to the requesting node, and have no URI and Email subjectAltNames (parse the x509 Certificate Signing Request in `spec.request` to verify `subjectAltNames`) @@ -457,8 +478,11 @@ Like the kubelet, these other components also require a method of authenticating You have several options for generating these credentials: * The old way: Create and distribute certificates the same way you did for kubelet before TLS bootstrapping -* DaemonSet: Since the kubelet itself is loaded on each node, and is sufficient to start base services, you can run kube-proxy and other node-specific services not as a standalone process, but rather as a daemonset in the `kube-system` namespace. Since it will be in-cluster, you can give it a proper service account with appropriate permissions to perform its activities. This may be the simplest way to configure such services. - +* DaemonSet: Since the kubelet itself is loaded on each node, and is sufficient to start base services, + you can run kube-proxy and other node-specific services not as a standalone process, but rather as a + daemonset in the `kube-system` namespace. 
Since it will be in-cluster, you can give it a proper service
+  account with appropriate permissions to perform its activities. This may be the simplest way to configure
+  such services.
 
 ## kubectl approval
 
diff --git a/content/en/docs/reference/access-authn-authz/node.md b/content/en/docs/reference/access-authn-authz/node.md
index 4e9c0fc411f47..2df5d8b674dc7 100644
--- a/content/en/docs/reference/access-authn-authz/node.md
+++ b/content/en/docs/reference/access-authn-authz/node.md
@@ -9,7 +9,8 @@ weight: 90
 ---
 
 
-Node authorization is a special-purpose authorization mode that specifically authorizes API requests made by kubelets.
+Node authorization is a special-purpose authorization mode that specifically
+authorizes API requests made by kubelets.
 
 
 
@@ -23,40 +24,58 @@ Read operations:
 
 * endpoints
 * nodes
 * pods
-* secrets, configmaps, persistent volume claims and persistent volumes related to pods bound to the kubelet's node
+* secrets, configmaps, persistent volume claims and persistent volumes related
+  to pods bound to the kubelet's node
 
 Write operations:
 
-* nodes and node status (enable the `NodeRestriction` admission plugin to limit a kubelet to modify its own node)
-* pods and pod status (enable the `NodeRestriction` admission plugin to limit a kubelet to modify pods bound to itself)
+* nodes and node status (enable the `NodeRestriction` admission plugin to limit
+  a kubelet to modify its own node)
+* pods and pod status (enable the `NodeRestriction` admission plugin to limit a
+  kubelet to modify pods bound to itself)
 * events
 
 Auth-related operations:
 
-* read/write access to the [CertificateSigningRequests API](/docs/reference/access-authn-authz/certificate-signing-requests/) for TLS bootstrapping
-* the ability to create TokenReviews and SubjectAccessReviews for delegated authentication/authorization checks
+* read/write access to the
+  [CertificateSigningRequests API](/docs/reference/access-authn-authz/certificate-signing-requests/)
+  for TLS bootstrapping
+* the ability to create TokenReviews and SubjectAccessReviews for delegated
+  authentication/authorization checks
 
-In future releases, the node authorizer may add or remove permissions to ensure kubelets
-have the minimal set of permissions required to operate correctly.
+In future releases, the node authorizer may add or remove permissions to ensure
+kubelets have the minimal set of permissions required to operate correctly.
 
-In order to be authorized by the Node authorizer, kubelets must use a credential that identifies them as
-being in the `system:nodes` group, with a username of `system:node:<nodeName>`.
+In order to be authorized by the Node authorizer, kubelets must use a credential
+that identifies them as being in the `system:nodes` group, with a username of
+`system:node:<nodeName>`.
 This group and user name format match the identity created for each kubelet as part of
 [kubelet TLS bootstrapping](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/).
 
-The value of `<nodeName>` **must** match precisely the name of the node as registered by the kubelet. By default, this is the host name as provided by `hostname`, or overridden via the [kubelet option](/docs/reference/command-line-tools-reference/kubelet/) `--hostname-override`. However, when using the `--cloud-provider` kubelet option, the specific hostname may be determined by the cloud provider, ignoring the local `hostname` and the `--hostname-override` option.
-For specifics about how the kubelet determines the hostname, see the [kubelet options reference](/docs/reference/command-line-tools-reference/kubelet/).
+The value of `<nodeName>` **must** match precisely the name of the node as
+registered by the kubelet. By default, this is the host name as provided by
+`hostname`, or overridden via the
+[kubelet option](/docs/reference/command-line-tools-reference/kubelet/)
+`--hostname-override`. However, when using the `--cloud-provider` kubelet
+option, the specific hostname may be determined by the cloud provider, ignoring
+the local `hostname` and the `--hostname-override` option.
+For specifics about how the kubelet determines the hostname, see the
+[kubelet options reference](/docs/reference/command-line-tools-reference/kubelet/).
 
 To enable the Node authorizer, start the apiserver with `--authorization-mode=Node`.
 
-To limit the API objects kubelets are able to write, enable the [NodeRestriction](/docs/reference/access-authn-authz/admission-controllers#noderestriction) admission plugin by starting the apiserver with `--enable-admission-plugins=...,NodeRestriction,...`
+To limit the API objects kubelets are able to write, enable the
+[NodeRestriction](/docs/reference/access-authn-authz/admission-controllers#noderestriction)
+admission plugin by starting the apiserver with
+`--enable-admission-plugins=...,NodeRestriction,...`
 
 ## Migration considerations
 
 ### Kubelets outside the `system:nodes` group
 
-Kubelets outside the `system:nodes` group would not be authorized by the `Node` authorization mode,
-and would need to continue to be authorized via whatever mechanism currently authorizes them.
+Kubelets outside the `system:nodes` group would not be authorized by the `Node`
+authorization mode, and would need to continue to be authorized via whatever
+mechanism currently authorizes them.
 The node admission plugin would not restrict requests from these kubelets.
 
 ### Kubelets with undifferentiated usernames
 
@@ -70,29 +89,3 @@ and would need to continue to be authorized via whatever mechanism currently aut
 The `NodeRestriction` admission plugin would ignore requests from these kubelets, since the default
 node identifier implementation would not consider that a node identity.
 
-### Upgrades from previous versions using RBAC
-
-Upgraded pre-1.7 clusters using [RBAC](/docs/reference/access-authn-authz/rbac/) will continue functioning as-is because the `system:nodes` group binding will already exist.
-
-If a cluster admin wishes to start using the `Node` authorizer and `NodeRestriction` admission plugin
-to limit node access to the API, that can be done non-disruptively:
-
-1. Enable the `Node` authorization mode (`--authorization-mode=Node,RBAC`) and the `NodeRestriction` admission plugin
-2. Ensure all kubelets' credentials conform to the group/username requirements
-3. Audit apiserver logs to ensure the `Node` authorizer is not rejecting requests from kubelets (no persistent `NODE DENY` messages logged)
-4. Delete the `system:node` cluster role binding
-
-### RBAC Node Permissions
-
-In 1.6, the `system:node` cluster role was automatically bound to the `system:nodes` group when using the [RBAC Authorization mode](/docs/reference/access-authn-authz/rbac/).
-
-In 1.7, the automatic binding of the `system:nodes` group to the `system:node` role is deprecated
-because the node authorizer accomplishes the same purpose with the benefit of additional restrictions
-on secret and configmap access.
If the `Node` and `RBAC` authorization modes are both enabled,
-the automatic binding of the `system:nodes` group to the `system:node` role is not created in 1.7.
-
-In 1.8, the binding will not be created at all.
-
-When using RBAC, the `system:node` cluster role will continue to be created,
-for compatibility with deployment methods that bind other users or groups to that role.
-
diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md
index 4c2e2671d04f2..f2f1025701bf8 100644
--- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md
+++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md
@@ -1,9 +1,7 @@
 ---
 reviewers:
-  - bprashanth
-  - davidopp
-  - lavalamp
   - liggitt
+  - enj
 title: Managing Service Accounts
 content_type: concept
 weight: 50
@@ -140,6 +138,62 @@ using [TokenRequest](/docs/reference/kubernetes-api/authentication-resources/tok
 to obtain short-lived API access tokens is recommended instead.
 {{< /note >}}
 
+## Auto-generated legacy ServiceAccount token clean up {#auto-generated-legacy-serviceaccount-token-clean-up}
+
+Before version 1.24, Kubernetes automatically generated Secret-based tokens for
+ServiceAccounts. To distinguish between automatically generated tokens and
+manually created ones, Kubernetes checks for a reference from the
+ServiceAccount's secrets field. If the Secret is referenced in the `secrets`
+field, it is considered an auto-generated legacy token. Otherwise, it is
+considered a manually created legacy token. For example:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: build-robot
+  namespace: default
+secrets:
+  - name: build-robot-secret # usually NOT present for a manually generated token
+```
+
+Beginning with version 1.29, legacy ServiceAccount tokens that were generated
+automatically will be marked as invalid if they remain unused for a certain
+period of time (one year by default). Tokens that then remain unused for a
+further defined period (again, one year by default) will subsequently be
+purged by the control plane.
+
+If a user attempts to use an invalidated auto-generated token, the token validator will
+
+1. add an audit annotation for the key-value pair
+   `authentication.k8s.io/legacy-token-invalidated: <secret name>/<namespace>`,
+1. increment the `invalid_legacy_auto_token_uses_total` metric count,
+1. update the Secret label `kubernetes.io/legacy-token-last-used` with the new
+   date,
+1. return an error indicating that the token has been invalidated.
+
+When receiving this validation error, users can update the Secret to remove the
+`kubernetes.io/legacy-token-invalid-since` label to temporarily allow use of
+this token.
+
+Here's an example of an auto-generated legacy token that has been marked with the
+`kubernetes.io/legacy-token-last-used` and `kubernetes.io/legacy-token-invalid-since`
+labels:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: build-robot-secret
+  namespace: default
+  labels:
+    kubernetes.io/legacy-token-last-used: 2022-10-24
+    kubernetes.io/legacy-token-invalid-since: 2023-10-25
+  annotations:
+    kubernetes.io/service-account.name: build-robot
+type: kubernetes.io/service-account-token
+```
+
 ## Control plane details
 
 ### ServiceAccount controller
@@ -193,6 +247,51 @@ it does the following when a Pod is created:
 
 1.
If the spec of the incoming Pod doesn't already contain any `imagePullSecrets`,
    then the admission controller adds `imagePullSecrets`, copying them from the `ServiceAccount`.
 
+### Legacy ServiceAccount token tracking controller
+
+{{< feature-state for_k8s_version="v1.28" state="stable" >}}
+
+This controller generates a ConfigMap called
+`kube-system/kube-apiserver-legacy-service-account-token-tracking` in the
+`kube-system` namespace. The ConfigMap records the timestamp when legacy service
+account tokens began to be monitored by the system.
+
+### Legacy ServiceAccount token cleaner
+
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}
+
+The legacy ServiceAccount token cleaner runs as part of the
+`kube-controller-manager` and checks every 24 hours to see if any auto-generated
+legacy ServiceAccount token has not been used in a *specified amount of time*.
+If so, the cleaner marks those tokens as invalid.
+
+The cleaner works by first checking the ConfigMap created by the control plane
+(provided that `LegacyServiceAccountTokenTracking` is enabled). If the current
+time is a *specified amount of time* after the date in the ConfigMap, the
+cleaner then loops through the list of Secrets in the cluster and evaluates each
+Secret that has the type `kubernetes.io/service-account-token`.
+
+If a Secret meets all of the following conditions, the cleaner marks it as
+invalid:
+
+- The Secret is auto-generated, meaning that it is bi-directionally referenced
+  by a ServiceAccount.
+- The Secret is not currently mounted by any pods.
+- The Secret has not been used in a *specified amount of time* since it was
+  created or since it was last used.
+
+The cleaner marks a Secret invalid by adding a label called
+`kubernetes.io/legacy-token-invalid-since` to the Secret, with the current date
+as the value. If an invalid Secret is not used in a *specified amount of time*,
+the cleaner will delete it.
+
+{{< note >}}
+Each *specified amount of time* above defaults to one year. The cluster
+administrator can configure this value through the
+`--legacy-service-account-token-clean-up-period` command line argument for the
+`kube-controller-manager` component.
+{{< /note >}}
+
 ### TokenRequest API
 
 {{< feature-state for_k8s_version="v1.22" state="stable" >}}
@@ -300,6 +399,12 @@ token: ...
 
 If you launch a new Pod into the `examplens` namespace, it can use the `myserviceaccount`
 service-account-token Secret that you just created.
 
+{{< caution >}}
+Do not reference manually created Secrets in the `secrets` field of a
+ServiceAccount; otherwise, a manually created Secret will be cleaned up if it is
+not used for a long time. Refer to
+[auto-generated legacy ServiceAccount token clean up](#auto-generated-legacy-serviceaccount-token-clean-up).
+{{< /caution >}} + ## Delete/invalidate a ServiceAccount token {#delete-token} If you know the name of the Secret that contains the token you want to remove: @@ -340,30 +445,6 @@ Then, delete the Secret you now know the name of: kubectl -n examplens delete secret/example-automated-thing-token-zyxwv ``` -The control plane spots that the ServiceAccount is missing its Secret, -and creates a replacement: - -```shell -kubectl -n examplens get serviceaccount/example-automated-thing -o yaml -``` - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"v1","kind":"ServiceAccount","metadata":{"annotations":{},"name":"example-automated-thing","namespace":"examplens"}} - creationTimestamp: "2019-07-21T07:07:07Z" - name: example-automated-thing - namespace: examplens - resourceVersion: "1026" - selfLink: /api/v1/namespaces/examplens/serviceaccounts/example-automated-thing - uid: f23fd170-66f2-4697-b049-e1e266b7f835 -secrets: - - name: example-automated-thing-token-4rdrh -``` - ## Clean up If you created a namespace `examplens` to experiment with, you can remove it: diff --git a/content/en/docs/reference/access-authn-authz/validating-admission-policy.md b/content/en/docs/reference/access-authn-authz/validating-admission-policy.md index 269300c0d5ac9..58d351c163667 100644 --- a/content/en/docs/reference/access-authn-authz/validating-admission-policy.md +++ b/content/en/docs/reference/access-authn-authz/validating-admission-policy.md @@ -113,7 +113,7 @@ actions. Failures defined by the `failurePolicy` are enforced according to these actions only if the `failurePolicy` is set to `Fail` (or not specified), otherwise the failures are ignored. -See [Audit Annotations: validation falures](/docs/reference/labels-annotations-taints/audit-annotations/#validation-policy-admission-k8s-io-validation_failure) +See [Audit Annotations: validation failures](/docs/reference/labels-annotations-taints/audit-annotations/#validation-policy-admission-k8s-io-validation-failure) for more details about the validation failure audit annotation. ### Parameter resources @@ -291,33 +291,6 @@ variables as well as some other useful variables: The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible. -Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. -Accessible property names are escaped according to the following rules when accessed in the -expression: - -| escape sequence | property name equivalent | -| ----------------------- | -----------------------| -| `__underscores__` | `__` | -| `__dot__` | `.` | -|`__dash__` | `-` | -| `__slash__` | `/` | -| `__{keyword}__` | [CEL RESERVED keyword](https://github.com/google/cel-spec/blob/v0.6.0/doc/langdef.md#syntax) | - -{{< note >}} -A **CEL reserved** keyword only needs to be escaped if the token is an exact match -for the reserved keyword. -For example, `int` in the word “sprint” would not be escaped. -{{< /note >}} - -Examples on escaping: - -|property name | rule with escaped property name | -| ----------------|-----------------------------------| -| namespace | `object.__namespace__ > 0` | -| x-prop | `object.x__dash__prop > 0` | -| redact__d | `object.redact__underscores__d > 0` | -| string | `object.startsWith('kube')` | - Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. 
Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type: @@ -530,4 +503,4 @@ kubectl create deploy --image=dev.example.com/nginx invalid The error message is similar to this. ```console error: failed to create deployment: deployments.apps "invalid" is forbidden: ValidatingAdmissionPolicy 'image-matches-namespace-environment.policy.example.com' with binding 'demo-binding-test.example.com' denied request: only prod images are allowed in namespace default -``` \ No newline at end of file +``` diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates-removed.md b/content/en/docs/reference/command-line-tools-reference/feature-gates-removed.md deleted file mode 100644 index 75cf88333d0b5..0000000000000 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates-removed.md +++ /dev/null @@ -1,956 +0,0 @@ ---- -title: Feature Gates (removed) -weight: 15 -content_type: concept ---- - - - -This page contains list of feature gates that have been removed. The information on this page is for reference. -A removed feature gate is different from a GA'ed or deprecated one in that a removed one is -no longer recognized as a valid feature gate. -However, a GA'ed or a deprecated feature gate is still recognized by the corresponding Kubernetes -components although they are unable to cause any behavior differences in a cluster. - -For feature gates that are still recognized by the Kubernetes components, please refer to -the [Alpha/Beta feature gate table](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features) -or the [Graduated/Deprecated feature gate table](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-graduated-or-deprecated-features) - -### Feature gates that are removed - -In the following table: - -- The "From" column contains the Kubernetes release when a feature is introduced - or its release stage is changed. -- The "To" column, if not empty, contains the last Kubernetes release in which - you can still use a feature gate. If the feature stage is either "Deprecated" - or "GA", the "To" column is the Kubernetes release when the feature is removed. 
- -{{< table caption="Feature Gates Removed" >}} - -| Feature | Default | Stage | From | To | -|---------|---------|-------|-------|-------| -| `Accelerators` | `false` | Alpha | 1.6 | 1.10 | -| `Accelerators` | - | Deprecated | 1.11 | 1.11 | -| `AdvancedAuditing` | `false` | Alpha | 1.7 | 1.7 | -| `AdvancedAuditing` | `true` | Beta | 1.8 | 1.11 | -| `AdvancedAuditing` | `true` | GA | 1.12 | 1.27 | -| `AffinityInAnnotations` | `false` | Alpha | 1.6 | 1.7 | -| `AffinityInAnnotations` | - | Deprecated | 1.8 | 1.8 | -| `AllowExtTrafficLocalEndpoints` | `false` | Beta | 1.4 | 1.6 | -| `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | 1.9 | -| `AllowInsecureBackendProxy` | `true` | Beta | 1.17 | 1.20 | -| `AllowInsecureBackendProxy` | `true` | GA | 1.21 | 1.25 | -| `AttachVolumeLimit` | `false` | Alpha | 1.11 | 1.11 | -| `AttachVolumeLimit` | `true` | Beta | 1.12 | 1.16 | -| `AttachVolumeLimit` | `true` | GA | 1.17 | 1.21 | -| `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | 1.21 | -| `BalanceAttachedNodeVolumes` | `false` | Deprecated | 1.22 | 1.22 | -| `BlockVolume` | `false` | Alpha | 1.9 | 1.12 | -| `BlockVolume` | `true` | Beta | 1.13 | 1.17 | -| `BlockVolume` | `true` | GA | 1.18 | 1.21 | -| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 | -| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | 1.21 | -| `BoundServiceAccountTokenVolume` | `true` | GA | 1.22 | 1.23 | -| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | -| `CRIContainerLogRotation` | `true` | Beta | 1.11 | 1.20 | -| `CRIContainerLogRotation` | `true` | GA | 1.21 | 1.22 | -| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 | -| `CSIBlockVolume` | `true` | Beta | 1.14 | 1.17 | -| `CSIBlockVolume` | `true` | GA | 1.18 | 1.21 | -| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 | -| `CSIDriverRegistry` | `true` | Beta | 1.14 | 1.17 | -| `CSIDriverRegistry` | `true` | GA | 1.18 | 1.21 | -| `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 | -| `CSIInlineVolume` | `true` | Beta | 1.16 | 1.24 | -| `CSIInlineVolume` | `true` | GA | 1.25 | 1.26 | -| `CSIMigration` | `false` | Alpha | 1.14 | 1.16 | -| `CSIMigration` | `true` | Beta | 1.17 | 1.24 | -| `CSIMigration` | `true` | GA | 1.25 | 1.26 | -| `CSIMigrationAWS` | `false` | Alpha | 1.14 | 1.16 | -| `CSIMigrationAWS` | `false` | Beta | 1.17 | 1.22 | -| `CSIMigrationAWS` | `true` | Beta | 1.23 | 1.24 | -| `CSIMigrationAWS` | `true` | GA | 1.25 | 1.26 | -| `CSIMigrationAWSComplete` | `false` | Alpha | 1.17 | 1.20 | -| `CSIMigrationAWSComplete` | - | Deprecated | 1.21 | 1.21 | -| `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | 1.18 | -| `CSIMigrationAzureDisk` | `false` | Beta | 1.19 | 1.22 | -| `CSIMigrationAzureDisk` | `true` | Beta | 1.23 | 1.23 | -| `CSIMigrationAzureDisk` | `true` | GA | 1.24 | 1.26 | -| `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | 1.20 | -| `CSIMigrationAzureDiskComplete` | - | Deprecated | 1.21 | 1.21 | -| `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | 1.20 | -| `CSIMigrationAzureFileComplete` | - | Deprecated | 1.21 | 1.21 | -| `CSIMigrationGCE` | `false` | Alpha | 1.14 | 1.16 | -| `CSIMigrationGCE` | `false` | Beta | 1.17 | 1.22 | -| `CSIMigrationGCE` | `true` | Beta | 1.23 | 1.24 | -| `CSIMigrationGCE` | `true` | GA | 1.25 | 1.27 | -| `CSIMigrationGCEComplete` | `false` | Alpha | 1.17 | 1.20 | -| `CSIMigrationGCEComplete` | - | Deprecated | 1.21 | 1.21 | -| `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | 1.17 | -| `CSIMigrationOpenStack` | `true` | Beta | 1.18 | 1.23 | -| 
`CSIMigrationOpenStack` | `true` | GA | 1.24 | 1.25 | -| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | 1.20 | -| `CSIMigrationOpenStackComplete` | - | Deprecated | 1.21 | 1.21 | -| `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | 1.21 | -| `CSIMigrationvSphereComplete` | - | Deprecated | 1.22 | 1.22 | -| `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | -| `CSINodeInfo` | `true` | Beta | 1.14 | 1.16 | -| `CSINodeInfo` | `true` | GA | 1.17 | 1.22 | -| `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 | -| `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 | -| `CSIPersistentVolume` | `true` | GA | 1.13 | 1.16 | -| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | -| `CSIServiceAccountToken` | `true` | Beta | 1.21 | 1.21 | -| `CSIServiceAccountToken` | `true` | GA | 1.22 | 1.24 | -| `CSIStorageCapacity` | `false` | Alpha | 1.19 | 1.20 | -| `CSIStorageCapacity` | `true` | Beta | 1.21 | 1.23 | -| `CSIStorageCapacity` | `true` | GA | 1.24 | 1.27 | -| `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | -| `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | 1.22 | -| `CSIVolumeFSGroupPolicy` | `true` | GA | 1.23 | 1.25 | -| `CSRDuration` | `true` | Beta | 1.22 | 1.23 | -| `CSRDuration` | `true` | GA | 1.24 | 1.25 | -| `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | -| `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | 1.22 | -| `ConfigurableFSGroupPolicy` | `true` | GA | 1.23 | 1.25 | -| `ControllerManagerLeaderMigration` | `false` | Alpha | 1.21 | 1.21 | -| `ControllerManagerLeaderMigration` | `true` | Beta | 1.22 | 1.23 | -| `ControllerManagerLeaderMigration` | `true` | GA | 1.24 | 1.26 | -| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | -| `CronJobControllerV2` | `true` | Beta | 1.21 | 1.21 | -| `CronJobControllerV2` | `true` | GA | 1.22 | 1.23 | -| `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 | -| `CustomPodDNS` | `true` | Beta| 1.10 | 1.13 | -| `CustomPodDNS` | `true` | GA | 1.14 | 1.16 | -| `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 | -| `CustomResourceDefaulting` | `true` | Beta | 1.16 | 1.16 | -| `CustomResourceDefaulting` | `true` | GA | 1.17 | 1.18 | -| `CustomResourcePublishOpenAPI` | `false` | Alpha| 1.14 | 1.14 | -| `CustomResourcePublishOpenAPI` | `true` | Beta| 1.15 | 1.15 | -| `CustomResourcePublishOpenAPI` | `true` | GA | 1.16 | 1.18 | -| `CustomResourceSubresources` | `false` | Alpha | 1.10 | 1.10 | -| `CustomResourceSubresources` | `true` | Beta | 1.11 | 1.15 | -| `CustomResourceSubresources` | `true` | GA | 1.16 | 1.18 | -| `CustomResourceValidation` | `false` | Alpha | 1.8 | 1.8 | -| `CustomResourceValidation` | `true` | Beta | 1.9 | 1.15 | -| `CustomResourceValidation` | `true` | GA | 1.16 | 1.18 | -| `CustomResourceWebhookConversion` | `false` | Alpha | 1.13 | 1.14 | -| `CustomResourceWebhookConversion` | `true` | Beta | 1.15 | 1.15 | -| `CustomResourceWebhookConversion` | `true` | GA | 1.16 | 1.18 | -| `DaemonSetUpdateSurge` | `false` | Alpha | 1.21 | 1.21 | -| `DaemonSetUpdateSurge` | `true` | Beta | 1.22 | 1.24 | -| `DaemonSetUpdateSurge` | `true` | GA | 1.25 | 1.26 | -| `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 | -| `DefaultPodTopologySpread` | `true` | Beta | 1.20 | 1.23 | -| `DefaultPodTopologySpread` | `true` | GA | 1.24 | 1.25 | -| `DelegateFSGroupToCSIDriver` | `false` | Alpha | 1.22 | 1.22 | -| `DelegateFSGroupToCSIDriver` | `true` | Beta | 1.23 | 1.25 | -| `DelegateFSGroupToCSIDriver` | `true` | GA | 1.26 | 1.27 | -| `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 | -| 
`DevicePlugins` | `true` | Beta | 1.10 | 1.25 | -| `DevicePlugins` | `true` | GA | 1.26 | 1.27 | -| `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 | -| `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | 1.24 | -| `DisableAcceleratorUsageMetrics` | `true` | GA | 1.25 | 1.27 | -| `DryRun` | `false` | Alpha | 1.12 | 1.12 | -| `DryRun` | `true` | Beta | 1.13 | 1.18 | -| `DryRun` | `true` | GA | 1.19 | 1.27 | -| `DynamicAuditing` | `false` | Alpha | 1.13 | 1.18 | -| `DynamicAuditing` | - | Deprecated | 1.19 | 1.19 | -| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | -| `DynamicKubeletConfig` | `true` | Beta | 1.11 | 1.21 | -| `DynamicKubeletConfig` | `false` | Deprecated | 1.22 | 1.25 | -| `DynamicProvisioningScheduling` | `false` | Alpha | 1.11 | 1.11 | -| `DynamicProvisioningScheduling` | - | Deprecated| 1.12 | - | -| `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 | -| `DynamicVolumeProvisioning` | `true` | GA | 1.8 | 1.12 | -| `EnableAggregatedDiscoveryTimeout` | `true` | Deprecated | 1.16 | 1.17 | -| `EnableEquivalenceClassCache` | `false` | Alpha | 1.8 | 1.12 | -| `EnableEquivalenceClassCache` | - | Deprecated | 1.13 | 1.23 | -| `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 | -| `EndpointSlice` | `false` | Beta | 1.17 | 1.17 | -| `EndpointSlice` | `true` | Beta | 1.18 | 1.20 | -| `EndpointSlice` | `true` | GA | 1.21 | 1.24 | -| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.20 | -| `EndpointSliceNodeName` | `true` | GA | 1.21 | 1.24 | -| `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 | -| `EndpointSliceProxying` | `true` | Beta | 1.19 | 1.21 | -| `EndpointSliceProxying` | `true` | GA | 1.22 | 1.24 | -| `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | 1.21 | -| `EndpointSliceTerminatingCondition` | `true` | Beta | 1.22 | 1.25 | -| `EndpointSliceTerminatingCondition` | `true` | GA | 1.26 | 1.27 | -| `EphemeralContainers` | `false` | Alpha | 1.16 | 1.22 | -| `EphemeralContainers` | `true` | Beta | 1.23 | 1.24 | -| `EphemeralContainers` | `true` | GA | 1.25 | 1.26 | -| `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 | -| `EvenPodsSpread` | `true` | Beta | 1.18 | 1.18 | -| `EvenPodsSpread` | `true` | GA | 1.19 | 1.21 | -| `ExpandCSIVolumes` | `false` | Alpha | 1.14 | 1.15 | -| `ExpandCSIVolumes` | `true` | Beta | 1.16 | 1.23 | -| `ExpandCSIVolumes` | `true` | GA | 1.24 | 1.26 | -| `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | 1.14 | -| `ExpandInUsePersistentVolumes` | `true` | Beta | 1.15 | 1.23 | -| `ExpandInUsePersistentVolumes` | `true` | GA | 1.24 | 1.26 | -| `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | -| `ExpandPersistentVolumes` | `true` | Beta | 1.11 | 1.23 | -| `ExpandPersistentVolumes` | `true` | GA | 1.24 | 1.26 | -| `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | 1.12 | -| `ExperimentalCriticalPodAnnotation` | `false` | Deprecated | 1.13 | 1.16 | -| `ExternalPolicyForExternalIP` | `true` | GA | 1.18 | 1.22 | -| `GCERegionalPersistentDisk` | `true` | Beta | 1.10 | 1.12 | -| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | 1.16 | -| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | 1.20 | -| `GenericEphemeralVolume` | `true` | Beta | 1.21 | 1.22 | -| `GenericEphemeralVolume` | `true` | GA | 1.23 | 1.24 | -| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 | -| `HugePageStorageMediumSize` | `true` | Beta | 1.19 | 1.21 | -| `HugePageStorageMediumSize` | `true` | GA | 1.22 | 1.24 | -| `HugePages` | `false` | Alpha | 1.8 | 1.9 | -| `HugePages` | `true` | Beta| 
1.10 | 1.13 | -| `HugePages` | `true` | GA | 1.14 | 1.16 | -| `HyperVContainer` | `false` | Alpha | 1.10 | 1.19 | -| `HyperVContainer` | `false` | Deprecated | 1.20 | 1.20 | -| `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 | -| `IPv6DualStack` | `true` | Beta | 1.21 | 1.22 | -| `IPv6DualStack` | `true` | GA | 1.23 | 1.24 | -| `IdentifyPodOS` | `false` | Alpha | 1.23 | 1.23 | -| `IdentifyPodOS` | `true` | Beta | 1.24 | 1.24 | -| `IdentifyPodOS` | `true` | GA | 1.25 | 1.26 | -| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 | -| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | 1.20 | -| `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | 1.24 | -| `IndexedJob` | `false` | Alpha | 1.21 | 1.21 | -| `IndexedJob` | `true` | Beta | 1.22 | 1.23 | -| `IndexedJob` | `true` | GA | 1.24 | 1.25 | -| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | 1.21 | -| `IngressClassNamespacedParams` | `true` | Beta | 1.22 | 1.22 | -| `IngressClassNamespacedParams` | `true` | GA | 1.23 | 1.24 | -| `Initializers` | `false` | Alpha | 1.7 | 1.13 | -| `Initializers` | - | Deprecated | 1.14 | 1.14 | -| `KMSv1` | `true` | Deprecated | 1.28 | | -| `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 | -| `KubeletConfigFile` | - | Deprecated | 1.10 | 1.10 | -| `KubeletCredentialProviders` | `false` | Alpha | 1.20 | 1.23 | -| `KubeletCredentialProviders` | `true` | Beta | 1.24 | 1.25 | -| `KubeletCredentialProviders` | `true` | GA | 1.26 | 1.28 | -| `KubeletPluginsWatcher` | `false` | Alpha | 1.11 | 1.11 | -| `KubeletPluginsWatcher` | `true` | Beta | 1.12 | 1.12 | -| `KubeletPluginsWatcher` | `true` | GA | 1.13 | 1.16 | -| `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 | -| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | 1.20 | -| `LegacyNodeRoleBehavior` | `false` | GA | 1.21 | 1.22 | -| `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | -| `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | 1.24 | -| `LocalStorageCapacityIsolation` | `true` | GA | 1.25 | 1.26 | -| `MixedProtocolLBService` | `false` | Alpha | 1.20 | 1.23 | -| `MixedProtocolLBService` | `true` | Beta | 1.24 | 1.25 | -| `MixedProtocolLBService` | `true` | GA | 1.26 | 1.27 | -| `MountContainers` | `false` | Alpha | 1.9 | 1.16 | -| `MountContainers` | `false` | Deprecated | 1.17 | 1.17 | -| `MountPropagation` | `false` | Alpha | 1.8 | 1.9 | -| `MountPropagation` | `true` | Beta | 1.10 | 1.11 | -| `MountPropagation` | `true` | GA | 1.12 | 1.14 | -| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | 1.21 | -| `NamespaceDefaultLabelName` | `true` | GA | 1.22 | 1.23 | -| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | 1.21 | -| `NetworkPolicyEndPort` | `true` | Beta | 1.22 | 1.24 | -| `NetworkPolicyEndPort` | `true` | GA | 1.25 | 1.26 | -| `NetworkPolicyStatus` | `false` | Alpha | 1.24 | 1.27 | -| `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 | -| `NodeDisruptionExclusion` | `true` | Beta | 1.19 | 1.20 | -| `NodeDisruptionExclusion` | `true` | GA | 1.21 | 1.22 | -| `NodeLease` | `false` | Alpha | 1.12 | 1.13 | -| `NodeLease` | `true` | Beta | 1.14 | 1.16 | -| `NodeLease` | `true` | GA | 1.17 | 1.23 | -| `NonPreemptingPriority` | `false` | Alpha | 1.15 | 1.18 | -| `NonPreemptingPriority` | `true` | Beta | 1.19 | 1.23 | -| `NonPreemptingPriority` | `true` | GA | 1.24 | 1.25 | -| `PVCProtection` | `false` | Alpha | 1.9 | 1.9 | -| `PVCProtection` | - | Deprecated | 1.10 | 1.10 | -| `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | -| `PersistentLocalVolumes` | `true` | Beta | 1.10 | 1.13 | -| 
`PersistentLocalVolumes` | `true` | GA | 1.14 | 1.16 | -| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | 1.21 | -| `PodAffinityNamespaceSelector` | `true` | Beta | 1.22 | 1.23 | -| `PodAffinityNamespaceSelector` | `true` | GA | 1.24 | 1.25 | -| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 | -| `PodDisruptionBudget` | `true` | Beta | 1.5 | 1.20 | -| `PodDisruptionBudget` | `true` | GA | 1.21 | 1.25 | -| `PodHasNetworkCondition` | `false` | Alpha | 1.25 | 1.27 | -| `PodOverhead` | `false` | Alpha | 1.16 | 1.17 | -| `PodOverhead` | `true` | Beta | 1.18 | 1.23 | -| `PodOverhead` | `true` | GA | 1.24 | 1.25 | -| `PodPriority` | `false` | Alpha | 1.8 | 1.10 | -| `PodPriority` | `true` | Beta | 1.11 | 1.13 | -| `PodPriority` | `true` | GA | 1.14 | 1.18 | -| `PodReadinessGates` | `false` | Alpha | 1.11 | 1.11 | -| `PodReadinessGates` | `true` | Beta | 1.12 | 1.13 | -| `PodReadinessGates` | `true` | GA | 1.14 | 1.16 | -| `PodSecurity` | `false` | Alpha | 1.22 | 1.22 | -| `PodSecurity` | `true` | Beta | 1.23 | 1.24 | -| `PodSecurity` | `true` | GA | 1.25 | 1.27 | -| `PodShareProcessNamespace` | `false` | Alpha | 1.10 | 1.11 | -| `PodShareProcessNamespace` | `true` | Beta | 1.12 | 1.16 | -| `PodShareProcessNamespace` | `true` | GA | 1.17 | 1.19 | -| `PreferNominatedNode` | `false` | Alpha | 1.21 | 1.21 | -| `PreferNominatedNode` | `true` | Beta | 1.22 | 1.23 | -| `PreferNominatedNode` | `true` | GA | 1.24 | 1.25 | -| `RequestManagement` | `false` | Alpha | 1.15 | 1.16 | -| `RequestManagement` | - | Deprecated | 1.17 | 1.17 | -| `ResourceLimitsPriorityFunction` | `false` | Alpha | 1.9 | 1.18 | -| `ResourceLimitsPriorityFunction` | - | Deprecated | 1.19 | 1.19 | -| `ResourceQuotaScopeSelectors` | `false` | Alpha | 1.11 | 1.11 | -| `ResourceQuotaScopeSelectors` | `true` | Beta | 1.12 | 1.16 | -| `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | 1.18 | -| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 | -| `RootCAConfigMap` | `true` | Beta | 1.20 | 1.20 | -| `RootCAConfigMap` | `true` | GA | 1.21 | 1.22 | -| `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | 1.18 | -| `RotateKubeletClientCertificate` | `true` | GA | 1.19 | 1.21 | -| `RunAsGroup` | `true` | Beta | 1.14 | 1.20 | -| `RunAsGroup` | `true` | GA | 1.21 | 1.22 | -| `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | -| `RuntimeClass` | `true` | Beta | 1.14 | 1.19 | -| `RuntimeClass` | `true` | GA | 1.20 | 1.24 | -| `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | -| `SCTPSupport` | `true` | Beta | 1.19 | 1.19 | -| `SCTPSupport` | `true` | GA | 1.20 | 1.22 | -| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | -| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | 1.16 | -| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | 1.18 | -| `SelectorIndex` | `false` | Alpha | 1.18 | 1.18 | -| `SelectorIndex` | `true` | Beta | 1.19 | 1.19 | -| `SelectorIndex` | `true` | GA | 1.20 | 1.25 | -| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | -| `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | 1.20 | -| `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | 1.23 | -| `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 | -| `ServiceAppProtocol` | `true` | Beta | 1.19 | 1.19 | -| `ServiceAppProtocol` | `true` | GA | 1.20 | 1.22 | -| `ServiceIPStaticSubrange` | `false` | Alpha | 1.24 | 1.24 | -| `ServiceIPStaticSubrange` | `true` | Beta | 1.25 | 1.25 | -| `ServiceIPStaticSubrange` | `true` | GA | 1.26 | 1.27 | -| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | 1.21 | -| 
`ServiceInternalTrafficPolicy` | `true` | Beta | 1.22 | 1.25 |
-| `ServiceInternalTrafficPolicy` | `true` | GA | 1.26 | 1.27 |
-| `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | 1.21 |
-| `ServiceLBNodePortControl` | `true` | Beta | 1.22 | 1.23 |
-| `ServiceLBNodePortControl` | `true` | GA | 1.24 | 1.25 |
-| `ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | 1.21 |
-| `ServiceLoadBalancerClass` | `true` | Beta | 1.22 | 1.23 |
-| `ServiceLoadBalancerClass` | `true` | GA | 1.24 | 1.25 |
-| `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 |
-| `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 |
-| `ServiceLoadBalancerFinalizer` | `true` | GA | 1.17 | 1.20 |
-| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 |
-| `ServiceNodeExclusion` | `true` | Beta | 1.19 | 1.20 |
-| `ServiceNodeExclusion` | `true` | GA | 1.21 | 1.22 |
-| `ServiceTopology` | `false` | Alpha | 1.17 | 1.19 |
-| `ServiceTopology` | `false` | Deprecated | 1.20 | 1.22 |
-| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | 1.19 |
-| `SetHostnameAsFQDN` | `true` | Beta | 1.20 | 1.21 |
-| `SetHostnameAsFQDN` | `true` | GA | 1.22 | 1.24 |
-| `StartupProbe` | `false` | Alpha | 1.16 | 1.17 |
-| `StartupProbe` | `true` | Beta | 1.18 | 1.19 |
-| `StartupProbe` | `true` | GA | 1.20 | 1.23 |
-| `StatefulSetMinReadySeconds` | `false` | Alpha | 1.22 | 1.22 |
-| `StatefulSetMinReadySeconds` | `true` | Beta | 1.23 | 1.24 |
-| `StatefulSetMinReadySeconds` | `true` | GA | 1.25 | 1.26 |
-| `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 |
-| `StorageObjectInUseProtection` | `true` | GA | 1.11 | 1.24 |
-| `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 |
-| `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.17 |
-| `StreamingProxyRedirects` | `true` | Deprecated | 1.18 | 1.21 |
-| `StreamingProxyRedirects` | `false` | Deprecated | 1.22 | 1.24 |
-| `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 |
-| `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 |
-| `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 |
-| `SupportIPVSProxyMode` | `true` | GA | 1.11 | 1.20 |
-| `SupportNodePidsLimit` | `false` | Alpha | 1.14 | 1.14 |
-| `SupportNodePidsLimit` | `true` | Beta | 1.15 | 1.19 |
-| `SupportNodePidsLimit` | `true` | GA | 1.20 | 1.23 |
-| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 |
-| `SupportPodPidsLimit` | `true` | Beta | 1.14 | 1.19 |
-| `SupportPodPidsLimit` | `true` | GA | 1.20 | 1.23 |
-| `SuspendJob` | `false` | Alpha | 1.21 | 1.21 |
-| `SuspendJob` | `true` | Beta | 1.22 | 1.23 |
-| `SuspendJob` | `true` | GA | 1.24 | 1.25 |
-| `Sysctls` | `true` | Beta | 1.11 | 1.20 |
-| `Sysctls` | `true` | GA | 1.21 | 1.22 |
-| `TTLAfterFinished` | `false` | Alpha | 1.12 | 1.20 |
-| `TTLAfterFinished` | `true` | Beta | 1.21 | 1.22 |
-| `TTLAfterFinished` | `true` | GA | 1.23 | 1.24 |
-| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 |
-| `TaintBasedEvictions` | `true` | Beta | 1.13 | 1.17 |
-| `TaintBasedEvictions` | `true` | GA | 1.18 | 1.20 |
-| `TaintNodesByCondition` | `false` | Alpha | 1.8 | 1.11 |
-| `TaintNodesByCondition` | `true` | Beta | 1.12 | 1.16 |
-| `TaintNodesByCondition` | `true` | GA | 1.17 | 1.18 |
-| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 |
-| `TokenRequest` | `true` | Beta | 1.12 | 1.19 |
-| `TokenRequest` | `true` | GA | 1.20 | 1.21 |
-| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 |
-| `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 |
-| `TokenRequestProjection` | `true` | GA | 1.20 | 1.21 |
-| `UserNamespacesStatelessPodsSupport` | `false` | Alpha | 1.25 | 1.27 |
-| `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 |
-| `ValidateProxyRedirects` | `true` | Beta | 1.14 | 1.21 |
-| `ValidateProxyRedirects` | `true` | Deprecated | 1.22 | 1.24 |
-| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 |
-| `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 |
-| `VolumePVCDataSource` | `true` | GA | 1.18 | 1.21 |
-| `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 |
-| `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 |
-| `VolumeScheduling` | `true` | GA | 1.13 | 1.16 |
-| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 |
-| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 |
-| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | 1.22 |
-| `VolumeSubpath` | `true` | GA | 1.10 | 1.24 |
-| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 |
-| `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | 1.16 |
-| `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | 1.24 |
-| `WarningHeaders` | `true` | Beta | 1.19 | 1.21 |
-| `WarningHeaders` | `true` | GA | 1.22 | 1.24 |
-| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 |
-| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | 1.21 |
-| `WindowsEndpointSliceProxying` | `true` | GA | 1.22 | 1.24 |
-| `WindowsGMSA` | `false` | Alpha | 1.14 | 1.15 |
-| `WindowsGMSA` | `true` | Beta | 1.16 | 1.17 |
-| `WindowsGMSA` | `true` | GA | 1.18 | 1.20 |
-| `WindowsHostProcessContainers` | `false` | Alpha | 1.22 | 1.22 |
-| `WindowsHostProcessContainers` | `true` | Beta | 1.23 | 1.25 |
-| `WindowsHostProcessContainers` | `true` | GA | 1.26 | 1.27 |
-| `WindowsRunAsUserName` | `false` | Alpha | 1.16 | 1.16 |
-| `WindowsRunAsUserName` | `true` | Beta | 1.17 | 1.17 |
-| `WindowsRunAsUserName` | `true` | GA | 1.18 | 1.20 |
-{{< /table >}}
-
-## Descriptions for removed feature gates
-
-- `Accelerators`: Provided an early form of plugin to enable Nvidia GPU support when using
-  Docker Engine; no longer available. See
-  [Device Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) for
-  an alternative.
-
-- `AffinityInAnnotations`: Enable setting
-  [Pod affinity or anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
-
-- `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug/debug-cluster/audit/#advanced-audit).
-
-- `AllowExtTrafficLocalEndpoints`: Enable a service to route external requests to node-local endpoints.
-
-- `AllowInsecureBackendProxy`: Enable users to skip TLS verification of
-  kubelets on Pod log requests.
-
-- `AttachVolumeLimit`: Enable volume plugins to report limits on the number of volumes
-  that can be attached to a node.
-  See [dynamic volume limits](/docs/concepts/storage/storage-limits/#dynamic-volume-limits)
-  for more details.
-
-- `BalanceAttachedNodeVolumes`: Include the volume count of a node when considering
-  balanced resource allocation while scheduling. A node with closer CPU,
-  memory utilization, and volume count is favored by the scheduler while making decisions.
-
-- `BlockVolume`: Enable the definition and consumption of raw block devices in Pods.
-  See [Raw Block Volume Support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support)
-  for more details.
-
-- `BoundServiceAccountTokenVolume`: Migrate ServiceAccount volumes to use a projected volume
-  consisting of a ServiceAccountTokenVolumeProjection.
-  Cluster admins can use the metric
-  `serviceaccount_stale_tokens_total` to monitor workloads that depend on the extended
-  tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with
-  the flag `--service-account-extend-token-expiration=false`.
-  Check [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)
-  for more details.
-
-- `CRIContainerLogRotation`: Enable container log rotation for the CRI container runtime.
-  The default max size of a log file is 10MB and the default max number of
-  log files allowed for a container is 5.
-  These values can be configured in the kubelet config; a brief sketch follows this group of entries.
-  See [logging at node level](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level)
-  for more details.
-
-- `CSIBlockVolume`: Enable external CSI volume drivers to support block storage.
-  See [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support)
-  for more details.
-
-- `CSIDriverRegistry`: Enable all logic related to the CSIDriver API object in
-  `csi.storage.k8s.io`.
-
-- `CSIInlineVolume`: Enable CSI inline volume support for pods.
-
-- `CSIMigration`: Enables shims and translation logic to route volume
-  operations from in-tree plugins to corresponding pre-installed CSI plugins.
-
-- `CSIMigrationAWS`: Enables shims and translation logic to route volume
-  operations from the AWS-EBS in-tree plugin to the EBS CSI plugin. Supports
-  falling back to the in-tree EBS plugin for mount operations on nodes that have
-  the feature disabled or that do not have the EBS CSI plugin installed and
-  configured. Does not support falling back for provision operations; for those,
-  the CSI plugin must be installed and configured.
-
-- `CSIMigrationAWSComplete`: Stops registering the EBS in-tree plugin in
-  kubelet and volume controllers and enables shims and translation logic to
-  route volume operations from the AWS-EBS in-tree plugin to the EBS CSI plugin.
-  Requires the CSIMigration and CSIMigrationAWS feature flags enabled and the EBS CSI
-  plugin installed and configured on all nodes in the cluster. This flag has
-  been deprecated in favor of the `InTreePluginAWSUnregister` feature flag,
-  which prevents the registration of the in-tree EBS plugin.
-
-- `CSIMigrationAzureDisk`: Enables shims and translation logic to route volume
-  operations from the Azure-Disk in-tree plugin to the AzureDisk CSI plugin.
-  Supports falling back to the in-tree AzureDisk plugin for mount operations on
-  nodes that have the feature disabled or that do not have the AzureDisk CSI plugin
-  installed and configured. Does not support falling back for provision
-  operations; for those, the CSI plugin must be installed and configured.
-  Requires the CSIMigration feature flag enabled.
-
-- `CSIMigrationAzureDiskComplete`: Stops registering the Azure-Disk in-tree
-  plugin in kubelet and volume controllers and enables shims and translation
-  logic to route volume operations from the Azure-Disk in-tree plugin to the
-  AzureDisk CSI plugin. Requires the CSIMigration and CSIMigrationAzureDisk feature
-  flags enabled and the AzureDisk CSI plugin installed and configured on all nodes
-  in the cluster. This flag has been deprecated in favor of the
-  `InTreePluginAzureDiskUnregister` feature flag, which prevents the registration
-  of the in-tree AzureDisk plugin.
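As a quick illustration of the log-rotation settings described under `CRIContainerLogRotation` above, a minimal kubelet configuration sketch follows; the two fields simply restate the defaults mentioned in that entry, and the rest of the configuration is elided:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Rotate a container's log file once it grows past 10 MiB.
containerLogMaxSize: 10Mi
# Keep at most 5 log files per container, rotating out the oldest.
containerLogMaxFiles: 5
```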
-
-- `CSIMigrationAzureFileComplete`: Stops registering the Azure-File in-tree
-  plugin in kubelet and volume controllers and enables shims and translation
-  logic to route volume operations from the Azure-File in-tree plugin to the
-  AzureFile CSI plugin. Requires the CSIMigration and CSIMigrationAzureFile feature
-  flags enabled and the AzureFile CSI plugin installed and configured on all nodes
-  in the cluster. This flag has been deprecated in favor of the
-  `InTreePluginAzureFileUnregister` feature flag, which prevents the registration
-  of the in-tree AzureFile plugin.
-
-- `CSIMigrationGCE`: Enables shims and translation logic to route volume
-  operations from the GCE-PD in-tree plugin to the PD CSI plugin. Supports falling
-  back to the in-tree GCE plugin for mount operations on nodes that have the
-  feature disabled or that do not have the PD CSI plugin installed and configured.
-  Does not support falling back for provision operations; for those, the CSI
-  plugin must be installed and configured. Requires the CSIMigration feature flag
-  enabled.
-
-- `CSIMigrationGCEComplete`: Stops registering the GCE-PD in-tree plugin in
-  kubelet and volume controllers and enables shims and translation logic to
-  route volume operations from the GCE-PD in-tree plugin to the PD CSI plugin.
-  Requires the CSIMigration and CSIMigrationGCE feature flags enabled and the PD CSI
-  plugin installed and configured on all nodes in the cluster. This flag has
-  been deprecated in favor of the `InTreePluginGCEUnregister` feature flag, which
-  prevents the registration of the in-tree GCE PD plugin.
-
-- `CSIMigrationOpenStack`: Enables shims and translation logic to route volume
-  operations from the Cinder in-tree plugin to the Cinder CSI plugin. Supports
-  falling back to the in-tree Cinder plugin for mount operations on nodes that have
-  the feature disabled or that do not have the Cinder CSI plugin installed and
-  configured. Does not support falling back for provision operations; for those,
-  the CSI plugin must be installed and configured. Requires the CSIMigration
-  feature flag enabled.
-
-- `CSIMigrationOpenStackComplete`: Stops registering the Cinder in-tree plugin in
-  kubelet and volume controllers and enables shims and translation logic to route
-  volume operations from the Cinder in-tree plugin to the Cinder CSI plugin.
-  Requires the CSIMigration and CSIMigrationOpenStack feature flags enabled and the Cinder
-  CSI plugin installed and configured on all nodes in the cluster. This flag has
-  been deprecated in favor of the `InTreePluginOpenStackUnregister` feature flag,
-  which prevents the registration of the in-tree OpenStack Cinder plugin.
-
-- `CSIMigrationvSphereComplete`: Stops registering the vSphere in-tree plugin in kubelet
-  and volume controllers and enables shims and translation logic to route volume operations
-  from the vSphere in-tree plugin to the vSphere CSI plugin. Requires the CSIMigration and
-  CSIMigrationvSphere feature flags enabled and the vSphere CSI plugin installed and
-  configured on all nodes in the cluster. This flag has been deprecated in favor
-  of the `InTreePluginvSphereUnregister` feature flag, which prevents the
-  registration of the in-tree vSphere plugin.
-
-- `CSINodeInfo`: Enable all logic related to the CSINodeInfo API object in `csi.storage.k8s.io`.
-
-- `CSIPersistentVolume`: Enable discovering and mounting volumes provisioned through a
-  [CSI (Container Storage Interface)](https://git.k8s.io/design-proposals-archive/storage/container-storage-interface.md)
-  compatible volume plugin.
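To illustrate the `CSIPersistentVolume` entry above: a PersistentVolume backed by a CSI-compatible plugin is declared through the `csi` volume source. This is only a minimal sketch; the driver name and volume handle below are hypothetical placeholders:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-csi-pv            # hypothetical name
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  csi:
    driver: csi.example.com       # hypothetical CSI driver name
    volumeHandle: vol-0123456789  # hypothetical ID understood by that driver
```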
-
-- `CSIServiceAccountToken`: Enable CSI drivers to receive the service account token
-  of the pods they mount volumes for. See
-  [Token Requests](https://kubernetes-csi.github.io/docs/token-requests.html).
-
-- `CSIStorageCapacity`: Enables CSI drivers to publish storage capacity information
-  and the Kubernetes scheduler to use that information when scheduling pods. See
-  [Storage Capacity](/docs/concepts/storage/storage-capacity/).
-  Check the [`csi` volume type](/docs/concepts/storage/volumes/#csi) documentation for more details.
-
-- `CSIVolumeFSGroupPolicy`: Allows CSIDrivers to use the `fsGroupPolicy` field.
-  This field controls whether volumes created by a CSIDriver support volume ownership
-  and permission modifications when these volumes are mounted.
-
-- `CSRDuration`: Allows clients to request a duration for certificates issued
-  via the Kubernetes CSR API.
-
-- `ConfigurableFSGroupPolicy`: Allows users to configure the volume permission change policy
-  for fsGroups when mounting a volume in a Pod. See
-  [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)
-  for more details.
-
-- `CronJobControllerV2`: Use an alternative implementation of the
-  {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} controller. Otherwise,
-  version 1 of the same controller is selected.
-
-- `ControllerManagerLeaderMigration`: Enables Leader Migration for
-  [kube-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#initial-leader-migration-configuration) and
-  [cloud-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#deploy-cloud-controller-manager),
-  which allows a cluster operator to live-migrate
-  controllers from the kube-controller-manager into an external controller-manager
-  (e.g. the cloud-controller-manager) in an HA cluster without downtime.
-
-- `CustomPodDNS`: Enable customizing the DNS settings for a Pod using its `dnsConfig` property;
-  a brief sketch follows this group of entries.
-  Check [Pod's DNS Config](/docs/concepts/services-networking/dns-pod-service/#pods-dns-config)
-  for more details.
-
-- `CustomResourceDefaulting`: Enable CRD support for default values in OpenAPI v3 validation schemas.
-
-- `CustomResourcePublishOpenAPI`: Enables publishing of CRD OpenAPI specs.
-
-- `CustomResourceSubresources`: Enable `/status` and `/scale` subresources
-  on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
-
-- `CustomResourceValidation`: Enable schema-based validation on resources created from
-  [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
-
-- `CustomResourceWebhookConversion`: Enable webhook-based conversion
-  on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
-
-- `DaemonSetUpdateSurge`: Enables DaemonSet workloads to maintain
-  availability during an update, per node.
-  See [Perform a Rolling Update on a DaemonSet](/docs/tasks/manage-daemon/update-daemon-set/).
-
-- `DefaultPodTopologySpread`: Enables the use of the `PodTopologySpread` scheduling plugin to do
-  [default spreading](/docs/concepts/scheduling-eviction/topology-spread-constraints/#internal-default-constraints).
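As promised under `CustomPodDNS` above, here is a minimal sketch of the `dnsConfig` property; the Pod name, resolver address, and search domain are placeholders:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: custom-dns-example       # hypothetical name
spec:
  dnsPolicy: "None"              # ignore cluster DNS defaults entirely
  dnsConfig:
    nameservers:
      - "192.0.2.1"              # placeholder resolver address
    searches:
      - my-namespace.svc.cluster.local  # placeholder search domain
    options:
      - name: ndots
        value: "2"
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
```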
-
-- `DelegateFSGroupToCSIDriver`: If supported by the CSI driver, delegates the
-  role of applying `fsGroup` from a Pod's `securityContext` to the driver by
-  passing `fsGroup` through the NodeStageVolume and NodePublishVolume CSI calls.
-
-- `DevicePlugins`: Enable the [device-plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)
-  based resource provisioning on nodes.
-
-- `DisableAcceleratorUsageMetrics`:
-  [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics).
-
-- `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests
-  so that validation, merging, and mutation can be tested without committing.
-
-- `DynamicAuditing`: Used to enable dynamic auditing before v1.19.
-
-- `DynamicKubeletConfig`: Enable the dynamic configuration of kubelet. The
-  feature is no longer supported outside of the supported skew policy. The feature
-  gate was removed from kubelet in 1.24.
-
-- `DynamicProvisioningScheduling`: Extend the default scheduler to be aware of
-  volume topology and handle PV provisioning.
-  This feature was superseded by the `VolumeScheduling` feature in v1.12.
-
-- `DynamicVolumeProvisioning`: Enable the
-  [dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/) of persistent volumes to Pods.
-
-- `EnableAggregatedDiscoveryTimeout`: Enable the five-second
-  timeout on aggregated discovery calls.
-
-- `EnableEquivalenceClassCache`: Enable the scheduler to cache equivalence of
-  nodes when scheduling Pods.
-
-- `EndpointSlice`: Enables EndpointSlices for more scalable and extensible
-  network endpoints. See [Enabling EndpointSlices](/docs/concepts/services-networking/endpoint-slices/).
-
-- `EndpointSliceNodeName`: Enables the EndpointSlice `nodeName` field.
-
-- `EndpointSliceProxying`: When enabled, kube-proxy running
-  on Linux will use EndpointSlices as the primary data source instead of
-  Endpoints, enabling scalability and performance improvements. See
-  [Enabling Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/).
-
-- `EndpointSliceTerminatingCondition`: Enables the EndpointSlice `terminating` and `serving`
-  condition fields; a brief sketch follows this group of entries.
-
-- `EphemeralContainers`: Enable the ability to add
-  {{< glossary_tooltip text="ephemeral containers" term_id="ephemeral-container" >}}
-  to running Pods.
-
-- `EvenPodsSpread`: Enable pods to be scheduled evenly across topology domains. See
-  [Pod Topology Spread Constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/).
-
-- `ExpandCSIVolumes`: Enable the expanding of CSI volumes.
-
-- `ExpandInUsePersistentVolumes`: Enable expanding in-use PVCs. See
-  [Resizing an in-use PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim).
-
-- `ExpandPersistentVolumes`: Enable the expanding of persistent volumes. See
-  [Expanding Persistent Volumes Claims](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims).
-
-- `ExperimentalCriticalPodAnnotation`: Enable annotating specific pods as *critical*
-  so that their [scheduling is guaranteed](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/).
-  This feature is deprecated by Pod Priority and Preemption as of v1.13.
-
-- `ExternalPolicyForExternalIP`: Fix a bug where ExternalTrafficPolicy is not
-  applied to Service ExternalIPs.
-
-- `GCERegionalPersistentDisk`: Enable the regional PD feature on GCE.
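The EndpointSlice entries above mention the `nodeName`, `serving`, and `terminating` fields; the following minimal sketch shows where those fields sit in a `discovery.k8s.io/v1` EndpointSlice. All names and addresses are placeholders:

```yaml
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: example-svc-abc123       # hypothetical name
  labels:
    kubernetes.io/service-name: example-svc  # hypothetical owning Service
addressType: IPv4
ports:
  - name: http
    protocol: TCP
    port: 80
endpoints:
  - addresses:
      - "10.1.2.3"               # placeholder Pod IP
    nodeName: node-1             # the field surfaced by EndpointSliceNodeName
    conditions:
      ready: false
      serving: true              # still serving traffic while shutting down
      terminating: true          # surfaced by EndpointSliceTerminatingCondition
```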
-
-- `GenericEphemeralVolume`: Enables ephemeral, inline volumes that support all features
-  of normal volumes (can be provided by third-party storage vendors, storage capacity tracking,
-  restore from snapshot, etc.).
-  See [Ephemeral Volumes](/docs/concepts/storage/ephemeral-volumes/).
-
-- `HugePageStorageMediumSize`: Enable support for multiple sizes of pre-allocated
-  [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/).
-
-- `HugePages`: Enable the allocation and consumption of pre-allocated
-  [huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/).
-
-- `HyperVContainer`: Enable
-  [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container)
-  for Windows containers.
-
-- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/)
-  support for IPv6.
-
-- `IdentifyPodOS`: Allows the Pod OS field to be specified. This helps to identify
-  the OS of the pod authoritatively at API server admission time.
-  In Kubernetes {{< skew currentVersion >}}, the allowed values for `pod.spec.os.name`
-  are `windows` and `linux`.
-
-- `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as
-  immutable for better safety and performance; a brief sketch follows this group of entries.
-
-- `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/)
-  controller to manage Pod completions per completion index.
-
-- `IngressClassNamespacedParams`: Allow referencing namespace-scoped parameters in the
-  `IngressClass` resource. This feature adds two fields - `Scope` and `Namespace` -
-  to `IngressClass.spec.parameters`.
-
-- `Initializers`: Allow asynchronous coordination of object creation using the
-  Initializers admission plugin.
-
-- `KubeletConfigFile`: Enable loading kubelet configuration from
-  a file specified using the kubelet's `--config` flag.
-  See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/)
-  for more details.
-
-- `KubeletCredentialProviders`: Enable kubelet exec credential providers for
-  image pull credentials.
-
-- `KubeletPluginsWatcher`: Enable probe-based plugin watcher utility to enable kubelet
-  to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi).
-
-- `LegacyNodeRoleBehavior`: When disabled, legacy behavior in service load balancers and
-  node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the
-  feature-specific labels provided by `NodeDisruptionExclusion` and `ServiceNodeExclusion`.
-
-- `LocalStorageCapacityIsolation`: Enable the consumption of
-  [local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/)
-  and also the `sizeLimit` property of an
-  [emptyDir volume](/docs/concepts/storage/volumes/#emptydir).
-
-- `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type
-  Service instance.
-
-- `MountContainers`: Enable using utility containers on the host as the volume mounter.
-
-- `MountPropagation`: Enable sharing a volume mounted by one container with other containers or pods.
-  For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation).
-
-- `NamespaceDefaultLabelName`: Configure the API Server to set an immutable
-  {{< glossary_tooltip text="label" term_id="label" >}} `kubernetes.io/metadata.name`
-  on all namespaces, containing the namespace name.
-
-- `NetworkPolicyStatus`: Enable the `status` subresource for NetworkPolicy objects.
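The marker mentioned under `ImmutableEphemeralVolumes` above is a single top-level `immutable` field; a minimal ConfigMap sketch follows, with a hypothetical name and data:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config   # hypothetical name
data:
  log-level: info    # placeholder data
# Once set, the field cannot be unset and the contents can no longer change;
# delete and recreate the object to modify it.
immutable: true
```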
-
-- `NodeDisruptionExclusion`: Enable use of the Node label `node.kubernetes.io/exclude-disruption`,
-  which prevents nodes from being evacuated during zone failures.
-
-- `NodeLease`: Enable the new Lease API to report node heartbeats, which could be used as a node health signal.
-
-- `NonPreemptingPriority`: Enable the `preemptionPolicy` field for PriorityClass and Pod.
-
-- `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from
-  being deleted when it is still used by any Pod.
-
-- `PersistentLocalVolumes`: Enable the usage of the `local` volume type in Pods.
-  Pod affinity has to be specified if requesting a `local` volume.
-
-- `PodAffinityNamespaceSelector`: Enable the
-  [Pod Affinity Namespace Selector](/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector)
-  and [CrossNamespacePodAffinity](/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota)
-  quota scope features.
-
-- `PodDisruptionBudget`: Enable the [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) feature.
-
-- `PodHasNetworkCondition`: Enable the kubelet to mark the [PodHasNetwork](/docs/concepts/workloads/pods/pod-lifecycle/#pod-has-network)
-  condition on pods. This was renamed to `PodReadyToStartContainersCondition` in 1.28.
-
-- `PodOverhead`: Enable the [PodOverhead](/docs/concepts/scheduling-eviction/pod-overhead/)
-  feature to account for pod overheads.
-
-- `PodPriority`: Enable the descheduling and preemption of Pods based on their
-  [priorities](/docs/concepts/scheduling-eviction/pod-priority-preemption/).
-
-- `PodReadinessGates`: Enable the setting of the `PodReadinessGate` field for extending
-  Pod readiness evaluation. See [Pod readiness gate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate)
-  for more details.
-
-- `PodSecurity`: Enables the `PodSecurity` admission plugin.
-
-- `PodShareProcessNamespace`: Enable the setting of `shareProcessNamespace` in a Pod for sharing
-  a single process namespace between containers running in a pod; a brief sketch follows this
-  group of entries. More details can be found in
-  [Share Process Namespace between Containers in a Pod](/docs/tasks/configure-pod-container/share-process-namespace/).
-
-- `PreferNominatedNode`: This flag tells the scheduler whether the nominated
-  nodes will be checked first before looping through all the other nodes in
-  the cluster.
-
-- `RequestManagement`: Enables managing request concurrency with prioritization and fairness
-  at each API server. Deprecated by `APIPriorityAndFairness` since 1.17.
-
-- `ResourceLimitsPriorityFunction`: Enable a scheduler priority function that
-  assigns the lowest possible score of 1 to a node that satisfies at least one of
-  the input Pod's CPU and memory limits. The intent is to break ties between
-  nodes with the same scores.
-
-- `ResourceQuotaScopeSelectors`: Enable resource quota scope selectors.
-
-- `RootCAConfigMap`: Configure the `kube-controller-manager` to publish a
-  {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} named `kube-root-ca.crt`
-  to every namespace. This ConfigMap contains a CA bundle used for verifying connections
-  to the kube-apiserver. See
-  [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md)
-  for more details.
-
-- `RotateKubeletClientCertificate`: Enable the rotation of the client TLS certificate on the kubelet.
-  See [kubelet configuration](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration)
-  for more details.
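As referenced under `PodShareProcessNamespace` above, a minimal sketch of the setting; the Pod name is hypothetical and the images are chosen purely for illustration:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: shared-pid-example       # hypothetical name
spec:
  shareProcessNamespace: true    # all containers share one PID namespace
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
    - name: sidecar              # can see (and signal) the app's processes
      image: busybox:1.36
      command: ["sh", "-c", "sleep 3600"]
```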
-
-- `RunAsGroup`: Enable control over the primary group ID set on the init processes of containers.
-
-- `RuntimeClass`: Enable the [RuntimeClass](/docs/concepts/containers/runtime-class/) feature for
-  selecting container runtime configurations.
-
-- `SCTPSupport`: Enables the _SCTP_ `protocol` value in Pod, Service, Endpoints, EndpointSlice,
-  and NetworkPolicy definitions.
-
-- `ScheduleDaemonSetPods`: Enable DaemonSet Pods to be scheduled by the default scheduler instead
-  of the DaemonSet controller.
-
-- `SelectorIndex`: Allows label- and field-based indexes in the API server watch cache to accelerate
-  list operations.
-
-- `ServiceAccountIssuerDiscovery`: Enable OIDC discovery endpoints (issuer and JWKS URLs) for the
-  service account issuer in the API server. See
-  [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery)
-  for more details.
-
-- `ServiceAppProtocol`: Enables the `appProtocol` field on Services and Endpoints.
-
-- `ServiceIPStaticSubrange`: Enables a strategy for Services ClusterIP allocations, whereby the
-  ClusterIP range is subdivided. Dynamically allocated ClusterIP addresses are assigned
-  preferentially from the upper range, allowing users to assign static ClusterIPs from the lower
-  range with a low risk of collision. See
-  [Avoiding collisions](/docs/reference/networking/virtual-ips/#avoiding-collisions)
-  for more details.
-
-- `ServiceInternalTrafficPolicy`: Enables the `internalTrafficPolicy` field on Services.
-
-- `ServiceLoadBalancerClass`: Enables the `loadBalancerClass` field on Services. See
-  [Specifying class of load balancer implementation](/docs/concepts/services-networking/service/#load-balancer-class)
-  for more details.
-
-- `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers.
-
-- `ServiceLBNodePortControl`: Enables the `allocateLoadBalancerNodePorts` field on Services.
-
-- `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider.
-  A node is eligible for exclusion if labelled with "`node.kubernetes.io/exclude-from-external-load-balancers`".
-
-- `ServiceTopology`: Enable Services to route traffic based upon the Node topology of the cluster.
-
-- `SetHostnameAsFQDN`: Enable the ability to set the Fully Qualified Domain Name (FQDN) as the
-  hostname of a pod. See
-  [Pod's `setHostnameAsFQDN` field](/docs/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field).
-
-- `StartupProbe`: Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe)
-  probe in the kubelet.
-
-- `StatefulSetMinReadySeconds`: Allows `minReadySeconds` to be respected by
-  the StatefulSet controller.
-
-- `StorageObjectInUseProtection`: Postpone the deletion of PersistentVolume or
-  PersistentVolumeClaim objects if they are still being used.
-
-- `StreamingProxyRedirects`: Instructs the API server to intercept (and follow) redirects from the
-  backend (kubelet) for streaming requests. Examples of streaming requests include the `exec`,
-  `attach` and `port-forward` requests.
-
-- `SupportIPVSProxyMode`: Enable providing in-cluster service load balancing using IPVS.
-  See [service proxies](/docs/reference/networking/virtual-ips/) for more details.
-
-- `SupportNodePidsLimit`: Enable support for limiting PIDs on the Node.
-  The parameter `pid=<number>` in the `--system-reserved` and `--kube-reserved` options can be specified to
-  ensure that the specified number of process IDs will be reserved for the system as a whole and for
-  Kubernetes system daemons respectively.
-
-- `SupportPodPidsLimit`: Enable support for limiting PIDs in Pods.
-
-- `SuspendJob`: Enable support for suspending and resuming Jobs. For more details, see
-  [the Jobs docs](/docs/concepts/workloads/controllers/job/).
-
-- `Sysctls`: Enable support for namespaced kernel parameters (sysctls) that can be set for each
-  pod. See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details.
-
-- `TTLAfterFinished`: Allow a [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/)
-  to clean up resources after they finish execution.
-
-- `TaintBasedEvictions`: Enable evicting pods from nodes based on taints on Nodes and tolerations
-  on Pods. See [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/)
-  for more details.
-
-- `TaintNodesByCondition`: Enable automatic tainting of nodes based on
-  [node conditions](/docs/concepts/architecture/nodes/#condition).
-
-- `TokenRequest`: Enable the `TokenRequest` endpoint on service account resources.
-
-- `TokenRequestProjection`: Enable the injection of service account tokens into a Pod through a
-  [`projected` volume](/docs/concepts/storage/volumes/#projected).
-
-- `UserNamespacesStatelessPodsSupport`: Enable user namespace support for stateless Pods. This
-  flag was renamed in newer releases to `UserNamespacesSupport`.
-
-- `ValidateProxyRedirects`: This flag controls whether the API server should validate that redirects
-  are only followed to the same host. Only used if the `StreamingProxyRedirects` flag is enabled.
-
-- `VolumePVCDataSource`: Enable support for specifying an existing PVC as a DataSource.
-
-- `VolumeScheduling`: Enable volume-topology-aware scheduling and make the PersistentVolumeClaim
-  (PVC) binding aware of scheduling decisions. It also enables the usage of the
-  [`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the
-  `PersistentLocalVolumes` feature gate.
-
-- `VolumeSnapshotDataSource`: Enable volume snapshot data source support.
-
-- `VolumeSubpath`: Allow mounting a subpath of a volume in a container.
-
-- `VolumeSubpathEnvExpansion`: Enable the `subPathExpr` field for expanding environment
-  variables into a `subPath`.
-
-- `WarningHeaders`: Allow sending warning headers in API responses.
-
-- `WindowsEndpointSliceProxying`: When enabled, kube-proxy running on Windows will use
-  EndpointSlices as the primary data source instead of Endpoints, enabling scalability and
-  performance improvements. See
-  [Enabling Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/).
-
-- `WindowsGMSA`: Enables passing of GMSA credential specs from pods to container runtimes.
-
-- `WindowsHostProcessContainers`: Enables support for Windows HostProcess containers.
-
-- `WindowsRunAsUserName`: Enable support for running applications in Windows containers as a
-  non-default user. See [Configuring RunAsUserName](/docs/tasks/configure-pod-container/configure-runasusername)
-  for more details.
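To close out this list, a minimal sketch of the `runAsUserName` field from the `WindowsRunAsUserName` entry above; the Pod name and image tag are illustrative only:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: win-username-example     # hypothetical name
spec:
  securityContext:
    windowsOptions:
      runAsUserName: "ContainerUser"  # any local or domain user name works here
  containers:
    - name: app
      image: mcr.microsoft.com/windows/servercore:ltsc2022  # illustrative tag
```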
-
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates-removed/index.md b/content/en/docs/reference/command-line-tools-reference/feature-gates-removed/index.md
new file mode 100644
index 0000000000000..9e37d0eb09692
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates-removed/index.md
@@ -0,0 +1,624 @@
+---
+title: Feature Gates (removed)
+weight: 15
+content_type: concept
+---
+
+
+This page contains a list of feature gates that have been removed. The information on this page is for reference.
+A removed feature gate is different from a GA'ed or deprecated one in that a removed one is
+no longer recognized as a valid feature gate.
+However, a GA'ed or deprecated feature gate is still recognized by the corresponding Kubernetes
+components, although it can no longer cause any behavior difference in a cluster.
+
+For feature gates that are still recognized by the Kubernetes components, please refer to
+the [Alpha/Beta feature gate table](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-alpha-or-beta-features)
+or the [Graduated/Deprecated feature gate table](/docs/reference/command-line-tools-reference/feature-gates/#feature-gates-for-graduated-or-deprecated-features).
+
+### Feature gates that are removed
+
+In the following table:
+
+- The "From" column contains the Kubernetes release when a feature is introduced
+  or its release stage is changed.
+- The "To" column, if not empty, contains the last Kubernetes release in which
+  you can still use a feature gate. If the feature stage is either "Deprecated"
+  or "GA", the "To" column is the Kubernetes release when the feature is removed.
+
+{{< table caption="Feature Gates Removed" >}}
+
+| Feature | Default | Stage | From | To |
+|---------|---------|-------|-------|-------|
+| `Accelerators` | `false` | Alpha | 1.6 | 1.10 |
+| `Accelerators` | - | Deprecated | 1.11 | 1.11 |
+| `AdvancedAuditing` | `false` | Alpha | 1.7 | 1.7 |
+| `AdvancedAuditing` | `true` | Beta | 1.8 | 1.11 |
+| `AdvancedAuditing` | `true` | GA | 1.12 | 1.27 |
+| `AffinityInAnnotations` | `false` | Alpha | 1.6 | 1.7 |
+| `AffinityInAnnotations` | - | Deprecated | 1.8 | 1.8 |
+| `AllowExtTrafficLocalEndpoints` | `false` | Beta | 1.4 | 1.6 |
+| `AllowExtTrafficLocalEndpoints` | `true` | GA | 1.7 | 1.9 |
+| `AllowInsecureBackendProxy` | `true` | Beta | 1.17 | 1.20 |
+| `AllowInsecureBackendProxy` | `true` | GA | 1.21 | 1.25 |
+| `AttachVolumeLimit` | `false` | Alpha | 1.11 | 1.11 |
+| `AttachVolumeLimit` | `true` | Beta | 1.12 | 1.16 |
+| `AttachVolumeLimit` | `true` | GA | 1.17 | 1.21 |
+| `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | 1.21 |
+| `BalanceAttachedNodeVolumes` | `false` | Deprecated | 1.22 | 1.22 |
+| `BlockVolume` | `false` | Alpha | 1.9 | 1.12 |
+| `BlockVolume` | `true` | Beta | 1.13 | 1.17 |
+| `BlockVolume` | `true` | GA | 1.18 | 1.21 |
+| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 |
+| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | 1.21 |
+| `BoundServiceAccountTokenVolume` | `true` | GA | 1.22 | 1.23 |
+| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 |
+| `CRIContainerLogRotation` | `true` | Beta | 1.11 | 1.20 |
+| `CRIContainerLogRotation` | `true` | GA | 1.21 | 1.22 |
+| `CSIBlockVolume` | `false` | Alpha | 1.11 | 1.13 |
+| `CSIBlockVolume` | `true` | Beta | 1.14 | 1.17 |
+| `CSIBlockVolume` | `true` | GA | 1.18 | 1.21 |
+| `CSIDriverRegistry` | `false` | Alpha | 1.12 | 1.13 |
+| `CSIDriverRegistry` | 
`true` | Beta | 1.14 | 1.17 | +| `CSIDriverRegistry` | `true` | GA | 1.18 | 1.21 | +| `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 | +| `CSIInlineVolume` | `true` | Beta | 1.16 | 1.24 | +| `CSIInlineVolume` | `true` | GA | 1.25 | 1.26 | +| `CSIMigration` | `false` | Alpha | 1.14 | 1.16 | +| `CSIMigration` | `true` | Beta | 1.17 | 1.24 | +| `CSIMigration` | `true` | GA | 1.25 | 1.26 | +| `CSIMigrationAWS` | `false` | Alpha | 1.14 | 1.16 | +| `CSIMigrationAWS` | `false` | Beta | 1.17 | 1.22 | +| `CSIMigrationAWS` | `true` | Beta | 1.23 | 1.24 | +| `CSIMigrationAWS` | `true` | GA | 1.25 | 1.26 | +| `CSIMigrationAWSComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAWSComplete` | - | Deprecated | 1.21 | 1.21 | +| `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | 1.18 | +| `CSIMigrationAzureDisk` | `false` | Beta | 1.19 | 1.22 | +| `CSIMigrationAzureDisk` | `true` | Beta | 1.23 | 1.23 | +| `CSIMigrationAzureDisk` | `true` | GA | 1.24 | 1.26 | +| `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAzureDiskComplete` | - | Deprecated | 1.21 | 1.21 | +| `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationAzureFileComplete` | - | Deprecated | 1.21 | 1.21 | +| `CSIMigrationGCE` | `false` | Alpha | 1.14 | 1.16 | +| `CSIMigrationGCE` | `false` | Beta | 1.17 | 1.22 | +| `CSIMigrationGCE` | `true` | Beta | 1.23 | 1.24 | +| `CSIMigrationGCE` | `true` | GA | 1.25 | 1.27 | +| `CSIMigrationGCEComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationGCEComplete` | - | Deprecated | 1.21 | 1.21 | +| `CSIMigrationOpenStack` | `false` | Alpha | 1.14 | 1.17 | +| `CSIMigrationOpenStack` | `true` | Beta | 1.18 | 1.23 | +| `CSIMigrationOpenStack` | `true` | GA | 1.24 | 1.25 | +| `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | 1.20 | +| `CSIMigrationOpenStackComplete` | - | Deprecated | 1.21 | 1.21 | +| `CSIMigrationvSphere` | `false` | Alpha | 1.18 | 1.18 | +| `CSIMigrationvSphere` | `false` | Beta | 1.19 | 1.24 | +| `CSIMigrationvSphere` | `true` | Beta | 1.25 | 1.25 | +| `CSIMigrationvSphere` | `true` | GA | 1.26 | 1.28 | +| `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | 1.21 | +| `CSIMigrationvSphereComplete` | - | Deprecated | 1.22 | 1.22 | +| `CSINodeInfo` | `false` | Alpha | 1.12 | 1.13 | +| `CSINodeInfo` | `true` | Beta | 1.14 | 1.16 | +| `CSINodeInfo` | `true` | GA | 1.17 | 1.22 | +| `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 | +| `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 | +| `CSIPersistentVolume` | `true` | GA | 1.13 | 1.16 | +| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | Beta | 1.21 | 1.21 | +| `CSIServiceAccountToken` | `true` | GA | 1.22 | 1.24 | +| `CSIStorageCapacity` | `false` | Alpha | 1.19 | 1.20 | +| `CSIStorageCapacity` | `true` | Beta | 1.21 | 1.23 | +| `CSIStorageCapacity` | `true` | GA | 1.24 | 1.27 | +| `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | +| `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | 1.22 | +| `CSIVolumeFSGroupPolicy` | `true` | GA | 1.23 | 1.25 | +| `CSRDuration` | `true` | Beta | 1.22 | 1.23 | +| `CSRDuration` | `true` | GA | 1.24 | 1.25 | +| `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | +| `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | 1.22 | +| `ConfigurableFSGroupPolicy` | `true` | GA | 1.23 | 1.25 | +| `ControllerManagerLeaderMigration` | `false` | Alpha | 1.21 | 1.21 | +| `ControllerManagerLeaderMigration` | `true` | Beta | 1.22 | 1.23 | +| 
`ControllerManagerLeaderMigration` | `true` | GA | 1.24 | 1.26 | +| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | Beta | 1.21 | 1.21 | +| `CronJobControllerV2` | `true` | GA | 1.22 | 1.23 | +| `CronJobTimeZone` | `false` | Alpha | 1.24 | 1.24 | +| `CronJobTimeZone` | `true` | Beta | 1.25 | 1.26 | +| `CronJobTimeZone` | `true` | GA | 1.27 | 1.28 | +| `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 | +| `CustomPodDNS` | `true` | Beta| 1.10 | 1.13 | +| `CustomPodDNS` | `true` | GA | 1.14 | 1.16 | +| `CustomResourceDefaulting` | `false` | Alpha| 1.15 | 1.15 | +| `CustomResourceDefaulting` | `true` | Beta | 1.16 | 1.16 | +| `CustomResourceDefaulting` | `true` | GA | 1.17 | 1.18 | +| `CustomResourcePublishOpenAPI` | `false` | Alpha| 1.14 | 1.14 | +| `CustomResourcePublishOpenAPI` | `true` | Beta| 1.15 | 1.15 | +| `CustomResourcePublishOpenAPI` | `true` | GA | 1.16 | 1.18 | +| `CustomResourceSubresources` | `false` | Alpha | 1.10 | 1.10 | +| `CustomResourceSubresources` | `true` | Beta | 1.11 | 1.15 | +| `CustomResourceSubresources` | `true` | GA | 1.16 | 1.18 | +| `CustomResourceValidation` | `false` | Alpha | 1.8 | 1.8 | +| `CustomResourceValidation` | `true` | Beta | 1.9 | 1.15 | +| `CustomResourceValidation` | `true` | GA | 1.16 | 1.18 | +| `CustomResourceWebhookConversion` | `false` | Alpha | 1.13 | 1.14 | +| `CustomResourceWebhookConversion` | `true` | Beta | 1.15 | 1.15 | +| `CustomResourceWebhookConversion` | `true` | GA | 1.16 | 1.18 | +| `DaemonSetUpdateSurge` | `false` | Alpha | 1.21 | 1.21 | +| `DaemonSetUpdateSurge` | `true` | Beta | 1.22 | 1.24 | +| `DaemonSetUpdateSurge` | `true` | GA | 1.25 | 1.28 | +| `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 | +| `DefaultPodTopologySpread` | `true` | Beta | 1.20 | 1.23 | +| `DefaultPodTopologySpread` | `true` | GA | 1.24 | 1.25 | +| `DelegateFSGroupToCSIDriver` | `false` | Alpha | 1.22 | 1.22 | +| `DelegateFSGroupToCSIDriver` | `true` | Beta | 1.23 | 1.25 | +| `DelegateFSGroupToCSIDriver` | `true` | GA | 1.26 | 1.27 | +| `DevicePlugins` | `false` | Alpha | 1.8 | 1.9 | +| `DevicePlugins` | `true` | Beta | 1.10 | 1.25 | +| `DevicePlugins` | `true` | GA | 1.26 | 1.27 | +| `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 | +| `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | 1.24 | +| `DisableAcceleratorUsageMetrics` | `true` | GA | 1.25 | 1.27 | +| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | 1.20 | +| `DownwardAPIHugePages` | `false` | Beta | 1.21 | 1.21 | +| `DownwardAPIHugePages` | `true` | Beta | 1.22 | 1.26 | +| `DownwardAPIHugePages` | `true` | GA | 1.27 | 1.28 | +| `DryRun` | `false` | Alpha | 1.12 | 1.12 | +| `DryRun` | `true` | Beta | 1.13 | 1.18 | +| `DryRun` | `true` | GA | 1.19 | 1.27 | +| `DynamicAuditing` | `false` | Alpha | 1.13 | 1.18 | +| `DynamicAuditing` | - | Deprecated | 1.19 | 1.19 | +| `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | +| `DynamicKubeletConfig` | `true` | Beta | 1.11 | 1.21 | +| `DynamicKubeletConfig` | `false` | Deprecated | 1.22 | 1.25 | +| `DynamicProvisioningScheduling` | `false` | Alpha | 1.11 | 1.11 | +| `DynamicProvisioningScheduling` | - | Deprecated| 1.12 | - | +| `DynamicVolumeProvisioning` | `true` | Alpha | 1.3 | 1.7 | +| `DynamicVolumeProvisioning` | `true` | GA | 1.8 | 1.12 | +| `EnableAggregatedDiscoveryTimeout` | `true` | Deprecated | 1.16 | 1.17 | +| `EnableEquivalenceClassCache` | `false` | Alpha | 1.8 | 1.12 | +| `EnableEquivalenceClassCache` | - | Deprecated | 1.13 | 1.23 | +| `EndpointSlice` | 
`false` | Alpha | 1.16 | 1.16 | +| `EndpointSlice` | `false` | Beta | 1.17 | 1.17 | +| `EndpointSlice` | `true` | Beta | 1.18 | 1.20 | +| `EndpointSlice` | `true` | GA | 1.21 | 1.24 | +| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.20 | +| `EndpointSliceNodeName` | `true` | GA | 1.21 | 1.24 | +| `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 | +| `EndpointSliceProxying` | `true` | Beta | 1.19 | 1.21 | +| `EndpointSliceProxying` | `true` | GA | 1.22 | 1.24 | +| `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | 1.21 | +| `EndpointSliceTerminatingCondition` | `true` | Beta | 1.22 | 1.25 | +| `EndpointSliceTerminatingCondition` | `true` | GA | 1.26 | 1.27 | +| `EphemeralContainers` | `false` | Alpha | 1.16 | 1.22 | +| `EphemeralContainers` | `true` | Beta | 1.23 | 1.24 | +| `EphemeralContainers` | `true` | GA | 1.25 | 1.26 | +| `EvenPodsSpread` | `false` | Alpha | 1.16 | 1.17 | +| `EvenPodsSpread` | `true` | Beta | 1.18 | 1.18 | +| `EvenPodsSpread` | `true` | GA | 1.19 | 1.21 | +| `ExpandCSIVolumes` | `false` | Alpha | 1.14 | 1.15 | +| `ExpandCSIVolumes` | `true` | Beta | 1.16 | 1.23 | +| `ExpandCSIVolumes` | `true` | GA | 1.24 | 1.26 | +| `ExpandInUsePersistentVolumes` | `false` | Alpha | 1.11 | 1.14 | +| `ExpandInUsePersistentVolumes` | `true` | Beta | 1.15 | 1.23 | +| `ExpandInUsePersistentVolumes` | `true` | GA | 1.24 | 1.26 | +| `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | +| `ExpandPersistentVolumes` | `true` | Beta | 1.11 | 1.23 | +| `ExpandPersistentVolumes` | `true` | GA | 1.24 | 1.26 | +| `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | 1.12 | +| `ExperimentalCriticalPodAnnotation` | `false` | Deprecated | 1.13 | 1.16 | +| `ExternalPolicyForExternalIP` | `true` | GA | 1.18 | 1.22 | +| `GCERegionalPersistentDisk` | `true` | Beta | 1.10 | 1.12 | +| `GCERegionalPersistentDisk` | `true` | GA | 1.13 | 1.16 | +| `GRPCContainerProbe` | `false` | Alpha | 1.23 | 1.23 | +| `GRPCContainerProbe` | `true` | Beta | 1.24 | 1.26 | +| `GRPCContainerProbe` | `true` | GA | 1.27 | 1.28 | +| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | 1.20 | +| `GenericEphemeralVolume` | `true` | Beta | 1.21 | 1.22 | +| `GenericEphemeralVolume` | `true` | GA | 1.23 | 1.24 | +| `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 | +| `HugePageStorageMediumSize` | `true` | Beta | 1.19 | 1.21 | +| `HugePageStorageMediumSize` | `true` | GA | 1.22 | 1.24 | +| `HugePages` | `false` | Alpha | 1.8 | 1.9 | +| `HugePages` | `true` | Beta| 1.10 | 1.13 | +| `HugePages` | `true` | GA | 1.14 | 1.16 | +| `HyperVContainer` | `false` | Alpha | 1.10 | 1.19 | +| `HyperVContainer` | `false` | Deprecated | 1.20 | 1.20 | +| `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 | +| `IPv6DualStack` | `true` | Beta | 1.21 | 1.22 | +| `IPv6DualStack` | `true` | GA | 1.23 | 1.24 | +| `IdentifyPodOS` | `false` | Alpha | 1.23 | 1.23 | +| `IdentifyPodOS` | `true` | Beta | 1.24 | 1.24 | +| `IdentifyPodOS` | `true` | GA | 1.25 | 1.26 | +| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 | +| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | 1.20 | +| `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | 1.24 | +| `IndexedJob` | `false` | Alpha | 1.21 | 1.21 | +| `IndexedJob` | `true` | Beta | 1.22 | 1.23 | +| `IndexedJob` | `true` | GA | 1.24 | 1.25 | +| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | 1.21 | +| `IngressClassNamespacedParams` | `true` | Beta | 1.22 | 1.22 | +| `IngressClassNamespacedParams` | `true` | GA | 1.23 | 1.24 | +| `Initializers` | `false` | 
Alpha | 1.7 | 1.13 | +| `Initializers` | - | Deprecated | 1.14 | 1.14 | +| `JobMutableNodeSchedulingDirectives` | `true` | Beta | 1.23 | 1.26 | +| `JobMutableNodeSchedulingDirectives` | `true` | GA | 1.27 | 1.28 | +| `JobTrackingWithFinalizers` | `false` | Alpha | 1.22 | 1.22 | +| `JobTrackingWithFinalizers` | `false` | Beta | 1.23 | 1.24 | +| `JobTrackingWithFinalizers` | `true` | Beta | 1.25 | 1.25 | +| `JobTrackingWithFinalizers` | `true` | GA | 1.26 | 1.28 | +| `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 | +| `KubeletConfigFile` | - | Deprecated | 1.10 | 1.10 | +| `KubeletCredentialProviders` | `false` | Alpha | 1.20 | 1.23 | +| `KubeletCredentialProviders` | `true` | Beta | 1.24 | 1.25 | +| `KubeletCredentialProviders` | `true` | GA | 1.26 | 1.28 | +| `KubeletPluginsWatcher` | `false` | Alpha | 1.11 | 1.11 | +| `KubeletPluginsWatcher` | `true` | Beta | 1.12 | 1.12 | +| `KubeletPluginsWatcher` | `true` | GA | 1.13 | 1.16 | +| `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 | +| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | 1.20 | +| `LegacyNodeRoleBehavior` | `false` | GA | 1.21 | 1.22 | +| `LegacyServiceAccountTokenNoAutoGeneration` | `true` | Beta | 1.24 | 1.25 | +| `LegacyServiceAccountTokenNoAutoGeneration` | `true` | GA | 1.26 | 1.28 | +| `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | +| `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | 1.24 | +| `LocalStorageCapacityIsolation` | `true` | GA | 1.25 | 1.26 | +| `MixedProtocolLBService` | `false` | Alpha | 1.20 | 1.23 | +| `MixedProtocolLBService` | `true` | Beta | 1.24 | 1.25 | +| `MixedProtocolLBService` | `true` | GA | 1.26 | 1.27 | +| `MountContainers` | `false` | Alpha | 1.9 | 1.16 | +| `MountContainers` | `false` | Deprecated | 1.17 | 1.17 | +| `MountPropagation` | `false` | Alpha | 1.8 | 1.9 | +| `MountPropagation` | `true` | Beta | 1.10 | 1.11 | +| `MountPropagation` | `true` | GA | 1.12 | 1.14 | +| `MultiCIDRRangeAllocator` | `false` | Alpha | 1.25 | 1.28 | +| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | 1.21 | +| `NamespaceDefaultLabelName` | `true` | GA | 1.22 | 1.23 | +| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | 1.21 | +| `NetworkPolicyEndPort` | `true` | Beta | 1.22 | 1.24 | +| `NetworkPolicyEndPort` | `true` | GA | 1.25 | 1.26 | +| `NetworkPolicyStatus` | `false` | Alpha | 1.24 | 1.27 | +| `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 | +| `NodeDisruptionExclusion` | `true` | Beta | 1.19 | 1.20 | +| `NodeDisruptionExclusion` | `true` | GA | 1.21 | 1.22 | +| `NodeLease` | `false` | Alpha | 1.12 | 1.13 | +| `NodeLease` | `true` | Beta | 1.14 | 1.16 | +| `NodeLease` | `true` | GA | 1.17 | 1.23 | +| `NonPreemptingPriority` | `false` | Alpha | 1.15 | 1.18 | +| `NonPreemptingPriority` | `true` | Beta | 1.19 | 1.23 | +| `NonPreemptingPriority` | `true` | GA | 1.24 | 1.25 | +| `OpenAPIV3` | `false` | Alpha | 1.23 | 1.23 | +| `OpenAPIV3` | `true` | Beta | 1.24 | 1.26 | +| `OpenAPIV3` | `true` | GA | 1.27 | 1.28 | +| `PVCProtection` | `false` | Alpha | 1.9 | 1.9 | +| `PVCProtection` | - | Deprecated | 1.10 | 1.10 | +| `PersistentLocalVolumes` | `false` | Alpha | 1.7 | 1.9 | +| `PersistentLocalVolumes` | `true` | Beta | 1.10 | 1.13 | +| `PersistentLocalVolumes` | `true` | GA | 1.14 | 1.16 | +| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | 1.21 | +| `PodAffinityNamespaceSelector` | `true` | Beta | 1.22 | 1.23 | +| `PodAffinityNamespaceSelector` | `true` | GA | 1.24 | 1.25 | +| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 | +| 
`PodDisruptionBudget` | `true` | Beta | 1.5 | 1.20 | +| `PodDisruptionBudget` | `true` | GA | 1.21 | 1.25 | +| `PodHasNetworkCondition` | `false` | Alpha | 1.25 | 1.27 | +| `PodOverhead` | `false` | Alpha | 1.16 | 1.17 | +| `PodOverhead` | `true` | Beta | 1.18 | 1.23 | +| `PodOverhead` | `true` | GA | 1.24 | 1.25 | +| `PodPriority` | `false` | Alpha | 1.8 | 1.10 | +| `PodPriority` | `true` | Beta | 1.11 | 1.13 | +| `PodPriority` | `true` | GA | 1.14 | 1.18 | +| `PodReadinessGates` | `false` | Alpha | 1.11 | 1.11 | +| `PodReadinessGates` | `true` | Beta | 1.12 | 1.13 | +| `PodReadinessGates` | `true` | GA | 1.14 | 1.16 | +| `PodSecurity` | `false` | Alpha | 1.22 | 1.22 | +| `PodSecurity` | `true` | Beta | 1.23 | 1.24 | +| `PodSecurity` | `true` | GA | 1.25 | 1.27 | +| `PodShareProcessNamespace` | `false` | Alpha | 1.10 | 1.11 | +| `PodShareProcessNamespace` | `true` | Beta | 1.12 | 1.16 | +| `PodShareProcessNamespace` | `true` | GA | 1.17 | 1.19 | +| `PreferNominatedNode` | `false` | Alpha | 1.21 | 1.21 | +| `PreferNominatedNode` | `true` | Beta | 1.22 | 1.23 | +| `PreferNominatedNode` | `true` | GA | 1.24 | 1.25 | +| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | 1.21 | +| `ProbeTerminationGracePeriod` | `false` | Beta | 1.22 | 1.24 | +| `ProbeTerminationGracePeriod` | `true` | Beta | 1.25 | 1.27 | +| `ProbeTerminationGracePeriod` | `true` | GA | 1.28 | 1.28 | +| `RequestManagement` | `false` | Alpha | 1.15 | 1.16 | +| `RequestManagement` | - | Deprecated | 1.17 | 1.17 | +| `ResourceLimitsPriorityFunction` | `false` | Alpha | 1.9 | 1.18 | +| `ResourceLimitsPriorityFunction` | - | Deprecated | 1.19 | 1.19 | +| `ResourceQuotaScopeSelectors` | `false` | Alpha | 1.11 | 1.11 | +| `ResourceQuotaScopeSelectors` | `true` | Beta | 1.12 | 1.16 | +| `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | 1.18 | +| `RetroactiveDefaultStorageClass` | `false` | Alpha | 1.25 | 1.25 | +| `RetroactiveDefaultStorageClass` | `true` | Beta | 1.26 | 1.27 | +| `RetroactiveDefaultStorageClass` | `true` | GA | 1.28 | 1.28 | +| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 | +| `RootCAConfigMap` | `true` | Beta | 1.20 | 1.20 | +| `RootCAConfigMap` | `true` | GA | 1.21 | 1.22 | +| `RotateKubeletClientCertificate` | `true` | Beta | 1.8 | 1.18 | +| `RotateKubeletClientCertificate` | `true` | GA | 1.19 | 1.21 | +| `RunAsGroup` | `true` | Beta | 1.14 | 1.20 | +| `RunAsGroup` | `true` | GA | 1.21 | 1.22 | +| `RuntimeClass` | `false` | Alpha | 1.12 | 1.13 | +| `RuntimeClass` | `true` | Beta | 1.14 | 1.19 | +| `RuntimeClass` | `true` | GA | 1.20 | 1.24 | +| `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | +| `SCTPSupport` | `true` | Beta | 1.19 | 1.19 | +| `SCTPSupport` | `true` | GA | 1.20 | 1.22 | +| `ScheduleDaemonSetPods` | `false` | Alpha | 1.11 | 1.11 | +| `ScheduleDaemonSetPods` | `true` | Beta | 1.12 | 1.16 | +| `ScheduleDaemonSetPods` | `true` | GA | 1.17 | 1.18 | +| `SeccompDefault` | `false` | Alpha | 1.22 | 1.24 | +| `SeccompDefault` | `true` | Beta | 1.25 | 1.26 | +| `SeccompDefault` | `true` | GA | 1.27 | 1.28 | +| `SelectorIndex` | `false` | Alpha | 1.18 | 1.18 | +| `SelectorIndex` | `true` | Beta | 1.19 | 1.19 | +| `SelectorIndex` | `true` | GA | 1.20 | 1.25 | +| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | +| `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | 1.20 | +| `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | 1.23 | +| `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 | +| `ServiceAppProtocol` | `true` | Beta | 1.19 | 1.19 | +| `ServiceAppProtocol` | 
`true` | GA | 1.20 | 1.22 | +| `ServiceIPStaticSubrange` | `false` | Alpha | 1.24 | 1.24 | +| `ServiceIPStaticSubrange` | `true` | Beta | 1.25 | 1.25 | +| `ServiceIPStaticSubrange` | `true` | GA | 1.26 | 1.27 | +| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | 1.21 | +| `ServiceInternalTrafficPolicy` | `true` | Beta | 1.22 | 1.25 | +| `ServiceInternalTrafficPolicy` | `true` | GA | 1.26 | 1.27 | +| `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | 1.21 | +| `ServiceLBNodePortControl` | `true` | Beta | 1.22 | 1.23 | +| `ServiceLBNodePortControl` | `true` | GA | 1.24 | 1.25 | +| `ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | 1.21 | +| `ServiceLoadBalancerClass` | `true` | Beta | 1.22 | 1.23 | +| `ServiceLoadBalancerClass` | `true` | GA | 1.24 | 1.25 | +| `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 | +| `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 | +| `ServiceLoadBalancerFinalizer` | `true` | GA | 1.17 | 1.20 | +| `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | +| `ServiceNodeExclusion` | `true` | Beta | 1.19 | 1.20 | +| `ServiceNodeExclusion` | `true` | GA | 1.21 | 1.22 | +| `ServiceTopology` | `false` | Alpha | 1.17 | 1.19 | +| `ServiceTopology` | `false` | Deprecated | 1.20 | 1.22 | +| `SetHostnameAsFQDN` | `false` | Alpha | 1.19 | 1.19 | +| `SetHostnameAsFQDN` | `true` | Beta | 1.20 | 1.21 | +| `SetHostnameAsFQDN` | `true` | GA | 1.22 | 1.24 | +| `StartupProbe` | `false` | Alpha | 1.16 | 1.17 | +| `StartupProbe` | `true` | Beta | 1.18 | 1.19 | +| `StartupProbe` | `true` | GA | 1.20 | 1.23 | +| `StatefulSetMinReadySeconds` | `false` | Alpha | 1.22 | 1.22 | +| `StatefulSetMinReadySeconds` | `true` | Beta | 1.23 | 1.24 | +| `StatefulSetMinReadySeconds` | `true` | GA | 1.25 | 1.26 | +| `StorageObjectInUseProtection` | `true` | Beta | 1.10 | 1.10 | +| `StorageObjectInUseProtection` | `true` | GA | 1.11 | 1.24 | +| `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 | +| `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.17 | +| `StreamingProxyRedirects` | `true` | Deprecated | 1.18 | 1.21 | +| `StreamingProxyRedirects` | `false` | Deprecated | 1.22 | 1.24 | +| `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 | +| `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | +| `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | +| `SupportIPVSProxyMode` | `true` | GA | 1.11 | 1.20 | +| `SupportNodePidsLimit` | `false` | Alpha | 1.14 | 1.14 | +| `SupportNodePidsLimit` | `true` | Beta | 1.15 | 1.19 | +| `SupportNodePidsLimit` | `true` | GA | 1.20 | 1.23 | +| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 | +| `SupportPodPidsLimit` | `true` | Beta | 1.14 | 1.19 | +| `SupportPodPidsLimit` | `true` | GA | 1.20 | 1.23 | +| `SuspendJob` | `false` | Alpha | 1.21 | 1.21 | +| `SuspendJob` | `true` | Beta | 1.22 | 1.23 | +| `SuspendJob` | `true` | GA | 1.24 | 1.25 | +| `Sysctls` | `true` | Beta | 1.11 | 1.20 | +| `Sysctls` | `true` | GA | 1.21 | 1.22 | +| `TTLAfterFinished` | `false` | Alpha | 1.12 | 1.20 | +| `TTLAfterFinished` | `true` | Beta | 1.21 | 1.22 | +| `TTLAfterFinished` | `true` | GA | 1.23 | 1.24 | +| `TaintBasedEvictions` | `false` | Alpha | 1.6 | 1.12 | +| `TaintBasedEvictions` | `true` | Beta | 1.13 | 1.17 | +| `TaintBasedEvictions` | `true` | GA | 1.18 | 1.20 | +| `TaintNodesByCondition` | `false` | Alpha | 1.8 | 1.11 | +| `TaintNodesByCondition` | `true` | Beta | 1.12 | 1.16 | +| `TaintNodesByCondition` | `true` | GA | 1.17 | 1.18 | +| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 | +| `TokenRequest` | `true` | 
Beta | 1.12 | 1.19 | +| `TokenRequest` | `true` | GA | 1.20 | 1.21 | +| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | +| `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 | +| `TokenRequestProjection` | `true` | GA | 1.20 | 1.21 | +| `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | +| `TopologyManager` | `true` | Beta | 1.18 | 1.26 | +| `TopologyManager` | `true` | GA | 1.27 | 1.28 | +| `UserNamespacesStatelessPodsSupport` | `false` | Alpha | 1.25 | 1.27 | +| `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | +| `ValidateProxyRedirects` | `true` | Beta | 1.14 | 1.21 | +| `ValidateProxyRedirects` | `true` | Deprecated | 1.22 | 1.24 | +| `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 | +| `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 | +| `VolumePVCDataSource` | `true` | GA | 1.18 | 1.21 | +| `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | +| `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | +| `VolumeScheduling` | `true` | GA | 1.13 | 1.16 | +| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | +| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 | +| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | 1.22 | +| `VolumeSubpath` | `true` | GA | 1.10 | 1.24 | +| `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 | +| `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | 1.16 | +| `VolumeSubpathEnvExpansion` | `true` | GA | 1.17 | 1.24 | +| `WarningHeaders` | `true` | Beta | 1.19 | 1.21 | +| `WarningHeaders` | `true` | GA | 1.22 | 1.24 | +| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 | +| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | 1.21 | +| `WindowsEndpointSliceProxying` | `true` | GA | 1.22| 1.24 | +| `WindowsGMSA` | `false` | Alpha | 1.14 | 1.15 | +| `WindowsGMSA` | `true` | Beta | 1.16 | 1.17 | +| `WindowsGMSA` | `true` | GA | 1.18 | 1.20 | +| `WindowsHostProcessContainers` | `false` | Alpha | 1.22 | 1.22 | +| `WindowsHostProcessContainers` | `true` | Beta | 1.23 | 1.25 | +| `WindowsHostProcessContainers` | `true` | GA | 1.26 | 1.27 | +| `WindowsRunAsUserName` | `false` | Alpha | 1.16 | 1.16 | +| `WindowsRunAsUserName` | `true` | Beta | 1.17 | 1.17 | +| `WindowsRunAsUserName` | `true` | GA | 1.18 | 1.20 | +{{< /table >}} + +## Descriptions for removed feature gates + +- {{< feature-gate-description name="Accelerators" >}} +- {{< feature-gate-description name="AffinityInAnnotations" >}} +- {{< feature-gate-description name="AdvancedAuditing" >}} +- {{< feature-gate-description name="AllowExtTrafficLocalEndpoints" >}} +- {{< feature-gate-description name="AllowInsecureBackendProxy" >}} +- {{< feature-gate-description name="AttachVolumeLimit" >}} +- {{< feature-gate-description name="BalanceAttachedNodeVolumes" >}} +- {{< feature-gate-description name="BlockVolume" >}} +- {{< feature-gate-description name="BoundServiceAccountTokenVolume" >}} +- {{< feature-gate-description name="CRIContainerLogRotation" >}} +- {{< feature-gate-description name="CSIBlockVolume" >}} +- {{< feature-gate-description name="CSIDriverRegistry" >}} +- {{< feature-gate-description name="CSIInlineVolume" >}} +- {{< feature-gate-description name="CSIMigration" >}} +- {{< feature-gate-description name="CSIMigrationAWS" >}} +- {{< feature-gate-description name="CSIMigrationAWSComplete" >}} +- {{< feature-gate-description name="CSIMigrationAzureDisk" >}} +- {{< feature-gate-description name="CSIMigrationAzureDiskComplete" >}} +- {{< feature-gate-description name="CSIMigrationAzureFileComplete" >}} +- {{< 
feature-gate-description name="CSIMigrationGCE" >}} +- {{< feature-gate-description name="CSIMigrationGCEComplete" >}} +- {{< feature-gate-description name="CSIMigrationOpenStack" >}} +- {{< feature-gate-description name="CSIMigrationOpenStackComplete" >}} +- {{< feature-gate-description name="CSIMigrationvSphereComplete" >}} +- {{< feature-gate-description name="CSIMigrationvSphere" >}} +- {{< feature-gate-description name="CSINodeInfo" >}} +- {{< feature-gate-description name="CSIPersistentVolume" >}} +- {{< feature-gate-description name="CSIServiceAccountToken" >}} +- {{< feature-gate-description name="CSIStorageCapacity" >}} +- {{< feature-gate-description name="CSIVolumeFSGroupPolicy" >}} +- {{< feature-gate-description name="CSRDuration" >}} +- {{< feature-gate-description name="ConfigurableFSGroupPolicy" >}} +- {{< feature-gate-description name="ControllerManagerLeaderMigration" >}} +- {{< feature-gate-description name="CronJobControllerV2" >}} +- {{< feature-gate-description name="CronJobTimeZone" >}} +- {{< feature-gate-description name="CustomPodDNS" >}} +- {{< feature-gate-description name="CustomResourceDefaulting" >}} +- {{< feature-gate-description name="CustomResourcePublishOpenAPI" >}} +- {{< feature-gate-description name="CustomResourceSubresources" >}} +- {{< feature-gate-description name="CustomResourceValidation" >}} +- {{< feature-gate-description name="CustomResourceWebhookConversion" >}} +- {{< feature-gate-description name="DaemonSetUpdateSurge" >}} +- {{< feature-gate-description name="DefaultPodTopologySpread" >}} +- {{< feature-gate-description name="DelegateFSGroupToCSIDriver" >}} +- {{< feature-gate-description name="DevicePlugins" >}} +- {{< feature-gate-description name="DisableAcceleratorUsageMetrics" >}} +- {{< feature-gate-description name="DownwardAPIHugePages" >}} +- {{< feature-gate-description name="DryRun" >}} +- {{< feature-gate-description name="DynamicAuditing" >}} +- {{< feature-gate-description name="DynamicKubeletConfig" >}} +- {{< feature-gate-description name="DynamicProvisioningScheduling" >}} +- {{< feature-gate-description name="DynamicVolumeProvisioning" >}} +- {{< feature-gate-description name="EnableAggregatedDiscoveryTimeout" >}} +- {{< feature-gate-description name="EnableEquivalenceClassCache" >}} +- {{< feature-gate-description name="EndpointSlice" >}} +- {{< feature-gate-description name="EndpointSliceNodeName" >}} +- {{< feature-gate-description name="EndpointSliceProxying" >}} +- {{< feature-gate-description name="EndpointSliceTerminatingCondition" >}} +- {{< feature-gate-description name="EphemeralContainers" >}} +- {{< feature-gate-description name="EvenPodsSpread" >}} +- {{< feature-gate-description name="ExpandCSIVolumes" >}} +- {{< feature-gate-description name="ExpandInUsePersistentVolumes" >}} +- {{< feature-gate-description name="ExpandPersistentVolumes" >}} +- {{< feature-gate-description name="ExperimentalCriticalPodAnnotation" >}} +- {{< feature-gate-description name="ExternalPolicyForExternalIP" >}} +- {{< feature-gate-description name="GCERegionalPersistentDisk" >}} +- {{< feature-gate-description name="GRPCContainerProbe" >}} +- {{< feature-gate-description name="GenericEphemeralVolume" >}} +- {{< feature-gate-description name="HugePageStorageMediumSize" >}} +- {{< feature-gate-description name="HugePages" >}} +- {{< feature-gate-description name="HyperVContainer" >}} +- {{< feature-gate-description name="IPv6DualStack" >}} +- {{< feature-gate-description name="IdentifyPodOS" >}} +- {{< feature-gate-description 
name="ImmutableEphemeralVolumes" >}} +- {{< feature-gate-description name="IndexedJob" >}} +- {{< feature-gate-description name="IngressClassNamespacedParams" >}} +- {{< feature-gate-description name="Initializers" >}} +- {{< feature-gate-description name="JobMutableNodeSchedulingDirectives" >}} +- {{< feature-gate-description name="JobTrackingWithFinalizers" >}} +- {{< feature-gate-description name="KubeletConfigFile" >}} +- {{< feature-gate-description name="KubeletCredentialProviders" >}} +- {{< feature-gate-description name="KubeletPluginsWatcher" >}} +- {{< feature-gate-description name="LegacyNodeRoleBehavior" >}} +- {{< feature-gate-description name="LegacyServiceAccountTokenNoAutoGeneration" >}} +- {{< feature-gate-description name="LocalStorageCapacityIsolation" >}} +- {{< feature-gate-description name="MixedProtocolLBService" >}} +- {{< feature-gate-description name="MountContainers" >}} +- {{< feature-gate-description name="MountPropagation" >}} +- {{< feature-gate-description name="MultiCIDRRangeAllocator" >}} +- {{< feature-gate-description name="NamespaceDefaultLabelName" >}} +- {{< feature-gate-description name="NetworkPolicyStatus" >}} +- {{< feature-gate-description name="NodeDisruptionExclusion" >}} +- {{< feature-gate-description name="NodeLease" >}} +- {{< feature-gate-description name="NonPreemptingPriority" >}} +- {{< feature-gate-description name="OpenAPIV3" >}} +- {{< feature-gate-description name="PVCProtection" >}} +- {{< feature-gate-description name="PersistentLocalVolumes" >}} +- {{< feature-gate-description name="PodAffinityNamespaceSelector" >}} +- {{< feature-gate-description name="PodDisruptionBudget" >}} +- {{< feature-gate-description name="PodHasNetworkCondition" >}} +- {{< feature-gate-description name="PodOverhead" >}} +- {{< feature-gate-description name="PodPriority" >}} +- {{< feature-gate-description name="PodReadinessGates" >}} +- {{< feature-gate-description name="PodSecurity" >}} +- {{< feature-gate-description name="PodShareProcessNamespace" >}} +- {{< feature-gate-description name="PreferNominatedNode" >}} +- {{< feature-gate-description name="ProbeTerminationGracePeriod" >}} +- {{< feature-gate-description name="RequestManagement" >}} +- {{< feature-gate-description name="ResourceLimitsPriorityFunction" >}} +- {{< feature-gate-description name="ResourceQuotaScopeSelectors" >}} +- {{< feature-gate-description name="RetroactiveDefaultStorageClass" >}} +- {{< feature-gate-description name="RootCAConfigMap" >}} +- {{< feature-gate-description name="RotateKubeletClientCertificate" >}} +- {{< feature-gate-description name="RunAsGroup" >}} +- {{< feature-gate-description name="RuntimeClass" >}} +- {{< feature-gate-description name="SCTPSupport" >}} +- {{< feature-gate-description name="ScheduleDaemonSetPods" >}} +- {{< feature-gate-description name="SeccompDefault" >}} +- {{< feature-gate-description name="SelectorIndex" >}} +- {{< feature-gate-description name="ServiceAccountIssuerDiscovery" >}} +- {{< feature-gate-description name="ServiceAppProtocol" >}} +- {{< feature-gate-description name="ServiceIPStaticSubrange" >}} +- {{< feature-gate-description name="ServiceInternalTrafficPolicy" >}} +- {{< feature-gate-description name="ServiceLoadBalancerClass" >}} +- {{< feature-gate-description name="ServiceLoadBalancerFinalizer" >}} +- {{< feature-gate-description name="ServiceLBNodePortControl" >}} +- {{< feature-gate-description name="ServiceNodeExclusion" >}} +- {{< feature-gate-description name="ServiceTopology" >}} +- {{< feature-gate-description 
name="SetHostnameAsFQDN" >}} +- {{< feature-gate-description name="StartupProbe" >}} +- {{< feature-gate-description name="StatefulSetMinReadySeconds" >}} +- {{< feature-gate-description name="StorageObjectInUseProtection" >}} +- {{< feature-gate-description name="StreamingProxyRedirects" >}} +- {{< feature-gate-description name="SupportIPVSProxyMode" >}} +- {{< feature-gate-description name="SupportNodePidsLimit" >}} +- {{< feature-gate-description name="SupportPodPidsLimit" >}} +- {{< feature-gate-description name="SuspendJob" >}} +- {{< feature-gate-description name="Sysctls" >}} +- {{< feature-gate-description name="TTLAfterFinished" >}} +- {{< feature-gate-description name="TaintBasedEvictions" >}} +- {{< feature-gate-description name="TaintNodesByCondition" >}} +- {{< feature-gate-description name="TokenRequest" >}} +- {{< feature-gate-description name="TokenRequestProjection" >}} +- {{< feature-gate-description name="TopologyManager" >}} +- {{< feature-gate-description name="UserNamespacesStatelessPodsSupport" >}} +- {{< feature-gate-description name="ValidateProxyRedirects" >}} +- {{< feature-gate-description name="VolumePVCDataSource" >}} +- {{< feature-gate-description name="VolumeScheduling" >}} +- {{< feature-gate-description name="VolumeSnapshotDataSource" >}} +- {{< feature-gate-description name="VolumeSubpath" >}} +- {{< feature-gate-description name="VolumeSubpathEnvExpansion" >}} +- {{< feature-gate-description name="WarningHeaders" >}} +- {{< feature-gate-description name="WindowsEndpointSliceProxying" >}} +- {{< feature-gate-description name="WindowsGMSA" >}} +- {{< feature-gate-description name="WindowsHostProcessContainers" >}} +- {{< feature-gate-description name="WindowsRunAsUserName" >}} diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md deleted file mode 100644 index 5cdfeddead17b..0000000000000 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ /dev/null @@ -1,773 +0,0 @@ ---- -title: Feature Gates -weight: 10 -content_type: concept -card: - name: reference - weight: 60 ---- - - -This page contains an overview of the various feature gates an administrator -can specify on different Kubernetes components. - -See [feature stages](#feature-stages) for an explanation of the stages for a feature. - - - -## Overview - -Feature gates are a set of key=value pairs that describe Kubernetes features. -You can turn these features on or off using the `--feature-gates` command line flag -on each Kubernetes component. - -Each Kubernetes component lets you enable or disable a set of feature gates that -are relevant to that component. -Use `-h` flag to see a full set of feature gates for all components. -To set feature gates for a component, such as kubelet, use the `--feature-gates` -flag assigned to a list of feature pairs: - -```shell ---feature-gates=...,GracefulNodeShutdown=true -``` - -The following tables are a summary of the feature gates that you can set on -different Kubernetes components. - -- The "Since" column contains the Kubernetes release when a feature is introduced - or its release stage is changed. -- The "Until" column, if not empty, contains the last Kubernetes release in which - you can still use a feature gate. -- If a feature is in the Alpha or Beta state, you can find the feature listed - in the [Alpha/Beta feature gate table](#feature-gates-for-alpha-or-beta-features). 
-- If a feature is stable, you can find all stages for that feature listed in the - [Graduated/Deprecated feature gate table](#feature-gates-for-graduated-or-deprecated-features). -- The [Graduated/Deprecated feature gate table](#feature-gates-for-graduated-or-deprecated-features) - also lists deprecated and withdrawn features. - -{{< note >}} -For a reference to old feature gates that are removed, please refer to -[feature gates removed](/docs/reference/command-line-tools-reference/feature-gates-removed/). -{{< /note >}} - -### Feature gates for Alpha or Beta features - -{{< table caption="Feature gates for features in Alpha or Beta states" >}} - -| Feature | Default | Stage | Since | Until | -|---------|---------|-------|-------|-------| -| `APIListChunking` | `false` | Alpha | 1.8 | 1.8 | -| `APIListChunking` | `true` | Beta | 1.9 | | -| `APIPriorityAndFairness` | `false` | Alpha | 1.18 | 1.19 | -| `APIPriorityAndFairness` | `true` | Beta | 1.20 | | -| `APIResponseCompression` | `false` | Alpha | 1.7 | 1.15 | -| `APIResponseCompression` | `true` | Beta | 1.16 | | -| `APIServerIdentity` | `false` | Alpha | 1.20 | 1.25 | -| `APIServerIdentity` | `true` | Beta | 1.26 | | -| `APIServerTracing` | `false` | Alpha | 1.22 | 1.26 | -| `APIServerTracing` | `true` | Beta | 1.27 | | -| `AdmissionWebhookMatchConditions` | `false` | Alpha | 1.27 | 1.27 | -| `AdmissionWebhookMatchConditions` | `true` | Beta | 1.28 | | -| `AggregatedDiscoveryEndpoint` | `false` | Alpha | 1.26 | 1.26 | -| `AggregatedDiscoveryEndpoint` | `true` | Beta | 1.27 | | -| `AnyVolumeDataSource` | `false` | Alpha | 1.18 | 1.23 | -| `AnyVolumeDataSource` | `true` | Beta | 1.24 | | -| `AppArmor` | `true` | Beta | 1.4 | | -| `CPUManagerPolicyAlphaOptions` | `false` | Alpha | 1.23 | | -| `CPUManagerPolicyBetaOptions` | `true` | Beta | 1.23 | | -| `CPUManagerPolicyOptions` | `false` | Alpha | 1.22 | 1.22 | -| `CPUManagerPolicyOptions` | `true` | Beta | 1.23 | | -| `CRDValidationRatcheting` | `false` | Alpha | 1.28 | | -| `CSIMigrationPortworx` | `false` | Alpha | 1.23 | 1.24 | -| `CSIMigrationPortworx` | `false` | Beta | 1.25 | | -| `CSINodeExpandSecret` | `false` | Alpha | 1.25 | 1.26 | -| `CSINodeExpandSecret` | `true` | Beta | 1.27 | | -| `CSIVolumeHealth` | `false` | Alpha | 1.21 | | -| `CloudControllerManagerWebhook` | `false` | Alpha | 1.27 | | -| `CloudDualStackNodeIPs` | `false` | Alpha | 1.27 | | -| `ClusterTrustBundle` | `false` | Alpha | 1.27 | | -| `ComponentSLIs` | `false` | Alpha | 1.26 | 1.26 | -| `ComponentSLIs` | `true` | Beta | 1.27 | | -| `ConsistentListFromCache` | `false` | Alpha | 1.28 | | -| `ContainerCheckpoint` | `false` | Alpha | 1.25 | | -| `ContextualLogging` | `false` | Alpha | 1.24 | | -| `CronJobsScheduledAnnotation` | `true` | Beta | 1.28 | | -| `CrossNamespaceVolumeDataSource` | `false` | Alpha | 1.26 | | -| `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | -| `CustomResourceValidationExpressions` | `false` | Alpha | 1.23 | 1.24 | -| `CustomResourceValidationExpressions` | `true` | Beta | 1.25 | | -| `DevicePluginCDIDevices` | `false` | Alpha | 1.28 | | -| `DisableCloudProviders` | `false` | Alpha | 1.22 | | -| `DisableKubeletCloudCredentialProviders` | `false` | Alpha | 1.23 | | -| `DynamicResourceAllocation` | `false` | Alpha | 1.26 | | -| `ElasticIndexedJob` | `true` | Beta | 1.27 | | -| `EventedPLEG` | `false` | Alpha | 1.26 | 1.26 | -| `EventedPLEG` | `false` | Beta | 1.27 | - | -| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | 1.20 | -| `GracefulNodeShutdown` | `true` | Beta | 1.21 | | -|
`GracefulNodeShutdownBasedOnPodPriority` | `false` | Alpha | 1.23 | 1.23 | -| `GracefulNodeShutdownBasedOnPodPriority` | `true` | Beta | 1.24 | | -| `HPAContainerMetrics` | `false` | Alpha | 1.20 | 1.26 | -| `HPAContainerMetrics` | `true` | Beta | 1.27 | | -| `HPAScaleToZero` | `false` | Alpha | 1.16 | | -| `HonorPVReclaimPolicy` | `false` | Alpha | 1.23 | | -| `InPlacePodVerticalScaling` | `false` | Alpha | 1.27 | | -| `InTreePluginAWSUnregister` | `false` | Alpha | 1.21 | | -| `InTreePluginAzureDiskUnregister` | `false` | Alpha | 1.21 | | -| `InTreePluginAzureFileUnregister` | `false` | Alpha | 1.21 | | -| `InTreePluginGCEUnregister` | `false` | Alpha | 1.21 | | -| `InTreePluginOpenStackUnregister` | `false` | Alpha | 1.21 | | -| `InTreePluginPortworxUnregister` | `false` | Alpha | 1.23 | | -| `InTreePluginvSphereUnregister` | `false` | Alpha | 1.21 | | -| `JobBackoffLimitPerIndex` | `false` | Alpha | 1.28 | | -| `JobPodFailurePolicy` | `false` | Alpha | 1.25 | 1.25 | -| `JobPodFailurePolicy` | `true` | Beta | 1.26 | | -| `JobPodReplacementPolicy` | `false` | Alpha | 1.28 | | -| `JobReadyPods` | `false` | Alpha | 1.23 | 1.23 | -| `JobReadyPods` | `true` | Beta | 1.24 | | -| `KMSv2` | `false` | Alpha | 1.25 | 1.26 | -| `KMSv2` | `true` | Beta | 1.27 | | -| `KMSv2KDF` | `false` | Beta | 1.28 | | -| `KubeProxyDrainingTerminatingNodes` | `false` | Alpha | 1.28 | | -| `KubeletCgroupDriverFromCRI` | `false` | Alpha | 1.28 | | -| `KubeletInUserNamespace` | `false` | Alpha | 1.22 | | -| `KubeletPodResourcesDynamicResources` | `false` | Alpha | 1.27 | | -| `KubeletPodResourcesGet` | `false` | Alpha | 1.27 | | -| `KubeletTracing` | `false` | Alpha | 1.25 | 1.26 | -| `KubeletTracing` | `true` | Beta | 1.27 | | -| `LegacyServiceAccountTokenCleanUp` | `false` | Alpha | 1.28 | | -| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | - | -| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | 1.21 | -| `LogarithmicScaleDown` | `true` | Beta | 1.22 | | -| `LoggingAlphaOptions` | `false` | Alpha | 1.24 | - | -| `LoggingBetaOptions` | `true` | Beta | 1.24 | - | -| `MatchLabelKeysInPodTopologySpread` | `false` | Alpha | 1.25 | 1.26 | -| `MatchLabelKeysInPodTopologySpread` | `true` | Beta | 1.27 | - | -| `MaxUnavailableStatefulSet` | `false` | Alpha | 1.24 | | -| `MemoryManager` | `false` | Alpha | 1.21 | 1.21 | -| `MemoryManager` | `true` | Beta | 1.22 | | -| `MemoryQoS` | `false` | Alpha | 1.22 | | -| `MinDomainsInPodTopologySpread` | `false` | Alpha | 1.24 | 1.24 | -| `MinDomainsInPodTopologySpread` | `false` | Beta | 1.25 | 1.26 | -| `MinDomainsInPodTopologySpread` | `true` | Beta | 1.27 | | -| `MultiCIDRRangeAllocator` | `false` | Alpha | 1.25 | | -| `MultiCIDRServiceAllocator` | `false` | Alpha | 1.27 | | -| `NewVolumeManagerReconstruction` | `false` | Beta | 1.27 | 1.27 | -| `NewVolumeManagerReconstruction` | `true` | Beta | 1.28 | | -| `NodeInclusionPolicyInPodTopologySpread` | `false` | Alpha | 1.25 | 1.25 | -| `NodeInclusionPolicyInPodTopologySpread` | `true` | Beta | 1.26 | | -| `NodeLogQuery` | `false` | Alpha | 1.27 | | -| `NodeSwap` | `false` | Alpha | 1.22 | 1.27 | -| `NodeSwap` | `false` | Beta | 1.28 | | -| `OpenAPIEnums` | `false` | Alpha | 1.23 | 1.23 | -| `OpenAPIEnums` | `true` | Beta | 1.24 | | -| `PDBUnhealthyPodEvictionPolicy` | `false` | Alpha | 1.26 | 1.26 | -| `PDBUnhealthyPodEvictionPolicy` | `true` | Beta | 1.27 | | -| `PersistentVolumeLastPhaseTransitionTime` | `false` | Alpha | 1.28 | | -| `PodAndContainerStatsFromCRI` | `false` | Alpha | 1.23 | | -|
`PodDeletionCost` | `false` | Alpha | 1.21 | 1.21 | -| `PodDeletionCost` | `true` | Beta | 1.22 | | -| `PodDisruptionConditions` | `false` | Alpha | 1.25 | 1.25 | -| `PodDisruptionConditions` | `true` | Beta | 1.26 | | -| `PodHostIPs` | `false` | Alpha | 1.28 | | -| `PodIndexLabel` | `true` | Beta | 1.28 | | -| `PodReadyToStartContainersCondition` | `false` | Alpha | 1.28 | | -| `PodSchedulingReadiness` | `false` | Alpha | 1.26 | 1.26 | -| `PodSchedulingReadiness` | `true` | Beta | 1.27 | | -| `ProcMountType` | `false` | Alpha | 1.12 | | -| `QOSReserved` | `false` | Alpha | 1.11 | | -| `ReadWriteOncePod` | `false` | Alpha | 1.22 | 1.26 | -| `ReadWriteOncePod` | `true` | Beta | 1.27 | | -| `RecoverVolumeExpansionFailure` | `false` | Alpha | 1.23 | | -| `RemainingItemCount` | `false` | Alpha | 1.15 | 1.15 | -| `RemainingItemCount` | `true` | Beta | 1.16 | | -| `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | -| `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | -| `SELinuxMountReadWriteOncePod` | `false` | Alpha | 1.25 | 1.26 | -| `SELinuxMountReadWriteOncePod` | `false` | Beta | 1.27 | 1.27 | -| `SELinuxMountReadWriteOncePod` | `true` | Beta | 1.28 | | -| `SchedulerQueueingHints` | `false` | Alpha | 1.28 | | -| `SecurityContextDeny` | `false` | Alpha | 1.27 | | -| `ServiceNodePortStaticSubrange` | `false` | Alpha | 1.27 | 1.27 | -| `ServiceNodePortStaticSubrange` | `true` | Beta | 1.28 | | -| `SidecarContainers` | `false` | Alpha | 1.28 | | -| `SizeMemoryBackedVolumes` | `false` | Alpha | 1.20 | 1.21 | -| `SizeMemoryBackedVolumes` | `true` | Beta | 1.22 | | -| `SkipReadOnlyValidationGCE` | `false` | Alpha | 1.28 | | -| `StableLoadBalancerNodeSet` | `true` | Beta | 1.27 | | -| `StatefulSetAutoDeletePVC` | `false` | Alpha | 1.23 | 1.26 | -| `StatefulSetAutoDeletePVC` | `false` | Beta | 1.27 | | -| `StatefulSetStartOrdinal` | `false` | Alpha | 1.26 | 1.26 | -| `StatefulSetStartOrdinal` | `true` | Beta | 1.27 | | -| `StorageVersionAPI` | `false` | Alpha | 1.20 | | -| `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | -| `StorageVersionHash` | `true` | Beta | 1.15 | | -| `TopologyAwareHints` | `false` | Alpha | 1.21 | 1.22 | -| `TopologyAwareHints` | `false` | Beta | 1.23 | 1.23 | -| `TopologyAwareHints` | `true` | Beta | 1.24 | | -| `TopologyManagerPolicyAlphaOptions` | `false` | Alpha | 1.26 | | -| `TopologyManagerPolicyBetaOptions` | `false` | Beta | 1.26 | 1.27 | -| `TopologyManagerPolicyBetaOptions` | `true` | Beta | 1.28 | | -| `TopologyManagerPolicyOptions` | `false` | Alpha | 1.26 | 1.27 | -| `TopologyManagerPolicyOptions` | `true` | Beta | 1.28 | | -| `UnknownVersionInteroperabilityProxy` | `false` | Alpha | 1.28 | | -| `UserNamespacesSupport` | `false` | Alpha | 1.28 | | -| `ValidatingAdmissionPolicy` | `false` | Alpha | 1.26 | 1.27 | -| `ValidatingAdmissionPolicy` | `false` | Beta | 1.28 | | -| `VolumeCapacityPriority` | `false` | Alpha | 1.21 | | -| `WatchList` | `false` | Alpha | 1.27 | | -| `WinDSR` | `false` | Alpha | 1.14 | | -| `WinOverlay` | `false` | Alpha | 1.14 | 1.19 | -| `WinOverlay` | `true` | Beta | 1.20 | | -| `WindowsHostNetwork` | `true` | Alpha | 1.26 | | -{{< /table >}} - -### Feature gates for graduated or deprecated features - -{{< table caption="Feature Gates for Graduated or Deprecated Features" >}} - -| Feature | Default | Stage | Since | Until | -|---------|---------|-------|-------|-------| -| `APISelfSubjectReview` | `false` | Alpha | 1.26 | 1.26 | -| `APISelfSubjectReview` | `true` | Beta | 1.27 | 1.27 | -| `APISelfSubjectReview` 
| `true` | GA | 1.28 | - | -| `CPUManager` | `false` | Alpha | 1.8 | 1.9 | -| `CPUManager` | `true` | Beta | 1.10 | 1.25 | -| `CPUManager` | `true` | GA | 1.26 | - | -| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | 1.20 | -| `CSIMigrationAzureFile` | `false` | Beta | 1.21 | 1.23 | -| `CSIMigrationAzureFile` | `true` | Beta | 1.24 | 1.25 | -| `CSIMigrationAzureFile` | `true` | GA | 1.26 | | -| `CSIMigrationRBD` | `false` | Alpha | 1.23 | 1.27 | -| `CSIMigrationRBD` | `false` | Deprecated | 1.28 | | -| `CSIMigrationvSphere` | `false` | Alpha | 1.18 | 1.18 | -| `CSIMigrationvSphere` | `false` | Beta | 1.19 | 1.24 | -| `CSIMigrationvSphere` | `true` | Beta | 1.25 | 1.25 | -| `CSIMigrationvSphere` | `true` | GA | 1.26 | - | -| `ConsistentHTTPGetHandlers` | `true` | GA | 1.25 | - | -| `CronJobTimeZone` | `false` | Alpha | 1.24 | 1.24 | -| `CronJobTimeZone` | `true` | Beta | 1.25 | 1.26 | -| `CronJobTimeZone` | `true` | GA | 1.27 | - | -| `DaemonSetUpdateSurge` | `false` | Alpha | 1.21 | 1.21 | -| `DaemonSetUpdateSurge` | `true` | Beta | 1.22 | 1.24 | -| `DaemonSetUpdateSurge` | `true` | GA | 1.25 | | -| `DefaultHostNetworkHostPortsInPodTemplates` | `false` | Deprecated | 1.28 | | -| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | 1.20 | -| `DownwardAPIHugePages` | `false` | Beta | 1.21 | 1.21 | -| `DownwardAPIHugePages` | `true` | Beta | 1.22 | 1.26 | -| `DownwardAPIHugePages` | `true` | GA | 1.27 | | -| `EfficientWatchResumption` | `false` | Alpha | 1.20 | 1.20 | -| `EfficientWatchResumption` | `true` | Beta | 1.21 | 1.23 | -| `EfficientWatchResumption` | `true` | GA | 1.24 | | -| `ExecProbeTimeout` | `true` | GA | 1.20 | | -| `ExpandedDNSConfig` | `false` | Alpha | 1.22 | 1.25 | -| `ExpandedDNSConfig` | `true` | Beta | 1.26 | 1.27 | -| `ExpandedDNSConfig` | `true` | GA | 1.28 | | -| `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | 1.27 | -| `ExperimentalHostUserNamespaceDefaulting` | `false` | Deprecated | 1.28 | | -| `GRPCContainerProbe` | `false` | Alpha | 1.23 | 1.23 | -| `GRPCContainerProbe` | `true` | Beta | 1.24 | 1.26 | -| `GRPCContainerProbe` | `true` | GA | 1.27 | | -| `IPTablesOwnershipCleanup` | `false` | Alpha | 1.25 | 1.26 | -| `IPTablesOwnershipCleanup` | `true` | Beta | 1.27 | 1.27 | -| `IPTablesOwnershipCleanup` | `true` | GA | 1.28 | | -| `InTreePluginRBDUnregister` | `false` | Alpha | 1.23 | 1.27 | -| `InTreePluginRBDUnregister` | `false` | Deprecated | 1.28 | | -| `JobMutableNodeSchedulingDirectives` | `true` | Beta | 1.23 | 1.26 | -| `JobMutableNodeSchedulingDirectives` | `true` | GA | 1.27 | | -| `JobTrackingWithFinalizers` | `false` | Alpha | 1.22 | 1.22 | -| `JobTrackingWithFinalizers` | `false` | Beta | 1.23 | 1.24 | -| `JobTrackingWithFinalizers` | `true` | Beta | 1.25 | 1.25 | -| `JobTrackingWithFinalizers` | `true` | GA | 1.26 | | -| `KMSv1` | `true` | Deprecated | 1.28 | | -| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 | -| `KubeletPodResources` | `true` | Beta | 1.15 | 1.27 | -| `KubeletPodResources` | `true` | GA | 1.28 | | -| `KubeletPodResourcesGetAllocatable` | `false` | Alpha | 1.21 | 1.22 | -| `KubeletPodResourcesGetAllocatable` | `true` | Beta | 1.23 | 1.27 | -| `KubeletPodResourcesGetAllocatable` | `true` | GA | 1.28 | | -| `LegacyServiceAccountTokenNoAutoGeneration` | `true` | Beta | 1.24 | 1.25 | -| `LegacyServiceAccountTokenNoAutoGeneration` | `true` | GA | 1.26 | | -| `LegacyServiceAccountTokenTracking` | `false` | Alpha | 1.26 | 1.26 | -| `LegacyServiceAccountTokenTracking` | `true` | Beta | 1.27 | 1.27 | -| 
`LegacyServiceAccountTokenTracking` | `true` | GA | 1.28 | | -| `MinimizeIPTablesRestore` | `false` | Alpha | 1.26 | 1.26 | -| `MinimizeIPTablesRestore` | `true` | Beta | 1.27 | 1.27 | -| `MinimizeIPTablesRestore` | `true` | GA | 1.28 | | -| `NodeOutOfServiceVolumeDetach` | `false` | Alpha | 1.24 | 1.25 | -| `NodeOutOfServiceVolumeDetach` | `true` | Beta | 1.26 | 1.27 | -| `NodeOutOfServiceVolumeDetach` | `true` | GA | 1.28 | | -| `OpenAPIV3` | `false` | Alpha | 1.23 | 1.23 | -| `OpenAPIV3` | `true` | Beta | 1.24 | 1.26 | -| `OpenAPIV3` | `true` | GA | 1.27 | | -| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | 1.21 | -| `ProbeTerminationGracePeriod` | `false` | Beta | 1.22 | 1.24 | -| `ProbeTerminationGracePeriod` | `true` | Beta | 1.25 | 1.27 | -| `ProbeTerminationGracePeriod` | `true` | GA | 1.28 | | -| `ProxyTerminatingEndpoints` | `false` | Alpha | 1.22 | 1.25 | -| `ProxyTerminatingEndpoints` | `true` | Beta | 1.26 | 1.27 | -| `ProxyTerminatingEndpoints` | `true` | GA | 1.28 | | -| `RemoveSelfLink` | `false` | Alpha | 1.16 | 1.19 | -| `RemoveSelfLink` | `true` | Beta | 1.20 | 1.23 | -| `RemoveSelfLink` | `true` | GA | 1.24 | | -| `RetroactiveDefaultStorageClass` | `false` | Alpha | 1.25 | 1.25 | -| `RetroactiveDefaultStorageClass` | `true` | Beta | 1.26 | 1.27 | -| `RetroactiveDefaultStorageClass` | `true` | GA | 1.28 | | -| `SeccompDefault` | `false` | Alpha | 1.22 | 1.24 | -| `SeccompDefault` | `true` | Beta | 1.25 | 1.26 | -| `SeccompDefault` | `true` | GA | 1.27 | - | -| `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | -| `ServerSideApply` | `true` | Beta | 1.16 | 1.21 | -| `ServerSideApply` | `true` | GA | 1.22 | - | -| `ServerSideFieldValidation` | `false` | Alpha | 1.23 | 1.24 | -| `ServerSideFieldValidation` | `true` | Beta | 1.25 | 1.26 | -| `ServerSideFieldValidation` | `true` | GA | 1.27 | - | -| `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | -| `TopologyManager` | `true` | Beta | 1.18 | 1.26 | -| `TopologyManager` | `true` | GA | 1.27 | - | -| `WatchBookmark` | `false` | Alpha | 1.15 | 1.15 | -| `WatchBookmark` | `true` | Beta | 1.16 | 1.16 | -| `WatchBookmark` | `true` | GA | 1.17 | - | -{{< /table >}} - -## Using a feature - -### Feature stages - -A feature can be in *Alpha*, *Beta* or *GA* stage. -An *Alpha* feature means: - -* Disabled by default. -* Might be buggy. Enabling the feature may expose bugs. -* Support for the feature may be dropped at any time without notice. -* The API may change in incompatible ways in a later software release without notice. -* Recommended for use only in short-lived testing clusters, due to increased - risk of bugs and lack of long-term support. - -A *Beta* feature means: - -* Usually enabled by default. Beta API groups are [disabled by default](https://github.com/kubernetes/enhancements/tree/master/keps/sig-architecture/3136-beta-apis-off-by-default). -* The feature is well tested. Enabling the feature is considered safe. -* Support for the overall feature will not be dropped, though details may change. -* The schema and/or semantics of objects may change in incompatible ways in a - subsequent beta or stable release. When this happens, we will provide instructions - for migrating to the next version. This may require deleting, editing, and - re-creating API objects. The editing process may require some thought. - This may require downtime for applications that rely on the feature. -* Recommended for only non-business-critical uses because of potential for - incompatible changes in subsequent releases.
If you have multiple clusters - that can be upgraded independently, you may be able to relax this restriction. - -{{< note >}} -Please do try *Beta* features and give feedback on them! -After they exit beta, it may not be practical for us to make more changes. -{{< /note >}} - -A *General Availability* (GA) feature is also referred to as a *stable* feature. It means: - -* The feature is always enabled; you cannot disable it. -* The corresponding feature gate is no longer needed. -* Stable versions of features will appear in released software for many subsequent versions. - -## List of feature gates {#feature-gates} - -Each feature gate is designed for enabling/disabling a specific feature: - -- `AdmissionWebhookMatchConditions`: Enable [match conditions](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchconditions) - on mutating & validating admission webhooks. -- `APIListChunking`: Enable API clients to retrieve (`LIST` or `GET`) - resources from the API server in chunks. -- `APIPriorityAndFairness`: Enable managing request concurrency with - prioritization and fairness at each server. (Renamed from `RequestManagement`) -- `APIResponseCompression`: Compress the API responses for `LIST` or `GET` requests. -- `APISelfSubjectReview`: Activate the `SelfSubjectReview` API, which allows users - to see the requesting subject's authentication information. - See [API access to authentication information for a client](/docs/reference/access-authn-authz/authentication/#self-subject-review) - for more details. -- `APIServerIdentity`: Assign each API server an ID in a cluster, using a [Lease](/docs/concepts/architecture/leases). -- `APIServerTracing`: Add support for distributed tracing in the API server. - See [Traces for Kubernetes System Components](/docs/concepts/cluster-administration/system-traces) for more details. -- `AggregatedDiscoveryEndpoint`: Enable a single HTTP endpoint `/discovery/` which - supports native HTTP caching with ETags containing all APIResources known to the API server. -- `AnyVolumeDataSource`: Enable use of any custom resource as the `DataSource` of a - {{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}. -- `AppArmor`: Enable use of AppArmor mandatory access control for Pods running on Linux nodes. - See [AppArmor Tutorial](/docs/tutorials/security/apparmor/) for more details. -- `CPUManager`: Enable container-level CPU affinity support, see - [CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). -- `CPUManagerPolicyAlphaOptions`: This allows fine-tuning of CPUManager policies, - experimental, Alpha-quality options. - This feature gate guards *a group* of CPUManager options whose quality level is alpha. - This feature gate will never graduate to beta or stable. -- `CPUManagerPolicyBetaOptions`: This allows fine-tuning of CPUManager policies, - experimental, Beta-quality options. - This feature gate guards *a group* of CPUManager options whose quality level is beta. - This feature gate will never graduate to stable. -- `CPUManagerPolicyOptions`: Allow fine-tuning of CPUManager policies. -- `CSIMigrationAzureFile`: Enables shims and translation logic to route volume - operations from the Azure-File in-tree plugin to AzureFile CSI plugin. - Supports falling back to in-tree AzureFile plugin for mount operations to - nodes that have the feature disabled or that do not have AzureFile CSI plugin - installed and configured.
Does not support falling back for provision - operations; for those, the CSI plugin must be installed and configured. - Requires the CSIMigration feature flag to be enabled. -- `CSIMigrationRBD`: Enables shims and translation logic to route volume - operations from the RBD in-tree plugin to Ceph RBD CSI plugin. Requires the - CSIMigration and csiMigrationRBD feature flags to be enabled and the Ceph CSI plugin - to be installed and configured in the cluster. This flag has been deprecated in - favor of the `InTreePluginRBDUnregister` feature flag, which prevents the registration of - the in-tree RBD plugin. -- `CSIMigrationvSphere`: Enables shims and translation logic to route volume operations - from the vSphere in-tree plugin to vSphere CSI plugin. Supports falling back - to in-tree vSphere plugin for mount operations to nodes that have the feature - disabled or that do not have vSphere CSI plugin installed and configured. - Does not support falling back for provision operations; for those, the CSI - plugin must be installed and configured. Requires the CSIMigration feature flag to be - enabled. -- `CSIMigrationPortworx`: Enables shims and translation logic to route volume operations - from the Portworx in-tree plugin to Portworx CSI plugin. - Requires the Portworx CSI driver to be installed and configured in the cluster. -- `CSINodeExpandSecret`: Enable passing secret authentication data to a CSI driver for use - during a `NodeExpandVolume` CSI operation. -- `CSIVolumeHealth`: Enable support for CSI volume health monitoring on nodes. -- `CloudControllerManagerWebhook`: Enable webhooks in cloud controller manager. -- `CloudDualStackNodeIPs`: Enables dual-stack `kubelet --node-ip` with external cloud providers. - See [Configure IPv4/IPv6 dual-stack](/docs/concepts/services-networking/dual-stack/#configure-ipv4-ipv6-dual-stack) - for more details. -- `ClusterTrustBundle`: Enable ClusterTrustBundle objects and kubelet integration. -- `ComponentSLIs`: Enable the `/metrics/slis` endpoint on Kubernetes components like - kubelet, kube-scheduler, kube-proxy, kube-controller-manager, and cloud-controller-manager, - allowing you to scrape health check metrics. -- `ConsistentHTTPGetHandlers`: Normalize HTTP GET URL and header passing for lifecycle - handlers with probers. -- `ConsistentListFromCache`: Allow the API server to serve consistent lists from cache. -- `ContainerCheckpoint`: Enables the kubelet `checkpoint` API. - See [Kubelet Checkpoint API](/docs/reference/node/kubelet-checkpoint-api/) for more details. -- `ContextualLogging`: When you enable this feature gate, Kubernetes components that support - contextual logging add extra detail to log output. -- `CronJobsScheduledAnnotation`: Set the scheduled job time as an - {{< glossary_tooltip text="annotation" term_id="annotation" >}} on Jobs that were created - on behalf of a CronJob. -- `CronJobTimeZone`: Allow the use of the `timeZone` optional field in [CronJobs](/docs/concepts/workloads/controllers/cron-jobs/). -- `CRDValidationRatcheting`: Enable updates to custom resources to contain - violations of their OpenAPI schema if the offending portions of the resource - update did not change. See [Validation Ratcheting](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting) for more details. -- `CrossNamespaceVolumeDataSource`: Enable the use of a cross-namespace volume data source, - which allows you to specify a source namespace in the `dataSourceRef` field of a - PersistentVolumeClaim.
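  As a minimal, illustrative sketch of such a claim (the names here are hypothetical, and the source namespace must also authorize the reference, in the current design via a Gateway API `ReferenceGrant`):

  ```yaml
  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: restored-claim
    namespace: app-team
  spec:
    accessModes: ["ReadWriteOnce"]
    resources:
      requests:
        storage: 10Gi
    dataSourceRef:
      apiGroup: snapshot.storage.k8s.io
      kind: VolumeSnapshot
      name: nightly-snapshot
      namespace: backup-team  # cross-namespace source; gated by CrossNamespaceVolumeDataSource
  ```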
-- `CustomCPUCFSQuotaPeriod`: Enable nodes to change `cpuCFSQuotaPeriod` in - [kubelet config](/docs/tasks/administer-cluster/kubelet-config-file/). -- `CustomResourceValidationExpressions`: Enable expression language validation in CRDs, - which validates custom resources based on validation rules written in - the `x-kubernetes-validations` extension (as sketched below). -- `DaemonSetUpdateSurge`: Enables DaemonSet workloads to maintain - availability during an update, per node. - See [Perform a Rolling Update on a DaemonSet](/docs/tasks/manage-daemon/update-daemon-set/). -- `DefaultHostNetworkHostPortsInPodTemplates`: Changes when the default value of - `PodSpec.containers[*].ports[*].hostPort` - is assigned. The default is to only set a default value in Pods. - Enabling this means a default will be assigned even to embedded - PodSpecs (e.g. in a Deployment), which is the historical default. -- `DevicePluginCDIDevices`: Enable support for CDI device IDs in the - [Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) API. -- `DisableCloudProviders`: Disables any functionality in `kube-apiserver`, - `kube-controller-manager` and `kubelet` related to the `--cloud-provider` - component flag. -- `DisableKubeletCloudCredentialProviders`: Disable the in-tree functionality in kubelet - to authenticate to a cloud provider container registry for image pull credentials. -- `DownwardAPIHugePages`: Enables usage of hugepages in the - [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). -- `DynamicResourceAllocation`: Enables support for resources with custom parameters and a lifecycle - that is independent of a Pod. -- `ElasticIndexedJob`: Enables Indexed Jobs to be scaled up or down by mutating both - `spec.completions` and `spec.parallelism` together such that `spec.completions == spec.parallelism`. - See docs on [elastic Indexed Jobs](/docs/concepts/workloads/controllers/job#elastic-indexed-jobs) - for more details. -- `EfficientWatchResumption`: Allows for storage-originated bookmark (progress - notify) events to be delivered to users. This applies only to watch operations. -- `EventedPLEG`: Enable support for the kubelet to receive container life cycle events from the - {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} via - an extension to {{< glossary_tooltip text="CRI" term_id="cri" >}}. - (PLEG is an abbreviation for “Pod lifecycle event generator”). - For this feature to be useful, you also need to enable support for container lifecycle events - in each container runtime running in your cluster. If the container runtime does not announce - support for container lifecycle events then the kubelet automatically switches to the legacy - generic PLEG mechanism, even if you have this feature gate enabled. -- `ExecProbeTimeout`: Ensure kubelet respects exec probe timeouts. - This feature gate exists in case any of your existing workloads depend on a - now-corrected fault where Kubernetes ignored exec probe timeouts. See - [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). -- `ExpandedDNSConfig`: Enable kubelet and kube-apiserver to allow more DNS - search paths and a longer list of DNS search paths. This feature requires container - runtime support (containerd v1.5.6 or higher, CRI-O v1.22 or higher). See - [Expanded DNS Configuration](/docs/concepts/services-networking/dns-pod-service/#expanded-dns-configuration).
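  To make the `x-kubernetes-validations` extension mentioned under `CustomResourceValidationExpressions` above concrete, here is a minimal sketch of a CEL rule inside a CRD's structural schema (the field names are illustrative):

  ```yaml
  # Fragment of a CustomResourceDefinition's versions[].schema
  openAPIV3Schema:
    type: object
    properties:
      spec:
        type: object
        # CEL rules evaluated against the object; `self` is the spec being validated
        x-kubernetes-validations:
        - rule: "self.minReplicas <= self.replicas"
          message: "replicas must be greater than or equal to minReplicas"
        properties:
          minReplicas:
            type: integer
          replicas:
            type: integer
  ```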
-- `ExperimentalHostUserNamespaceDefaulting`: Enables defaulting the user - namespace to host. This is for containers that are using other host namespaces, - host mounts, or containers that are privileged or using specific non-namespaced - capabilities (e.g. `MKNOD`, `SYS_MODULE`, etc.). This should only be enabled - if user namespace remapping is enabled in the Docker daemon. -- `GracefulNodeShutdown`: Enables support for graceful shutdown in kubelet. - During a system shutdown, the kubelet will attempt to detect the shutdown event - and gracefully terminate pods running on the node. See - [Graceful Node Shutdown](/docs/concepts/architecture/nodes/#graceful-node-shutdown) - for more details. -- `GracefulNodeShutdownBasedOnPodPriority`: Enables the kubelet to check Pod priorities - when shutting down a node gracefully. -- `GRPCContainerProbe`: Enables the gRPC probe method for {Liveness,Readiness,Startup}Probe. - See [Configure Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-grpc-liveness-probe). -- `HonorPVReclaimPolicy`: Honor the persistent volume reclaim policy when it is `Delete`, irrespective of PV-PVC deletion ordering. - For more details, check the - [PersistentVolume deletion protection finalizer](/docs/concepts/storage/persistent-volumes/#persistentvolume-deletion-protection-finalizer) - documentation. -- `HPAContainerMetrics`: Enable the `HorizontalPodAutoscaler` to scale based on - metrics from individual containers in target pods. -- `HPAScaleToZero`: Enables setting `minReplicas` to 0 for `HorizontalPodAutoscaler` - resources when using custom or external metrics. -- `IPTablesOwnershipCleanup`: This causes the kubelet to no longer create legacy iptables rules. -- `InPlacePodVerticalScaling`: Enables in-place Pod vertical scaling. -- `InTreePluginAWSUnregister`: Stops registering the aws-ebs in-tree plugin in kubelet - and volume controllers. -- `InTreePluginAzureDiskUnregister`: Stops registering the azuredisk in-tree plugin in kubelet - and volume controllers. -- `InTreePluginAzureFileUnregister`: Stops registering the azurefile in-tree plugin in kubelet - and volume controllers. -- `InTreePluginGCEUnregister`: Stops registering the gce-pd in-tree plugin in kubelet - and volume controllers. -- `InTreePluginOpenStackUnregister`: Stops registering the OpenStack cinder in-tree plugin in kubelet - and volume controllers. -- `InTreePluginPortworxUnregister`: Stops registering the Portworx in-tree plugin in kubelet - and volume controllers. -- `InTreePluginRBDUnregister`: Stops registering the RBD in-tree plugin in kubelet - and volume controllers. -- `InTreePluginvSphereUnregister`: Stops registering the vSphere in-tree plugin in kubelet - and volume controllers. -- `JobMutableNodeSchedulingDirectives`: Allows updating node scheduling directives in - the pod template of [Job](/docs/concepts/workloads/controllers/job). -- `JobBackoffLimitPerIndex`: Allows specifying the maximum number of pod - retries per index in Indexed jobs. -- `JobPodFailurePolicy`: Allow users to specify handling of pod failures based on container - exit codes and pod conditions. -- `JobPodReplacementPolicy`: Allows you to specify pod replacement for terminating pods in a [Job](/docs/concepts/workloads/controllers/job). -- `JobReadyPods`: Enables tracking the number of Pods that have a `Ready` - [condition](/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions).
- The count of `Ready` pods is recorded in the - [status](/docs/reference/kubernetes-api/workload-resources/job-v1/#JobStatus) - of a [Job](/docs/concepts/workloads/controllers/job). -- `JobTrackingWithFinalizers`: Enables tracking [Job](/docs/concepts/workloads/controllers/job) - completions without relying on Pods remaining in the cluster indefinitely. - The Job controller uses Pod finalizers and a field in the Job status to keep - track of the finished Pods to count towards completion. -- `KMSv1`: Enables KMS v1 API for encryption at rest. See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. -- `KMSv2`: Enables KMS v2 API for encryption at rest. See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. -- `KMSv2KDF`: Enables KMS v2 to generate single-use data encryption keys. - See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. - If the `KMSv2` feature gate is not enabled in your cluster, the value of the `KMSv2KDF` feature gate has no effect. -- `KubeProxyDrainingTerminatingNodes`: Implement connection draining for - terminating nodes for `externalTrafficPolicy: Cluster` services. -- `KubeletCgroupDriverFromCRI`: Enable detection of the kubelet cgroup driver - configuration option from the {{< glossary_tooltip text="CRI" term_id="cri" >}}. - You can use this feature gate on nodes with a kubelet that supports the feature gate - and where there is a CRI container runtime that supports the `RuntimeConfig` - CRI call. If both CRI and kubelet support this feature, the kubelet ignores the - `cgroupDriver` configuration setting (or deprecated `--cgroup-driver` command - line argument). If you enable this feature gate and the container runtime - doesn't support it, the kubelet falls back to using the driver configured using - the `cgroupDriver` configuration setting. - See [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver) - for more details. -- `KubeletInUserNamespace`: Enables support for running kubelet in a - {{< glossary_tooltip text="user namespace" term_id="userns" >}}. - See [Running Kubernetes Node Components as a Non-root User](/docs/tasks/administer-cluster/kubelet-in-userns/). -- `KubeletPodResources`: Enable the kubelet's pod resources gRPC endpoint. See - [Support Device Monitoring](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md) - for more details. -- `KubeletPodResourcesGet`: Enable the `Get` gRPC endpoint on the kubelet's pod resources API. - This API augments the [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources). -- `KubeletPodResourcesGetAllocatable`: Enable the kubelet's pod resources - `GetAllocatableResources` functionality. This API augments the - [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) - with information about the allocatable resources, enabling clients to properly - track the free compute resources on a node. -- `KubeletPodResourcesDynamicResources`: Extend the kubelet's pod resources gRPC endpoint - to include resources allocated in `ResourceClaims` via the `DynamicResourceAllocation` API. - See [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) for more details. -- `KubeletTracing`: Add support for distributed tracing in the kubelet.
-  When enabled, the kubelet CRI interface and authenticated HTTP servers are instrumented to generate
-  OpenTelemetry trace spans.
-  See [Traces for Kubernetes System Components](/docs/concepts/cluster-administration/system-traces) for more details.
-- `LegacyServiceAccountTokenNoAutoGeneration`: Stop auto-generation of Secret-based
-  [service account tokens](/docs/concepts/security/service-accounts/#get-a-token).
-- `LegacyServiceAccountTokenCleanUp`: Enable cleaning up Secret-based
-  [service account tokens](/docs/concepts/security/service-accounts/#get-a-token)
-  when they have not been used for a specified duration (one year by default).
-- `LegacyServiceAccountTokenTracking`: Track usage of Secret-based
-  [service account tokens](/docs/concepts/security/service-accounts/#get-a-token).
-- `LocalStorageCapacityIsolationFSQuotaMonitoring`: When `LocalStorageCapacityIsolation`
-  is enabled for
-  [local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/)
-  and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir)
-  supports project quotas and they are enabled, use project quotas to monitor
-  [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than
-  a filesystem walk, for better performance and accuracy.
-- `LogarithmicScaleDown`: Enable semi-random selection of pods to evict on controller scaledown
-  based on logarithmic bucketing of pod timestamps.
-- `LoggingAlphaOptions`: Allow fine-tuning of experimental, alpha-quality logging options.
-- `LoggingBetaOptions`: Allow fine-tuning of experimental, beta-quality logging options.
-- `MatchLabelKeysInPodTopologySpread`: Enable the `matchLabelKeys` field for
-  [Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/).
-- `MaxUnavailableStatefulSet`: Enables setting the `maxUnavailable` field for the
-  [rolling update strategy](/docs/concepts/workloads/controllers/statefulset/#rolling-updates)
-  of a StatefulSet. The field specifies the maximum number of Pods
-  that can be unavailable during the update.
-- `MemoryManager`: Allows setting memory affinity for a container based on
-  NUMA topology.
-- `MemoryQoS`: Enable memory protection and usage throttling on pods / containers using the
-  cgroup v2 memory controller.
-- `MinDomainsInPodTopologySpread`: Enable `minDomains` in
-  [Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/).
-- `MinimizeIPTablesRestore`: Enables performance improvements
-  in the kube-proxy iptables mode.
-- `MultiCIDRRangeAllocator`: Enables the MultiCIDR range allocator.
-- `MultiCIDRServiceAllocator`: Track IP address allocations for Service cluster IPs using IPAddress objects.
-- `NewVolumeManagerReconstruction`: Enables improved discovery of mounted volumes during kubelet
-  startup. Since this code has been significantly refactored, you can opt out in case the kubelet
-  gets stuck at startup or is not unmounting volumes from terminated Pods. Note that this
-  refactoring was behind the `SELinuxMountReadWriteOncePod` alpha feature gate in Kubernetes 1.25.
-
-  Before Kubernetes v1.25, the kubelet used different default behavior for discovering mounted
-  volumes during kubelet startup. If you disable this feature gate (it is enabled by default), you select
-  the legacy discovery behavior.
-
-  In Kubernetes v1.25 and v1.26, this behavior toggle was part of the `SELinuxMountReadWriteOncePod`
-  feature gate.
-- `NodeInclusionPolicyInPodTopologySpread`: Enable using `nodeAffinityPolicy` and `nodeTaintsPolicy` in
-  [Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/)
-  when calculating pod topology spread skew.
-- `NodeLogQuery`: Enables querying logs of node services using the `/logs` endpoint.
-- `NodeOutOfServiceVolumeDetach`: When a Node is marked out-of-service using the
-  `node.kubernetes.io/out-of-service` taint, Pods on the node will be forcefully deleted
-  if they cannot tolerate this taint, and the volume detach operations for Pods terminating
-  on the node will happen immediately. The deleted Pods can recover quickly on different nodes.
-- `NodeSwap`: Enable the kubelet to allocate swap memory for Kubernetes workloads on a node.
-  Must be used with `KubeletConfiguration.failSwapOn` set to false.
-  For more details, please see [swap memory](/docs/concepts/architecture/nodes/#swap-memory).
-- `OpenAPIEnums`: Enables populating "enum" fields of OpenAPI schemas in the
-  spec returned from the API server.
-- `OpenAPIV3`: Enables the API server to publish OpenAPI v3.
-- `PDBUnhealthyPodEvictionPolicy`: Enables the `unhealthyPodEvictionPolicy` field of a `PodDisruptionBudget`. This specifies
-  when unhealthy pods should be considered for eviction. Please see [Unhealthy Pod Eviction Policy](/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy)
-  for more details.
-- `PersistentVolumeLastPhaseTransitionTime`: Adds a new field to PersistentVolume
-  which holds a timestamp of when the volume last transitioned its phase.
-- `PodAndContainerStatsFromCRI`: Configure the kubelet to gather container and pod stats from the CRI container runtime rather than gathering them from cAdvisor.
-  As of 1.26, this also includes gathering metrics from CRI and emitting them over `/metrics/cadvisor` (rather than having cAdvisor emit them directly).
-- `PodDeletionCost`: Enable the [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost)
-  feature which allows users to influence ReplicaSet downscaling order.
-- `PodDisruptionConditions`: Enables support for appending a dedicated pod condition indicating that the pod is being deleted due to a disruption.
-- `PodHostIPs`: Enable the `status.hostIPs` field for pods and the {{< glossary_tooltip term_id="downward-api" text="downward API" >}}.
-  The field lets you expose host IP addresses to workloads.
-- `PodIndexLabel`: Enables the Job controller and StatefulSet controller to add the pod index as a label when creating new pods. See [Job completion mode docs](/docs/concepts/workloads/controllers/job#completion-mode) and [StatefulSet pod index label docs](/docs/concepts/workloads/controllers/statefulset/#pod-index-label) for more details.
-- `PodReadyToStartContainersCondition`: Enable the kubelet to mark the [PodReadyToStartContainers](/docs/concepts/workloads/pods/pod-lifecycle/#pod-has-network)
-  condition on pods. This was previously (1.25-1.27) known as `PodHasNetworkCondition`.
-- `PodSchedulingReadiness`: Enable setting the `schedulingGates` field to control a Pod's [scheduling readiness](/docs/concepts/scheduling-eviction/pod-scheduling-readiness).
-- `ProbeTerminationGracePeriod`: Enable [setting probe-level
-  `terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds)
-  on pods.
-  See the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period)
-  for more details.
-- `ProcMountType`: Enables control over the type of proc mounts for containers
-  by setting the `procMount` field of a SecurityContext.
-- `ProxyTerminatingEndpoints`: Enable the kube-proxy to handle terminating
-  endpoints when `ExternalTrafficPolicy=Local`.
-- `QOSReserved`: Allows resource reservations at the QoS level preventing pods
-  at lower QoS levels from bursting into resources requested at higher QoS levels
-  (memory only for now).
-- `ReadWriteOncePod`: Enables the usage of the `ReadWriteOncePod` PersistentVolume
-  access mode.
-- `RecoverVolumeExpansionFailure`: Enables users to edit their PVCs to smaller
-  sizes so that they can recover from previously issued volume expansion failures.
-  See [Recovering from Failure when Expanding Volumes](/docs/concepts/storage/persistent-volumes/#recovering-from-failure-when-expanding-volumes)
-  for more details.
-- `RemainingItemCount`: Allow the API servers to show a count of remaining
-  items in the response to a
-  [chunking list request](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks).
-- `RemoveSelfLink`: Sets the `.metadata.selfLink` field to blank (empty string) for all
-  objects and collections. This field has been deprecated since the Kubernetes v1.16
-  release. When this feature is enabled, the `.metadata.selfLink` field remains part of
-  the Kubernetes API, but is always unset.
-- `RetroactiveDefaultStorageClass`: Allow assigning StorageClass to unbound PVCs retroactively.
-- `RotateKubeletServerCertificate`: Enable the rotation of the server TLS certificate on the kubelet.
-  See [kubelet configuration](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration)
-  for more details.
-- `SELinuxMountReadWriteOncePod`: Speeds up container startup by allowing the kubelet to mount volumes
-  for a Pod directly with the correct SELinux label instead of changing each file on the volumes
-  recursively. The initial implementation focused on ReadWriteOncePod volumes.
-- `SchedulerQueueingHints`: Enables the scheduler's _queueing hints_ enhancement,
-  which helps reduce unnecessary requeueing.
-- `SeccompDefault`: Enables the use of `RuntimeDefault` as the default seccomp profile
-  for all workloads.
-  The seccomp profile is specified in the `securityContext` of a Pod and/or a Container.
-- `SecurityContextDeny`: This gate signals that the `SecurityContextDeny` admission controller is deprecated.
-- `ServerSideApply`: Enables the [Server Side Apply (SSA)](/docs/reference/using-api/server-side-apply/)
-  feature on the API Server.
-- `ServerSideFieldValidation`: Enables server-side field validation. This means the validation
-  of resource schema is performed at the API server side rather than the client side
-  (for example, by the `kubectl create` or `kubectl apply` command line tools).
-- `SidecarContainers`: Allow setting the `restartPolicy` of an init container to
-  `Always` so that the container becomes a sidecar container (restartable init containers).
-  See
-  [Sidecar containers and restartPolicy](/docs/concepts/workloads/pods/init-containers/#sidecar-containers-and-restartpolicy)
-  for more details.
-- `SizeMemoryBackedVolumes`: Enable kubelets to determine the size limit for
-  memory-backed volumes (mainly `emptyDir` volumes).
-- `SkipReadOnlyValidationGCE`: Skip validation for GCE; this will be enabled in the
-  next version.
-- `StableLoadBalancerNodeSet`: Enables fewer load balancer re-configurations by
-  the service controller (KCCM) in response to changes in node state.
-- `StatefulSetStartOrdinal`: Allow configuration of the start ordinal in a
-  StatefulSet. See
-  [Start ordinal](/docs/concepts/workloads/controllers/statefulset/#start-ordinal)
-  for more details.
-- `StorageVersionAPI`: Enable the
-  [storage version API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#storageversion-v1alpha1-internal-apiserver-k8s-io).
-- `StorageVersionHash`: Allow API servers to expose the storage version hash in
-  discovery.
-- `TopologyAwareHints`: Enables topology aware routing based on topology hints
-  in EndpointSlices. See [Topology Aware
-  Hints](/docs/concepts/services-networking/topology-aware-hints/) for more
-  details.
-- `TopologyManager`: Enable a mechanism to coordinate fine-grained hardware resource
-  assignments for different components in Kubernetes. See
-  [Control Topology Management Policies on a node](/docs/tasks/administer-cluster/topology-manager/).
-- `TopologyManagerPolicyAlphaOptions`: Allow fine-tuning of topology manager policies,
-  experimental, Alpha-quality options.
-  This feature gate guards *a group* of topology manager options whose quality level is alpha.
-  This feature gate will never graduate to beta or stable.
-- `TopologyManagerPolicyBetaOptions`: Allow fine-tuning of topology manager policies,
-  experimental, Beta-quality options.
-  This feature gate guards *a group* of topology manager options whose quality level is beta.
-  This feature gate will never graduate to stable.
-- `TopologyManagerPolicyOptions`: Allow fine-tuning of topology manager policies.
-- `UnknownVersionInteroperabilityProxy`: Proxy resource requests to the correct peer kube-apiserver when
-  multiple kube-apiservers exist at varied versions.
-  See [Mixed version proxy](/docs/concepts/architecture/mixed-version-proxy/) for more information.
-- `UserNamespacesSupport`: Enable user namespace support for Pods.
-  Before Kubernetes v1.28, this feature gate was named `UserNamespacesStatelessPodsSupport`.
-- `ValidatingAdmissionPolicy`: Enable [ValidatingAdmissionPolicy](/docs/reference/access-authn-authz/validating-admission-policy/) support for CEL validations to be used in Admission Control.
-- `VolumeCapacityPriority`: Enable support for prioritizing nodes in different
-  topologies based on available PV capacity.
-- `WatchBookmark`: Enable support for watch bookmark events.
-- `WatchList`: Enable support for [streaming initial state of objects in watch requests](/docs/reference/using-api/api-concepts/#streaming-lists).
-- `WinDSR`: Allows kube-proxy to create DSR load balancers for Windows.
-- `WinOverlay`: Allows kube-proxy to run in overlay mode for Windows.
-- `WindowsHostNetwork`: Enables support for joining Windows containers to a host's network namespace.
-
-
-## {{% heading "whatsnext" %}}
-
-* The [deprecation policy](/docs/reference/using-api/deprecation-policy/) for Kubernetes explains
-  the project's approach to removing features and components.
-* Since Kubernetes 1.24, new beta APIs are not enabled by default. When enabling a beta
-  feature, you will also need to enable any associated API resources.
-  For example, to enable a particular resource like
-  `storage.k8s.io/v1beta1/csistoragecapacities`, set `--runtime-config=storage.k8s.io/v1beta1/csistoragecapacities`.
-  See [API Versioning](/docs/reference/using-api/#api-versioning) for more details on the command line flags.
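To make the `--runtime-config` example above concrete, here is a minimal, illustrative sketch; the gate and resource chosen are examples only, so adapt them to the feature you are enabling:

```shell
# Illustrative only: enable a feature gate together with the beta API
# resource it relies on. Remaining kube-apiserver flags are omitted.
kube-apiserver \
  --feature-gates=CSIStorageCapacity=true \
  --runtime-config=storage.k8s.io/v1beta1/csistoragecapacities
```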
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/accelerators.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/accelerators.md
new file mode 100644
index 0000000000000..99e64dc464bd9
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/accelerators.md
@@ -0,0 +1,13 @@
+---
+# Removed from Kubernetes
+title: Accelerators
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Provided an early form of plugin to enable Nvidia GPU support when using
+Docker Engine; no longer available. See
+[Device Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) for
+an alternative.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/admission-webhook-match-conditions.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/admission-webhook-match-conditions.md
new file mode 100644
index 0000000000000..26e50d79cfdb0
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/admission-webhook-match-conditions.md
@@ -0,0 +1,9 @@
+---
+title: AdmissionWebhookMatchConditions
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable [match conditions](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchconditions)
+on mutating & validating admission webhooks.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/advanced-auditing.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/advanced-auditing.md
new file mode 100644
index 0000000000000..a483990e6be48
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/advanced-auditing.md
@@ -0,0 +1,8 @@
+---
+title: AdvancedAuditing
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable [advanced auditing](/docs/tasks/debug/debug-cluster/audit/#advanced-audit).
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/affinity-in-annotations.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/affinity-in-annotations.md
new file mode 100644
index 0000000000000..712bac66296e6
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/affinity-in-annotations.md
@@ -0,0 +1,11 @@
+---
+# Removed from Kubernetes
+title: AffinityInAnnotations
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enable setting
+[Pod affinity or anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity).
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/aggregated-discovery-endpoint.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/aggregated-discovery-endpoint.md
new file mode 100644
index 0000000000000..9b01ba25103d5
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/aggregated-discovery-endpoint.md
@@ -0,0 +1,9 @@
+---
+title: AggregatedDiscoveryEndpoint
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable a single HTTP endpoint `/discovery/` which
+supports native HTTP caching with ETags containing all APIResources known to the API server.
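As a rough sketch of what aggregated discovery looks like from a client: the document is requested through content negotiation on `/apis`. The `Accept` parameters below assume the beta form of the discovery API, and the token and server address are placeholders; treat this as an illustration, not a contract:

```shell
# Sketch: fetch the aggregated discovery document in a single request.
curl --cacert ca.crt --header "Authorization: Bearer ${TOKEN}" \
  --header 'Accept: application/json;v=v2beta1;g=apidiscovery.k8s.io;as=APIGroupDiscoveryList' \
  'https://<kube-apiserver>:6443/apis'
```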
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-ext-traffic-local-endpoints.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-ext-traffic-local-endpoints.md
new file mode 100644
index 0000000000000..22e1a731cb172
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-ext-traffic-local-endpoints.md
@@ -0,0 +1,10 @@
+---
+# Removed from Kubernetes
+title: AllowExtTrafficLocalEndpoints
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enable a service to route external requests to node local endpoints.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-insecure-backend-proxy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-insecure-backend-proxy.md
new file mode 100644
index 0000000000000..df3bc91001c19
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-insecure-backend-proxy.md
@@ -0,0 +1,11 @@
+---
+# Removed from Kubernetes
+title: AllowInsecureBackendProxy
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enable users to skip TLS verification of
+kubelets on Pod log requests.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-service-lb-status-on-non-lb.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-service-lb-status-on-non-lb.md
new file mode 100644
index 0000000000000..2f76145a25042
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/allow-service-lb-status-on-non-lb.md
@@ -0,0 +1,8 @@
+---
+title: AllowServiceLBStatusOnNonLB
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables `.status.ingress.loadBalancer` to be set on Services of types other than `LoadBalancer`.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/any-volume-data-source.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/any-volume-data-source.md
new file mode 100644
index 0000000000000..6ccfbd121f1ae
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/any-volume-data-source.md
@@ -0,0 +1,9 @@
+---
+title: AnyVolumeDataSource
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable use of any custom resource as the `DataSource` of a
+{{< glossary_tooltip text="PVC" term_id="persistent-volume-claim" >}}.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-list-chunking.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-list-chunking.md
new file mode 100644
index 0000000000000..f0883ad53e506
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-list-chunking.md
@@ -0,0 +1,9 @@
+---
+title: APIListChunking
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable API clients to retrieve (`LIST` or `GET`)
+resources from the API server in chunks.
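For illustration, kubectl surfaces chunking through its `--chunk-size` flag, while raw API calls use `limit` and the `continue` token returned in the previous response:

```shell
# Retrieve Pods in chunks of 500 objects per request.
kubectl get pods --all-namespaces --chunk-size=500

# First chunk via the raw API; the response carries a `continue`
# token to pass on the follow-up request.
kubectl get --raw '/api/v1/pods?limit=500'
```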
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-priority-and-fairness.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-priority-and-fairness.md new file mode 100644 index 0000000000000..1585d0561f06f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-priority-and-fairness.md @@ -0,0 +1,9 @@ +--- +title: APIPriorityAndFairness +content_type: feature_gate +_build: + list: never + render: false +--- +Enable managing request concurrency with +prioritization and fairness at each server. (Renamed from `RequestManagement`) diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-response-compression.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-response-compression.md new file mode 100644 index 0000000000000..b106d6bcaad33 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-response-compression.md @@ -0,0 +1,8 @@ +--- +title: APIResponseCompression +content_type: feature_gate +_build: + list: never + render: false +--- +Compress the API responses for `LIST` or `GET` requests. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-self-subject-review.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-self-subject-review.md new file mode 100644 index 0000000000000..98de6394b3efb --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-self-subject-review.md @@ -0,0 +1,11 @@ +--- +title: APISelfSubjectReview +content_type: feature_gate +_build: + list: never + render: false +--- +Activate the `SelfSubjectReview` API which allows users +to see the requesting subject's authentication information. +See [API access to authentication information for a client](/docs/reference/access-authn-authz/authentication/#self-subject-review) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-identity.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-identity.md new file mode 100644 index 0000000000000..d232b7456d9af --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-identity.md @@ -0,0 +1,8 @@ +--- +title: APIServerIdentity +content_type: feature_gate +_build: + list: never + render: false +--- +Assign each API server an ID in a cluster, using a [Lease](/docs/concepts/architecture/leases). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-tracing.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-tracing.md new file mode 100644 index 0000000000000..c532d60d167f9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/api-server-tracing.md @@ -0,0 +1,9 @@ +--- +title: APIServerTracing +content_type: feature_gate +_build: + list: never + render: false +--- +Add support for distributed tracing in the API server. +See [Traces for Kubernetes System Components](/docs/concepts/cluster-administration/system-traces) for more details. 
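A minimal sketch of enabling API server tracing, assuming an OTLP gRPC collector is listening on `localhost:4317`; the file is passed to kube-apiserver via `--tracing-config-file`, and the linked page is authoritative for the format:

```yaml
apiVersion: apiserver.config.k8s.io/v1alpha1
kind: TracingConfiguration
# Assumed OTLP collector address; omit to use the default endpoint.
endpoint: localhost:4317
# Sample roughly 1 in every 10,000 requests.
samplingRatePerMillion: 100
```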
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/apparmor.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/apparmor.md new file mode 100644 index 0000000000000..b6abcab362e97 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/apparmor.md @@ -0,0 +1,9 @@ +--- +title: AppArmor +content_type: feature_gate +_build: + list: never + render: false +--- +Enable use of AppArmor mandatory access control for Pods running on Linux nodes. +See [AppArmor Tutorial](/docs/tutorials/security/apparmor/) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/attach-volume-limit.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/attach-volume-limit.md new file mode 100644 index 0000000000000..c2a84d7ac619a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/attach-volume-limit.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: AttachVolumeLimit +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable volume plugins to report limits on number of volumes +that can be attached to a node. +See [dynamic volume limits](/docs/concepts/storage/storage-limits/#dynamic-volume-limits) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/balance-attached-node-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/balance-attached-node-volumes.md new file mode 100644 index 0000000000000..00644f7ff7b05 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/balance-attached-node-volumes.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: BalanceAttachedNodeVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Include volume count on node to be considered for +balanced resource allocation while scheduling. A node which has closer CPU, +memory utilization, and volume count is favored by the scheduler while making decisions. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/block-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/block-volume.md new file mode 100644 index 0000000000000..d4a49752edab3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/block-volume.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: BlockVolume +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the definition and consumption of raw block devices in Pods. +See [Raw Block Volume Support](/docs/concepts/storage/persistent-volumes/#raw-block-volume-support) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/bound-service-account-token-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/bound-service-account-token-volume.md new file mode 100644 index 0000000000000..0c5a322c31a2e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/bound-service-account-token-volume.md @@ -0,0 +1,17 @@ +--- +# Removed from Kubernetes +title: BoundServiceAccountTokenVolume +content_type: feature_gate + +_build: + list: never + render: false +--- +Migrate ServiceAccount volumes to use a projected volume +consisting of a ServiceAccountTokenVolumeProjection. 
Cluster admins can use metric +`serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended +tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with +flag `--service-account-extend-token-expiration=false`. + +Check [Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-controller-manager-webhook.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-controller-manager-webhook.md new file mode 100644 index 0000000000000..efc3253049ddd --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-controller-manager-webhook.md @@ -0,0 +1,8 @@ +--- +title: CloudControllerManagerWebhook +content_type: feature_gate +_build: + list: never + render: false +--- +Enable webhooks in cloud controller manager. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-dual-stack-node-ips.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-dual-stack-node-ips.md new file mode 100644 index 0000000000000..648cc8cb7a4b6 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cloud-dual-stack-node-ips.md @@ -0,0 +1,10 @@ +--- +title: CloudDualStackNodeIPs +content_type: feature_gate +_build: + list: never + render: false +--- +Enables dual-stack `kubelet --node-ip` with external cloud providers. +See [Configure IPv4/IPv6 dual-stack](/docs/concepts/services-networking/dual-stack/#configure-ipv4-ipv6-dual-stack) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle-projection.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle-projection.md new file mode 100644 index 0000000000000..12d4e0104fd37 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle-projection.md @@ -0,0 +1,8 @@ +--- +title: ClusterTrustBundleProjection +content_type: feature_gate +_build: + list: never + render: false +--- +[`clusterTrustBundle` projected volume sources](/docs/concepts/storage/projected-volumes#clustertrustbundle). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle.md new file mode 100644 index 0000000000000..1a0453a20a0cc --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cluster-trust-bundle.md @@ -0,0 +1,8 @@ +--- +title: ClusterTrustBundle +content_type: feature_gate +_build: + list: never + render: false +--- +Enable ClusterTrustBundle objects and kubelet integration. 
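To show how the ClusterTrustBundle gates fit together, a Pod could consume a bundle through a projected volume source roughly like this; the bundle name is hypothetical, and the field layout follows the projected-volumes page linked above:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: ctb-example
spec:
  containers:
  - name: app
    image: busybox:1.36
    command: ["sleep", "3600"]
    volumeMounts:
    - name: trust-store
      mountPath: /var/run/trust
      readOnly: true
  volumes:
  - name: trust-store
    projected:
      sources:
      - clusterTrustBundle:
          # Hypothetical ClusterTrustBundle object name.
          name: example-org-root
          path: bundle.pem
```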
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/component-slis.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/component-slis.md
new file mode 100644
index 0000000000000..ea98167a6e247
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/component-slis.md
@@ -0,0 +1,10 @@
+---
+title: ComponentSLIs
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable the `/metrics/slis` endpoint on Kubernetes components like the
+kubelet, kube-scheduler, kube-proxy, kube-controller-manager, and cloud-controller-manager,
+allowing you to scrape health check metrics.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/configurable-fs-group-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/configurable-fs-group-policy.md
new file mode 100644
index 0000000000000..ace47535d09ac
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/configurable-fs-group-policy.md
@@ -0,0 +1,13 @@
+---
+# Removed from Kubernetes
+title: ConfigurableFSGroupPolicy
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Allows users to configure the volume permission change policy
+for fsGroups when mounting a volume in a Pod. See
+[Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)
+for more details.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-http-get-handlers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-http-get-handlers.md
new file mode 100644
index 0000000000000..9a686ab91a40a
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-http-get-handlers.md
@@ -0,0 +1,9 @@
+---
+title: ConsistentHTTPGetHandlers
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Normalize HTTP GET URL and header passing for lifecycle
+handlers to be consistent with probes.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-list-from-cache.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-list-from-cache.md
new file mode 100644
index 0000000000000..8007517673f4a
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/consistent-list-from-cache.md
@@ -0,0 +1,9 @@
+---
+title: ConsistentListFromCache
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Allow the API server to serve consistent lists from cache.
+
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/container-checkpoint.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/container-checkpoint.md
new file mode 100644
index 0000000000000..834598e163c29
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/container-checkpoint.md
@@ -0,0 +1,9 @@
+---
+title: ContainerCheckpoint
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables the kubelet `checkpoint` API.
+See [Kubelet Checkpoint API](/docs/reference/node/kubelet-checkpoint-api/) for more details.
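As a sketch of the `checkpoint` endpoint this gate enables (namespace, Pod, container, and credential files are placeholders; see the linked Kubelet Checkpoint API page for the authoritative request shape):

```shell
# Ask the kubelet on the node running Pod "web" to checkpoint
# its container "app" in the "default" namespace.
curl -X POST \
  --cacert ca.crt --cert kubelet-client.crt --key kubelet-client.key \
  'https://<node>:10250/checkpoint/default/web/app'
```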
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/contextual-logging.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/contextual-logging.md
new file mode 100644
index 0000000000000..a36db7b0f1d0d
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/contextual-logging.md
@@ -0,0 +1,9 @@
+---
+title: ContextualLogging
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+When you enable this feature gate, Kubernetes components that support
+contextual logging add extra detail to log output.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/controller-manager-leader-migration.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/controller-manager-leader-migration.md
new file mode 100644
index 0000000000000..3426337645ed4
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/controller-manager-leader-migration.md
@@ -0,0 +1,15 @@
+---
+# Removed from Kubernetes
+title: ControllerManagerLeaderMigration
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enables Leader Migration for
+[kube-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#initial-leader-migration-configuration) and
+[cloud-controller-manager](/docs/tasks/administer-cluster/controller-manager-leader-migration/#deploy-cloud-controller-manager)
+which allows a cluster operator to live migrate
+controllers from the kube-controller-manager into an external controller-manager
+(e.g. the cloud-controller-manager) in an HA cluster without downtime.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-alpha-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-alpha-options.md
new file mode 100644
index 0000000000000..a25106775540a
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-alpha-options.md
@@ -0,0 +1,11 @@
+---
+title: CPUManagerPolicyAlphaOptions
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+This allows fine-tuning of CPUManager policies with
+experimental, Alpha-quality options.
+This feature gate guards *a group* of CPUManager options whose quality level is alpha.
+This feature gate will never graduate to beta or stable.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-beta-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-beta-options.md
new file mode 100644
index 0000000000000..e930cd701044b
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-beta-options.md
@@ -0,0 +1,11 @@
+---
+title: CPUManagerPolicyBetaOptions
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+This allows fine-tuning of CPUManager policies with
+experimental, Beta-quality options.
+This feature gate guards *a group* of CPUManager options whose quality level is beta.
+This feature gate will never graduate to stable.
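As a rough sketch of how these option groups are consumed, a kubelet configuration might combine the gates with the static policy and one option; `full-pcpus-only` is only an example, and the CPU management policies page is authoritative for option names and their quality levels:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  CPUManagerPolicyOptions: true
  CPUManagerPolicyBetaOptions: true
# Policy options only take effect with the static policy.
cpuManagerPolicy: static
cpuManagerPolicyOptions:
  # Example option: only assign whole physical cores.
  full-pcpus-only: "true"
```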
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-options.md new file mode 100644 index 0000000000000..9875b4ecd1594 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager-policy-options.md @@ -0,0 +1,8 @@ +--- +title: CPUManagerPolicyOptions +content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of CPUManager policies. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager.md new file mode 100644 index 0000000000000..732898d0af3e2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cpu-manager.md @@ -0,0 +1,9 @@ +--- +title: CPUManager +content_type: feature_gate +_build: + list: never + render: false +--- +Enable container level CPU affinity support, see +[CPU Management Policies](/docs/tasks/administer-cluster/cpu-management-policies/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/crd-validation-ratcheting.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/crd-validation-ratcheting.md new file mode 100644 index 0000000000000..f0906090bcbd5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/crd-validation-ratcheting.md @@ -0,0 +1,11 @@ +--- +title: CRDValidationRatcheting +content_type: feature_gate +_build: + list: never + render: false +--- +Enable updates to custom resources to contain +violations of their OpenAPI schema if the offending portions of the resource +update did not change. See [Validation Ratcheting](/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#validation-ratcheting) for more details. + diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cri-container-log-rotation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cri-container-log-rotation.md new file mode 100644 index 0000000000000..46c954e7fbc8c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cri-container-log-rotation.md @@ -0,0 +1,15 @@ +--- +# Removed from Kubernetes +title: CRIContainerLogRotation +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable container log rotation for CRI container runtime. +The default max size of a log file is 10MB and the default max number of +log files allowed for a container is 5. +These values can be configured in the kubelet config. +See [logging at node level](/docs/concepts/cluster-administration/logging/#logging-at-the-node-level) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-controller-v2.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-controller-v2.md new file mode 100644 index 0000000000000..9136202d5970d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-controller-v2.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: CronJobControllerV2 +content_type: feature_gate + +_build: + list: never + render: false +--- +Use an alternative implementation of the +{{< glossary_tooltip text="CronJob" term_id="cronjob" >}} controller. Otherwise, +version 1 of the same controller is selected. 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-time-zone.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-time-zone.md
new file mode 100644
index 0000000000000..523b96bd87c9a
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-job-time-zone.md
@@ -0,0 +1,8 @@
+---
+title: CronJobTimeZone
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Allow the use of the optional `timeZone` field in [CronJobs](/docs/concepts/workloads/controllers/cron-jobs/).
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-jobs-scheduled-annotation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-jobs-scheduled-annotation.md
new file mode 100644
index 0000000000000..0f07c62d0776c
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cron-jobs-scheduled-annotation.md
@@ -0,0 +1,10 @@
+---
+title: CronJobsScheduledAnnotation
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Set the scheduled job time as an
+{{< glossary_tooltip text="annotation" term_id="annotation" >}} on Jobs that were created
+on behalf of a CronJob.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/cross-namespace-volume-data-source.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/cross-namespace-volume-data-source.md
new file mode 100644
index 0000000000000..fc871f5b49c0d
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/cross-namespace-volume-data-source.md
@@ -0,0 +1,10 @@
+---
+title: CrossNamespaceVolumeDataSource
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable the usage of cross-namespace volume data sources,
+allowing you to specify a source namespace in the `dataSourceRef` field of a
+PersistentVolumeClaim.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-block-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-block-volume.md
new file mode 100644
index 0000000000000..5ba4b057fabf5
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-block-volume.md
@@ -0,0 +1,12 @@
+---
+# Removed from Kubernetes
+title: CSIBlockVolume
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enable external CSI volume drivers to support block storage.
+See [`csi` raw block volume support](/docs/concepts/storage/volumes/#csi-raw-block-volume-support)
+for more details.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-driver-registry.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-driver-registry.md
new file mode 100644
index 0000000000000..8f05346580d12
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-driver-registry.md
@@ -0,0 +1,11 @@
+---
+# Removed from Kubernetes
+title: CSIDriverRegistry
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enable all logic related to the CSIDriver API object in
+`csi.storage.k8s.io`.
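Although this gate has since been removed (the API graduated), for orientation a CSIDriver object as served today under `storage.k8s.io/v1` looks roughly like this; the driver name is hypothetical:

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  # Hypothetical driver name; must match the name the driver reports.
  name: csi.example.com
spec:
  attachRequired: true
  podInfoOnMount: false
  volumeLifecycleModes:
  - Persistent
```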
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-inline-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-inline-volume.md new file mode 100644 index 0000000000000..64dcdc3a9412e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-inline-volume.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: CSIInlineVolume +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable CSI Inline volumes support for pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws-complete.md new file mode 100644 index 0000000000000..59ca910f6c5e7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws-complete.md @@ -0,0 +1,16 @@ +--- +# Removed from Kubernetes +title: CSIMigrationAWSComplete +content_type: feature_gate + +_build: + list: never + render: false +--- +Stops registering the EBS in-tree plugin in +kubelet and volume controllers and enables shims and translation logic to +route volume operations from the AWS-EBS in-tree plugin to EBS CSI plugin. +Requires CSIMigration and CSIMigrationAWS feature flags enabled and EBS CSI +plugin installed and configured on all nodes in the cluster. This flag has +been deprecated in favor of the `InTreePluginAWSUnregister` feature flag +which prevents the registration of in-tree EBS plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws.md new file mode 100644 index 0000000000000..2659f5764032c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-aws.md @@ -0,0 +1,15 @@ +--- +# Removed from Kubernetes +title: CSIMigrationAWS +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables shims and translation logic to route volume +operations from the AWS-EBS in-tree plugin to EBS CSI plugin. Supports +falling back to in-tree EBS plugin for mount operations to nodes that have +the feature disabled or that do not have EBS CSI plugin installed and +configured. Does not support falling back for provision operations, for those +the CSI plugin must be installed and configured. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk-complete.md new file mode 100644 index 0000000000000..a9d653dedd1a0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk-complete.md @@ -0,0 +1,17 @@ +--- +# Removed from Kubernetes +title: CSIMigrationAzureDiskComplete +content_type: feature_gate + +_build: + list: never + render: false +--- +Stops registering the Azure-Disk in-tree +plugin in kubelet and volume controllers and enables shims and translation +logic to route volume operations from the Azure-Disk in-tree plugin to +AzureDisk CSI plugin. Requires CSIMigration and CSIMigrationAzureDisk feature +flags enabled and AzureDisk CSI plugin installed and configured on all nodes +in the cluster. 
This flag has been deprecated in favor of the +`InTreePluginAzureDiskUnregister` feature flag which prevents the registration +of in-tree AzureDisk plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk.md new file mode 100644 index 0000000000000..100683eb5627a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-disk.md @@ -0,0 +1,16 @@ +--- +# Removed from Kubernetes +title: CSIMigrationAzureDisk +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables shims and translation logic to route volume +operations from the Azure-Disk in-tree plugin to AzureDisk CSI plugin. +Supports falling back to in-tree AzureDisk plugin for mount operations to +nodes that have the feature disabled or that do not have AzureDisk CSI plugin +installed and configured. Does not support falling back for provision +operations, for those the CSI plugin must be installed and configured. +Requires CSIMigration feature flag enabled. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file-complete.md new file mode 100644 index 0000000000000..83ee952f9a7e8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file-complete.md @@ -0,0 +1,17 @@ +--- +# Removed from Kubernetes +title: CSIMigrationAzureFileComplete +content_type: feature_gate + +_build: + list: never + render: false +--- +Stops registering the Azure-File in-tree +plugin in kubelet and volume controllers and enables shims and translation +logic to route volume operations from the Azure-File in-tree plugin to +AzureFile CSI plugin. Requires CSIMigration and CSIMigrationAzureFile feature +flags enabled and AzureFile CSI plugin installed and configured on all nodes +in the cluster. This flag has been deprecated in favor of the +`InTreePluginAzureFileUnregister` feature flag which prevents the registration + of in-tree AzureFile plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file.md new file mode 100644 index 0000000000000..b9d3405d4827b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-azure-file.md @@ -0,0 +1,14 @@ +--- +title: CSIMigrationAzureFile +content_type: feature_gate +_build: + list: never + render: false +--- +Enables shims and translation logic to route volume +operations from the Azure-File in-tree plugin to AzureFile CSI plugin. +Supports falling back to in-tree AzureFile plugin for mount operations to +nodes that have the feature disabled or that do not have AzureFile CSI plugin +installed and configured. Does not support falling back for provision +operations, for those the CSI plugin must be installed and configured. +Requires CSIMigration feature flag enabled. 
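Concretely, turning on one of these per-driver migrations meant setting the umbrella gate and the driver gate together on the affected components; a sketch (exact flag placement depends on how each component is deployed):

```shell
# Set on kube-controller-manager and on each kubelet.
--feature-gates=CSIMigration=true,CSIMigrationAzureFile=true
```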
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce-complete.md
new file mode 100644
index 0000000000000..fda56a6cd0f57
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce-complete.md
@@ -0,0 +1,16 @@
+---
+# Removed from Kubernetes
+title: CSIMigrationGCEComplete
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Stops registering the GCE-PD in-tree plugin in
+kubelet and volume controllers and enables shims and translation logic to
+route volume operations from the GCE-PD in-tree plugin to PD CSI plugin.
+Requires CSIMigration and CSIMigrationGCE feature flags enabled and PD CSI
+plugin installed and configured on all nodes in the cluster. This flag has
+been deprecated in favor of the `InTreePluginGCEUnregister` feature flag which
+prevents the registration of in-tree GCE PD plugin.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce.md
new file mode 100644
index 0000000000000..e28c626b6e998
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-gce.md
@@ -0,0 +1,14 @@
+---
+title: CSIMigrationGCE
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume
+operations from the GCE-PD in-tree plugin to PD CSI plugin. Supports falling
+back to in-tree GCE plugin for mount operations to nodes that have the
+feature disabled or that do not have PD CSI plugin installed and configured.
+Does not support falling back for provision operations, for those the CSI
+plugin must be installed and configured. Requires CSIMigration feature flag
+enabled.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack-complete.md
new file mode 100644
index 0000000000000..530a5386d72d2
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack-complete.md
@@ -0,0 +1,16 @@
+---
+# Removed from Kubernetes
+title: CSIMigrationOpenStackComplete
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Stops registering the Cinder in-tree plugin in
+kubelet and volume controllers and enables shims and translation logic to route
+volume operations from the Cinder in-tree plugin to Cinder CSI plugin.
+Requires CSIMigration and CSIMigrationOpenStack feature flags enabled and Cinder
+CSI plugin installed and configured on all nodes in the cluster. This flag has
+been deprecated in favor of the `InTreePluginOpenStackUnregister` feature flag
+which prevents the registration of in-tree OpenStack Cinder plugin.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack.md
new file mode 100644
index 0000000000000..224d719357808
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-open-stack.md
@@ -0,0 +1,16 @@
+---
+# Removed from Kubernetes
+title: CSIMigrationOpenStack
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume
+operations from the Cinder in-tree plugin to Cinder CSI plugin. Supports
+falling back to in-tree Cinder plugin for mount operations to nodes that have
+the feature disabled or that do not have Cinder CSI plugin installed and
+configured. Does not support falling back for provision operations, for those
+the CSI plugin must be installed and configured. Requires CSIMigration
+feature flag enabled.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-portworx.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-portworx.md
new file mode 100644
index 0000000000000..92739e96afc45
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-portworx.md
@@ -0,0 +1,10 @@
+---
+title: CSIMigrationPortworx
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume operations
+from the Portworx in-tree plugin to Portworx CSI plugin.
+Requires Portworx CSI driver to be installed and configured in the cluster.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-rbd.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-rbd.md
new file mode 100644
index 0000000000000..f086872827832
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration-rbd.md
@@ -0,0 +1,13 @@
+---
+title: CSIMigrationRBD
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume
+operations from the RBD in-tree plugin to Ceph RBD CSI plugin. Requires
+CSIMigration and CSIMigrationRBD feature flags enabled and Ceph CSI plugin
+installed and configured in the cluster. This flag has been deprecated in
+favor of the `InTreePluginRBDUnregister` feature flag which prevents the registration of
+in-tree RBD plugin.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration.md
new file mode 100644
index 0000000000000..6bb8b797d13e7
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migration.md
@@ -0,0 +1,11 @@
+---
+# Removed from Kubernetes
+title: CSIMigration
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume
+operations from in-tree plugins to corresponding pre-installed CSI plugins.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere-complete.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere-complete.md
new file mode 100644
index 0000000000000..632066594db60
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere-complete.md
@@ -0,0 +1,16 @@
+---
+# Removed from Kubernetes
+title: CSIMigrationvSphereComplete
+content_type: feature_gate
+
+_build:
+  list: never
+  render: false
+---
+Stops registering the vSphere in-tree plugin in kubelet
+and volume controllers and enables shims and translation logic to route volume operations
+from the vSphere in-tree plugin to vSphere CSI plugin. Requires CSIMigration and
+CSIMigrationvSphere feature flags enabled and vSphere CSI plugin installed and
+configured on all nodes in the cluster. This flag has been deprecated in favor
+of the `InTreePluginvSphereUnregister` feature flag which prevents the
+registration of in-tree vSphere plugin.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere.md
new file mode 100644
index 0000000000000..630217b89a78f
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-migrationv-sphere.md
@@ -0,0 +1,14 @@
+---
+title: CSIMigrationvSphere
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enables shims and translation logic to route volume operations
+from the vSphere in-tree plugin to vSphere CSI plugin. Supports falling back
+to in-tree vSphere plugin for mount operations to nodes that have the feature
+disabled or that do not have vSphere CSI plugin installed and configured.
+Does not support falling back for provision operations, for those the CSI
+plugin must be installed and configured. Requires CSIMigration feature flag
+enabled.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-expand-secret.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-expand-secret.md
new file mode 100644
index 0000000000000..31c55b4a3d7bd
--- /dev/null
+++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-expand-secret.md
@@ -0,0 +1,9 @@
+---
+title: CSINodeExpandSecret
+content_type: feature_gate
+_build:
+  list: never
+  render: false
+---
+Enable passing secret authentication data to a CSI driver for use
+during a `NodeExpandVolume` CSI operation.
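A sketch of how the expansion secret is wired up through StorageClass parameters; the class name, driver, secret name, and namespace are all placeholders, and the `csi.storage.k8s.io/node-expand-secret-*` keys follow the CSI secret conventions:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-expandable       # hypothetical
provisioner: csi.example.com     # hypothetical CSI driver
allowVolumeExpansion: true
parameters:
  # Secret passed to the driver during NodeExpandVolume.
  csi.storage.k8s.io/node-expand-secret-name: expand-credentials
  csi.storage.k8s.io/node-expand-secret-namespace: default
```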
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-info.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-info.md new file mode 100644 index 0000000000000..1c9665da135b7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-node-info.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: CSINodeInfo +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable all logic related to the CSINodeInfo API object in `csi.storage.k8s.io`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-persistent-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-persistent-volume.md new file mode 100644 index 0000000000000..2b1a6f3feb520 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-persistent-volume.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: CSIPersistentVolume +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable discovering and mounting volumes provisioned through a +[CSI (Container Storage Interface)](https://git.k8s.io/design-proposals-archive/storage/container-storage-interface.md) +compatible volume plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-service-account-token.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-service-account-token.md new file mode 100644 index 0000000000000..fc9118df0ad65 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-service-account-token.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: CSIServiceAccountToken +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable CSI drivers to receive the service account token of the +pods that they mount volumes for. See +[Token Requests](https://kubernetes-csi.github.io/docs/token-requests.html). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-storage-capacity.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-storage-capacity.md new file mode 100644 index 0000000000000..f979e042866cf --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-storage-capacity.md @@ -0,0 +1,11 @@ +--- +title: CSIStorageCapacity +content_type: feature_gate +_build: + list: never + render: false +--- +Enables CSI drivers to publish storage capacity information +and the Kubernetes scheduler to use that information when scheduling pods. See +[Storage Capacity](/docs/concepts/storage/storage-capacity/). +Check the [`csi` volume type](/docs/concepts/storage/volumes/#csi) documentation for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-fs-group-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-fs-group-policy.md new file mode 100644 index 0000000000000..0d044f11e7345 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-fs-group-policy.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: CSIVolumeFSGroupPolicy +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows CSIDrivers to use the `fsGroupPolicy` field.
+This field controls whether volumes created by a CSIDriver support volume ownership +and permission modifications when these volumes are mounted. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-health.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-health.md new file mode 100644 index 0000000000000..3ba35cc866685 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csi-volume-health.md @@ -0,0 +1,8 @@ +--- +title: CSIVolumeHealth +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for CSI volume health monitoring on nodes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/csr-duration.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/csr-duration.md new file mode 100644 index 0000000000000..1788e2bb21615 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/csr-duration.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: CSRDuration +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows clients to request a duration for certificates issued +via the Kubernetes CSR API. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-cpu-cfs-quota-period.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-cpu-cfs-quota-period.md new file mode 100644 index 0000000000000..5860d7f0d927a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-cpu-cfs-quota-period.md @@ -0,0 +1,9 @@ +--- +title: CustomCPUCFSQuotaPeriod +content_type: feature_gate +_build: + list: never + render: false +--- +Enable nodes to change `cpuCFSQuotaPeriod` in +[kubelet config](/docs/tasks/administer-cluster/kubelet-config-file/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-pod-dns.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-pod-dns.md new file mode 100644 index 0000000000000..e02d8b22950c2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-pod-dns.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: CustomPodDNS +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable customizing the DNS settings for a Pod using its `dnsConfig` property. +Check [Pod's DNS Config](/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-defaulting.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-defaulting.md new file mode 100644 index 0000000000000..072f55e72f6b7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-defaulting.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: CustomResourceDefaulting +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable CRD support for default values in OpenAPI v3 validation schemas.
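The `dnsConfig` behavior that `CustomPodDNS` gated is easiest to see in a manifest. A minimal sketch of a Pod with fully custom DNS settings; the nameserver, search domain, and image are placeholder values:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: dns-example
spec:
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9
  dnsPolicy: "None"    # ignore cluster DNS defaults entirely
  dnsConfig:
    nameservers:
    - 192.0.2.1        # placeholder resolver
    searches:
    - ns1.svc.cluster-domain.example
    options:
    - name: ndots
      value: "2"
EOF
```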
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-publish-open-api.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-publish-open-api.md new file mode 100644 index 0000000000000..8b00ad4e40fa8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-publish-open-api.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: CustomResourcePublishOpenAPI +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables publishing of CRD OpenAPI specs. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-subresources.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-subresources.md new file mode 100644 index 0000000000000..2823a82a3ed17 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-subresources.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: CustomResourceSubresources +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable `/status` and `/scale` subresources +on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation-expressions.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation-expressions.md new file mode 100644 index 0000000000000..ca1dee7c2869a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation-expressions.md @@ -0,0 +1,10 @@ +--- +title: CustomResourceValidationExpressions +content_type: feature_gate +_build: + list: never + render: false +--- +Enable expression language validation in CRDs, +which validates custom resources based on validation rules written in +the `x-kubernetes-validations` extension. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation.md new file mode 100644 index 0000000000000..fa155be0af7c3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-validation.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: CustomResourceValidation +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable schema-based validation on resources created from +[CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-webhook-conversion.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-webhook-conversion.md new file mode 100644 index 0000000000000..d142e85cd789b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/custom-resource-webhook-conversion.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: CustomResourceWebhookConversion +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable webhook-based conversion +on resources created from [CustomResourceDefinition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
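For `CustomResourceValidationExpressions` above, rules live in the CRD schema under `x-kubernetes-validations`. A minimal sketch using a hypothetical `Widget` resource; the CEL rule keeps `minReplicas` from exceeding `replicas`:

```shell
kubectl apply -f - <<EOF
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: widgets.example.com   # placeholder group and kind
spec:
  group: example.com
  names:
    kind: Widget
    plural: widgets
    singular: widget
  scope: Namespaced
  versions:
  - name: v1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        properties:
          spec:
            type: object
            properties:
              replicas:
                type: integer
              minReplicas:
                type: integer
            # CEL rule evaluated against this object on create and update
            x-kubernetes-validations:
            - rule: "self.minReplicas <= self.replicas"
              message: "minReplicas must not exceed replicas"
EOF
```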
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/daemon-set-update-surge.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/daemon-set-update-surge.md new file mode 100644 index 0000000000000..0e5f8be42ae04 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/daemon-set-update-surge.md @@ -0,0 +1,11 @@ +--- +title: DaemonSetUpdateSurge +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables DaemonSet workloads to maintain +availability during an update, on a per-node basis. +See [Perform a Rolling Update on a DaemonSet](/docs/tasks/manage-daemon/update-daemon-set/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/default-host-network-ports-in-pod-templates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/default-host-network-ports-in-pod-templates.md new file mode 100644 index 0000000000000..d2b2395617436 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/default-host-network-ports-in-pod-templates.md @@ -0,0 +1,13 @@ +--- +title: DefaultHostNetworkHostPortsInPodTemplates +content_type: feature_gate +_build: + list: never + render: false +--- +Changes when the default value of +`PodSpec.containers[*].ports[*].hostPort` +is assigned. The default is to only set a default value in Pods. + +Enabling this means a default will be assigned even to embedded +PodSpecs (e.g. in a Deployment), which is the historical default. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/default-pod-topology-spread.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/default-pod-topology-spread.md new file mode 100644 index 0000000000000..f8dd08b72d4cf --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/default-pod-topology-spread.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: DefaultPodTopologySpread +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the use of `PodTopologySpread` scheduling plugin to do +[default spreading](/docs/concepts/scheduling-eviction/topology-spread-constraints/#internal-default-constraints). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/delegate-fs-group-to-csi-driver.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/delegate-fs-group-to-csi-driver.md new file mode 100644 index 0000000000000..2ddbc1d067cd6 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/delegate-fs-group-to-csi-driver.md @@ -0,0 +1,10 @@ +--- +title: DelegateFSGroupToCSIDriver +content_type: feature_gate +_build: + list: never + render: false +--- +If supported by the CSI driver, delegates the +role of applying `fsGroup` from a Pod's `securityContext` to the driver by +passing `fsGroup` through the NodeStageVolume and NodePublishVolume CSI calls.
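The surge behavior behind `DaemonSetUpdateSurge` (see above) is driven by the DaemonSet's `updateStrategy`. A sketch of turning it on for an existing DaemonSet; `node-exporter` is a placeholder name, and `maxUnavailable` must be 0 whenever a non-zero `maxSurge` is used:

```shell
# Bring up the replacement pod on each node before the old one is removed.
kubectl patch daemonset node-exporter --type merge -p \
  '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
```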
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugin-cdi-devices.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugin-cdi-devices.md new file mode 100644 index 0000000000000..3cd9f6ae1600f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugin-cdi-devices.md @@ -0,0 +1,9 @@ +--- +title: DevicePluginCDIDevices +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for CDI device IDs in the +[Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) API. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugins.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugins.md new file mode 100644 index 0000000000000..81ec3fe2ccf61 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/device-plugins.md @@ -0,0 +1,9 @@ +--- +title: DevicePlugins +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the [device-plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +based resource provisioning on nodes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-accelerator-usage-metrics.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-accelerator-usage-metrics.md new file mode 100644 index 0000000000000..75c5ab82854b9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-accelerator-usage-metrics.md @@ -0,0 +1,8 @@ +--- +title: DisableAcceleratorUsageMetrics +content_type: feature_gate +_build: + list: never + render: false +--- +[Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-cloud-providers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-cloud-providers.md new file mode 100644 index 0000000000000..fa9336b7aadd4 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-cloud-providers.md @@ -0,0 +1,10 @@ +--- +title: DisableCloudProviders +content_type: feature_gate +_build: + list: never + render: false +--- +Disables any functionality in `kube-apiserver`, +`kube-controller-manager` and `kubelet` related to the `--cloud-provider` +component flag. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-kubelet-cloud-credential-providers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-kubelet-cloud-credential-providers.md new file mode 100644 index 0000000000000..cbdca7708412b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-kubelet-cloud-credential-providers.md @@ -0,0 +1,9 @@ +--- +title: DisableKubeletCloudCredentialProviders +content_type: feature_gate +_build: + list: never + render: false +--- +Disable the in-tree functionality in kubelet +to authenticate to a cloud provider container registry for image pull credentials.
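Resources advertised through the device plugin framework described above surface as extended resources on the node and are consumed via container limits. A sketch, where `example.com/gpu` is a placeholder resource name that a device plugin would advertise:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: gpu-example
spec:
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9  # placeholder image
    resources:
      limits:
        example.com/gpu: 1  # extended resource served by a device plugin
EOF
```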
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-node-kube-proxy-version.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-node-kube-proxy-version.md new file mode 100644 index 0000000000000..849dc08ab8395 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/disable-node-kube-proxy-version.md @@ -0,0 +1,8 @@ +--- +title: DisableNodeKubeProxyVersion +content_type: feature_gate +_build: + list: never + render: false +--- +Disable setting the `kubeProxyVersion` field of the Node. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/downward-api-huge-pages.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/downward-api-huge-pages.md new file mode 100644 index 0000000000000..7262abcbbe714 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/downward-api-huge-pages.md @@ -0,0 +1,9 @@ +--- +title: DownwardAPIHugePages +content_type: feature_gate +_build: + list: never + render: false +--- +Enables usage of hugepages in +[downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dry-run.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dry-run.md new file mode 100644 index 0000000000000..f0c068d08aa31 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dry-run.md @@ -0,0 +1,9 @@ +--- +title: DryRun +content_type: feature_gate +_build: + list: never + render: false +--- +Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests +so that validation, merging, and mutation can be tested without committing. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-auditing.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-auditing.md new file mode 100644 index 0000000000000..d7abbd52123b8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-auditing.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: DynamicAuditing +content_type: feature_gate + +_build: + list: never + render: false +--- +Used to enable dynamic auditing before v1.19. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-kubelet-config.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-kubelet-config.md new file mode 100644 index 0000000000000..84360ca421c42 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-kubelet-config.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: DynamicKubeletConfig +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the dynamic configuration of kubelet. The +feature is no longer supported outside of the supported skew policy. The feature +gate was removed from kubelet in 1.24.
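Server-side dry run, as enabled by the `DryRun` gate above, is most commonly reached through kubectl. A short sketch; `pod.yaml` stands in for any local manifest and `demo` is a placeholder name:

```shell
# Run validation, admission, and merging on the server without persisting.
kubectl apply --dry-run=server -f pod.yaml

# Works for imperative commands too; -o yaml prints the object exactly as
# the server would have stored it.
kubectl create deployment demo --image=registry.k8s.io/pause:3.9 \
  --dry-run=server -o yaml
```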
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-provisioning-scheduling.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-provisioning-scheduling.md new file mode 100644 index 0000000000000..912036aca1712 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-provisioning-scheduling.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: DynamicProvisioningScheduling +content_type: feature_gate + +_build: + list: never + render: false +--- +Extend the default scheduler to be aware of +volume topology and handle PV provisioning. +This feature was superseded by the `VolumeScheduling` feature in v1.12. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-resource-allocation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-resource-allocation.md new file mode 100644 index 0000000000000..3510967e12e26 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-resource-allocation.md @@ -0,0 +1,9 @@ +--- +title: DynamicResourceAllocation +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for resources with custom parameters and a lifecycle +that is independent of a Pod. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-volume-provisioning.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-volume-provisioning.md new file mode 100644 index 0000000000000..196675e4f8bd0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/dynamic-volume-provisioning.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: DynamicVolumeProvisioning +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the +[dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/) of persistent volumes to Pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/efficient-watch-resumption.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/efficient-watch-resumption.md new file mode 100644 index 0000000000000..ec6f524e84aed --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/efficient-watch-resumption.md @@ -0,0 +1,9 @@ +--- +title: EfficientWatchResumption +content_type: feature_gate +_build: + list: never + render: false +--- +Allows for storage-originated bookmark (progress +notify) events to be delivered to the users. This is only applied to watch operations. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/elastic-indexed-job.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/elastic-indexed-job.md new file mode 100644 index 0000000000000..8370e6fef29a9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/elastic-indexed-job.md @@ -0,0 +1,11 @@ +--- +title: ElasticIndexedJob +content_type: feature_gate +_build: + list: never + render: false +--- +Enables Indexed Jobs to be scaled up or down by mutating both +`spec.completions` and `spec.parallelism` together such that `spec.completions == spec.parallelism`. +See docs on [elastic Indexed Jobs](/docs/concepts/workloads/controllers/job#elastic-indexed-jobs) +for more details. 
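For `ElasticIndexedJob` above, the contract is that both fields move together. A sketch of resizing a hypothetical Indexed Job named `sample-job` to five indexes:

```shell
# spec.completions is normally immutable; with the gate enabled it may be
# changed as long as it stays equal to spec.parallelism.
kubectl patch job sample-job --type merge \
  -p '{"spec":{"parallelism":5,"completions":5}}'
```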
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-aggregated-discovery-timeout.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-aggregated-discovery-timeout.md new file mode 100644 index 0000000000000..44453f6d8c7a0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-aggregated-discovery-timeout.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: EnableAggregatedDiscoveryTimeout +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the five-second +timeout on aggregated discovery calls. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-equivalence-class-cache.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-equivalence-class-cache.md new file mode 100644 index 0000000000000..87471bc245bb5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/enable-equivalence-class-cache.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: EnableEquivalenceClassCache +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the scheduler to cache equivalence of +nodes when scheduling Pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-node-name.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-node-name.md new file mode 100644 index 0000000000000..c0433abb9c413 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-node-name.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: EndpointSliceNodeName +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the EndpointSlice `nodeName` field. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-proxying.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-proxying.md new file mode 100644 index 0000000000000..b1f048517b38b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-proxying.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: EndpointSliceProxying +content_type: feature_gate + +_build: + list: never + render: false +--- +When enabled, kube-proxy running + on Linux will use EndpointSlices as the primary data source instead of + Endpoints, enabling scalability and performance improvements. See + [Enabling Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-terminating-condition.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-terminating-condition.md new file mode 100644 index 0000000000000..b5c71ac20e642 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice-terminating-condition.md @@ -0,0 +1,9 @@ +--- +title: EndpointSliceTerminatingCondition +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the EndpointSlice `terminating` and `serving` + condition fields.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice.md new file mode 100644 index 0000000000000..e912c594afd91 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/endpoint-slice.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: EndpointSlice +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables EndpointSlices for more scalable and extensible + network endpoints. See [Enabling EndpointSlices](/docs/concepts/services-networking/endpoint-slices/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/ephemeral-containers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/ephemeral-containers.md new file mode 100644 index 0000000000000..03e79e76d362d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/ephemeral-containers.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: EphemeralContainers +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the ability to add +{{< glossary_tooltip text="ephemeral containers" term_id="ephemeral-container" >}} +to running Pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/even-pods-spread.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/even-pods-spread.md new file mode 100644 index 0000000000000..5a0b4a87f22b3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/even-pods-spread.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: EvenPodsSpread +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable pods to be scheduled evenly across topology domains. See +[Pod Topology Spread Constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/evented-pleg.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/evented-pleg.md new file mode 100644 index 0000000000000..01721ccdf12ca --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/evented-pleg.md @@ -0,0 +1,15 @@ +--- +title: EventedPLEG +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for the kubelet to receive container life cycle events from the +{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} via +an extension to {{< glossary_tooltip text="CRI" term_id="cri" >}}. +(PLEG is an abbreviation for “Pod lifecycle event generator”). +For this feature to be useful, you also need to enable support for container lifecycle events +in each container runtime running in your cluster. If the container runtime does not announce +support for container lifecycle events then the kubelet automatically switches to the legacy +generic PLEG mechanism, even if you have this feature gate enabled.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/exec-probe-timeout.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/exec-probe-timeout.md new file mode 100644 index 0000000000000..a742a10b4ffdf --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/exec-probe-timeout.md @@ -0,0 +1,11 @@ +--- +title: ExecProbeTimeout +content_type: feature_gate +_build: + list: never + render: false +--- +Ensure kubelet respects exec probe timeouts. +This feature gate exists in case any of your existing workloads depend on a +now-corrected fault where Kubernetes ignored exec probe timeouts. See +[readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-csi-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-csi-volumes.md new file mode 100644 index 0000000000000..78c9c9ab585f9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-csi-volumes.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ExpandCSIVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the expanding of CSI volumes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-in-use-persistent-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-in-use-persistent-volumes.md new file mode 100644 index 0000000000000..75bc6788b51ed --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-in-use-persistent-volumes.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ExpandInUsePersistentVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable expanding in-use PVCs. See +[Resizing an in-use PersistentVolumeClaim](/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-persistent-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-persistent-volumes.md new file mode 100644 index 0000000000000..83f5f5a22d178 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/expand-persistent-volumes.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ExpandPersistentVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the expanding of persistent volumes. See +[Expanding Persistent Volumes Claims](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/expanded-dns-config.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/expanded-dns-config.md new file mode 100644 index 0000000000000..7e1e83f2a6b95 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/expanded-dns-config.md @@ -0,0 +1,11 @@ +--- +title: ExpandedDNSConfig +content_type: feature_gate +_build: + list: never + render: false +--- +Enable kubelet and kube-apiserver to allow more DNS +search paths and a longer list of DNS search paths. This feature requires container +runtime support (containerd: v1.5.6 or higher, CRI-O: v1.22 or higher).
See +[Expanded DNS Configuration](/docs/concepts/services-networking/dns-pod-service/#expanded-dns-configuration). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-critical-pod-annotation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-critical-pod-annotation.md new file mode 100644 index 0000000000000..6d3c4835f44b0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-critical-pod-annotation.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: ExperimentalCriticalPodAnnotation +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable annotating specific pods as *critical* +so that their [scheduling is guaranteed](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/). +This feature is deprecated by Pod Priority and Preemption as of v1.13. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-host-user-namespace-defaulting.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-host-user-namespace-defaulting.md new file mode 100644 index 0000000000000..802c59317f7a7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/experimental-host-user-namespace-defaulting.md @@ -0,0 +1,12 @@ +--- +title: ExperimentalHostUserNamespaceDefaulting +content_type: feature_gate +_build: + list: never + render: false +--- +Enables defaulting the user +namespace to the host. This is for containers that are using other host namespaces, +host mounts, or containers that are privileged or using specific non-namespaced +capabilities (e.g. `MKNOD`, `SYS_MODULE`, etc.). This should only be enabled +if user namespace remapping is enabled in the Docker daemon. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/external-policy-for-external-ip.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/external-policy-for-external-ip.md new file mode 100644 index 0000000000000..cfd0fd3ddb5a2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/external-policy-for-external-ip.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ExternalPolicyForExternalIP +content_type: feature_gate + +_build: + list: never + render: false +--- +Fix a bug where ExternalTrafficPolicy is not +applied to Service ExternalIPs. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/gce-regional-persistent-disk.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/gce-regional-persistent-disk.md new file mode 100644 index 0000000000000..b8738d7232074 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/gce-regional-persistent-disk.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: GCERegionalPersistentDisk +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the regional PD feature on GCE.
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/generic-ephemeral-volume.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/generic-ephemeral-volume.md new file mode 100644 index 0000000000000..8fd4f225c7039 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/generic-ephemeral-volume.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: GenericEphemeralVolume +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables ephemeral, inline volumes that support all features +of normal volumes (can be provided by third-party storage vendors, storage capacity tracking, +restore from snapshot, etc.). +See [Ephemeral Volumes](/docs/concepts/storage/ephemeral-volumes/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown-based-on-pod-priority.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown-based-on-pod-priority.md new file mode 100644 index 0000000000000..dead74438a61c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown-based-on-pod-priority.md @@ -0,0 +1,9 @@ +--- +title: GracefulNodeShutdownBasedOnPodPriority +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the kubelet to check Pod priorities +when shutting down a node gracefully. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown.md new file mode 100644 index 0000000000000..ed301a27bb7e5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/graceful-node-shutdown.md @@ -0,0 +1,12 @@ +--- +title: GracefulNodeShutdown +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for graceful shutdown in kubelet. +During a system shutdown, kubelet will attempt to detect the shutdown event +and gracefully terminate pods running on the node. See +[Graceful Node Shutdown](/docs/concepts/architecture/nodes/#graceful-node-shutdown) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/grpc-container-probe.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/grpc-container-probe.md new file mode 100644 index 0000000000000..6960e40de7cd7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/grpc-container-probe.md @@ -0,0 +1,9 @@ +--- +title: GRPCContainerProbe +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the gRPC probe method for {Liveness,Readiness,Startup}Probe. +See [Configure Liveness, Readiness and Startup Probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-grpc-liveness-probe). 
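The probe method behind `GRPCContainerProbe` (described above) is declared inline in the container spec. A sketch using a single etcd container, which serves the gRPC health checking protocol on its client port; the image tag and flags are illustrative:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: etcd-with-grpc
spec:
  containers:
  - name: etcd
    image: registry.k8s.io/etcd:3.5.1-0
    command: ["/usr/local/bin/etcd", "--data-dir", "/var/lib/etcd",
              "--listen-client-urls", "http://0.0.0.0:2379",
              "--advertise-client-urls", "http://127.0.0.1:2379"]
    ports:
    - containerPort: 2379
    livenessProbe:
      grpc:
        port: 2379       # kubelet issues gRPC health checks against this port
      initialDelaySeconds: 10
EOF
```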
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/honor-pv-reclaim-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/honor-pv-reclaim-policy.md new file mode 100644 index 0000000000000..bffd6877df9d2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/honor-pv-reclaim-policy.md @@ -0,0 +1,11 @@ +--- +title: HonorPVReclaimPolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Honor persistent volume reclaim policy when it is `Delete`, irrespective of PV-PVC deletion ordering. +For more details, check the +[PersistentVolume deletion protection finalizer](/docs/concepts/storage/persistent-volumes/#persistentvolume-deletion-protection-finalizer) +documentation. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-container-metrics.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-container-metrics.md new file mode 100644 index 0000000000000..003a1354b265f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-container-metrics.md @@ -0,0 +1,9 @@ +--- +title: HPAContainerMetrics +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `HorizontalPodAutoscaler` to scale based on +metrics from individual containers in target pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-scale-to-zero.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-scale-to-zero.md new file mode 100644 index 0000000000000..4f200af8eeb3d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/hpa-scale-to-zero.md @@ -0,0 +1,9 @@ +--- +title: HPAScaleToZero +content_type: feature_gate +_build: + list: never + render: false +--- +Enables setting `minReplicas` to 0 for `HorizontalPodAutoscaler` +resources when using custom or external metrics. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-page-storage-medium-size.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-page-storage-medium-size.md new file mode 100644 index 0000000000000..0e01d8ca4846e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-page-storage-medium-size.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: HugePageStorageMediumSize +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for multiple sizes of pre-allocated +[huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-pages.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-pages.md new file mode 100644 index 0000000000000..cf8e10bfd4850 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/huge-pages.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: HugePages +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the allocation and consumption of pre-allocated +[huge pages](/docs/tasks/manage-hugepages/scheduling-hugepages/).
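With `HPAContainerMetrics` enabled (see above), an `autoscaling/v2` HorizontalPodAutoscaler can target a single container instead of the Pod-wide average. A sketch; the Deployment `app` and its container name are placeholders:

```shell
kubectl apply -f - <<EOF
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: app-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: app           # placeholder workload
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: ContainerResource   # per-container metric source
    containerResource:
      name: cpu
      container: app          # only this container's usage is considered
      target:
        type: Utilization
        averageUtilization: 60
EOF
```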
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/hyper-v-container.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/hyper-v-container.md new file mode 100644 index 0000000000000..cb768d465b0d3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/hyper-v-container.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: HyperVContainer +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable +[Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) +for Windows containers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/identify-pod-os.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/identify-pod-os.md new file mode 100644 index 0000000000000..34a5d9d71f931 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/identify-pod-os.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: IdentifyPodOS +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows the Pod OS field to be specified. This helps identify +the OS of the pod authoritatively at API server admission time. +In Kubernetes {{< skew currentVersion >}}, the allowed values for the `pod.spec.os.name` +are `windows` and `linux`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/image-maximum-gc-age.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/image-maximum-gc-age.md new file mode 100644 index 0000000000000..f463f473ee61c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/image-maximum-gc-age.md @@ -0,0 +1,8 @@ +--- +title: ImageMaximumGCAge
content_type: feature_gate +_build: + list: never + render: false +--- +Enables the kubelet configuration field `imageMaximumGCAge`, allowing an administrator to specify the age after which an image will be garbage collected. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/immutable-ephemeral-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/immutable-ephemeral-volumes.md new file mode 100644 index 0000000000000..ae6a98d2392f2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/immutable-ephemeral-volumes.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ImmutableEphemeralVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows for marking individual Secrets and ConfigMaps as +immutable for better safety and performance. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-place-pod-vertical-scaling.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-place-pod-vertical-scaling.md new file mode 100644 index 0000000000000..13ef2960d101f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-place-pod-vertical-scaling.md @@ -0,0 +1,8 @@ +--- +title: InPlacePodVerticalScaling +content_type: feature_gate +_build: + list: never + render: false +--- +Enables in-place Pod vertical scaling.
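The field that `IdentifyPodOS` introduced is set directly on the Pod spec. A minimal sketch declaring a Linux pod; the name and image are placeholders:

```shell
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: os-aware-pod
spec:
  os:
    name: linux   # authoritative OS declaration, validated at admission
  containers:
  - name: app
    image: registry.k8s.io/pause:3.9
EOF
```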
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-aws-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-aws-unregister.md new file mode 100644 index 0000000000000..fd35a7c40d17e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-aws-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginAWSUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the aws-ebs in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-disk-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-disk-unregister.md new file mode 100644 index 0000000000000..380fe220bd4cc --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-disk-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginAzureDiskUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the azuredisk in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-file-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-file-unregister.md new file mode 100644 index 0000000000000..b4ae8e593a802 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-azure-file-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginAzureFileUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the azurefile in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-gce-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-gce-unregister.md new file mode 100644 index 0000000000000..229bd7acc567f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-gce-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginGCEUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the gce-pd in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-openstack-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-openstack-unregister.md new file mode 100644 index 0000000000000..038112c8ff253 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-openstack-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginOpenStackUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the OpenStack cinder in-tree plugin in kubelet +and volume controllers. 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-portworx-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-portworx-unregister.md new file mode 100644 index 0000000000000..86551fcbe99dc --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-portworx-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginPortworxUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the Portworx in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-rbd-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-rbd-unregister.md new file mode 100644 index 0000000000000..d53ae0165d40a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-rbd-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginRBDUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the RBD in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-vsphere-unregister.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-vsphere-unregister.md new file mode 100644 index 0000000000000..4e0e5fe2e307c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/in-tree-plugin-vsphere-unregister.md @@ -0,0 +1,9 @@ +--- +title: InTreePluginvSphereUnregister +content_type: feature_gate +_build: + list: never + render: false +--- +Stops registering the vSphere in-tree plugin in kubelet +and volume controllers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/index.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/index.md new file mode 100644 index 0000000000000..dcd2772b099b0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/index.md @@ -0,0 +1,555 @@ +--- +title: Feature Gates +weight: 10 +content_type: concept +card: + name: reference + weight: 60 +--- + + +This page contains an overview of the various feature gates an administrator +can specify on different Kubernetes components. + +See [feature stages](#feature-stages) for an explanation of the stages for a feature. + + + +## Overview + +Feature gates are a set of key=value pairs that describe Kubernetes features. +You can turn these features on or off using the `--feature-gates` command line flag +on each Kubernetes component. + +Each Kubernetes component lets you enable or disable a set of feature gates that +are relevant to that component. +Use the `-h` flag to see a full set of feature gates for all components. +To set feature gates for a component, such as kubelet, use the `--feature-gates` +flag set to a comma-separated list of feature pairs: + +```shell +--feature-gates=...,GracefulNodeShutdown=true +``` + +The following tables are a summary of the feature gates that you can set on +different Kubernetes components. + +- The "Since" column contains the Kubernetes release when a feature is introduced + or its release stage is changed. +- The "Until" column, if not empty, contains the last Kubernetes release in which + you can still use a feature gate.
+- If a feature is in the Alpha or Beta state, you can find the feature listed + in the [Alpha/Beta feature gate table](#feature-gates-for-alpha-or-beta-features). +- If a feature is stable you can find all stages for that feature listed in the + [Graduated/Deprecated feature gate table](#feature-gates-for-graduated-or-deprecated-features). +- The [Graduated/Deprecated feature gate table](#feature-gates-for-graduated-or-deprecated-features) + also lists deprecated and withdrawn features. + +{{< note >}} +For a reference to old feature gates that are removed, please refer to +[feature gates removed](/docs/reference/command-line-tools-reference/feature-gates-removed/). +{{< /note >}} + +### Feature gates for Alpha or Beta features + +{{< table caption="Feature gates for features in Alpha or Beta states" sortable="true" >}} + +| Feature | Default | Stage | Since | Until | +|---------|---------|-------|-------|-------| +| `APIResponseCompression` | `false` | Alpha | 1.7 | 1.15 | +| `APIResponseCompression` | `true` | Beta | 1.16 | | +| `APIServerIdentity` | `false` | Alpha | 1.20 | 1.25 | +| `APIServerIdentity` | `true` | Beta | 1.26 | | +| `APIServerTracing` | `false` | Alpha | 1.22 | 1.26 | +| `APIServerTracing` | `true` | Beta | 1.27 | | +| `AdmissionWebhookMatchConditions` | `false` | Alpha | 1.27 | 1.27 | +| `AdmissionWebhookMatchConditions` | `true` | Beta | 1.28 | | +| `AggregatedDiscoveryEndpoint` | `false` | Alpha | 1.26 | 1.26 | +| `AggregatedDiscoveryEndpoint` | `true` | Beta | 1.27 | | +| `AnyVolumeDataSource` | `false` | Alpha | 1.18 | 1.23 | +| `AnyVolumeDataSource` | `true` | Beta | 1.24 | | +| `AppArmor` | `true` | Beta | 1.4 | | +| `CPUManagerPolicyAlphaOptions` | `false` | Alpha | 1.23 | | +| `CPUManagerPolicyBetaOptions` | `true` | Beta | 1.23 | | +| `CPUManagerPolicyOptions` | `false` | Alpha | 1.22 | 1.22 | +| `CPUManagerPolicyOptions` | `true` | Beta | 1.23 | | +| `CRDValidationRatcheting` | `false` | Alpha | 1.28 | | +| `CSIMigrationPortworx` | `false` | Alpha | 1.23 | 1.24 | +| `CSIMigrationPortworx` | `false` | Beta | 1.25 | | +| `CSIVolumeHealth` | `false` | Alpha | 1.21 | | +| `CloudControllerManagerWebhook` | `false` | Alpha | 1.27 | | +| `CloudDualStackNodeIPs` | `false` | Alpha | 1.27 | 1.28 | +| `CloudDualStackNodeIPs` | `true` | Beta | 1.29 | | +| `ClusterTrustBundle` | `false` | Alpha | 1.27 | | +| `ClusterTrustBundleProjection` | `false` | Alpha | 1.29 | | +| `ComponentSLIs` | `false` | Alpha | 1.26 | 1.26 | +| `ComponentSLIs` | `true` | Beta | 1.27 | | +| `ConsistentListFromCache` | `false` | Alpha | 1.28 | | +| `ContainerCheckpoint` | `false` | Alpha | 1.25 | | +| `ContextualLogging` | `false` | Alpha | 1.24 | | +| `CronJobsScheduledAnnotation` | `true` | Beta | 1.28 | | +| `CrossNamespaceVolumeDataSource` | `false` | Alpha | 1.26 | | +| `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | +| `DevicePluginCDIDevices` | `false` | Alpha | 1.28 | 1.28 | +| `DevicePluginCDIDevices` | `true` | Beta | 1.29 | | +| `DisableCloudProviders` | `false` | Alpha | 1.22 | 1.28 | +| `DisableCloudProviders` | `true` | Beta | 1.29 | | +| `DisableKubeletCloudCredentialProviders` | `false` | Alpha | 1.23 | 1.28 | +| `DisableKubeletCloudCredentialProviders` | `true` | Beta | 1.29 | | +| `DisableNodeKubeProxyVersion` | `false` | Alpha | 1.29 | | +| `DynamicResourceAllocation` | `false` | Alpha | 1.26 | | +| `ElasticIndexedJob` | `true` | Beta | 1.27 | | +| `EventedPLEG` | `false` | Alpha | 1.26 | 1.26 | +| `EventedPLEG` | `false` | Beta | 1.27 | | +| `GracefulNodeShutdown` |
`false` | Alpha | 1.20 | 1.20 | +| `GracefulNodeShutdown` | `true` | Beta | 1.21 | | +| `GracefulNodeShutdownBasedOnPodPriority` | `false` | Alpha | 1.23 | 1.23 | +| `GracefulNodeShutdownBasedOnPodPriority` | `true` | Beta | 1.24 | | +| `HPAContainerMetrics` | `false` | Alpha | 1.20 | 1.26 | +| `HPAContainerMetrics` | `true` | Beta | 1.27 | | +| `HPAScaleToZero` | `false` | Alpha | 1.16 | | +| `HonorPVReclaimPolicy` | `false` | Alpha | 1.23 | | +| `ImageMaximumGCAge` | `false` | Alpha | 1.29 | | +| `InPlacePodVerticalScaling` | `false` | Alpha | 1.27 | | +| `InTreePluginAWSUnregister` | `false` | Alpha | 1.21 | | +| `InTreePluginAzureDiskUnregister` | `false` | Alpha | 1.21 | | +| `InTreePluginAzureFileUnregister` | `false` | Alpha | 1.21 | | +| `InTreePluginGCEUnregister` | `false` | Alpha | 1.21 | | +| `InTreePluginOpenStackUnregister` | `false` | Alpha | 1.21 | | +| `InTreePluginPortworxUnregister` | `false` | Alpha | 1.23 | | +| `InTreePluginvSphereUnregister` | `false` | Alpha | 1.21 | | +| `JobBackoffLimitPerIndex` | `false` | Alpha | 1.28 | 1.28 | +| `JobBackoffLimitPerIndex` | `true` | Beta | 1.29 | | +| `JobPodFailurePolicy` | `false` | Alpha | 1.25 | 1.25 | +| `JobPodFailurePolicy` | `true` | Beta | 1.26 | | +| `JobPodReplacementPolicy` | `false` | Alpha | 1.28 | 1.28 | +| `JobPodReplacementPolicy` | `true` | Beta | 1.29 | | +| `KubeProxyDrainingTerminatingNodes` | `false` | Alpha | 1.28 | | +| `KubeletCgroupDriverFromCRI` | `false` | Alpha | 1.28 | | +| `KubeletInUserNamespace` | `false` | Alpha | 1.22 | | +| `KubeletPodResourcesDynamicResources` | `false` | Alpha | 1.27 | | +| `KubeletPodResourcesGet` | `false` | Alpha | 1.27 | | +| `KubeletSeparateDiskGC` | `false` | Alpha | 1.29 | | +| `KubeletTracing` | `false` | Alpha | 1.25 | 1.26 | +| `KubeletTracing` | `true` | Beta | 1.27 | | +| `LegacyServiceAccountTokenCleanUp` | `false` | Alpha | 1.28 | 1.28 | +| `LegacyServiceAccountTokenCleanUp` | `true` | Beta | 1.29 | | +| `LoadBalancerIPMode` | `false` | Alpha | 1.29 | | +| `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | - | +| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | 1.21 | +| `LogarithmicScaleDown` | `true` | Beta | 1.22 | | +| `LoggingAlphaOptions` | `false` | Alpha | 1.24 | - | +| `LoggingBetaOptions` | `true` | Beta | 1.24 | - | +| `MatchLabelKeysInPodAffinity` | `false` | Alpha | 1.29 | - | +| `MatchLabelKeysInPodTopologySpread` | `false` | Alpha | 1.25 | 1.26 | +| `MatchLabelKeysInPodTopologySpread` | `true` | Beta | 1.27 | - | +| `MaxUnavailableStatefulSet` | `false` | Alpha | 1.24 | | +| `MemoryManager` | `false` | Alpha | 1.21 | 1.21 | +| `MemoryManager` | `true` | Beta | 1.22 | | +| `MemoryQoS` | `false` | Alpha | 1.22 | | +| `MinDomainsInPodTopologySpread` | `false` | Alpha | 1.24 | 1.24 | +| `MinDomainsInPodTopologySpread` | `false` | Beta | 1.25 | 1.26 | +| `MinDomainsInPodTopologySpread` | `true` | Beta | 1.27 | | +| `MultiCIDRServiceAllocator` | `false` | Alpha | 1.27 | | +| `NFTablesProxyMode` | `false` | Alpha | 1.29 | | +| `NewVolumeManagerReconstruction` | `false` | Beta | 1.27 | 1.27 | +| `NewVolumeManagerReconstruction` | `true` | Beta | 1.28 | | +| `NodeInclusionPolicyInPodTopologySpread` | `false` | Alpha | 1.25 | 1.25 | +| `NodeInclusionPolicyInPodTopologySpread` | `true` | Beta | 1.26 | | +| `NodeLogQuery` | `false` | Alpha | 1.27 | | +| `NodeSwap` | `false` | Alpha | 1.22 | 1.27 | +| `NodeSwap` | `false` | Beta | 1.28 | | +| `OpenAPIEnums` | `false` | Alpha | 1.23 | 1.23 | +| `OpenAPIEnums` | `true` | Beta | 1.24 | 
| +| `PDBUnhealthyPodEvictionPolicy` | `false` | Alpha | 1.26 | 1.26 | +| `PDBUnhealthyPodEvictionPolicy` | `true` | Beta | 1.27 | | +| `PersistentVolumeLastPhaseTransitionTime` | `false` | Alpha | 1.28 | 1.28 | +| `PersistentVolumeLastPhaseTransitionTime` | `true` | Beta | 1.29 | | +| `PodAndContainerStatsFromCRI` | `false` | Alpha | 1.23 | | +| `PodDeletionCost` | `false` | Alpha | 1.21 | 1.21 | +| `PodDeletionCost` | `true` | Beta | 1.22 | | +| `PodDisruptionConditions` | `false` | Alpha | 1.25 | 1.25 | +| `PodDisruptionConditions` | `true` | Beta | 1.26 | | +| `PodHostIPs` | `false` | Alpha | 1.28 | 1.28 | +| `PodHostIPs` | `true` | Beta | 1.29 | | +| `PodIndexLabel` | `true` | Beta | 1.28 | | +| `PodLifecycleSleepAction` | `false` | Alpha | 1.29 | | +| `PodReadyToStartContainersCondition` | `false` | Alpha | 1.28 | 1.28 | +| `PodReadyToStartContainersCondition` | `true` | Beta | 1.29 | | +| `PodSchedulingReadiness` | `false` | Alpha | 1.26 | 1.26 | +| `PodSchedulingReadiness` | `true` | Beta | 1.27 | | +| `ProcMountType` | `false` | Alpha | 1.12 | | +| `QOSReserved` | `false` | Alpha | 1.11 | | +| `RecoverVolumeExpansionFailure` | `false` | Alpha | 1.23 | | +| `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | +| `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | +| `RuntimeClassInImageCriApi` | `false` | Alpha | 1.29 | | +| `SELinuxMountReadWriteOncePod` | `false` | Alpha | 1.25 | 1.26 | +| `SELinuxMountReadWriteOncePod` | `false` | Beta | 1.27 | 1.27 | +| `SELinuxMountReadWriteOncePod` | `true` | Beta | 1.28 | | +| `SchedulerQueueingHints` | `true` | Beta | 1.28 | 1.28 | +| `SchedulerQueueingHints` | `false` | Beta | 1.29 | | +| `SecurityContextDeny` | `false` | Alpha | 1.27 | | +| `SeparateTaintEvictionController` | `true` | Beta | 1.29 | | +| `ServiceAccountTokenJTI` | `false` | Alpha | 1.29 | | +| `ServiceAccountTokenNodeBinding` | `false` | Alpha | 1.29 | | +| `ServiceAccountTokenNodeBindingValidation` | `false` | Alpha | 1.29 | | +| `ServiceAccountTokenPodNodeInfo` | `false` | Alpha | 1.29 | | +| `SidecarContainers` | `false` | Alpha | 1.28 | 1.28 | +| `SidecarContainers` | `true` | Beta | 1.29 | | +| `SizeMemoryBackedVolumes` | `false` | Alpha | 1.20 | 1.21 | +| `SizeMemoryBackedVolumes` | `true` | Beta | 1.22 | | +| `StableLoadBalancerNodeSet` | `true` | Beta | 1.27 | | +| `StatefulSetAutoDeletePVC` | `false` | Alpha | 1.23 | 1.26 | +| `StatefulSetAutoDeletePVC` | `false` | Beta | 1.27 | | +| `StatefulSetStartOrdinal` | `false` | Alpha | 1.26 | 1.26 | +| `StatefulSetStartOrdinal` | `true` | Beta | 1.27 | | +| `StorageVersionAPI` | `false` | Alpha | 1.20 | | +| `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | +| `StorageVersionHash` | `true` | Beta | 1.15 | | +| `StructuredAuthenticationConfiguration` | `false` | Alpha | 1.29 | | +| `StructuredAuthorizationConfiguration` | `false` | Alpha | 1.29 | | +| `TopologyAwareHints` | `false` | Alpha | 1.21 | 1.22 | +| `TopologyAwareHints` | `false` | Beta | 1.23 | 1.23 | +| `TopologyAwareHints` | `true` | Beta | 1.24 | | +| `TopologyManagerPolicyAlphaOptions` | `false` | Alpha | 1.26 | | +| `TopologyManagerPolicyBetaOptions` | `false` | Beta | 1.26 | 1.27 | +| `TopologyManagerPolicyBetaOptions` | `true` | Beta | 1.28 | | +| `TopologyManagerPolicyOptions` | `false` | Alpha | 1.26 | 1.27 | +| `TopologyManagerPolicyOptions` | `true` | Beta | 1.28 | | +| `TranslateStreamCloseWebsocketRequests` | `false` | Alpha | 1.29 | | +| `UnauthenticatedHTTP2DOSMitigation` | `false` | Beta | 1.28 | 1.28 | +| 
`UnauthenticatedHTTP2DOSMitigation` | `true` | Beta | 1.29 | | +| `UnknownVersionInteroperabilityProxy` | `false` | Alpha | 1.28 | | +| `UserNamespacesPodSecurityStandards` | `false` | Alpha | 1.29 | | +| `UserNamespacesSupport` | `false` | Alpha | 1.28 | | +| `ValidatingAdmissionPolicy` | `false` | Alpha | 1.26 | 1.27 | +| `ValidatingAdmissionPolicy` | `false` | Beta | 1.28 | | +| `VolumeAttributesClass` | `false` | Alpha | 1.29 | | +| `VolumeCapacityPriority` | `false` | Alpha | 1.21 | | +| `WatchList` | `false` | Alpha | 1.27 | | +| `WinDSR` | `false` | Alpha | 1.14 | | +| `WinOverlay` | `false` | Alpha | 1.14 | 1.19 | +| `WinOverlay` | `true` | Beta | 1.20 | | +| `WindowsHostNetwork` | `true` | Alpha | 1.26 | | +| `ZeroLimitedNominalConcurrencyShares` | `false` | Beta | 1.29 | | + +{{< /table >}} + +### Feature gates for graduated or deprecated features + +{{< table caption="Feature Gates for Graduated or Deprecated Features" sortable="true">}} + +| Feature | Default | Stage | Since | Until | +|---------|---------|-------|-------|-------| +| `APIListChunking` | `false` | Alpha | 1.8 | 1.8 | +| `APIListChunking` | `true` | Beta | 1.9 | 1.28 | +| `APIListChunking` | `true` | GA | 1.29 | - | +| `APIPriorityAndFairness` | `false` | Alpha | 1.18 | 1.19 | +| `APIPriorityAndFairness` | `true` | Beta | 1.20 | 1.28 | +| `APIPriorityAndFairness` | `true` | GA | 1.29 | - | +| `APISelfSubjectReview` | `false` | Alpha | 1.26 | 1.26 | +| `APISelfSubjectReview` | `true` | Beta | 1.27 | 1.27 | +| `APISelfSubjectReview` | `true` | GA | 1.28 | - | +| `AllowServiceLBStatusOnNonLB` | `false` | Deprecated | 1.29 | | +| `CPUManager` | `false` | Alpha | 1.8 | 1.9 | +| `CPUManager` | `true` | Beta | 1.10 | 1.25 | +| `CPUManager` | `true` | GA | 1.26 | - | +| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | 1.20 | +| `CSIMigrationAzureFile` | `false` | Beta | 1.21 | 1.23 | +| `CSIMigrationAzureFile` | `true` | Beta | 1.24 | 1.25 | +| `CSIMigrationAzureFile` | `true` | GA | 1.26 | | +| `CSIMigrationRBD` | `false` | Alpha | 1.23 | 1.27 | +| `CSIMigrationRBD` | `false` | Deprecated | 1.28 | | +| `CSINodeExpandSecret` | `false` | Alpha | 1.25 | 1.26 | +| `CSINodeExpandSecret` | `true` | Beta | 1.27 | 1.28 | +| `CSINodeExpandSecret` | `true` | GA | 1.29 | | +| `ConsistentHTTPGetHandlers` | `true` | GA | 1.25 | - | +| `CustomResourceValidationExpressions` | `false` | Alpha | 1.23 | 1.24 | +| `CustomResourceValidationExpressions` | `true` | Beta | 1.25 | 1.28 | +| `CustomResourceValidationExpressions` | `true` | GA | 1.29 | - | +| `DefaultHostNetworkHostPortsInPodTemplates` | `false` | Deprecated | 1.28 | | +| `EfficientWatchResumption` | `false` | Alpha | 1.20 | 1.20 | +| `EfficientWatchResumption` | `true` | Beta | 1.21 | 1.23 | +| `EfficientWatchResumption` | `true` | GA | 1.24 | | +| `ExecProbeTimeout` | `true` | GA | 1.20 | | +| `ExpandedDNSConfig` | `false` | Alpha | 1.22 | 1.25 | +| `ExpandedDNSConfig` | `true` | Beta | 1.26 | 1.27 | +| `ExpandedDNSConfig` | `true` | GA | 1.28 | | +| `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | 1.27 | +| `ExperimentalHostUserNamespaceDefaulting` | `false` | Deprecated | 1.28 | | +| `IPTablesOwnershipCleanup` | `false` | Alpha | 1.25 | 1.26 | +| `IPTablesOwnershipCleanup` | `true` | Beta | 1.27 | 1.27 | +| `IPTablesOwnershipCleanup` | `true` | GA | 1.28 | | +| `InTreePluginRBDUnregister` | `false` | Alpha | 1.23 | 1.27 | +| `InTreePluginRBDUnregister` | `false` | Deprecated | 1.28 | | +| `JobReadyPods` | `false` | Alpha | 1.23 | 1.23 | +| `JobReadyPods` 
| `true` | Beta | 1.24 | 1.28 | +| `JobReadyPods` | `true` | GA | 1.29 | | +| `KMSv1` | `true` | Deprecated | 1.28 | 1.28 | +| `KMSv1` | `false` | Deprecated | 1.29 | | +| `KMSv2` | `false` | Alpha | 1.25 | 1.26 | +| `KMSv2` | `true` | Beta | 1.27 | 1.28 | +| `KMSv2` | `true` | GA | 1.29 | | +| `KMSv2KDF` | `false` | Beta | 1.28 | 1.28 | +| `KMSv2KDF` | `true` | GA | 1.29 | | +| `KubeletPodResources` | `false` | Alpha | 1.13 | 1.14 | +| `KubeletPodResources` | `true` | Beta | 1.15 | 1.27 | +| `KubeletPodResources` | `true` | GA | 1.28 | | +| `KubeletPodResourcesGetAllocatable` | `false` | Alpha | 1.21 | 1.22 | +| `KubeletPodResourcesGetAllocatable` | `true` | Beta | 1.23 | 1.27 | +| `KubeletPodResourcesGetAllocatable` | `true` | GA | 1.28 | | +| `LegacyServiceAccountTokenTracking` | `false` | Alpha | 1.26 | 1.26 | +| `LegacyServiceAccountTokenTracking` | `true` | Beta | 1.27 | 1.27 | +| `LegacyServiceAccountTokenTracking` | `true` | GA | 1.28 | | +| `MinimizeIPTablesRestore` | `false` | Alpha | 1.26 | 1.26 | +| `MinimizeIPTablesRestore` | `true` | Beta | 1.27 | 1.27 | +| `MinimizeIPTablesRestore` | `true` | GA | 1.28 | | +| `NodeOutOfServiceVolumeDetach` | `false` | Alpha | 1.24 | 1.25 | +| `NodeOutOfServiceVolumeDetach` | `true` | Beta | 1.26 | 1.27 | +| `NodeOutOfServiceVolumeDetach` | `true` | GA | 1.28 | | +| `ProxyTerminatingEndpoints` | `false` | Alpha | 1.22 | 1.25 | +| `ProxyTerminatingEndpoints` | `true` | Beta | 1.26 | 1.27 | +| `ProxyTerminatingEndpoints` | `true` | GA | 1.28 | | +| `ReadWriteOncePod` | `false` | Alpha | 1.22 | 1.26 | +| `ReadWriteOncePod` | `true` | Beta | 1.27 | 1.28 | +| `ReadWriteOncePod` | `true` | GA | 1.29 | | +| `RemainingItemCount` | `false` | Alpha | 1.15 | 1.15 | +| `RemainingItemCount` | `true` | Beta | 1.16 | 1.28 | +| `RemainingItemCount` | `true` | GA | 1.29 | | +| `RemoveSelfLink` | `false` | Alpha | 1.16 | 1.19 | +| `RemoveSelfLink` | `true` | Beta | 1.20 | 1.23 | +| `RemoveSelfLink` | `true` | GA | 1.24 | | +| `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | +| `ServerSideApply` | `true` | Beta | 1.16 | 1.21 | +| `ServerSideApply` | `true` | GA | 1.22 | - | +| `ServerSideFieldValidation` | `false` | Alpha | 1.23 | 1.24 | +| `ServerSideFieldValidation` | `true` | Beta | 1.25 | 1.26 | +| `ServerSideFieldValidation` | `true` | GA | 1.27 | - | +| `ServiceNodePortStaticSubrange` | `false` | Alpha | 1.27 | 1.27 | +| `ServiceNodePortStaticSubrange` | `true` | Beta | 1.28 | 1.28 | +| `ServiceNodePortStaticSubrange` | `true` | GA | 1.29 | - | +| `SkipReadOnlyValidationGCE` | `false` | Alpha | 1.28 | 1.28 | +| `SkipReadOnlyValidationGCE` | `true` | Deprecated | 1.29 | | +| `WatchBookmark` | `false` | Alpha | 1.15 | 1.15 | +| `WatchBookmark` | `true` | Beta | 1.16 | 1.16 | +| `WatchBookmark` | `true` | GA | 1.17 | - | +{{< /table >}} + +## Using a feature + +### Feature stages + +A feature can be in *Alpha*, *Beta* or *GA* stage. +An *Alpha* feature means: + +* Disabled by default. +* Might be buggy. Enabling the feature may expose bugs. +* Support for the feature may be dropped at any time without notice. +* The API may change in incompatible ways in a later software release without notice. +* Recommended for use only in short-lived testing clusters, due to increased + risk of bugs and lack of long-term support. + +A *Beta* feature means: + +* Usually enabled by default. Beta API groups are + [disabled by default](https://github.com/kubernetes/enhancements/tree/master/keps/sig-architecture/3136-beta-apis-off-by-default). +* The feature is well tested. 
Enabling the feature is considered safe. +* Support for the overall feature will not be dropped, though details may change. +* The schema and/or semantics of objects may change in incompatible ways in a + subsequent beta or stable release. When this happens, we will provide instructions + for migrating to the next version. This may require deleting, editing, and + re-creating API objects. The editing process may require some thought. + This may require downtime for applications that rely on the feature. +* Recommended for only non-business-critical uses because of potential for + incompatible changes in subsequent releases. If you have multiple clusters + that can be upgraded independently, you may be able to relax this restriction. + +{{< note >}} +Please do try *Beta* features and give feedback on them! +After they exit beta, it may not be practical for us to make more changes. +{{< /note >}} + +A *General Availability* (GA) feature is also referred to as a *stable* feature. It means: + +* The feature is always enabled; you cannot disable it. +* The corresponding feature gate is no longer needed. +* Stable versions of features will appear in released software for many subsequent versions. + +## List of feature gates {#feature-gates} + +Each feature gate is designed for enabling/disabling a specific feature: + +- {{< feature-gate-description name="AdmissionWebhookMatchConditions" >}} +- {{< feature-gate-description name="AllowServiceLBStatusOnNonLB" >}} +- {{< feature-gate-description name="AggregatedDiscoveryEndpoint" >}} +- {{< feature-gate-description name="AnyVolumeDataSource" >}} +- {{< feature-gate-description name="APIListChunking" >}} +- {{< feature-gate-description name="APIPriorityAndFairness" >}} +- {{< feature-gate-description name="APIResponseCompression" >}} +- {{< feature-gate-description name="APISelfSubjectReview" >}} +- {{< feature-gate-description name="APIServerIdentity" >}} +- {{< feature-gate-description name="APIServerTracing" >}} +- {{< feature-gate-description name="AppArmor" >}} +- {{< feature-gate-description name="CloudControllerManagerWebhook" >}} +- {{< feature-gate-description name="CloudDualStackNodeIPs" >}} +- {{< feature-gate-description name="ClusterTrustBundle" >}} +- {{< feature-gate-description name="ClusterTrustBundleProjection" >}} +- {{< feature-gate-description name="ComponentSLIs" >}} +- {{< feature-gate-description name="ConsistentHTTPGetHandlers" >}} +- {{< feature-gate-description name="ConsistentListFromCache" >}} +- {{< feature-gate-description name="ContainerCheckpoint" >}} +- {{< feature-gate-description name="ContextualLogging" >}} +- {{< feature-gate-description name="CPUManager" >}} +- {{< feature-gate-description name="CPUManagerPolicyAlphaOptions" >}} +- {{< feature-gate-description name="CPUManagerPolicyBetaOptions" >}} +- {{< feature-gate-description name="CPUManagerPolicyOptions" >}} +- {{< feature-gate-description name="CRDValidationRatcheting" >}} +- {{< feature-gate-description name="CronJobsScheduledAnnotation" >}} +- {{< feature-gate-description name="CronJobTimeZone" >}} +- {{< feature-gate-description name="CrossNamespaceVolumeDataSource" >}} +- {{< feature-gate-description name="CSIMigrationAzureFile" >}} +- {{< feature-gate-description name="CSIMigrationPortworx" >}} +- {{< feature-gate-description name="CSIMigrationRBD" >}} +- {{< feature-gate-description name="CSINodeExpandSecret" >}} +- {{< feature-gate-description name="CSIVolumeHealth" >}} +- {{< feature-gate-description name="CustomCPUCFSQuotaPeriod" >}} +- {{< 
feature-gate-description name="CustomResourceValidationExpressions" >}} +- {{< feature-gate-description name="DefaultHostNetworkHostPortsInPodTemplates" >}} +- {{< feature-gate-description name="DevicePluginCDIDevices" >}} +- {{< feature-gate-description name="DisableCloudProviders" >}} +- {{< feature-gate-description name="DisableKubeletCloudCredentialProviders" >}} +- {{< feature-gate-description name="DisableNodeKubeProxyVersion" >}} +- {{< feature-gate-description name="DynamicResourceAllocation" >}} +- {{< feature-gate-description name="EfficientWatchResumption" >}} +- {{< feature-gate-description name="ElasticIndexedJob" >}} +- {{< feature-gate-description name="EventedPLEG" >}} +- {{< feature-gate-description name="ExecProbeTimeout" >}} +- {{< feature-gate-description name="ExpandedDNSConfig" >}} +- {{< feature-gate-description name="ExperimentalHostUserNamespaceDefaulting" >}} +- {{< feature-gate-description name="GracefulNodeShutdown" >}} +- {{< feature-gate-description name="GracefulNodeShutdownBasedOnPodPriority" >}} +- {{< feature-gate-description name="GRPCContainerProbe" >}} +- {{< feature-gate-description name="HonorPVReclaimPolicy" >}} +- {{< feature-gate-description name="HPAContainerMetrics" >}} +- {{< feature-gate-description name="HPAScaleToZero" >}} +- {{< feature-gate-description name="ImageMaximumGCAge" >}} +- {{< feature-gate-description name="InPlacePodVerticalScaling" >}} +- {{< feature-gate-description name="InTreePluginAWSUnregister" >}} +- {{< feature-gate-description name="InTreePluginAzureDiskUnregister" >}} +- {{< feature-gate-description name="InTreePluginAzureFileUnregister" >}} +- {{< feature-gate-description name="InTreePluginGCEUnregister" >}} +- {{< feature-gate-description name="InTreePluginOpenStackUnregister" >}} +- {{< feature-gate-description name="InTreePluginPortworxUnregister" >}} +- {{< feature-gate-description name="InTreePluginRBDUnregister" >}} +- {{< feature-gate-description name="InTreePluginvSphereUnregister" >}} +- {{< feature-gate-description name="IPTablesOwnershipCleanup" >}} +- {{< feature-gate-description name="JobBackoffLimitPerIndex" >}} +- {{< feature-gate-description name="JobMutableNodeSchedulingDirectives" >}} +- {{< feature-gate-description name="JobPodFailurePolicy" >}} +- {{< feature-gate-description name="JobPodReplacementPolicy" >}} +- {{< feature-gate-description name="JobReadyPods" >}} +- {{< feature-gate-description name="KMSv1" >}} +- {{< feature-gate-description name="KMSv2" >}} +- {{< feature-gate-description name="KMSv2KDF" >}} +- {{< feature-gate-description name="KubeletCgroupDriverFromCRI" >}} +- {{< feature-gate-description name="KubeletInUserNamespace" >}} +- {{< feature-gate-description name="KubeletPodResources" >}} +- {{< feature-gate-description name="KubeletPodResourcesDynamicResources" >}} +- {{< feature-gate-description name="KubeletPodResourcesGet" >}} +- {{< feature-gate-description name="KubeletPodResourcesGetAllocatable" >}} +- {{< feature-gate-description name="KubeletSeparateDiskGC" >}} +- {{< feature-gate-description name="KubeletTracing" >}} +- {{< feature-gate-description name="KubeProxyDrainingTerminatingNodes" >}} +- {{< feature-gate-description name="LegacyServiceAccountTokenCleanUp" >}} +- {{< feature-gate-description name="LegacyServiceAccountTokenNoAutoGeneration" >}} +- {{< feature-gate-description name="LegacyServiceAccountTokenTracking" >}} +- {{< feature-gate-description name="LoadBalancerIPMode" >}} +- {{< feature-gate-description 
name="LocalStorageCapacityIsolationFSQuotaMonitoring" >}} +- {{< feature-gate-description name="LogarithmicScaleDown" >}} +- {{< feature-gate-description name="LoggingAlphaOptions" >}} +- {{< feature-gate-description name="LoggingBetaOptions" >}} +- {{< feature-gate-description name="MatchLabelKeysInPodAffinity" >}} +- {{< feature-gate-description name="MatchLabelKeysInPodTopologySpread" >}} +- {{< feature-gate-description name="MaxUnavailableStatefulSet" >}} +- {{< feature-gate-description name="MemoryManager" >}} +- {{< feature-gate-description name="MemoryQoS" >}} +- {{< feature-gate-description name="MinDomainsInPodTopologySpread" >}} +- {{< feature-gate-description name="MinimizeIPTablesRestore" >}} +- {{< feature-gate-description name="MultiCIDRServiceAllocator" >}} +- {{< feature-gate-description name="NewVolumeManagerReconstruction" >}} +- {{< feature-gate-description name="NFTablesProxyMode" >}} +- {{< feature-gate-description name="NodeInclusionPolicyInPodTopologySpread" >}} +- {{< feature-gate-description name="NodeLogQuery" >}} +- {{< feature-gate-description name="NodeOutOfServiceVolumeDetach" >}} +- {{< feature-gate-description name="NodeSwap" >}} +- {{< feature-gate-description name="OpenAPIEnums" >}} +- {{< feature-gate-description name="PDBUnhealthyPodEvictionPolicy" >}} +- {{< feature-gate-description name="PersistentVolumeLastPhaseTransitionTime" >}} +- {{< feature-gate-description name="PodAndContainerStatsFromCRI" >}} +- {{< feature-gate-description name="PodDeletionCost" >}} +- {{< feature-gate-description name="PodDisruptionConditions" >}} +- {{< feature-gate-description name="PodHostIPs" >}} +- {{< feature-gate-description name="PodIndexLabel" >}} +- {{< feature-gate-description name="PodLifecycleSleepAction" >}} +- {{< feature-gate-description name="PodReadyToStartContainersCondition" >}} +- {{< feature-gate-description name="PodSchedulingReadiness" >}} +- {{< feature-gate-description name="ProcMountType" >}} +- {{< feature-gate-description name="ProxyTerminatingEndpoints" >}} +- {{< feature-gate-description name="QOSReserved" >}} +- {{< feature-gate-description name="ReadWriteOncePod" >}} +- {{< feature-gate-description name="RecoverVolumeExpansionFailure" >}} +- {{< feature-gate-description name="RemainingItemCount" >}} +- {{< feature-gate-description name="RemoveSelfLink" >}} +- {{< feature-gate-description name="RotateKubeletServerCertificate" >}} +- {{< feature-gate-description name="RuntimeClassInImageCriApi" >}} +- {{< feature-gate-description name="SchedulerQueueingHints" >}} +- {{< feature-gate-description name="SecurityContextDeny" >}} +- {{< feature-gate-description name="SELinuxMountReadWriteOncePod" >}} +- {{< feature-gate-description name="SeparateTaintEvictionController" >}} +- {{< feature-gate-description name="ServerSideApply" >}} +- {{< feature-gate-description name="ServerSideFieldValidation" >}} +- {{< feature-gate-description name="ServiceAccountTokenJTI" >}} +- {{< feature-gate-description name="ServiceAccountTokenNodeBinding" >}} +- {{< feature-gate-description name="ServiceAccountTokenNodeBindingValidation" >}} +- {{< feature-gate-description name="ServiceAccountTokenPodNodeInfo" >}} +- {{< feature-gate-description name="ServiceNodePortStaticSubrange" >}} +- {{< feature-gate-description name="SidecarContainers" >}} +- {{< feature-gate-description name="SizeMemoryBackedVolumes" >}} +- {{< feature-gate-description name="SkipReadOnlyValidationGCE" >}} +- {{< feature-gate-description name="StableLoadBalancerNodeSet" >}} +- {{< 
feature-gate-description name="StatefulSetAutoDeletePVC" >}} +- {{< feature-gate-description name="StatefulSetStartOrdinal" >}} +- {{< feature-gate-description name="StorageVersionAPI" >}} +- {{< feature-gate-description name="StorageVersionHash" >}} +- {{< feature-gate-description name="StructuredAuthenticationConfiguration" >}} +- {{< feature-gate-description name="StructuredAuthorizationConfiguration" >}} +- {{< feature-gate-description name="TopologyAwareHints" >}} +- {{< feature-gate-description name="TopologyManager" >}} +- {{< feature-gate-description name="TopologyManagerPolicyAlphaOptions" >}} +- {{< feature-gate-description name="TopologyManagerPolicyBetaOptions" >}} +- {{< feature-gate-description name="TopologyManagerPolicyOptions" >}} +- {{< feature-gate-description name="TranslateStreamCloseWebsocketRequests" >}} +- {{< feature-gate-description name="UnauthenticatedHTTP2DOSMitigation" >}} +- {{< feature-gate-description name="UnknownVersionInteroperabilityProxy" >}} +- {{< feature-gate-description name="UserNamespacesPodSecurityStandards" >}} +- {{< feature-gate-description name="UserNamespacesSupport" >}} +- {{< feature-gate-description name="ValidatingAdmissionPolicy" >}} +- {{< feature-gate-description name="VolumeAttributesClass" >}} +- {{< feature-gate-description name="VolumeCapacityPriority" >}} +- {{< feature-gate-description name="WatchBookmark" >}} +- {{< feature-gate-description name="WatchList" >}} +- {{< feature-gate-description name="WindowsHostNetwork" >}} +- {{< feature-gate-description name="WinDSR" >}} +- {{< feature-gate-description name="WinOverlay" >}} +- {{< feature-gate-description name="ZeroLimitedNominalConcurrencyShares" >}} + +## {{% heading "whatsnext" %}} + +* The [deprecation policy](/docs/reference/using-api/deprecation-policy/) for Kubernetes explains + the project's approach to removing features and components. +* Since Kubernetes 1.24, new beta APIs are not enabled by default. When enabling a beta + feature, you will also need to enable any associated API resources. + For example, to enable a particular resource like + `storage.k8s.io/v1beta1/csistoragecapacities`, set `--runtime-config=storage.k8s.io/v1beta1/csistoragecapacities`. + See [API Versioning](/docs/reference/using-api/#api-versioning) for more details on the command line flags. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/indexed-job.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/indexed-job.md new file mode 100644 index 0000000000000..e2d2ed3819a9a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/indexed-job.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: IndexedJob +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows the [Job](/docs/concepts/workloads/controllers/job/) +controller to manage Pod completions per completion index. 
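+
+As a minimal sketch (the Job name, image, and counts are illustrative
+assumptions, not part of the original description), an Indexed Job looks
+like this:
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: indexed-demo   # hypothetical name
+spec:
+  completions: 5
+  parallelism: 2
+  completionMode: Indexed   # each Pod gets its own completion index
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: worker
+        image: busybox:1.36
+        # JOB_COMPLETION_INDEX is set automatically for Indexed Jobs
+        command: ["sh", "-c", "echo processing index $JOB_COMPLETION_INDEX"]
+```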
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/ingress-class-namespaced-params.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/ingress-class-namespaced-params.md new file mode 100644 index 0000000000000..8ffb5effcb4b5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/ingress-class-namespaced-params.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: IngressClassNamespacedParams +content_type: feature_gate + +_build: + list: never + render: false +--- +Allow referencing namespace-scoped parameters in the +`IngressClass` resource. This feature adds two fields, `Scope` and `Namespace`, +to `IngressClass.spec.parameters`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/initializers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/initializers.md new file mode 100644 index 0000000000000..0d8bc630c5645 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/initializers.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: Initializers +content_type: feature_gate + +_build: + list: never + render: false +--- +Allow asynchronous coordination of object creation using the +Initializers admission plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/ip-tables-ownership-cleanup.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/ip-tables-ownership-cleanup.md new file mode 100644 index 0000000000000..e3975b4e0083a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/ip-tables-ownership-cleanup.md @@ -0,0 +1,8 @@ +--- +title: IPTablesOwnershipCleanup +content_type: feature_gate +_build: + list: never + render: false +--- +This causes the kubelet to no longer create legacy iptables rules. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/ipv6-dual-stack.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/ipv6-dual-stack.md new file mode 100644 index 0000000000000..98bdbbbeb956e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/ipv6-dual-stack.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: IPv6DualStack +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable [dual stack](/docs/concepts/services-networking/dual-stack/) +support for IPv6. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-backoff-limit-per-index.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-backoff-limit-per-index.md new file mode 100644 index 0000000000000..1b1206077d332 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-backoff-limit-per-index.md @@ -0,0 +1,10 @@ +--- +title: JobBackoffLimitPerIndex +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows specifying the maximum number of Pod +retries per index in Indexed Jobs. 
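+
+A sketch of how these fields might be set on an Indexed Job (the name and
+values are illustrative assumptions):
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: per-index-backoff-demo   # hypothetical name
+spec:
+  completions: 10
+  completionMode: Indexed      # per-index backoff requires indexed completion
+  backoffLimitPerIndex: 1      # at most one retry for each failing index
+  maxFailedIndexes: 5          # mark the whole Job failed once 5 indexes fail
+  template:
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: worker
+        image: busybox:1.36
+        command: ["sh", "-c", "exit 0"]
+```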
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-mutable-node-scheduling-directives.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-mutable-node-scheduling-directives.md new file mode 100644 index 0000000000000..2e02cc251a668 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-mutable-node-scheduling-directives.md @@ -0,0 +1,9 @@ +--- +title: JobMutableNodeSchedulingDirectives +content_type: feature_gate +_build: + list: never + render: false +--- +Allows updating node scheduling directives in +the pod template of [Job](/docs/concepts/workloads/controllers/job). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-failure-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-failure-policy.md new file mode 100644 index 0000000000000..a6681ad7cd052 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-failure-policy.md @@ -0,0 +1,9 @@ +--- +title: JobPodFailurePolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Allow users to specify handling of pod failures based on container +exit codes and pod conditions. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-replacement-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-replacement-policy.md new file mode 100644 index 0000000000000..bbee9df31eb83 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-pod-replacement-policy.md @@ -0,0 +1,8 @@ +--- +title: JobPodReplacementPolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Allows you to specify pod replacement for terminating pods in a [Job](/docs/concepts/workloads/controllers/job). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-ready-pods.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-ready-pods.md new file mode 100644 index 0000000000000..e8c19f539f321 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-ready-pods.md @@ -0,0 +1,12 @@ +--- +title: JobReadyPods +content_type: feature_gate +_build: + list: never + render: false +--- +Enables tracking the number of Pods that have a `Ready` +[condition](/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions). +The count of `Ready` pods is recorded in the +[status](/docs/reference/kubernetes-api/workload-resources/job-v1/#JobStatus) +of a [Job](/docs/concepts/workloads/controllers/job). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/job-tracking-with-finalizers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-tracking-with-finalizers.md new file mode 100644 index 0000000000000..9c622998c6ece --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/job-tracking-with-finalizers.md @@ -0,0 +1,11 @@ +--- +title: JobTrackingWithFinalizers +content_type: feature_gate +_build: + list: never + render: false +--- +Enables tracking [Job](/docs/concepts/workloads/controllers/job) +completions without relying on Pods remaining in the cluster indefinitely. +The Job controller uses Pod finalizers and a field in the Job status to keep +track of the finished Pods to count towards completion. 
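+
+As a rough illustration (the Pod and Job names are hypothetical; the finalizer
+name is an assumption based on upstream Job-controller behavior), a tracked
+Pod carries metadata like:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sample-job-x7k2p   # hypothetical Pod created by a Job
+  labels:
+    job-name: sample-job
+  finalizers:
+  # removed by the Job controller once the Pod has been counted as finished
+  - batch.kubernetes.io/job-tracking
+```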
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv1.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv1.md new file mode 100644 index 0000000000000..85542c5a9d7a0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv1.md @@ -0,0 +1,8 @@ +--- +title: KMSv1 +content_type: feature_gate +_build: + list: never + render: false +--- +Enables KMS v1 API for encryption at rest. See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2-kdf.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2-kdf.md new file mode 100644 index 0000000000000..698df0e82b1e5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2-kdf.md @@ -0,0 +1,10 @@ +--- +title: KMSv2KDF +content_type: feature_gate +_build: + list: never + render: false +--- +Enables KMS v2 to generate single-use data encryption keys. +See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. +If the `KMSv2` feature gate is not enabled in your cluster, the value of the `KMSv2KDF` feature gate has no effect. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2.md new file mode 100644 index 0000000000000..b0994792dc4d3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kmsv2.md @@ -0,0 +1,8 @@ +--- +title: KMSv2 +content_type: feature_gate +_build: + list: never + render: false +--- +Enables KMS v2 API for encryption at rest. See [Using a KMS Provider for data encryption](/docs/tasks/administer-cluster/kms-provider) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kube-proxy-draining-terminating-nodes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kube-proxy-draining-terminating-nodes.md new file mode 100644 index 0000000000000..ff16161469096 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kube-proxy-draining-terminating-nodes.md @@ -0,0 +1,9 @@ +--- +title: KubeProxyDrainingTerminatingNodes +content_type: feature_gate +_build: + list: never + render: false +--- +Implement connection draining for +terminating nodes for `externalTrafficPolicy: Cluster` services. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-cgroup-driver-from-cri.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-cgroup-driver-from-cri.md new file mode 100644 index 0000000000000..c837c454f5455 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-cgroup-driver-from-cri.md @@ -0,0 +1,18 @@ +--- +title: KubeletCgroupDriverFromCRI +content_type: feature_gate +_build: + list: never + render: false +--- +Enable detection of the kubelet cgroup driver +configuration option from the {{< glossary_tooltip term_id="cri" text="CRI" >}}. +You can use this feature gate on nodes with a kubelet that supports the feature gate +and where there is a CRI container runtime that supports the `RuntimeConfig` +CRI call. If both CRI and kubelet support this feature, the kubelet ignores the +`cgroupDriver` configuration setting (or deprecated `--cgroup-driver` command +line argument). 
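+For example, a kubelet configuration sketch (the driver value is an
+illustrative assumption; with this gate active and a runtime that implements
+`RuntimeConfig`, the runtime's answer takes precedence over it):
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+featureGates:
+  KubeletCgroupDriverFromCRI: true
+# ignored when the runtime reports its cgroup driver via the CRI
+cgroupDriver: systemd
+```
+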
If you enable this feature gate and the container runtime +doesn't support it, the kubelet falls back to using the driver configured using +the `cgroupDriver` configuration setting. +See [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-config-file.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-config-file.md new file mode 100644 index 0000000000000..a1da303c4ace5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-config-file.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: KubeletConfigFile +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable loading kubelet configuration from +a config file. +See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-credential-providers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-credential-providers.md new file mode 100644 index 0000000000000..3c777ba57be0a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-credential-providers.md @@ -0,0 +1,9 @@ +--- +title: KubeletCredentialProviders +content_type: feature_gate +_build: + list: never + render: false +--- +Enable kubelet exec credential providers for +image pull credentials. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-in-user-namespace.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-in-user-namespace.md new file mode 100644 index 0000000000000..bca920fc43806 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-in-user-namespace.md @@ -0,0 +1,10 @@ +--- +title: KubeletInUserNamespace +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for running the kubelet in a user namespace. +See [Running Kubernetes Node Components as a Non-root User](/docs/tasks/administer-cluster/kubelet-in-userns/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-plugins-watcher.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-plugins-watcher.md new file mode 100644 index 0000000000000..5d94abac81b06 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-plugins-watcher.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: KubeletPluginsWatcher +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable probe-based plugin watcher utility to enable the kubelet +to discover plugins such as [CSI volume drivers](/docs/concepts/storage/volumes/#csi). 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-dynamice-resources.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-dynamice-resources.md new file mode 100644 index 0000000000000..11a3397d7550a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-dynamice-resources.md @@ -0,0 +1,12 @@ +--- +title: KubeletPodResourcesDynamicResources +content_type: feature_gate +_build: + list: never + render: false +--- +Extend the kubelet's pod resources gRPC endpoint to include resources +allocated in `ResourceClaims` via the `DynamicResourceAllocation` API. This provides +information about the allocatable resources, enabling clients to properly +track the free compute resources on a node. +See [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get-allocatable.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get-allocatable.md new file mode 100644 index 0000000000000..b6e389921112f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get-allocatable.md @@ -0,0 +1,10 @@ +--- +title: KubeletPodResourcesGetAllocatable
content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet's pod resources +`GetAllocatableResources` functionality. This API augments the +[resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get.md new file mode 100644 index 0000000000000..1f708b4ab017d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources-get.md @@ -0,0 +1,9 @@ +--- +title: KubeletPodResourcesGet +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `Get` gRPC endpoint on the kubelet for Pod resources. +This API augments the [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources.md new file mode 100644 index 0000000000000..b668d43f19bef --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-pod-resources.md @@ -0,0 +1,10 @@ +--- +title: KubeletPodResources +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet's pod resources gRPC endpoint. See +[Support Device Monitoring](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md) +for more details. 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-separate-disk-gc.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-separate-disk-gc.md new file mode 100644 index 0000000000000..c395f47d6393d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-separate-disk-gc.md @@ -0,0 +1,9 @@ +--- +title: KubeletSeparateDiskGC +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet to garbage collect container images and containers +even when those are on a separate filesystem. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-tracing.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-tracing.md new file mode 100644 index 0000000000000..891935144a91b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/kubelet-tracing.md @@ -0,0 +1,11 @@ +--- +title: KubeletTracing +content_type: feature_gate +_build: + list: never + render: false +--- +Add support for distributed tracing in the kubelet. +When enabled, the kubelet CRI interface and authenticated HTTP servers are instrumented to generate +OpenTelemetry trace spans. +See [Traces for Kubernetes System Components](/docs/concepts/cluster-administration/system-traces) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-node-role-behavior.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-node-role-behavior.md new file mode 100644 index 0000000000000..e234069b5f5b2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-node-role-behavior.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: LegacyNodeRoleBehavior +content_type: feature_gate + +_build: + list: never + render: false +--- +When disabled, legacy behavior in service load balancers and +node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the +feature-specific labels provided by `NodeDisruptionExclusion` and `ServiceNodeExclusion`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-clean-up.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-clean-up.md new file mode 100644 index 0000000000000..43c89cb2c3da2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-clean-up.md @@ -0,0 +1,10 @@ +--- +title: LegacyServiceAccountTokenCleanUp +content_type: feature_gate +_build: + list: never + render: false +--- +Enable cleaning up Secret-based +[service account tokens](/docs/concepts/security/service-accounts/#get-a-token) +when they have not been used for a specified time (defaults to one year). 
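+
+As a rough sketch of what a cleanup candidate looks like (the Secret name and
+dates are hypothetical; the label keys are assumptions based on the tracking
+behavior described for these gates):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: build-robot-token-abc12   # hypothetical auto-generated token Secret
+  labels:
+    kubernetes.io/legacy-token-last-used: "2023-10-24"       # last authentication
+    kubernetes.io/legacy-token-invalid-since: "2024-10-24"   # marked as unused
+  annotations:
+    kubernetes.io/service-account.name: build-robot
+type: kubernetes.io/service-account-token
+```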
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-no-auto-generation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-no-auto-generation.md new file mode 100644 index 0000000000000..e966870ec27b7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-no-auto-generation.md @@ -0,0 +1,9 @@ +--- +title: LegacyServiceAccountTokenNoAutoGeneration +content_type: feature_gate +_build: + list: never + render: false +--- +Stop auto-generation of Secret-based +[service account tokens](/docs/concepts/security/service-accounts/#get-a-token). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-tracking.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-tracking.md new file mode 100644 index 0000000000000..45df199371aa5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/legacy-service-account-token-tracking.md @@ -0,0 +1,9 @@ +--- +title: LegacyServiceAccountTokenTracking +content_type: feature_gate +_build: + list: never + render: false +--- +Track usage of Secret-based +[service account tokens](/docs/concepts/security/service-accounts/#get-a-token). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/load-balancer-ip-mode.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/load-balancer-ip-mode.md new file mode 100644 index 0000000000000..3d41588813029 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/load-balancer-ip-mode.md @@ -0,0 +1,10 @@ +--- +title: LoadBalancerIPMode +content_type: feature_gate +_build: + list: never + render: false +--- +Allows setting `ipMode` for Services where `type` is set to `LoadBalancer`. +See [Specifying IPMode of load balancer status](/docs/concepts/services-networking/service/#load-balancer-ip-mode) +for more information. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation-fs-quota-monitoring.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation-fs-quota-monitoring.md new file mode 100644 index 0000000000000..b64b78978f03e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation-fs-quota-monitoring.md @@ -0,0 +1,14 @@ +--- +title: LocalStorageCapacityIsolationFSQuotaMonitoring +content_type: feature_gate +_build: + list: never + render: false +--- +When `LocalStorageCapacityIsolation` +is enabled for +[local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/) +and the backing filesystem for [emptyDir volumes](/docs/concepts/storage/volumes/#emptydir) +supports project quotas and they are enabled, use project quotas to monitor +[emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than +a filesystem walk, for better performance and accuracy. 
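+
+For context, a sketch of the kind of usage this monitors (the Pod name and
+limit are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: quota-demo   # hypothetical name
+spec:
+  containers:
+  - name: app
+    image: busybox:1.36
+    command: ["sh", "-c", "sleep 3600"]
+    volumeMounts:
+    - name: scratch
+      mountPath: /scratch
+  volumes:
+  - name: scratch
+    emptyDir:
+      sizeLimit: 500Mi   # consumption toward this limit is what gets measured
+```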
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation.md new file mode 100644 index 0000000000000..7d2e6d0885c36 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/local-storage-capacity-isolation.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: LocalStorageCapacityIsolation +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the consumption of +[local ephemeral storage](/docs/concepts/configuration/manage-resources-containers/) +and also the `sizeLimit` property of an +[emptyDir volume](/docs/concepts/storage/volumes/#emptydir). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/logarithmic-scale-down.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/logarithmic-scale-down.md new file mode 100644 index 0000000000000..bdd67042fd61e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/logarithmic-scale-down.md @@ -0,0 +1,9 @@ +--- +title: LogarithmicScaleDown +content_type: feature_gate +_build: + list: never + render: false +--- +Enable semi-random selection of pods to evict on controller scaledown +based on logarithmic bucketing of pod timestamps. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-alpha-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-alpha-options.md new file mode 100644 index 0000000000000..67167f975d81e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-alpha-options.md @@ -0,0 +1,8 @@ +--- +title: LoggingAlphaOptions +content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of experimental, alpha-quality logging options. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-beta-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-beta-options.md new file mode 100644 index 0000000000000..b83a4b833aaa0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/logging-beta-options.md @@ -0,0 +1,8 @@ +--- +title: LoggingBetaOptions +content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of experimental, beta-quality logging options. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-affinity.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-affinity.md new file mode 100644 index 0000000000000..0d5f572d4f448 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-affinity.md @@ -0,0 +1,9 @@ +--- +title: MatchLabelKeysInPodAffinity +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `matchLabelKeys` and `mismatchLabelKeys` fields for +[pod (anti)affinity](/docs/concepts/scheduling-eviction/assign-pod-node/). 
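+
+A sketch of `matchLabelKeys` in use (labels and names are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: anti-affinity-demo   # hypothetical name
+  labels:
+    app: web
+spec:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - topologyKey: kubernetes.io/hostname
+        labelSelector:
+          matchLabels:
+            app: web
+        # with the gate enabled, the incoming Pod's value for this label is
+        # merged into the selector, e.g. to spread only within one rollout
+        matchLabelKeys:
+        - pod-template-hash
+  containers:
+  - name: app
+    image: busybox:1.36
+    command: ["sh", "-c", "sleep 3600"]
+```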
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-topology-spread.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-topology-spread.md new file mode 100644 index 0000000000000..a00108bbb8b75 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/match-label-keys-in-pod-topology-spread.md @@ -0,0 +1,9 @@ +--- +title: MatchLabelKeysInPodTopologySpread +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `matchLabelKeys` field for +[Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/max-unavailable-stateful-set.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/max-unavailable-stateful-set.md new file mode 100644 index 0000000000000..7d9a9689c4f1d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/max-unavailable-stateful-set.md @@ -0,0 +1,11 @@ +--- +title: MaxUnavailableStatefulSet +content_type: feature_gate +_build: + list: never + render: false +--- +Enables setting the `maxUnavailable` field for the +[rolling update strategy](/docs/concepts/workloads/controllers/statefulset/#rolling-updates) +of a StatefulSet. The field specifies the maximum number of Pods +that can be unavailable during the update. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-manager.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-manager.md new file mode 100644 index 0000000000000..81ed0974f8b3f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-manager.md @@ -0,0 +1,9 @@ +--- +title: MemoryManager +content_type: feature_gate +_build: + list: never + render: false +--- +Allows setting memory affinity for a container based on +NUMA topology. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-qos.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-qos.md new file mode 100644 index 0000000000000..49c74b6d3d1d9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/memory-qos.md @@ -0,0 +1,9 @@ +--- +title: MemoryQoS +content_type: feature_gate +_build: + list: never + render: false +--- +Enable memory protection and usage throttling for pods and containers using +the cgroup v2 memory controller. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/min-domains-in-pod-topology-spread.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/min-domains-in-pod-topology-spread.md new file mode 100644 index 0000000000000..82b7572cf8839 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/min-domains-in-pod-topology-spread.md @@ -0,0 +1,9 @@ +--- +title: MinDomainsInPodTopologySpread +content_type: feature_gate +_build: + list: never + render: false +--- +Enable `minDomains` in +[Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/). 
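+
+A sketch of `minDomains` in a spread constraint (the Pod name, labels, and
+values are illustrative):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: spread-demo   # hypothetical name
+  labels:
+    app: web
+spec:
+  topologySpreadConstraints:
+  - maxSkew: 1
+    minDomains: 3   # fewer than 3 zones counts as a constraint violation
+    topologyKey: topology.kubernetes.io/zone
+    whenUnsatisfiable: DoNotSchedule   # minDomains requires DoNotSchedule
+    labelSelector:
+      matchLabels:
+        app: web
+  containers:
+  - name: app
+    image: busybox:1.36
+    command: ["sh", "-c", "sleep 3600"]
+```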
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/minimize-ip-tables-restore.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/minimize-ip-tables-restore.md new file mode 100644 index 0000000000000..c9d8dabae063f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/minimize-ip-tables-restore.md @@ -0,0 +1,9 @@ +--- +title: MinimizeIPTablesRestore +content_type: feature_gate +_build: + list: never + render: false +--- +Enables new performance-improvement logic +in the kube-proxy iptables mode. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/mixed-protocol-lb-service.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/mixed-protocol-lb-service.md new file mode 100644 index 0000000000000..b994ef582dd29 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/mixed-protocol-lb-service.md @@ -0,0 +1,9 @@ +--- +title: MixedProtocolLBService +content_type: feature_gate +_build: + list: never + render: false +--- +Enable using different protocols in the same `LoadBalancer` type +Service instance. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-containers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-containers.md new file mode 100644 index 0000000000000..79525c5738022 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-containers.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: MountContainers +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable using utility containers on the host as the volume mounter. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-propagation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-propagation.md new file mode 100644 index 0000000000000..def95eba40419 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/mount-propagation.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: MountPropagation +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable sharing a volume mounted by one container with other containers or pods. +For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-range-allocator.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-range-allocator.md new file mode 100644 index 0000000000000..29723d1726978 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-range-allocator.md @@ -0,0 +1,8 @@ +--- +title: MultiCIDRRangeAllocator +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the MultiCIDR range allocator. 
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-service-allocator.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-service-allocator.md new file mode 100644 index 0000000000000..a6bcaf5dcf13f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/multi-cidr-service-allocator.md @@ -0,0 +1,8 @@ +--- +title: MultiCIDRServiceAllocator +content_type: feature_gate +_build: + list: never + render: false +--- +Track IP address allocations for Service cluster IPs using IPAddress objects. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/namespace-default-label-name.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/namespace-default-label-name.md new file mode 100644 index 0000000000000..81d0510ac26b7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/namespace-default-label-name.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: NamespaceDefaultLabelName +content_type: feature_gate + +_build: + list: never + render: false +--- +Configure the API Server to set an immutable +{{< glossary_tooltip text="label" term_id="label" >}} `kubernetes.io/metadata.name` +on all namespaces, containing the namespace name. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-end-port.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-end-port.md new file mode 100644 index 0000000000000..0fb007971ea09 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-end-port.md @@ -0,0 +1,10 @@ +--- +title: NetworkPolicyEndPort +content_type: feature_gate +_build: + list: never + render: false +--- +Allows you to define ports in a +[NetworkPolicy](/docs/concepts/services-networking/network-policies/) +rule as a range of port numbers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-status.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-status.md new file mode 100644 index 0000000000000..6744409f9ae67 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/network-policy-status.md @@ -0,0 +1,8 @@ +--- +title: NetworkPolicyStatus +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `status` subresource for NetworkPolicy objects. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/new-volume-manager-reconstruction.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/new-volume-manager-reconstruction.md new file mode 100644 index 0000000000000..5d54e860808fa --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/new-volume-manager-reconstruction.md @@ -0,0 +1,19 @@ +--- +title: NewVolumeManagerReconstruction +content_type: feature_gate +_build: + list: never + render: false +--- +Enables improved discovery of mounted volumes during kubelet +startup. Since this code has been significantly refactored, you can opt out in case the kubelet +gets stuck at startup or fails to unmount volumes from terminated Pods. Note that this +refactoring was behind the `SELinuxMountReadWriteOncePod` alpha feature gate in Kubernetes v1.25. + + +Before Kubernetes v1.25, the kubelet used different default behavior for discovering mounted +volumes during kubelet startup. 
If you disable this feature gate (it's enabled by default), you select +the legacy discovery behavior. + +In Kubernetes v1.25 and v1.26, this behavior toggle was part of the `SELinuxMountReadWriteOncePod` +feature gate. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/nftables-proxy-mode.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/nftables-proxy-mode.md new file mode 100644 index 0000000000000..3b4f846a72f8f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/nftables-proxy-mode.md @@ -0,0 +1,8 @@ +--- +title: NFTablesProxyMode +content_type: feature_gate +_build: + list: never + render: false +--- +Allow running kube-proxy in [nftables mode](/docs/reference/networking/virtual-ips/#proxy-mode-nftables). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-disruption-exclusion.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-disruption-exclusion.md new file mode 100644 index 0000000000000..78c83f586b02b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-disruption-exclusion.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: NodeDisruptionExclusion +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable use of the Node label `node.kubernetes.io/exclude-disruption` +which prevents nodes from being evacuated during zone failures. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-inclusion-policy-in-pod-topology-spread.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-inclusion-policy-in-pod-topology-spread.md new file mode 100644 index 0000000000000..4f30224da53e9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-inclusion-policy-in-pod-topology-spread.md @@ -0,0 +1,10 @@ +--- +title: NodeInclusionPolicyInPodTopologySpread +content_type: feature_gate +_build: + list: never + render: false +--- +Enable using `nodeAffinityPolicy` and `nodeTaintsPolicy` in +[Pod topology spread constraints](/docs/concepts/scheduling-eviction/topology-spread-constraints/) +when calculating pod topology spread skew. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-lease.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-lease.md new file mode 100644 index 0000000000000..381fbf7b2a269 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-lease.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: NodeLease +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the new Lease API to report node heartbeats, which could be used as a node health signal. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-log-query.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-log-query.md new file mode 100644 index 0000000000000..f66e858fd26f8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-log-query.md @@ -0,0 +1,8 @@ +--- +title: NodeLogQuery +content_type: feature_gate +_build: + list: never + render: false +--- +Enables querying logs of node services using the `/logs` endpoint.
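As a concrete illustration of the `NetworkPolicyEndPort` gate described earlier, here is a minimal sketch of an egress rule that targets a contiguous port range by combining `port` with `endPort`. The policy name, label, and CIDR are illustrative.

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-port-range   # illustrative name
spec:
  podSelector:
    matchLabels:
      app: backend         # illustrative label
  policyTypes:
    - Egress
  egress:
    - to:
        - ipBlock:
            cidr: 10.0.0.0/24   # illustrative destination range
      ports:
        - protocol: TCP
          port: 32000
          endPort: 32768   # end of the range; requires NetworkPolicyEndPort
```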
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-out-of-service-volume-detach.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-out-of-service-volume-detach.md new file mode 100644 index 0000000000000..695ddc5b28b16 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-out-of-service-volume-detach.md @@ -0,0 +1,11 @@ +--- +title: NodeOutOfServiceVolumeDetach +content_type: feature_gate +_build: + list: never + render: false +--- +When a Node is marked out-of-service using the +`node.kubernetes.io/out-of-service` taint, Pods on the node will be forcefully deleted +if they cannot tolerate this taint, and the volume detach operations for Pods terminating +on the node will happen immediately. The deleted Pods can recover quickly on different nodes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/node-swap.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-swap.md new file mode 100644 index 0000000000000..16f7cbf522312 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/node-swap.md @@ -0,0 +1,10 @@ +--- +title: NodeSwap +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet to allocate swap memory for Kubernetes workloads on a node. +Must be used with `KubeletConfiguration.failSwapOn` set to false. +For more details, please see [swap memory](/docs/concepts/architecture/nodes/#swap-memory). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/non-preempting-priority.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/non-preempting-priority.md new file mode 100644 index 0000000000000..44e72e755e5d2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/non-preempting-priority.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: NonPreemptingPriority +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the `preemptionPolicy` field for PriorityClass and Pod. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/openapi-enums.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/openapi-enums.md new file mode 100644 index 0000000000000..ba60f93893258 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/openapi-enums.md @@ -0,0 +1,9 @@ +--- +title: OpenAPIEnums +content_type: feature_gate +_build: + list: never + render: false +--- +Enables populating "enum" fields of OpenAPI schemas in the +spec returned from the API server. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/openapiv3.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/openapiv3.md new file mode 100644 index 0000000000000..03655a75ca3ce --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/openapiv3.md @@ -0,0 +1,8 @@ +--- +title: OpenAPIV3 +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the API server to publish OpenAPI v3.
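A minimal sketch of the kubelet configuration that the `NodeSwap` description implies, assuming the node already has swap provisioned; `LimitedSwap` is one of the supported behaviors.

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
failSwapOn: false           # required when enabling NodeSwap
featureGates:
  NodeSwap: true
memorySwap:
  swapBehavior: LimitedSwap # how workloads may use the node's swap space
```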
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pdb-unhealthy-pod-eviction-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pdb-unhealthy-pod-eviction-policy.md new file mode 100644 index 0000000000000..34d2f9a1dd54f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pdb-unhealthy-pod-eviction-policy.md @@ -0,0 +1,10 @@ +--- +title: PDBUnhealthyPodEvictionPolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the `unhealthyPodEvictionPolicy` field of a `PodDisruptionBudget`. This specifies +when unhealthy pods should be considered for eviction. Please see [Unhealthy Pod Eviction Policy](/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-local-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-local-volumes.md new file mode 100644 index 0000000000000..e047d3dbf87c1 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-local-volumes.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: PersistentLocalVolumes +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the usage of `local` volume type in Pods. +Pod affinity has to be specified if requesting a `local` volume. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-volume-last-phase-transition-time.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-volume-last-phase-transition-time.md new file mode 100644 index 0000000000000..2a710c85db61b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/persistent-volume-last-phase-transition-time.md @@ -0,0 +1,9 @@ +--- +title: PersistentVolumeLastPhaseTransitionTime +content_type: feature_gate +_build: + list: never + render: false +--- +Adds a new field to PersistentVolume +which holds a timestamp of when the volume last transitioned its phase. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-affinity-namespace-selector.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-affinity-namespace-selector.md new file mode 100644 index 0000000000000..84c95c8763a84 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-affinity-namespace-selector.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: PodAffinityNamespaceSelector +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the +[Pod Affinity Namespace Selector](/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector) +and [CrossNamespacePodAffinity](/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota) +quota scope features. 
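To illustrate the `unhealthyPodEvictionPolicy` field described above, a minimal PodDisruptionBudget sketch; the name and label are illustrative.

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: web-pdb              # illustrative name
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: web               # illustrative label
  # allow eviction of pods that are Running but not Ready,
  # even when the budget is not currently satisfied
  unhealthyPodEvictionPolicy: AlwaysAllow
```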
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-and-container-stats-from-cri.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-and-container-stats-from-cri.md new file mode 100644 index 0000000000000..ed083c73863c3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-and-container-stats-from-cri.md @@ -0,0 +1,9 @@ +--- +title: PodAndContainerStatsFromCRI +content_type: feature_gate +_build: + list: never + render: false +--- +Configure the kubelet to gather container and pod stats from the CRI container runtime rather than gathering them from cAdvisor. +As of 1.26, this also includes gathering metrics from CRI and emitting them over `/metrics/cadvisor` (rather than having cAdvisor emit them directly). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-deletion-cost.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-deletion-cost.md new file mode 100644 index 0000000000000..7837cd832c1c8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-deletion-cost.md @@ -0,0 +1,9 @@ +--- +title: PodDeletionCost +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) +feature, which allows users to influence ReplicaSet downscaling order. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-budget.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-budget.md new file mode 100644 index 0000000000000..6dbcb9f98abe8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-budget.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: PodDisruptionBudget +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) feature. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-conditions.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-conditions.md new file mode 100644 index 0000000000000..ae268f72f54a4 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-disruption-conditions.md @@ -0,0 +1,8 @@ +--- +title: PodDisruptionConditions +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for appending a dedicated pod condition indicating that the pod is being deleted due to a disruption. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-has-network-condition.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-has-network-condition.md new file mode 100644 index 0000000000000..a71f647fe40e5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-has-network-condition.md @@ -0,0 +1,9 @@ +--- +title: PodHasNetworkCondition +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet to mark the [PodHasNetwork](/docs/concepts/workloads/pods/pod-lifecycle/#pod-has-network) +condition on pods. This was renamed to `PodReadyToStartContainersCondition` in 1.28.
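As a sketch of how the Pod Deletion Cost feature is used in practice, the cost is set through an annotation on individual Pods owned by a ReplicaSet; the Pod name and cost value are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: replica-to-drop-first   # illustrative name
  annotations:
    # a lower cost means this Pod is preferred for deletion on scale-down
    controller.kubernetes.io/pod-deletion-cost: "-100"
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9   # illustrative image
```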
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-host-ips.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-host-ips.md new file mode 100644 index 0000000000000..27698fd9c8129 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-host-ips.md @@ -0,0 +1,9 @@ +--- +title: PodHostIPs +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the `status.hostIPs` field for pods and the {{< glossary_tooltip term_id="downward-api" text="downward API" >}}. +The field lets you expose host IP addresses to workloads. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-index-label.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-index-label.md new file mode 100644 index 0000000000000..357a2dcc4c321 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-index-label.md @@ -0,0 +1,8 @@ +--- +title: PodIndexLabel +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the Job controller and StatefulSet controller to add the pod index as a label when creating new pods. See [Job completion mode docs](/docs/concepts/workloads/controllers/job#completion-mode) and [StatefulSet pod index label docs](/docs/concepts/workloads/controllers/statefulset/#pod-index-label) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-lifecycle-sleep-action.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-lifecycle-sleep-action.md new file mode 100644 index 0000000000000..e0e528b2383b8 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-lifecycle-sleep-action.md @@ -0,0 +1,8 @@ +--- +title: PodLifecycleSleepAction +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the `sleep` action in Container lifecycle hooks. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-overhead.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-overhead.md new file mode 100644 index 0000000000000..05712565fef1b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-overhead.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: PodOverhead +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the [PodOverhead](/docs/concepts/scheduling-eviction/pod-overhead/) +feature to account for pod overheads. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-priority.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-priority.md new file mode 100644 index 0000000000000..d09e9eb8c77c0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-priority.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: PodPriority +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the descheduling and preemption of Pods based on their +[priorities](/docs/concepts/scheduling-eviction/pod-priority-preemption/). 
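A minimal sketch of the `sleep` lifecycle action that the `PodLifecycleSleepAction` gate enables, here used as a `preStop` hook to delay shutdown; the Pod name and duration are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sleep-hook-example   # illustrative name
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9   # illustrative image
      lifecycle:
        preStop:
          sleep:             # action gated by PodLifecycleSleepAction
            seconds: 5
```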
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-readiness-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-readiness-gates.md new file mode 100644 index 0000000000000..fe24af132540b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-readiness-gates.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: PodReadinessGates +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the setting of `PodReadinessGate` field for extending +Pod readiness evaluation. See [Pod readiness gate](/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-gate) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-ready-to-start-containers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-ready-to-start-containers.md new file mode 100644 index 0000000000000..df9d38bcb6c37 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-ready-to-start-containers.md @@ -0,0 +1,13 @@ +--- +title: PodReadyToStartContainersCondition +former_titles: + - PodHasNetworkCondition +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kubelet to mark the [PodReadyToStartContainers](/docs/concepts/workloads/pods/pod-lifecycle/#pod-has-network) condition on pods. + +This feature gate was previously known as `PodHasNetworkCondition`, and the associated condition was +named `PodHasNetwork`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-scheduling-readiness.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-scheduling-readiness.md new file mode 100644 index 0000000000000..c8778cf2b4320 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-scheduling-readiness.md @@ -0,0 +1,8 @@ +--- +title: PodSchedulingReadiness +content_type: feature_gate +_build: + list: never + render: false +--- +Enable setting `schedulingGates` field to control a Pod's [scheduling readiness](/docs/concepts/scheduling-eviction/pod-scheduling-readiness). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-security.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-security.md new file mode 100644 index 0000000000000..7a2a3efa0108a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-security.md @@ -0,0 +1,9 @@ +--- +# Removed from Kubernetes +title: PodSecurity +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the `PodSecurity` admission plugin. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-share-process-namespace.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-share-process-namespace.md new file mode 100644 index 0000000000000..42eb71c5c2677 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pod-share-process-namespace.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: PodShareProcessNamespace +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the setting of `shareProcessNamespace` in a Pod for sharing +a single process namespace between containers running in a pod. 
More details can be found in +[Share Process Namespace between Containers in a Pod](/docs/tasks/configure-pod-container/share-process-namespace/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/prefer-nominated-node.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/prefer-nominated-node.md new file mode 100644 index 0000000000000..ad999617077cd --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/prefer-nominated-node.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: PreferNominatedNode +content_type: feature_gate + +_build: + list: never + render: false +--- +This flag tells the scheduler whether the nominated +nodes will be checked first before looping through all the other nodes in +the cluster. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/probe-termination-grace-period.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/probe-termination-grace-period.md new file mode 100644 index 0000000000000..930dadf06fd5a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/probe-termination-grace-period.md @@ -0,0 +1,11 @@ +--- +title: ProbeTerminationGracePeriod +content_type: feature_gate +_build: + list: never + render: false +--- +Enable [setting probe-level +`terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) +on pods. See the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/proc-mount-type.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/proc-mount-type.md new file mode 100644 index 0000000000000..73a3173eaec8e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/proc-mount-type.md @@ -0,0 +1,9 @@ +--- +title: ProcMountType +content_type: feature_gate +_build: + list: never + render: false +--- +Enables control over the type of proc mount for containers +by setting the `procMount` field of a SecurityContext. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/proxy-terminating-endpoints.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/proxy-terminating-endpoints.md new file mode 100644 index 0000000000000..a0672b3ca1c46 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/proxy-terminating-endpoints.md @@ -0,0 +1,9 @@ +--- +title: ProxyTerminatingEndpoints +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the kube-proxy to handle terminating +endpoints when `ExternalTrafficPolicy=Local`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/pvc-protection.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/pvc-protection.md new file mode 100644 index 0000000000000..d64d7951051df --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/pvc-protection.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: PVCProtection +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the prevention of a PersistentVolumeClaim (PVC) from +being deleted when it is still used by any Pod.
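To make the probe-level `terminationGracePeriodSeconds` setting concrete, a minimal sketch in which the probe override is shorter than the Pod-level grace period; names and values are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: probe-grace-example    # illustrative name
spec:
  terminationGracePeriodSeconds: 3600    # Pod-level default
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9   # illustrative image
      livenessProbe:
        httpGet:
          path: /healthz                 # illustrative endpoint
          port: 8080
        failureThreshold: 1
        periodSeconds: 60
        # probe-level override enabled by ProbeTerminationGracePeriod
        terminationGracePeriodSeconds: 60
```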
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/qos-reserved.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/qos-reserved.md new file mode 100644 index 0000000000000..7db6ba5543171 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/qos-reserved.md @@ -0,0 +1,10 @@ +--- +title: QOSReserved +content_type: feature_gate +_build: + list: never + render: false +--- +Allows resource reservations at the QoS level, preventing pods +at lower QoS levels from bursting into resources requested at higher QoS levels +(memory only for now). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/read-write-once-pod.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/read-write-once-pod.md new file mode 100644 index 0000000000000..5dd15edfbe73b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/read-write-once-pod.md @@ -0,0 +1,9 @@ +--- +title: ReadWriteOncePod +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the usage of `ReadWriteOncePod` PersistentVolume +access mode. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/recover-volume-expansion-failure.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/recover-volume-expansion-failure.md new file mode 100644 index 0000000000000..fd9720abfa07a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/recover-volume-expansion-failure.md @@ -0,0 +1,11 @@ +--- +title: RecoverVolumeExpansionFailure +content_type: feature_gate +_build: + list: never + render: false +--- +Enables users to edit their PVCs to smaller +sizes so that they can recover from previously issued volume expansion failures. +See [Recovering from Failure when Expanding Volumes](/docs/concepts/storage/persistent-volumes/#recovering-from-failure-when-expanding-volumes) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/remaining-item-count.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/remaining-item-count.md new file mode 100644 index 0000000000000..e3238a2e1ea17 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/remaining-item-count.md @@ -0,0 +1,10 @@ +--- +title: RemainingItemCount +content_type: feature_gate +_build: + list: never + render: false +--- +Allow the API servers to show a count of remaining +items in the response to a +[chunking list request](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/remove-self-link.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/remove-self-link.md new file mode 100644 index 0000000000000..25e4e977d3aa0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/remove-self-link.md @@ -0,0 +1,11 @@ +--- +title: RemoveSelfLink +content_type: feature_gate +_build: + list: never + render: false +--- +Sets the `.metadata.selfLink` field to blank (empty string) for all +objects and collections. This field has been deprecated since the Kubernetes v1.16 +release. When this feature is enabled, the `.metadata.selfLink` field remains part of +the Kubernetes API, but is always unset.
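A minimal sketch of a PersistentVolumeClaim using the `ReadWriteOncePod` access mode described above, which restricts the volume to a single Pod rather than a single node; the claim name and size are illustrative.

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: single-pod-claim   # illustrative name
spec:
  accessModes:
    - ReadWriteOncePod     # access mode gated by ReadWriteOncePod
  resources:
    requests:
      storage: 1Gi         # illustrative size
```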
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/request-management.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/request-management.md new file mode 100644 index 0000000000000..3c147f36b6d8e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/request-management.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: RequestManagement +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables managing request concurrency with prioritization and fairness +at each API server. Deprecated by `APIPriorityAndFairness` since 1.17. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-limits-priority-function.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-limits-priority-function.md new file mode 100644 index 0000000000000..256f59d412668 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-limits-priority-function.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: ResourceLimitsPriorityFunction +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable a scheduler priority function that +assigns the lowest possible score of 1 to a node that satisfies at least one of +the input Pod's CPU and memory limits. The intent is to break ties between +nodes with the same scores. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-quota-scope-selectors.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-quota-scope-selectors.md new file mode 100644 index 0000000000000..306be747fb218 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/resource-quota-scope-selectors.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ResourceQuotaScopeSelectors +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable resource quota scope selectors. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/retroactive-default-storage-class.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/retroactive-default-storage-class.md new file mode 100644 index 0000000000000..36298a5f39a83 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/retroactive-default-storage-class.md @@ -0,0 +1,8 @@ +--- +title: RetroactiveDefaultStorageClass +content_type: feature_gate +_build: + list: never + render: false +--- +Allow assigning StorageClass to unbound PVCs retroactively. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/root-ca-config-map.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/root-ca-config-map.md new file mode 100644 index 0000000000000..8611ac90c6d92 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/root-ca-config-map.md @@ -0,0 +1,15 @@ +--- +# Removed from Kubernetes +title: RootCAConfigMap +content_type: feature_gate + +_build: + list: never + render: false +--- +Configure the `kube-controller-manager` to publish a +{{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} named `kube-root-ca.crt` +to every namespace. This ConfigMap contains a CA bundle used for verifying connections +to the kube-apiserver.
See +[Bound Service Account Tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-client-certificate.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-client-certificate.md new file mode 100644 index 0000000000000..0d5c80fa5b5e3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-client-certificate.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: RotateKubeletClientCertificate +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the rotation of the client TLS certificate on the kubelet. +See [kubelet configuration](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-server-certificate.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-server-certificate.md new file mode 100644 index 0000000000000..b828830ab0df5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/rotate-kubelet-server-certificate.md @@ -0,0 +1,10 @@ +--- +title: RotateKubeletServerCertificate +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the rotation of the server TLS certificate on the kubelet. +See [kubelet configuration](/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/#kubelet-configuration) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/run-as-group.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/run-as-group.md new file mode 100644 index 0000000000000..7e2927906224c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/run-as-group.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: RunAsGroup +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable control over the primary group ID set on the init processes of containers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class-in-image-cri-api.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class-in-image-cri-api.md new file mode 100644 index 0000000000000..88abe4473158e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class-in-image-cri-api.md @@ -0,0 +1,9 @@ +--- +title: RuntimeClassInImageCriApi +content_type: feature_gate +_build: + list: never + render: false +--- +Enables images to be pulled based on the +[runtime class](/docs/concepts/containers/runtime-class/) of the pods that reference them. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class.md new file mode 100644 index 0000000000000..c028f713d9ec9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/runtime-class.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: RuntimeClass +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the [RuntimeClass](/docs/concepts/containers/runtime-class/) feature for +selecting container runtime configurations.
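A minimal kubelet configuration sketch for the server certificate rotation described above, assuming the cluster has a way to approve serving-certificate CSRs (they are not auto-approved by default).

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# request the serving certificate from the cluster CA and keep it rotated
serverTLSBootstrap: true
featureGates:
  RotateKubeletServerCertificate: true
```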
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/schedule-daemon-set-pods.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/schedule-daemon-set-pods.md new file mode 100644 index 0000000000000..fba999d571c02 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/schedule-daemon-set-pods.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ScheduleDaemonSetPods +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable DaemonSet Pods to be scheduled by the default scheduler instead +of the DaemonSet controller. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/scheduler-queueing-hints.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/scheduler-queueing-hints.md new file mode 100644 index 0000000000000..b2beaddec6b42 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/scheduler-queueing-hints.md @@ -0,0 +1,12 @@ +--- +title: SchedulerQueueingHints +content_type: feature_gate +_build: + list: never + render: false +--- +Enables [the scheduler's _queueing hints_ enhancement](https://github.com/kubernetes/enhancements/blob/master/keps/sig-scheduling/4247-queueinghint/README.md), +which helps reduce unnecessary requeueing. +The scheduler retries scheduling pods if something changes in the cluster that could make the pod schedulable. +Queueing hints are internal signals that allow the scheduler to filter the changes in the cluster +that are relevant to the unscheduled pod, based on previous scheduling attempts. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/sctp-support.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/sctp-support.md new file mode 100644 index 0000000000000..8c7a54937ece3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/sctp-support.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: SCTPSupport +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the _SCTP_ `protocol` value in Pod, Service, Endpoints, EndpointSlice, +and NetworkPolicy definitions. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/seccomp-default.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/seccomp-default.md new file mode 100644 index 0000000000000..dea1497b38aeb --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/seccomp-default.md @@ -0,0 +1,10 @@ +--- +title: SeccompDefault +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the use of `RuntimeDefault` as the default seccomp profile +for all workloads. +The seccomp profile is specified in the `securityContext` of a Pod and/or a Container. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/security-context-deny.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/security-context-deny.md new file mode 100644 index 0000000000000..b304808b38720 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/security-context-deny.md @@ -0,0 +1,8 @@ +--- +title: SecurityContextDeny +content_type: feature_gate +_build: + list: never + render: false +--- +This gate signals that the `SecurityContextDeny` admission controller is deprecated.
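With `SeccompDefault` enabled, the kubelet applies `RuntimeDefault` implicitly; the explicit per-Pod equivalent looks like the following sketch (names are illustrative).

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: seccomp-example      # illustrative name
spec:
  securityContext:
    seccompProfile:
      type: RuntimeDefault   # the profile SeccompDefault applies by default
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9   # illustrative image
```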
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/selector-index.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/selector-index.md new file mode 100644 index 0000000000000..7e9423696f6df --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/selector-index.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: SelectorIndex +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows label and field based indexes in API server watch cache to accelerate +list operations. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/selinux-mount-read-write-once-pod.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/selinux-mount-read-write-once-pod.md new file mode 100644 index 0000000000000..65bd39aa4a4cc --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/selinux-mount-read-write-once-pod.md @@ -0,0 +1,10 @@ +--- +title: SELinuxMountReadWriteOncePod +content_type: feature_gate +_build: + list: never + render: false +--- +Speeds up container startup by allowing kubelet to mount volumes +for a Pod directly with the correct SELinux label instead of changing each file on the volumes +recursively. The initial implementation focused on ReadWriteOncePod volumes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/separate-taint-eviction-controller.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/separate-taint-eviction-controller.md new file mode 100644 index 0000000000000..29c989b2fac7f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/separate-taint-eviction-controller.md @@ -0,0 +1,12 @@ +--- +title: SeparateTaintEvictionController +content_type: feature_gate +_build: + list: never + render: false +--- +Enables running `TaintEvictionController`, +which performs [Taint-based Evictions](/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-based-evictions), +in a controller separated from `NodeLifecycleController`. When this feature is +enabled, users can optionally disable Taint-based Eviction by setting the +`--controllers=-taint-eviction-controller` flag on the `kube-controller-manager`. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-apply.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-apply.md new file mode 100644 index 0000000000000..2d8b66cff5169 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-apply.md @@ -0,0 +1,9 @@ +--- +title: ServerSideApply +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the [Server Side Apply (SSA)](/docs/reference/using-api/server-side-apply/) +feature on the API Server. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-field-validation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-field-validation.md new file mode 100644 index 0000000000000..b97cbdbdebfbf --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/server-side-field-validation.md @@ -0,0 +1,10 @@ +--- +title: ServerSideFieldValidation +content_type: feature_gate +_build: + list: never + render: false +--- +Enables server-side field validation.
This means the validation +of the resource schema is performed at the API server side rather than the client side +(for example, the `kubectl create` or `kubectl apply` command line). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-issuer-discovery.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-issuer-discovery.md new file mode 100644 index 0000000000000..581179575b31d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-issuer-discovery.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: ServiceAccountIssuerDiscovery +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable OIDC discovery endpoints (issuer and JWKS URLs) for the +service account issuer in the API server. See +[Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-jti.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-jti.md new file mode 100644 index 0000000000000..abc66d675b4a7 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-jti.md @@ -0,0 +1,9 @@ +--- +title: ServiceAccountTokenJTI +content_type: feature_gate +_build: + list: never + render: false +--- +Controls whether JTIs (UUIDs) are embedded into generated service account tokens, +and whether these JTIs are recorded into the Kubernetes audit log for future requests made by these tokens. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding-validation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding-validation.md new file mode 100644 index 0000000000000..7a4c77ff1731d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding-validation.md @@ -0,0 +1,9 @@ +--- +title: ServiceAccountTokenNodeBindingValidation +content_type: feature_gate +_build: + list: never + render: false +--- +Controls whether the apiserver will validate a Node reference in service account tokens. + diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding.md new file mode 100644 index 0000000000000..4fd31b624abe0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-node-binding.md @@ -0,0 +1,8 @@ +--- +title: ServiceAccountTokenNodeBinding +content_type: feature_gate +_build: + list: never + render: false +--- +Controls whether the apiserver allows binding service account tokens to Node objects.
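Relatedly, for the `SeparateTaintEvictionController` gate described earlier: the following fragment sketches how the opt-out flag might appear in a kubeadm-style static Pod manifest for the kube-controller-manager. The manifest layout and image tag are assumptions, not prescriptions.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  containers:
    - name: kube-controller-manager
      image: registry.k8s.io/kube-controller-manager:v1.29.0   # assumed version
      command:
        - kube-controller-manager
        # keep the default controllers but disable taint-based eviction
        - --controllers=*,-taint-eviction-controller
```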
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-pod-node-info.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-pod-node-info.md new file mode 100644 index 0000000000000..3eec66d02aecf --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-account-token-pod-node-info.md @@ -0,0 +1,9 @@ +--- +title: ServiceAccountTokenPodNodeInfo +content_type: feature_gate +_build: + list: never + render: false +--- +Controls whether the apiserver embeds the node name and uid +for the associated node when issuing service account tokens bound to Pod objects. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-app-protocol.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-app-protocol.md new file mode 100644 index 0000000000000..53a78a8cb8ee6 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-app-protocol.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ServiceAppProtocol +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the `appProtocol` field on Services and Endpoints. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-internal-traffic-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-internal-traffic-policy.md new file mode 100644 index 0000000000000..cd3e53a53d0b4 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-internal-traffic-policy.md @@ -0,0 +1,8 @@ +--- +title: ServiceInternalTrafficPolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the `internalTrafficPolicy` field on Services. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-ip-static-subrange.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-ip-static-subrange.md new file mode 100644 index 0000000000000..a65651f27bf6f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-ip-static-subrange.md @@ -0,0 +1,13 @@ +--- +title: ServiceIPStaticSubrange +content_type: feature_gate +_build: + list: never + render: false +--- +Enables a strategy for Services ClusterIP allocations, whereby the +ClusterIP range is subdivided. Dynamically allocated ClusterIP addresses will be allocated preferentially +from the upper range, allowing users to assign static ClusterIPs from the lower range with a low +risk of collision. See +[Avoiding collisions](/docs/reference/networking/virtual-ips/#avoiding-collisions) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-lb-node-port-control.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-lb-node-port-control.md new file mode 100644 index 0000000000000..0273c908c9454 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-lb-node-port-control.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ServiceLBNodePortControl +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the `allocateLoadBalancerNodePorts` field on Services.
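A minimal sketch of the `internalTrafficPolicy` field described above, routing in-cluster traffic only to endpoints on the same node; the name, label, and ports are illustrative.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: node-local-svc       # illustrative name
spec:
  selector:
    app: node-local-agent    # illustrative label
  internalTrafficPolicy: Local   # only use endpoints on the client's node
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080
```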
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-class.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-class.md new file mode 100644 index 0000000000000..e9b4a6383fe8b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-class.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: ServiceLoadBalancerClass +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables the `loadBalancerClass` field on Services. See +[Specifying class of load balancer implementation](/docs/concepts/services-networking/service/#load-balancer-class) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-finalizer.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-finalizer.md new file mode 100644 index 0000000000000..9c7594ba75fd2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-load-balancer-finalizer.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ServiceLoadBalancerFinalizer +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable finalizer protection for Service load balancers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-node-exclusion.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-node-exclusion.md new file mode 100644 index 0000000000000..67037b040cefb --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-node-exclusion.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ServiceNodeExclusion +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the exclusion of nodes from load balancers created by a cloud provider. +A node is eligible for exclusion if labelled with "`node.kubernetes.io/exclude-from-external-load-balancers`". diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-nodeport-static-subrange.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-nodeport-static-subrange.md new file mode 100644 index 0000000000000..8241ca5aad115 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-nodeport-static-subrange.md @@ -0,0 +1,10 @@ +--- +title: ServiceNodePortStaticSubrange +content_type: feature_gate +_build: + list: never + render: false +--- +Enables the use of different port allocation +strategies for NodePort Services. For more details, see +[reserve NodePort ranges to avoid collisions](/docs/concepts/services-networking/service/#avoid-nodeport-collisions). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/service-topology.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-topology.md new file mode 100644 index 0000000000000..52ed1bc7e2769 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/service-topology.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: ServiceTopology +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable service to route traffic based upon the Node topology of the cluster. 
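To illustrate the `loadBalancerClass` field mentioned above, a sketch of a Service claimed by a non-default load balancer implementation; the class name is hypothetical and must match whatever controller watches for it.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: custom-lb            # illustrative name
spec:
  type: LoadBalancer
  loadBalancerClass: example.com/internal-lb   # hypothetical class name
  selector:
    app: web                 # illustrative label
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080
```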
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/set-hostname-as-fqdn.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/set-hostname-as-fqdn.md new file mode 100644 index 0000000000000..a816c884ae41d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/set-hostname-as-fqdn.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: SetHostnameAsFQDN +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the ability to set the Fully Qualified Domain Name (FQDN) as the +hostname of a pod. See +[Pod's `setHostnameAsFQDN` field](/docs/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/sidecar-containers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/sidecar-containers.md new file mode 100644 index 0000000000000..803f2d9797565 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/sidecar-containers.md @@ -0,0 +1,11 @@ +--- +title: SidecarContainers +content_type: feature_gate +_build: + list: never + render: false +--- +Allow setting the `restartPolicy` of an init container to +`Always` so that the container becomes a sidecar container (restartable init containers). +See [Sidecar containers and restartPolicy](/docs/concepts/workloads/pods/sidecar-containers/) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/size-memory-backed-volumes.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/size-memory-backed-volumes.md new file mode 100644 index 0000000000000..7ed5db157671d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/size-memory-backed-volumes.md @@ -0,0 +1,9 @@ +--- +title: SizeMemoryBackedVolumes +content_type: feature_gate +_build: + list: never + render: false +--- +Enable kubelets to determine the size limit for +memory-backed volumes (mainly `emptyDir` volumes). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/skip-read-only-validation-gce.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/skip-read-only-validation-gce.md new file mode 100644 index 0000000000000..9f84e5ac7c430 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/skip-read-only-validation-gce.md @@ -0,0 +1,9 @@ +--- +title: SkipReadOnlyValidationGCE +content_type: feature_gate +_build: + list: never + render: false +--- +Skip validation for GCE; this will be enabled in the +next version. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/stable-load-balancer-node-set.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/stable-load-balancer-node-set.md new file mode 100644 index 0000000000000..16a3451ba1093 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/stable-load-balancer-node-set.md @@ -0,0 +1,9 @@ +--- +title: StableLoadBalancerNodeSet +content_type: feature_gate +_build: + list: never + render: false +--- +Enables fewer load balancer re-configurations by +the service controller (KCCM) in response to changing node state.
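A minimal sketch of the restartable init container pattern that `SidecarContainers` enables; the sidecar name and image are hypothetical.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sidecar-example      # illustrative name
spec:
  initContainers:
    - name: log-shipper      # hypothetical sidecar
      image: example.com/log-shipper:1.0   # hypothetical image
      restartPolicy: Always  # turns this init container into a sidecar
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9     # illustrative image
```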
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/startup-probe.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/startup-probe.md new file mode 100644 index 0000000000000..365423cfd48be --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/startup-probe.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: StartupProbe +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the [startup](/docs/concepts/workloads/pods/pod-lifecycle/#when-should-you-use-a-startup-probe) +probe in the kubelet. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-auto-delete-pvc.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-auto-delete-pvc.md new file mode 100644 index 0000000000000..3d5de8c1e40cb --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-auto-delete-pvc.md @@ -0,0 +1,13 @@ +--- +title: StatefulSetAutoDeletePVC +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows the use of the optional `.spec.persistentVolumeClaimRetentionPolicy` field, +providing control over the deletion of PVCs in a StatefulSet's lifecycle. +See +[PersistentVolumeClaim retention](/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention) +for more details. \ No newline at end of file diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-min-ready-seconds.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-min-ready-seconds.md new file mode 100644 index 0000000000000..a3a5a6b1565a9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-min-ready-seconds.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: StatefulSetMinReadySeconds +content_type: feature_gate + +_build: + list: never + render: false +--- +Allows `minReadySeconds` to be respected by +the StatefulSet controller. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-start-ordinal.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-start-ordinal.md new file mode 100644 index 0000000000000..fd0276f6f4fe5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/stateful-set-start-ordinal.md @@ -0,0 +1,11 @@ +--- +title: StatefulSetStartOrdinal +content_type: feature_gate +_build: + list: never + render: false +--- +Allow configuration of the start ordinal in a +StatefulSet. See +[Start ordinal](/docs/concepts/workloads/controllers/statefulset/#start-ordinal) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-object-in-use-protection.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-object-in-use-protection.md new file mode 100644 index 0000000000000..298c499473c50 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-object-in-use-protection.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: StorageObjectInUseProtection +content_type: feature_gate + +_build: + list: never + render: false +--- +Postpone the deletion of PersistentVolume or +PersistentVolumeClaim objects if they are still being used. 
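To make the `.spec.persistentVolumeClaimRetentionPolicy` field concrete, a trimmed StatefulSet sketch; names, sizes, and images are illustrative.

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web                  # illustrative name
spec:
  serviceName: web
  replicas: 3
  selector:
    matchLabels:
      app: web
  persistentVolumeClaimRetentionPolicy:  # field gated by StatefulSetAutoDeletePVC
    whenDeleted: Delete      # remove PVCs when the StatefulSet is deleted
    whenScaled: Retain       # keep PVCs of replicas removed by scale-down
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: app
          image: registry.k8s.io/pause:3.9   # illustrative image
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
```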
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-api.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-api.md new file mode 100644 index 0000000000000..4797309247df5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-api.md @@ -0,0 +1,9 @@ +--- +title: StorageVersionAPI +content_type: feature_gate +_build: + list: never + render: false +--- +Enable the +[storage version API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#storageversion-v1alpha1-internal-apiserver-k8s-io). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-hash.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-hash.md new file mode 100644 index 0000000000000..0f8ee3b73f97d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/storage-version-hash.md @@ -0,0 +1,9 @@ +--- +title: StorageVersionHash +content_type: feature_gate +_build: + list: never + render: false +--- +Allow API servers to expose the storage version hash in +discovery. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/streaming-proxy-redirects.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/streaming-proxy-redirects.md new file mode 100644 index 0000000000000..51d4d5e4b3844 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/streaming-proxy-redirects.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: StreamingProxyRedirects +content_type: feature_gate + +_build: + list: never + render: false +--- +Instructs the API server to intercept (and follow) redirects from the +backend (kubelet) for streaming requests. Examples of streaming requests include the `exec`, +`attach` and `port-forward` requests. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authentication-configuration.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authentication-configuration.md new file mode 100644 index 0000000000000..23e25dbb94993 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authentication-configuration.md @@ -0,0 +1,9 @@ +--- +title: StructuredAuthenticationConfiguration +content_type: feature_gate +_build: + list: never + render: false +--- +Enable [structured authentication configuration](/docs/reference/access-authn-authz/authentication/#configuring-the-api-server) +for the API server. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authorization-configuration.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authorization-configuration.md new file mode 100644 index 0000000000000..ebd2ee8cfa9b4 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/structured-authorization-configuration.md @@ -0,0 +1,10 @@ +--- +title: StructuredAuthorizationConfiguration +content_type: feature_gate +_build: + list: never + render: false +--- +Enable structured authorization configuration, so that cluster administrators +can specify more than one [authorization webhook](/docs/reference/access-authn-authz/webhook/) +in the API server handler chain.
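Both structured-configuration gates above move settings from command-line flags into a configuration file. As a rough sketch only (the v1alpha1 schema may change between releases; the webhook name and kubeconfig path are hypothetical), an authorization configuration that chains a webhook in front of RBAC could look like:

```yaml
apiVersion: apiserver.config.k8s.io/v1alpha1
kind: AuthorizationConfiguration
authorizers:
- type: Webhook
  name: example-policy-webhook            # hypothetical
  webhook:
    authorizedTTL: 30s
    unauthorizedTTL: 30s
    timeout: 3s
    subjectAccessReviewVersion: v1
    matchConditionSubjectAccessReviewVersion: v1
    failurePolicy: Deny                   # deny requests when the webhook is unreachable
    connectionInfo:
      type: KubeConfigFile
      kubeConfigFile: /etc/kubernetes/authz-webhook.kubeconfig   # hypothetical path
- type: RBAC
  name: rbac
```

The file is then passed to the API server with the `--authorization-config` flag added later in this diff.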
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/support-ipvs-proxy-mode.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-ipvs-proxy-mode.md new file mode 100644 index 0000000000000..80afa61d62e91 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-ipvs-proxy-mode.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: SupportIPVSProxyMode +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable providing in-cluster service load balancing using IPVS. +See [service proxies](/docs/reference/networking/virtual-ips/) for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/support-node-pids-limit.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-node-pids-limit.md new file mode 100644 index 0000000000000..185bbaa4ae49d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-node-pids-limit.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: SupportNodePidsLimit +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for limiting PIDs on the Node. The parameter +`pid=` in the `--system-reserved` and `--kube-reserved` options can be specified to +ensure that the specified number of process IDs will be reserved for the system as a whole and for +Kubernetes system daemons respectively. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/support-pod-pids-limit.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-pod-pids-limit.md new file mode 100644 index 0000000000000..df1a73b597b64 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/support-pod-pids-limit.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: SupportPodPidsLimit +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for limiting PIDs in Pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/suspend-job.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/suspend-job.md new file mode 100644 index 0000000000000..ceab5000369f6 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/suspend-job.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: SuspendJob +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support to suspend and resume Jobs. For more details, see +[the Jobs docs](/docs/concepts/workloads/controllers/job/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/sysctls.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/sysctls.md new file mode 100644 index 0000000000000..2d3ec087bbb79 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/sysctls.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: Sysctls +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for namespaced kernel parameters (sysctls) that can be set for each +pod. See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details.
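The namespaced sysctl support described above is configured per Pod through `spec.securityContext.sysctls`; a minimal sketch using one of the sysctls in the default safe set (the Pod name is hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-demo                  # hypothetical
spec:
  securityContext:
    sysctls:
    - name: kernel.shm_rmid_forced   # a "safe" namespaced sysctl
      value: "1"
  containers:
  - name: app
    image: busybox:1.36
    command: ['sleep', 'infinity']
```

Sysctls outside the safe set additionally have to be allowed on each node via the kubelet's `--allowed-unsafe-sysctls` flag.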
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-based-evictions.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-based-evictions.md new file mode 100644 index 0000000000000..814546565320b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-based-evictions.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: TaintBasedEvictions +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable evicting pods from nodes based on taints on Nodes and tolerations +on Pods. See [taints and tolerations](/docs/concepts/scheduling-eviction/taint-and-toleration/) +for more details. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-nodes-by-condition.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-nodes-by-condition.md new file mode 100644 index 0000000000000..34b17e6de653f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/taint-nodes-by-condition.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: TaintNodesByCondition +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable automatic tainting of nodes based on +[node conditions](/docs/concepts/architecture/nodes/#condition). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request-projection.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request-projection.md new file mode 100644 index 0000000000000..996d3dabee265 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request-projection.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: TokenRequestProjection +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the injection of service account tokens into a Pod through a +[`projected` volume](/docs/concepts/storage/volumes/#projected). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request.md new file mode 100644 index 0000000000000..3fc2a510b6f4a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/token-request.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: TokenRequest +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable the `TokenRequest` endpoint on service account resources. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-aware-hints.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-aware-hints.md new file mode 100644 index 0000000000000..accc97690be95 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-aware-hints.md @@ -0,0 +1,11 @@ +--- +title: TopologyAwareHints +content_type: feature_gate +_build: + list: never + render: false +--- +Enables topology-aware routing based on topology hints +in EndpointSlices. See [Topology Aware +Hints](/docs/concepts/services-networking/topology-aware-hints/) for more +details.
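The token injection that `TokenRequestProjection` describes corresponds to a `serviceAccountToken` source inside a `projected` volume. A minimal sketch (the Pod name and audience value are placeholder assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: token-projection-demo   # hypothetical
spec:
  serviceAccountName: default
  containers:
  - name: app
    image: busybox:1.36
    command: ['sleep', 'infinity']
    volumeMounts:
    - name: sa-token
      mountPath: /var/run/secrets/tokens
      readOnly: true
  volumes:
  - name: sa-token
    projected:
      sources:
      - serviceAccountToken:
          audience: vault              # hypothetical audience
          expirationSeconds: 3600      # the kubelet rotates the token before it expires
          path: token
```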
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-alpha-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-alpha-options.md new file mode 100644 index 0000000000000..1cc5e5c4b6216 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-alpha-options.md @@ -0,0 +1,11 @@ +--- +title: TopologyManagerPolicyAlphaOptions +content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of topology manager policies with +experimental, Alpha-quality options. +This feature gate guards *a group* of topology manager options whose quality level is alpha. +This feature gate will never graduate to beta or stable. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-beta-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-beta-options.md new file mode 100644 index 0000000000000..5e98776027fb3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-beta-options.md @@ -0,0 +1,11 @@ +--- +title: TopologyManagerPolicyBetaOptions +content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of topology manager policies with +experimental, Beta-quality options. +This feature gate guards *a group* of topology manager options whose quality level is beta. +This feature gate will never graduate to stable. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-options.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-options.md new file mode 100644 index 0000000000000..52deabc2cb259 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager-policy-options.md @@ -0,0 +1,8 @@ +--- +title: TopologyManagerPolicyOptions
content_type: feature_gate +_build: + list: never + render: false +--- +Allow fine-tuning of topology manager policies. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager.md new file mode 100644 index 0000000000000..bece6d3dcb626 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/topology-manager.md @@ -0,0 +1,10 @@ +--- +title: TopologyManager +content_type: feature_gate +_build: + list: never + render: false +--- +Enable a mechanism to coordinate fine-grained hardware resource +assignments for different components in Kubernetes. See +[Control Topology Management Policies on a node](/docs/tasks/administer-cluster/topology-manager/).
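The `TopologyManager*` gates above all act on kubelet configuration. A sketch of the relevant `KubeletConfiguration` fields; the specific option shown is only one example and is an assumption for illustration:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
topologyManagerPolicy: best-effort
topologyManagerScope: pod
topologyManagerPolicyOptions:
  # entries in this map are guarded by TopologyManagerPolicyOptions plus the
  # alpha or beta gate matching the individual option's quality level
  prefer-closest-numa-nodes: "true"
```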
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/translate-stream-close-websocket-requests.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/translate-stream-close-websocket-requests.md new file mode 100644 index 0000000000000..f7c1d80734721 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/translate-stream-close-websocket-requests.md @@ -0,0 +1,10 @@ +--- +title: TranslateStreamCloseWebsocketRequests +content_type: feature_gate +_build: + list: never + render: false +--- +Allow WebSocket streaming of the +remote command sub-protocol (`exec`, `cp`, `attach`) from clients requesting +version 5 (v5) of the sub-protocol. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/ttl-after-finished.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/ttl-after-finished.md new file mode 100644 index 0000000000000..af0feaeb6587b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/ttl-after-finished.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: TTLAfterFinished +content_type: feature_gate + +_build: + list: never + render: false +--- +Allow a [TTL controller](/docs/concepts/workloads/controllers/ttlafterfinished/) +to clean up resources after they finish execution. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/unauthenticated-http2-dos-mitigation.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/unauthenticated-http2-dos-mitigation.md new file mode 100644 index 0000000000000..1477f7ac4ee6c --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/unauthenticated-http2-dos-mitigation.md @@ -0,0 +1,9 @@ +--- +title: UnauthenticatedHTTP2DOSMitigation +content_type: feature_gate +_build: + list: never + render: false +--- +Enables HTTP/2 Denial of Service (DoS) mitigations for unauthenticated clients. +Kubernetes v1.28.0 through v1.28.2 do not include this feature gate. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/unknown-version-interoperability-proxy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/unknown-version-interoperability-proxy.md new file mode 100644 index 0000000000000..69d3b8691efee --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/unknown-version-interoperability-proxy.md @@ -0,0 +1,10 @@ +--- +title: UnknownVersionInteroperabilityProxy +content_type: feature_gate +_build: + list: never + render: false +--- +Proxy resource requests to the correct peer kube-apiserver when +multiple kube-apiservers exist at varied versions. +See [Mixed version proxy](/docs/concepts/architecture/mixed-version-proxy/) for more information. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-pod-security-standards.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-pod-security-standards.md new file mode 100644 index 0000000000000..1a1fee0058a6e --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-pod-security-standards.md @@ -0,0 +1,10 @@ +--- +title: UserNamespacesPodSecurityStandards +content_type: feature_gate +_build: + list: never + render: false +--- +Enable relaxation of Pod Security Standards policies for pods +that run with user namespaces.
You must set the value of this feature gate consistently across all nodes in +your cluster, and you must also enable `UserNamespacesSupport` to use this feature. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-stateless-pods-support.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-stateless-pods-support.md new file mode 100644 index 0000000000000..32a08177d0799 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-stateless-pods-support.md @@ -0,0 +1,9 @@ +--- +title: UserNamespacesStatelessPodsSupport +content_type: feature_gate +_build: + list: never + render: false +--- +Enable user namespace support for stateless Pods. This feature gate was superseded +by the `UserNamespacesSupport` feature gate in the Kubernetes v1.28 release. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-support.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-support.md new file mode 100644 index 0000000000000..e6d1032b4325f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/user-namespaces-support.md @@ -0,0 +1,8 @@ +--- +title: UserNamespacesSupport +content_type: feature_gate +_build: + list: never + render: false +--- +Enable user namespace support for Pods. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/validate-proxy-redirects.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/validate-proxy-redirects.md new file mode 100644 index 0000000000000..513fd911e902f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/validate-proxy-redirects.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: ValidateProxyRedirects +content_type: feature_gate + +_build: + list: never + render: false +--- +This flag controls whether the API server should validate that redirects +are only followed to the same host. Only used if the `StreamingProxyRedirects` flag is enabled. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/validating-admission-policy.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/validating-admission-policy.md new file mode 100644 index 0000000000000..ee5bfbcdc4e7d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/validating-admission-policy.md @@ -0,0 +1,8 @@ +--- +title: ValidatingAdmissionPolicy +content_type: feature_gate +_build: + list: never + render: false +--- +Enable [ValidatingAdmissionPolicy](/docs/reference/access-authn-authz/validating-admission-policy/) support for CEL validations to be used in Admission Control. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-attributes-class.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-attributes-class.md new file mode 100644 index 0000000000000..e97a870f2ae6d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-attributes-class.md @@ -0,0 +1,10 @@ +--- +title: VolumeAttributesClass +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for VolumeAttributesClasses. +See [Volume Attributes Classes](/docs/concepts/storage/volume-attributes-classes/) +for more information.
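The `VolumeAttributesClass` gate above introduces a new storage API object. As an alpha-level sketch (the class name, driver name, and parameter keys are hypothetical and entirely CSI-driver specific):

```yaml
apiVersion: storage.k8s.io/v1alpha1
kind: VolumeAttributesClass
metadata:
  name: silver                        # hypothetical
driverName: pd.csi.storage.gke.io     # example CSI driver
parameters:
  provisioned-iops: "3000"            # driver-specific key/value pairs
```

A PersistentVolumeClaim can then reference the class by name through its `volumeAttributesClassName` field.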
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-capacity-priority.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-capacity-priority.md new file mode 100644 index 0000000000000..3cc62ee723e35 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-capacity-priority.md @@ -0,0 +1,9 @@ +--- +title: VolumeCapacityPriority +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for prioritizing nodes in different +topologies based on available PV capacity. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-pvc-data-source.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-pvc-data-source.md new file mode 100644 index 0000000000000..5b1ea196f496d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-pvc-data-source.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: VolumePVCDataSource +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for specifying an existing PVC as a DataSource. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-scheduling.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-scheduling.md new file mode 100644 index 0000000000000..9c6dd67abaa1b --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-scheduling.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: VolumeScheduling +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable volume topology-aware scheduling and make the PersistentVolumeClaim +(PVC) binding aware of scheduling decisions. It also enables the usage of the +[`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the +`PersistentLocalVolumes` feature gate. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-snapshot-data-source.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-snapshot-data-source.md new file mode 100644 index 0000000000000..bb3238b87d0f5 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-snapshot-data-source.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: VolumeSnapshotDataSource +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable volume snapshot data source support. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath-env-expansion.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath-env-expansion.md new file mode 100644 index 0000000000000..7be67bcea50e2 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath-env-expansion.md @@ -0,0 +1,11 @@ +--- +# Removed from Kubernetes +title: VolumeSubpathEnvExpansion
content_type: feature_gate + +_build: + list: never + render: false +--- +Enable `subPathExpr` field for expanding environment +variables into a `subPath`.
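The `subPathExpr` expansion that `VolumeSubpathEnvExpansion` describes looks like this in a Pod spec; the Pod name and host path are purely illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: subpath-expansion-demo   # hypothetical
spec:
  containers:
  - name: app
    image: busybox:1.36
    command: ['sh', '-c', 'touch /logs/started; sleep infinity']
    env:
    - name: POD_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.name
    volumeMounts:
    - name: workdir
      mountPath: /logs
      subPathExpr: $(POD_NAME)   # each pod writes into its own subdirectory
  volumes:
  - name: workdir
    hostPath:
      path: /var/log/demo-pods   # hypothetical host directory
```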
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath.md new file mode 100644 index 0000000000000..6122f6649633f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/volume-subpath.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: VolumeSubpath +content_type: feature_gate + +_build: + list: never + render: false +--- +Allow mounting a subpath of a volume in a container. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/warning-headers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/warning-headers.md new file mode 100644 index 0000000000000..3ff9a7880fc0d --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/warning-headers.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: WarningHeaders +content_type: feature_gate + +_build: + list: never + render: false +--- +Allow sending warning headers in API responses. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-bookmark.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-bookmark.md new file mode 100644 index 0000000000000..0f087a17e00e0 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-bookmark.md @@ -0,0 +1,8 @@ +--- +title: WatchBookmark +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for watch bookmark events. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-list.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-list.md new file mode 100644 index 0000000000000..c3754662b5d1a --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/watch-list.md @@ -0,0 +1,8 @@ +--- +title: WatchList +content_type: feature_gate +_build: + list: never + render: false +--- +Enable support for [streaming initial state of objects in watch requests](/docs/reference/using-api/api-concepts/#streaming-lists). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/win-dsr.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/win-dsr.md new file mode 100644 index 0000000000000..c5db941441cb3 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/win-dsr.md @@ -0,0 +1,8 @@ +--- +title: WinDSR +content_type: feature_gate +_build: + list: never + render: false +--- +Allows kube-proxy to create DSR load balancers for Windows. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/win-overlay.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/win-overlay.md new file mode 100644 index 0000000000000..8f68d64eaeb1f --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/win-overlay.md @@ -0,0 +1,8 @@ +--- +title: WinOverlay +content_type: feature_gate +_build: + list: never + render: false +--- +Allows kube-proxy to run in overlay mode for Windows.
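The `WinDSR` and `WinOverlay` gates above pair with fields in the kube-proxy configuration. A sketch of the Windows-specific pieces (the HNS network name is an assumption for illustration):

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: kernelspace
featureGates:
  WinDSR: true                # the gate must be on for enableDSR to take effect
winkernel:
  enableDSR: true
  networkName: Calico         # hypothetical HNS network name
```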
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-endpoint-slice-proxying.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-endpoint-slice-proxying.md new file mode 100644 index 0000000000000..7270269fe8563 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-endpoint-slice-proxying.md @@ -0,0 +1,13 @@ +--- +# Removed from Kubernetes +title: WindowsEndpointSliceProxying +content_type: feature_gate + +_build: + list: never + render: false +--- +When enabled, kube-proxy running on Windows will use +EndpointSlices as the primary data source instead of Endpoints, enabling scalability and +performance improvements. See +[Enabling Endpoint Slices](/docs/concepts/services-networking/endpoint-slices/). diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-gmsa.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-gmsa.md new file mode 100644 index 0000000000000..4047fdf9fa9d9 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-gmsa.md @@ -0,0 +1,10 @@ +--- +# Removed from Kubernetes +title: WindowsGMSA +content_type: feature_gate + +_build: + list: never + render: false +--- +Enables passing of GMSA credential specs from pods to container runtimes. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-network.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-network.md new file mode 100644 index 0000000000000..1c47419f0b841 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-network.md @@ -0,0 +1,8 @@ +--- +title: WindowsHostNetwork +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for joining Windows containers to a host's network namespace. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-process-containers.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-process-containers.md new file mode 100644 index 0000000000000..16e956999ca22 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-host-process-containers.md @@ -0,0 +1,8 @@ +--- +title: WindowsHostProcessContainers +content_type: feature_gate +_build: + list: never + render: false +--- +Enables support for Windows HostProcess containers. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-run-as-user-name.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-run-as-user-name.md new file mode 100644 index 0000000000000..012f490297fab --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/windows-run-as-user-name.md @@ -0,0 +1,12 @@ +--- +# Removed from Kubernetes +title: WindowsRunAsUserName +content_type: feature_gate + +_build: + list: never + render: false +--- +Enable support for running applications in Windows containers as a +non-default user. See [Configuring RunAsUserName](/docs/tasks/configure-pod-container/configure-runasusername) +for more details.
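For the `WindowsRunAsUserName` gate above, the user is selected through `windowsOptions` in the Pod or container security context; a sketch (the Pod name, image tag, and user name are examples):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: run-as-username-demo   # hypothetical
spec:
  securityContext:
    windowsOptions:
      runAsUserName: "ContainerUser"   # a non-default Windows user
  containers:
  - name: app
    image: mcr.microsoft.com/windows/servercore:ltsc2022
    command: ['cmd', '/c', 'whoami && ping -t localhost']
  nodeSelector:
    kubernetes.io/os: windows
```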
diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates/zero-limited-nominal-concurrency-shares.md b/content/en/docs/reference/command-line-tools-reference/feature-gates/zero-limited-nominal-concurrency-shares.md new file mode 100644 index 0000000000000..1e6ed4d9ea940 --- /dev/null +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates/zero-limited-nominal-concurrency-shares.md @@ -0,0 +1,10 @@ +--- +title: ZeroLimitedNominalConcurrencyShares +content_type: feature_gate +_build: + list: never + render: false +--- +Allow [Priority & Fairness](/docs/concepts/cluster-administration/flow-control/) +in the API server to use a zero value for the `nominalConcurrencyShares` field of +the `limited` section of a priority level. diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index 16326ef26bad9..c4ab21a754cc7 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -11,7 +11,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -67,6 +67,13 @@ kube-apiserver [flags]

    The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

    + +--allow-metric-labels-manifest string + + +

    The path to the manifest file that contains the allow-list mapping. The format of the file is the same as the flag --allow-metric-labels. Note that the flag --allow-metric-labels will override the manifest file.

    + + --allow-privileged @@ -305,6 +312,13 @@ kube-apiserver [flags]

    API group and version used for serializing audit events written to webhook.

    + +--authentication-config string + + +

    File with Authentication Configuration to configure the JWT Token authenticator. Note: This feature is in Alpha since v1.29. --feature-gate=StructuredAuthenticationConfiguration=true needs to be set for enabling this feature. This feature is mutually exclusive with the oidc-* flags.

    + + --authentication-token-webhook-cache-ttl duration     Default: 2m0s @@ -327,10 +341,17 @@ kube-apiserver [flags] ---authorization-mode strings     Default: "AlwaysAllow" +--authorization-config string -

    Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node.

    +

    File with Authorization Configuration to configure the authorizer chain. Note: This feature is in Alpha since v1.29. The --feature-gate=StructuredAuthorizationConfiguration=true feature flag needs to be set to true for enabling the functionality. This feature is mutually exclusive with the other --authorization-mode and --authorization-webhook-* flags.

    + + + +--authorization-mode strings + + +

    Ordered list of plug-ins to do authorization on secure port. Defaults to AlwaysAllow if --authorization-config is not used. Comma-delimited list of: AlwaysAllow,AlwaysDeny,ABAC,Webhook,RBAC,Node.

    @@ -396,20 +417,6 @@ kube-apiserver [flags]

    If set, any request presenting a client certificate signed by one of the authorities in the client-ca-file is authenticated with an identity corresponding to the CommonName of the client certificate.

    - ---cloud-config string - - -

    The path to the cloud provider configuration file. Empty string for no configuration file.

    - - - ---cloud-provider string - - -

    The provider for cloud services. Empty string for no provider.

    - - --cloud-provider-gce-l7lb-src-cidrs cidrs     Default: 130.211.0.0/22,35.191.0.0/16 @@ -512,7 +519,7 @@ kube-apiserver [flags] --enable-priority-and-fairness     Default: true -

    If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness

    +

    If true, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness

    @@ -631,7 +638,7 @@ kube-apiserver [flags] --feature-gates <comma-separated 'key=True|False' pairs> -

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIListChunking=true|false (BETA - default=true)
    APIPriorityAndFairness=true|false (BETA - default=true)
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSINodeExpandSecret=true|false (BETA - default=true)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (ALPHA - default=false)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    CustomResourceValidationExpressions=true|false (BETA - default=true)
    DevicePluginCDIDevices=true|false (ALPHA - default=false)
    DisableCloudProviders=true|false (ALPHA - default=false)
    DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (ALPHA - default=false)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (ALPHA - default=false)
    JobReadyPods=true|false (BETA - default=true)
    KMSv2=true|false (BETA - default=true)
    KMSv2KDF=true|false (BETA - default=false)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRRangeAllocator=true|false (ALPHA - default=false)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (ALPHA - default=false)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (ALPHA - default=false)
    PodIndexLabel=true|false (BETA - default=true)
    PodReadyToStartContainersCondition=true|false (ALPHA - default=false)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    ReadWriteOncePod=true|false (BETA - default=true)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RemainingItemCount=true|false (BETA - default=true)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=true)
    SecurityContextDeny=true|false (ALPHA - default=false)
    ServiceNodePortStaticSubrange=true|false (BETA - default=true)
    SidecarContainers=true|false (ALPHA - default=false)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    SkipReadOnlyValidationGCE=true|false (ALPHA - default=false)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)

    +

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (BETA - default=true)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ClusterTrustBundleProjection=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    DevicePluginCDIDevices=true|false (BETA - default=true)
    DisableCloudProviders=true|false (BETA - default=true)
    DisableKubeletCloudCredentialProviders=true|false (BETA - default=true)
    DisableNodeKubeProxyVersion=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    ImageMaximumGCAge=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (BETA - default=true)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (BETA - default=true)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletSeparateDiskGC=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (BETA - default=true)
    LoadBalancerIPMode=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodAffinity=true|false (ALPHA - default=false)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NFTablesProxyMode=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (BETA - default=true)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (BETA - default=true)
    PodIndexLabel=true|false (BETA - default=true)
    PodLifecycleSleepAction=true|false (ALPHA - default=false)
    PodReadyToStartContainersCondition=true|false (BETA - default=true)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    RuntimeClassInImageCriApi=true|false (ALPHA - default=false)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=false)
    SecurityContextDeny=true|false (ALPHA - default=false)
    SeparateTaintEvictionController=true|false (BETA - default=true)
    ServiceAccountTokenJTI=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBinding=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBindingValidation=true|false (ALPHA - default=false)
    ServiceAccountTokenPodNodeInfo=true|false (ALPHA - default=false)
    SidecarContainers=true|false (BETA - default=true)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    StructuredAuthenticationConfiguration=true|false (ALPHA - default=false)
    StructuredAuthorizationConfiguration=true|false (ALPHA - default=false)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    TranslateStreamCloseWebsocketRequests=true|false (ALPHA - default=false)
    UnauthenticatedHTTP2DOSMitigation=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesPodSecurityStandards=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeAttributesClass=true|false (ALPHA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)
    ZeroLimitedNominalConcurrencyShares=true|false (BETA - default=false)

    diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index 578a8785d10fd..6982a31dc1d60 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -11,7 +11,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -57,6 +57,13 @@ kube-controller-manager [flags]

    The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

    + +--allow-metric-labels-manifest string + + +

    The path to the manifest file that contains the allow-list mapping. The format of the file is the same as the flag --allow-metric-labels. Note that the flag --allow-metric-labels will override the manifest file.

    + + --attach-detach-reconcile-sync-period duration     Default: 1m0s @@ -404,7 +411,7 @@ kube-controller-manager [flags] --controllers strings     Default: "*" -

    A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.
    All controllers: bootstrap-signer-controller, certificatesigningrequest-approving-controller, certificatesigningrequest-cleaner-controller, certificatesigningrequest-signing-controller, cloud-node-lifecycle-controller, clusterrole-aggregation-controller, cronjob-controller, daemonset-controller, deployment-controller, disruption-controller, endpoints-controller, endpointslice-controller, endpointslice-mirroring-controller, ephemeral-volume-controller, garbage-collector-controller, horizontal-pod-autoscaler-controller, job-controller, namespace-controller, node-ipam-controller, node-lifecycle-controller, node-route-controller, persistentvolume-attach-detach-controller, persistentvolume-binder-controller, persistentvolume-expander-controller, persistentvolume-protection-controller, persistentvolumeclaim-protection-controller, pod-garbage-collector-controller, replicaset-controller, replicationcontroller-controller, resourcequota-controller, root-ca-certificate-publisher-controller, service-lb-controller, serviceaccount-controller, serviceaccount-token-controller, statefulset-controller, token-cleaner-controller, ttl-after-finished-controller, ttl-controller
    Disabled-by-default controllers: bootstrap-signer-controller, token-cleaner-controller

    +

    A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'.
    All controllers: bootstrap-signer-controller, certificatesigningrequest-approving-controller, certificatesigningrequest-cleaner-controller, certificatesigningrequest-signing-controller, cloud-node-lifecycle-controller, clusterrole-aggregation-controller, cronjob-controller, daemonset-controller, deployment-controller, disruption-controller, endpoints-controller, endpointslice-controller, endpointslice-mirroring-controller, ephemeral-volume-controller, garbage-collector-controller, horizontal-pod-autoscaler-controller, job-controller, legacy-serviceaccount-token-cleaner-controller, namespace-controller, node-ipam-controller, node-lifecycle-controller, node-route-controller, persistentvolume-attach-detach-controller, persistentvolume-binder-controller, persistentvolume-expander-controller, persistentvolume-protection-controller, persistentvolumeclaim-protection-controller, pod-garbage-collector-controller, replicaset-controller, replicationcontroller-controller, resourceclaim-controller, resourcequota-controller, root-ca-certificate-publisher-controller, service-cidr-controller, service-lb-controller, serviceaccount-controller, serviceaccount-token-controller, statefulset-controller, storageversion-garbage-collector-controller, taint-eviction-controller, token-cleaner-controller, ttl-after-finished-controller, ttl-controller, validatingadmissionpolicy-status-controller
    Disabled-by-default controllers: bootstrap-signer-controller, token-cleaner-controller

    @@ -474,7 +481,7 @@ kube-controller-manager [flags] --feature-gates <comma-separated 'key=True|False' pairs> -

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIListChunking=true|false (BETA - default=true)
    APIPriorityAndFairness=true|false (BETA - default=true)
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSINodeExpandSecret=true|false (BETA - default=true)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (ALPHA - default=false)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    CustomResourceValidationExpressions=true|false (BETA - default=true)
    DevicePluginCDIDevices=true|false (ALPHA - default=false)
    DisableCloudProviders=true|false (ALPHA - default=false)
    DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (ALPHA - default=false)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (ALPHA - default=false)
    JobReadyPods=true|false (BETA - default=true)
    KMSv2=true|false (BETA - default=true)
    KMSv2KDF=true|false (BETA - default=false)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRRangeAllocator=true|false (ALPHA - default=false)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (ALPHA - default=false)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (ALPHA - default=false)
    PodIndexLabel=true|false (BETA - default=true)
    PodReadyToStartContainersCondition=true|false (ALPHA - default=false)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    ReadWriteOncePod=true|false (BETA - default=true)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RemainingItemCount=true|false (BETA - default=true)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=true)
    SecurityContextDeny=true|false (ALPHA - default=false)
    ServiceNodePortStaticSubrange=true|false (BETA - default=true)
    SidecarContainers=true|false (ALPHA - default=false)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    SkipReadOnlyValidationGCE=true|false (ALPHA - default=false)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)

    +

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (BETA - default=true)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ClusterTrustBundleProjection=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    DevicePluginCDIDevices=true|false (BETA - default=true)
    DisableCloudProviders=true|false (BETA - default=true)
    DisableKubeletCloudCredentialProviders=true|false (BETA - default=true)
    DisableNodeKubeProxyVersion=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    ImageMaximumGCAge=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (BETA - default=true)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (BETA - default=true)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletSeparateDiskGC=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (BETA - default=true)
    LoadBalancerIPMode=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodAffinity=true|false (ALPHA - default=false)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NFTablesProxyMode=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (BETA - default=true)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (BETA - default=true)
    PodIndexLabel=true|false (BETA - default=true)
    PodLifecycleSleepAction=true|false (ALPHA - default=false)
    PodReadyToStartContainersCondition=true|false (BETA - default=true)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    RuntimeClassInImageCriApi=true|false (ALPHA - default=false)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=false)
    SecurityContextDeny=true|false (ALPHA - default=false)
    SeparateTaintEvictionController=true|false (BETA - default=true)
    ServiceAccountTokenJTI=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBinding=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBindingValidation=true|false (ALPHA - default=false)
    ServiceAccountTokenPodNodeInfo=true|false (ALPHA - default=false)
    SidecarContainers=true|false (BETA - default=true)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    StructuredAuthenticationConfiguration=true|false (ALPHA - default=false)
    StructuredAuthorizationConfiguration=true|false (ALPHA - default=false)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    TranslateStreamCloseWebsocketRequests=true|false (ALPHA - default=false)
    UnauthenticatedHTTP2DOSMitigation=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesPodSecurityStandards=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeAttributesClass=true|false (ALPHA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)
    ZeroLimitedNominalConcurrencyShares=true|false (BETA - default=false)
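
For instance, a minimal sketch of toggling a couple of the gates listed above on the kube-controller-manager command line; the particular gates chosen here are arbitrary examples:

```
# Arbitrary example gates taken from the list above.
kube-controller-manager \
  --feature-gates=JobPodReplacementPolicy=true,LogarithmicScaleDown=false
```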

@@ -565,7 +572,7 @@ kube-controller-manager [flags]

--large-cluster-size-threshold int32     Default: 50

-

    Number of nodes from which node-lifecycle-controller treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.

    +

    Number of nodes from which node-lifecycle-controller treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller. Notice: If nodes reside in multiple zones, this threshold will be considered as zone node size threshold for each zone to determine node eviction rate independently.
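
A sketch with illustrative values only:

```
# Illustrative values: for clusters of 100 nodes or fewer the secondary
# eviction rate is implicitly overridden to 0; above that size, evict at
# most 0.01 nodes per second from unhealthy zones.
kube-controller-manager \
  --large-cluster-size-threshold=100 \
  --secondary-node-eviction-rate=0.01
```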

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
index 22efe66a88d00..f5c0ac0de049f 100644
--- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
+++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md
@@ -11,7 +11,7 @@
 The file is auto-generated from the Go source code of the component using a generic
 [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate
 the reference documentation, please read
 [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/).
-To update the reference content, please follow the 
+To update the reference content, please follow the
 [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/)
 guide. You can file document formatting bugs against the
 [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project.

@@ -35,7 +35,7 @@ kube-proxy [flags]

## {{% heading "options" %}}
@@ -46,427 +46,448 @@ kube-proxy [flags]

    --add_dir_header

If true, adds the file directory to the header of the log messages

--alsologtostderr

log to standard error as well as files (no effect when -logtostderr=true)

    --bind-address string     Default: 0.0.0.0

- The IP address for the proxy server to serve on (set to '0.0.0.0' for all IPv4 interfaces and '::' for all IPv6 interfaces). This parameter is ignored if a config file is specified by --config.
+ Overrides kube-proxy's idea of what its node's primary IP is. Note that the name is a historical artifact, and kube-proxy does not actually bind any sockets to this IP. This parameter is ignored if a config file is specified by --config.

    --bind-address-hard-fail

If true, kube-proxy will treat failure to bind to a port as fatal and exit.

--boot_id_file string     Default: "/proc/sys/kernel/random/boot_id"

Comma-separated list of files to check for boot-id. Use the first one that exists.

--cleanup

If true, clean up iptables and ipvs rules and exit.

    --cluster-cidr string

- The CIDR range of pods in the cluster. When configured, traffic sent to a Service cluster IP from outside this range will be masqueraded and traffic sent from pods to an external LoadBalancer IP will be directed to the respective cluster IP instead. For dual-stack clusters, a comma-separated list is accepted with at least one CIDR per IP family (IPv4 and IPv6). This parameter is ignored if a config file is specified by --config.
+ The CIDR range of the pods in the cluster. (For dual-stack clusters, this can be a comma-separated dual-stack pair of CIDR ranges.) When --detect-local-mode is set to ClusterCIDR, kube-proxy will consider traffic to be local if its source IP is in this range. (Otherwise it is not used.) This parameter is ignored if a config file is specified by --config.
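
A minimal sketch, assuming example pod CIDRs:

```
# Example CIDRs only; substitute your cluster's actual pod ranges.
kube-proxy \
  --detect-local-mode=ClusterCIDR \
  --cluster-cidr=10.244.0.0/16,fd00:10:244::/56
```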

    --config string

The path to the configuration file.

--config-sync-period duration     Default: 15m0s

How often configuration from the apiserver is refreshed. Must be greater than 0.

--conntrack-max-per-core int32     Default: 32768

Maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore conntrack-min).

--conntrack-min int32     Default: 131072

Minimum number of conntrack entries to allocate, regardless of conntrack-max-per-core (set conntrack-max-per-core=0 to leave the limit as-is).

    --conntrack-tcp-be-liberal

    Enable liberal mode for tracking TCP packets by setting nf_conntrack_tcp_be_liberal to 1

    --conntrack-tcp-timeout-close-wait duration     Default: 1h0m0s

NAT timeout for TCP connections in the CLOSE_WAIT state

--conntrack-tcp-timeout-established duration     Default: 24h0m0s

Idle timeout for established TCP connections (0 to leave as-is)

    --conntrack-udp-timeout duration

    Idle timeout for UNREPLIED UDP connections (0 to leave as-is)

    --conntrack-udp-timeout-stream duration

    Idle timeout for ASSURED UDP connections (0 to leave as-is)

    --detect-local-mode LocalMode

Mode to use to detect local traffic. This parameter is ignored if a config file is specified by --config.

--feature-gates <comma-separated 'key=True|False' pairs>

-

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIListChunking=true|false (BETA - default=true)
    APIPriorityAndFairness=true|false (BETA - default=true)
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSINodeExpandSecret=true|false (BETA - default=true)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (ALPHA - default=false)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    CustomResourceValidationExpressions=true|false (BETA - default=true)
    DevicePluginCDIDevices=true|false (ALPHA - default=false)
    DisableCloudProviders=true|false (ALPHA - default=false)
    DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (ALPHA - default=false)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (ALPHA - default=false)
    JobReadyPods=true|false (BETA - default=true)
    KMSv2=true|false (BETA - default=true)
    KMSv2KDF=true|false (BETA - default=false)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRRangeAllocator=true|false (ALPHA - default=false)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (ALPHA - default=false)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (ALPHA - default=false)
    PodIndexLabel=true|false (BETA - default=true)
    PodReadyToStartContainersCondition=true|false (ALPHA - default=false)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    ReadWriteOncePod=true|false (BETA - default=true)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RemainingItemCount=true|false (BETA - default=true)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=true)
    SecurityContextDeny=true|false (ALPHA - default=false)
    ServiceNodePortStaticSubrange=true|false (BETA - default=true)
    SidecarContainers=true|false (ALPHA - default=false)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    SkipReadOnlyValidationGCE=true|false (ALPHA - default=false)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)
This parameter is ignored if a config file is specified by --config.

+

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (BETA - default=true)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ClusterTrustBundleProjection=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    DevicePluginCDIDevices=true|false (BETA - default=true)
    DisableCloudProviders=true|false (BETA - default=true)
    DisableKubeletCloudCredentialProviders=true|false (BETA - default=true)
    DisableNodeKubeProxyVersion=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    ImageMaximumGCAge=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (BETA - default=true)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (BETA - default=true)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletSeparateDiskGC=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (BETA - default=true)
    LoadBalancerIPMode=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodAffinity=true|false (ALPHA - default=false)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NFTablesProxyMode=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (BETA - default=true)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (BETA - default=true)
    PodIndexLabel=true|false (BETA - default=true)
    PodLifecycleSleepAction=true|false (ALPHA - default=false)
    PodReadyToStartContainersCondition=true|false (BETA - default=true)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    RuntimeClassInImageCriApi=true|false (ALPHA - default=false)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=false)
    SecurityContextDeny=true|false (ALPHA - default=false)
    SeparateTaintEvictionController=true|false (BETA - default=true)
    ServiceAccountTokenJTI=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBinding=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBindingValidation=true|false (ALPHA - default=false)
    ServiceAccountTokenPodNodeInfo=true|false (ALPHA - default=false)
    SidecarContainers=true|false (BETA - default=true)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    StructuredAuthenticationConfiguration=true|false (ALPHA - default=false)
    StructuredAuthorizationConfiguration=true|false (ALPHA - default=false)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    TranslateStreamCloseWebsocketRequests=true|false (ALPHA - default=false)
    UnauthenticatedHTTP2DOSMitigation=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesPodSecurityStandards=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeAttributesClass=true|false (ALPHA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)
    ZeroLimitedNominalConcurrencyShares=true|false (BETA - default=false)
    This parameter is ignored if a config file is specified by --config.
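
As with the other components, gates are toggled on the command line; the gate named here is an arbitrary pick from the list above:

```
# Arbitrary example gate taken from the list above.
kube-proxy --feature-gates=KubeProxyDrainingTerminatingNodes=true
```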

    --healthz-bind-address ipport     Default: 0.0.0.0:10256

- The IP address with port for the health check server to serve on (set to '0.0.0.0:10256' for all IPv4 interfaces and '[::]:10256' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.
+ The IP address and port for the health check server to serve on, defaulting to "0.0.0.0:10256" (if --bind-address is unset or IPv4), or "[::]:10256" (if --bind-address is IPv6). Set empty to disable. This parameter is ignored if a config file is specified by --config.

    -h, --help

help for kube-proxy

    --hostname-override string

- If non-empty, will use this string as identification instead of the actual hostname.
+ If non-empty, will be used as the name of the Node that kube-proxy is running on. If unset, the node name is assumed to be the same as the node's hostname.

    --init-only

    If true, perform any initialization steps that must be done with full root privileges, and then exit. After doing this, you can run kube-proxy again with only the CAP_NET_ADMIN capability.

    --iptables-localhost-nodeports     Default: true

- If false Kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost, This only applies to iptables mode and ipv4.
+ If false, kube-proxy will disable the legacy behavior of allowing NodePort services to be accessed via localhost. (Applies only to iptables mode and IPv4; localhost NodePorts are never allowed with other proxy modes or with IPv6.)
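
For example, to opt out of the legacy localhost NodePort behavior:

```
# Disables access to NodePort services via 127.0.0.1 (iptables mode, IPv4 only).
kube-proxy --iptables-localhost-nodeports=false
```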

    --iptables-masquerade-bit int32     Default: 14

- If using the pure iptables proxy, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].
+ If using the iptables or ipvs proxy mode, the bit of the fwmark space to mark packets requiring SNAT with. Must be within the range [0, 31].

    --iptables-min-sync-period duration     Default: 1s

- The minimum interval of how often the iptables rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').
+ The minimum period between iptables rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate iptables resync.

    --iptables-sync-period duration     Default: 30s

- The maximum interval of how often iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
+ An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.
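
A sketch combining the two sync-period flags, with illustrative durations:

```
# Illustrative values: rate-limit resyncs to at most one per second,
# with periodic maintenance every 30 seconds.
kube-proxy \
  --iptables-min-sync-period=1s \
  --iptables-sync-period=30s
```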

    --ipvs-exclude-cidrs strings

- A comma-separated list of CIDR's which the ipvs proxier should not touch when cleaning up IPVS rules.
+ A comma-separated list of CIDRs which the ipvs proxier should not touch when cleaning up IPVS rules.

    --ipvs-min-sync-period duration

- The minimum interval of how often the ipvs rules can be refreshed as endpoints and services change (e.g. '5s', '1m', '2h22m').
+ The minimum period between IPVS rule resyncs (e.g. '5s', '1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will result in an immediate IPVS resync.

    --ipvs-scheduler string

The ipvs scheduler type when proxy mode is ipvs

--ipvs-strict-arp

Enable strict ARP by setting arp_ignore to 1 and arp_announce to 2
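
One common reason to enable this is running IPVS mode behind a bare-metal load balancer; MetalLB's documentation, for instance, calls for strict ARP. A minimal sketch:

```
# IPVS mode with strict ARP; some bare-metal load balancers (e.g. MetalLB)
# document this combination as a prerequisite.
kube-proxy --proxy-mode=ipvs --ipvs-strict-arp
```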

    --ipvs-sync-period duration     Default: 30s

- The maximum interval of how often ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0.
+ An interval (e.g. '5s', '1m', '2h22m') indicating how frequently various re-synchronizing and cleanup operations are performed. Must be greater than 0.

    --ipvs-tcp-timeout duration

The timeout for idle IPVS TCP connections, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

--ipvs-tcpfin-timeout duration

The timeout for IPVS TCP connections after receiving a FIN packet, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

--ipvs-udp-timeout duration

The timeout for IPVS UDP packets, 0 to leave as-is. (e.g. '5s', '1m', '2h22m').

    --kube-api-burst int32     Default: 10

Burst to use while talking with kubernetes apiserver

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf"

Content type of requests sent to apiserver.

--kube-api-qps float     Default: 5

QPS to use while talking with kubernetes apiserver

--kubeconfig string

Path to kubeconfig file with authorization information (the master location can be overridden by the master flag).

--log-flush-frequency duration     Default: 5s

Maximum number of seconds between log flushes

    --log_backtrace_at <a string in the form 'file:N'>     Default: :0

when logging hits line file:N, emit a stack trace

--log_dir string

If non-empty, write log files in this directory (no effect when -logtostderr=true)

--log_file string

If non-empty, use this log file (no effect when -logtostderr=true)

--log_file_max_size uint     Default: 1800

Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. If the value is 0, the maximum file size is unlimited.

--logging-format string     Default: "text"

Sets the log format. Permitted formats: "text".

--logtostderr     Default: true

log to standard error instead of files

--machine_id_file string     Default: "/etc/machine-id,/var/lib/dbus/machine-id"

Comma-separated list of files to check for machine-id. Use the first one that exists.

    --masquerade-all

- If using the pure iptables proxy, SNAT all traffic sent via Service cluster IPs (this not commonly needed)
+ If using the iptables or ipvs proxy mode, SNAT all traffic sent via Service cluster IPs. This may be required with some CNI plugins.

    --master string

The address of the Kubernetes API server (overrides any value in kubeconfig)

    --metrics-bind-address ipport     Default: 127.0.0.1:10249

- The IP address with port for the metrics server to serve on (set to '0.0.0.0:10249' for all IPv4 interfaces and '[::]:10249' for all IPv6 interfaces). Set empty to disable. This parameter is ignored if a config file is specified by --config.
+ The IP address and port for the metrics server to serve on, defaulting to "127.0.0.1:10249" (if --bind-address is unset or IPv4), or "[::1]:10249" (if --bind-address is IPv6). (Set to "0.0.0.0:10249" / "[::]:10249" to bind on all interfaces.) Set empty to disable. This parameter is ignored if a config file is specified by --config.
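
For example, to make the metrics endpoint reachable from outside the node (the default binds to loopback only):

```
# Expose metrics on all interfaces so an external scraper can reach port 10249.
kube-proxy --metrics-bind-address=0.0.0.0:10249
```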

    --nodeport-addresses strings

- A string slice of values which specify the addresses to use for NodePorts. Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32). The default empty string slice ([]) means to use all local addresses. This parameter is ignored if a config file is specified by --config.
+ A list of CIDR ranges that contain valid node IPs. If set, connections to NodePort services will only be accepted on node IPs in one of the indicated ranges. If unset, NodePort connections will be accepted on all local IPs. This parameter is ignored if a config file is specified by --config.
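
A sketch with example CIDR ranges:

```
# Example ranges only: accept NodePort connections solely on node IPs
# within these CIDRs.
kube-proxy --nodeport-addresses=192.168.1.0/24,10.0.0.0/8
```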

    --one_output

If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)

--oom-score-adj int32     Default: -999

The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]. This parameter is ignored if a config file is specified by --config.

    --pod-bridge-interface string

- A bridge interface name in the cluster. Kube-proxy considers traffic as local if originating from an interface which matches the value. This argument should be set if DetectLocalMode is set to BridgeInterface.
+ A bridge interface name. When --detect-local-mode is set to BridgeInterface, kube-proxy will consider traffic to be local if it originates from this bridge.

    --pod-interface-name-prefix string

- An interface prefix in the cluster. Kube-proxy considers traffic as local if originating from interfaces that match the given prefix. This argument should be set if DetectLocalMode is set to InterfaceNamePrefix.
+ An interface name prefix. When --detect-local-mode is set to InterfaceNamePrefix, kube-proxy will consider traffic to be local if it originates from any interface whose name begins with this prefix.
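
Two hypothetical sketches, one per detection mode; the interface names here are examples only:

```
# Treat traffic from the cbr0 bridge as local:
kube-proxy --detect-local-mode=BridgeInterface --pod-bridge-interface=cbr0

# ...or treat traffic from any veth* interface as local:
kube-proxy --detect-local-mode=InterfaceNamePrefix --pod-interface-name-prefix=veth
```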

    --profiling

If true, enables profiling via web interface on /debug/pprof handler. This parameter is ignored if a config file is specified by --config.

--proxy-mode ProxyMode

Which proxy mode to use: on Linux this can be 'iptables' (default) or 'ipvs'. On Windows the only supported value is 'kernelspace'. This parameter is ignored if a config file is specified by --config.

- --proxy-port-range port-range
- Range of host ports (beginPort-endPort, single port or beginPort+offset, inclusive) that may be consumed in order to proxy service traffic. If (unspecified, 0, or 0-0) then ports will be randomly chosen.
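
For example, a minimal IPVS invocation with the round-robin scheduler:

```
# IPVS mode with the round-robin (rr) scheduler.
kube-proxy --proxy-mode=ipvs --ipvs-scheduler=rr
```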

    --show-hidden-metrics-for-version string

The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is <major>.<minor>, e.g.: '1.16'. The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that. This parameter is ignored if a config file is specified by --config.
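
A sketch with a hypothetical version pair:

```
# Hypothetical: while running a 1.29 kube-proxy, keep metrics that 1.29 hides
# visible for one more release by naming the previous minor version.
kube-proxy --show-hidden-metrics-for-version=1.28
```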

    --skip_headers

If true, avoid header prefixes in the log messages

--skip_log_headers

If true, avoid headers when opening log files (no effect when -logtostderr=true)

    --stderrthreshold int     Default: 2

- logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)
+ logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)

    -v, --v int

number for the log level verbosity

--version version[=true]

--version, --version=raw prints version information and quits; --version=vX.Y.Z... sets the reported version

--vmodule pattern=N,...

comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)

--write-config-to string

If set, write the default configuration values to this file and exit.

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
index e469398b189a4..13c6014ee4313 100644
--- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
+++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md
@@ -11,7 +11,7 @@
-To update the reference content, please follow the 
+To update the reference content, please follow the

@@ -50,6 +50,13 @@ kube-scheduler [flags]

The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

+ --allow-metric-labels-manifest string
+ The path to the manifest file that contains the allow-list mapping. The format of the file is the same as the flag --allow-metric-labels. Note that the flag --allow-metric-labels will override the manifest file.

--authentication-kubeconfig string

@@ -159,7 +166,7 @@ kube-scheduler [flags]

--feature-gates <comma-separated 'key=True|False' pairs>

-

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIListChunking=true|false (BETA - default=true)
    APIPriorityAndFairness=true|false (BETA - default=true)
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSINodeExpandSecret=true|false (BETA - default=true)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (ALPHA - default=false)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    CustomResourceValidationExpressions=true|false (BETA - default=true)
    DevicePluginCDIDevices=true|false (ALPHA - default=false)
    DisableCloudProviders=true|false (ALPHA - default=false)
    DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (ALPHA - default=false)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (ALPHA - default=false)
    JobReadyPods=true|false (BETA - default=true)
    KMSv2=true|false (BETA - default=true)
    KMSv2KDF=true|false (BETA - default=false)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRRangeAllocator=true|false (ALPHA - default=false)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (ALPHA - default=false)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (ALPHA - default=false)
    PodIndexLabel=true|false (BETA - default=true)
    PodReadyToStartContainersCondition=true|false (ALPHA - default=false)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    ReadWriteOncePod=true|false (BETA - default=true)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RemainingItemCount=true|false (BETA - default=true)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=true)
    SecurityContextDeny=true|false (ALPHA - default=false)
    ServiceNodePortStaticSubrange=true|false (BETA - default=true)
    SidecarContainers=true|false (ALPHA - default=false)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    SkipReadOnlyValidationGCE=true|false (ALPHA - default=false)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)

    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    AnyVolumeDataSource=true|false (BETA - default=true)
    AppArmor=true|false (BETA - default=true)
    CPUManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    CloudDualStackNodeIPs=true|false (BETA - default=true)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    ClusterTrustBundleProjection=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    DevicePluginCDIDevices=true|false (BETA - default=true)
    DisableCloudProviders=true|false (BETA - default=true)
    DisableKubeletCloudCredentialProviders=true|false (BETA - default=true)
    DisableNodeKubeProxyVersion=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    GracefulNodeShutdown=true|false (BETA - default=true)
    GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    ImageMaximumGCAge=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
    InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    JobBackoffLimitPerIndex=true|false (BETA - default=true)
    JobPodFailurePolicy=true|false (BETA - default=true)
    JobPodReplacementPolicy=true|false (BETA - default=true)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    KubeletSeparateDiskGC=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    LegacyServiceAccountTokenCleanUp=true|false (BETA - default=true)
    LoadBalancerIPMode=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    MatchLabelKeysInPodAffinity=true|false (ALPHA - default=false)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    NFTablesProxyMode=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    PersistentVolumeLastPhaseTransitionTime=true|false (BETA - default=true)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    PodHostIPs=true|false (BETA - default=true)
    PodIndexLabel=true|false (BETA - default=true)
    PodLifecycleSleepAction=true|false (ALPHA - default=false)
    PodReadyToStartContainersCondition=true|false (BETA - default=true)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    RuntimeClassInImageCriApi=true|false (ALPHA - default=false)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    SchedulerQueueingHints=true|false (BETA - default=false)
    SecurityContextDeny=true|false (ALPHA - default=false)
    SeparateTaintEvictionController=true|false (BETA - default=true)
    ServiceAccountTokenJTI=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBinding=true|false (ALPHA - default=false)
    ServiceAccountTokenNodeBindingValidation=true|false (ALPHA - default=false)
    ServiceAccountTokenPodNodeInfo=true|false (ALPHA - default=false)
    SidecarContainers=true|false (BETA - default=true)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    StructuredAuthenticationConfiguration=true|false (ALPHA - default=false)
    StructuredAuthorizationConfiguration=true|false (ALPHA - default=false)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    TranslateStreamCloseWebsocketRequests=true|false (ALPHA - default=false)
    UnauthenticatedHTTP2DOSMitigation=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    UserNamespacesPodSecurityStandards=true|false (ALPHA - default=false)
    UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    VolumeAttributesClass=true|false (ALPHA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    WindowsHostNetwork=true|false (ALPHA - default=true)
    ZeroLimitedNominalConcurrencyShares=true|false (BETA - default=false)

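Feature gates such as the ones listed above are normally set through the kubelet configuration file rather than the deprecated `--feature-gates` flag form. A minimal sketch, assuming a kubelet started with `--config=/etc/kubernetes/kubelet-config.yaml` (path and gate choices illustrative):

```yaml
# Illustrative kubelet configuration file; gate names must match the list above.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  # Explicitly enable a beta gate that is off by default in this release.
  NodeSwap: true
  # Explicitly disable a gate that is on by default.
  TopologyAwareHints: false
```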
    --cloud-config string
The path to the cloud provider configuration file. Empty string for no configuration file. (DEPRECATED: will be removed in 1.25 or later, in favor of removing cloud providers code from kubelet.)
    --cloud-provider string
The provider for cloud services. Set to empty string for running with no cloud provider. Set to 'external' for running with an external cloud provider. If set, the cloud provider determines the name of the node (consult cloud provider documentation to determine if and how the hostname is used).
--container-runtime-endpoint string     Default: "unix:///run/containerd/containerd.sock"
The endpoint of remote runtime service. UNIX domain sockets are supported on Linux, while 'npipe' and 'tcp' endpoints are supported on Windows. Examples: 'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--eviction-minimum-reclaim strings
    A set of minimum reclaims (e.g. "imagefs.available=2Gi") that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--eviction-soft strings
    A set of eviction thresholds (e.g. "memory.available<1.5Gi") that if met over a corresponding grace period would trigger a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--eviction-soft-grace-period strings
A set of eviction grace periods (e.g. "memory.available=1m30s") that correspond to how long a soft eviction threshold must hold before triggering a pod eviction. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--experimental-allocatable-ignore-eviction     Default: false
When set to true, hard eviction thresholds will be ignored while calculating node allocatable. See here for more details. (DEPRECATED: will be removed in 1.25 or later)
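Because the eviction flags above are deprecated in favor of the config file, the same thresholds are normally expressed as KubeletConfiguration fields. A minimal sketch reusing the example values from the flag descriptions:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
evictionSoft:
  memory.available: "1.5Gi"    # soft threshold, from the --eviction-soft example
evictionSoftGracePeriod:
  memory.available: "1m30s"    # how long the threshold must hold before eviction
evictionMinimumReclaim:
  imagefs.available: "2Gi"     # minimum reclaimed per eviction, from --eviction-minimum-reclaim
```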
    A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
    -APIListChunking=true|false (BETA - default=true)
    -APIPriorityAndFairness=true|false (BETA - default=true)
    APIResponseCompression=true|false (BETA - default=true)
    APIServerIdentity=true|false (BETA - default=true)
    APIServerTracing=true|false (BETA - default=true)
    -AdmissionWebhookMatchConditions=true|false (BETA- default=true)
    +AdmissionWebhookMatchConditions=true|false (BETA - default=true)
    AggregatedDiscoveryEndpoint=true|false (BETA - default=true)
    AllAlpha=true|false (ALPHA - default=false)
    AllBeta=true|false (BETA - default=false)
    @@ -374,11 +372,11 @@ CPUManagerPolicyBetaOptions=true|false (BETA - default=true)
    CPUManagerPolicyOptions=true|false (BETA - default=true)
    CRDValidationRatcheting=true|false (ALPHA - default=false)
    CSIMigrationPortworx=true|false (BETA - default=false)
    -CSINodeExpandSecret=true|false (BETA - default=true)
    CSIVolumeHealth=true|false (ALPHA - default=false)
    CloudControllerManagerWebhook=true|false (ALPHA - default=false)
    -CloudDualStackNodeIPs=true|false (ALPHA - default=false)
    +CloudDualStackNodeIPs=true|false (BETA - default=true)
    ClusterTrustBundle=true|false (ALPHA - default=false)
    +ClusterTrustBundleProjection=true|false (ALPHA - default=false)
    ComponentSLIs=true|false (BETA - default=true)
    ConsistentListFromCache=true|false (ALPHA - default=false)
    ContainerCheckpoint=true|false (ALPHA - default=false)
    @@ -386,10 +384,10 @@ ContextualLogging=true|false (ALPHA - default=false)
    CronJobsScheduledAnnotation=true|false (BETA - default=true)
    CrossNamespaceVolumeDataSource=true|false (ALPHA - default=false)
    CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
    -CustomResourceValidationExpressions=true|false (BETA - default=true)
    -DevicePluginCDIDevices=true|false (ALPHA - default=false)
    -DisableCloudProviders=true|false (ALPHA - default=false)
    -DisableKubeletCloudCredentialProviders=true|false (ALPHA - default=false)
    +DevicePluginCDIDevices=true|false (BETA - default=true)
    +DisableCloudProviders=true|false (BETA - default=true)
    +DisableKubeletCloudCredentialProviders=true|false (BETA - default=true)
    +DisableNodeKubeProxyVersion=true|false (ALPHA - default=false)
    DynamicResourceAllocation=true|false (ALPHA - default=false)
    ElasticIndexedJob=true|false (BETA - default=true)
    EventedPLEG=true|false (BETA - default=false)
    @@ -398,6 +396,7 @@ GracefulNodeShutdownBasedOnPodPriority=true|false (BETA - default=true)
    HPAContainerMetrics=true|false (BETA - default=true)
    HPAScaleToZero=true|false (ALPHA - default=false)
    HonorPVReclaimPolicy=true|false (ALPHA - default=false)
    +ImageMaximumGCAge=true|false (ALPHA - default=false)
    InPlacePodVerticalScaling=true|false (ALPHA - default=false)
    InTreePluginAWSUnregister=true|false (ALPHA - default=false)
    InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
    @@ -406,74 +405,84 @@ InTreePluginGCEUnregister=true|false (ALPHA - default=false)
    InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
    InTreePluginPortworxUnregister=true|false (ALPHA - default=false)
    InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
    -JobBackoffLimitPerIndex=true|false (ALPHA - default=false)
    +JobBackoffLimitPerIndex=true|false (BETA - default=true)
    JobPodFailurePolicy=true|false (BETA - default=true)
    -JobPodReplacementPolicy=true|false (ALPHA - default=false)
    -JobReadyPods=true|false (BETA - default=true)
    -KMSv2=true|false (BETA - default=true)
    -KMSv2KDF=true|false (BETA - default=false)
    +JobPodReplacementPolicy=true|false (BETA - default=true)
    KubeProxyDrainingTerminatingNodes=true|false (ALPHA - default=false)
    KubeletCgroupDriverFromCRI=true|false (ALPHA - default=false)
    KubeletInUserNamespace=true|false (ALPHA - default=false)
    KubeletPodResourcesDynamicResources=true|false (ALPHA - default=false)
    KubeletPodResourcesGet=true|false (ALPHA - default=false)
    +KubeletSeparateDiskGC=true|false (ALPHA - default=false)
    KubeletTracing=true|false (BETA - default=true)
    -LegacyServiceAccountTokenCleanUp=true|false (ALPHA - default=false)
    +LegacyServiceAccountTokenCleanUp=true|false (BETA - default=true)
    +LoadBalancerIPMode=true|false (ALPHA - default=false)
    LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
    LogarithmicScaleDown=true|false (BETA - default=true)
    LoggingAlphaOptions=true|false (ALPHA - default=false)
    LoggingBetaOptions=true|false (BETA - default=true)
    +MatchLabelKeysInPodAffinity=true|false (ALPHA - default=false)
    MatchLabelKeysInPodTopologySpread=true|false (BETA - default=true)
    MaxUnavailableStatefulSet=true|false (ALPHA - default=false)
    MemoryManager=true|false (BETA - default=true)
    MemoryQoS=true|false (ALPHA - default=false)
    MinDomainsInPodTopologySpread=true|false (BETA - default=true)
    -MultiCIDRRangeAllocator=true|false (ALPHA - default=false)
    MultiCIDRServiceAllocator=true|false (ALPHA - default=false)
    +NFTablesProxyMode=true|false (ALPHA - default=false)
    NewVolumeManagerReconstruction=true|false (BETA - default=true)
    NodeInclusionPolicyInPodTopologySpread=true|false (BETA - default=true)
    NodeLogQuery=true|false (ALPHA - default=false)
    NodeSwap=true|false (BETA - default=false)
    OpenAPIEnums=true|false (BETA - default=true)
    PDBUnhealthyPodEvictionPolicy=true|false (BETA - default=true)
    -PersistentVolumeLastPhaseTransitionTime=true|false (ALPHA - default=false)
    +PersistentVolumeLastPhaseTransitionTime=true|false (BETA - default=true)
    PodAndContainerStatsFromCRI=true|false (ALPHA - default=false)
    PodDeletionCost=true|false (BETA - default=true)
    PodDisruptionConditions=true|false (BETA - default=true)
    -PodHostIPs=true|false (ALPHA - default=false)
    +PodHostIPs=true|false (BETA - default=true)
    PodIndexLabel=true|false (BETA - default=true)
    -PodReadyToStartContainersCondition=true|false (ALPHA - default=false)
    +PodLifecycleSleepAction=true|false (ALPHA - default=false)
    +PodReadyToStartContainersCondition=true|false (BETA - default=true)
    PodSchedulingReadiness=true|false (BETA - default=true)
    ProcMountType=true|false (ALPHA - default=false)
    QOSReserved=true|false (ALPHA - default=false)
    -ReadWriteOncePod=true|false (BETA - default=true)
    RecoverVolumeExpansionFailure=true|false (ALPHA - default=false)
    -RemainingItemCount=true|false (BETA - default=true)
    RotateKubeletServerCertificate=true|false (BETA - default=true)
    +RuntimeClassInImageCriApi=true|false (ALPHA - default=false)
    SELinuxMountReadWriteOncePod=true|false (BETA - default=true)
    -SchedulerQueueingHints=true|false (BETA - default=true)
    +SchedulerQueueingHints=true|false (BETA - default=false)
    SecurityContextDeny=true|false (ALPHA - default=false)
    -ServiceNodePortStaticSubrange=true|false (BETA - default=true)
    -SidecarContainers=true|false (ALPHA - default=false)
    +SeparateTaintEvictionController=true|false (BETA - default=true)
    +ServiceAccountTokenJTI=true|false (ALPHA - default=false)
    +ServiceAccountTokenNodeBinding=true|false (ALPHA - default=false)
    +ServiceAccountTokenNodeBindingValidation=true|false (ALPHA - default=false)
    +ServiceAccountTokenPodNodeInfo=true|false (ALPHA - default=false)
    +SidecarContainers=true|false (BETA - default=true)
    SizeMemoryBackedVolumes=true|false (BETA - default=true)
    -SkipReadOnlyValidationGCE=true|false (ALPHA - default=false)
    StableLoadBalancerNodeSet=true|false (BETA - default=true)
    StatefulSetAutoDeletePVC=true|false (BETA - default=true)
    StatefulSetStartOrdinal=true|false (BETA - default=true)
    StorageVersionAPI=true|false (ALPHA - default=false)
    StorageVersionHash=true|false (BETA - default=true)
    +StructuredAuthenticationConfiguration=true|false (ALPHA - default=false)
    +StructuredAuthorizationConfiguration=true|false (ALPHA - default=false)
    TopologyAwareHints=true|false (BETA - default=true)
    TopologyManagerPolicyAlphaOptions=true|false (ALPHA - default=false)
    -TopologyManagerPolicyBetaOptions=true|false (BETA - default=false)
    +TopologyManagerPolicyBetaOptions=true|false (BETA - default=true)
    TopologyManagerPolicyOptions=true|false (BETA - default=true)
    +TranslateStreamCloseWebsocketRequests=true|false (ALPHA - default=false)
    +UnauthenticatedHTTP2DOSMitigation=true|false (BETA - default=true)
    UnknownVersionInteroperabilityProxy=true|false (ALPHA - default=false)
    -UserNamespacesStatelessPodsSupport=true|false (ALPHA - default=false)
    +UserNamespacesPodSecurityStandards=true|false (ALPHA - default=false)
    +UserNamespacesSupport=true|false (ALPHA - default=false)
    ValidatingAdmissionPolicy=true|false (BETA - default=false)
    +VolumeAttributesClass=true|false (ALPHA - default=false)
    VolumeCapacityPriority=true|false (ALPHA - default=false)
    WatchList=true|false (ALPHA - default=false)
    WinDSR=true|false (ALPHA - default=false)
    WinOverlay=true|false (BETA - default=true)
    -WindowsHostNetwork=true|false (ALPHA - default=true)

    +WindowsHostNetwork=true|false (ALPHA - default=true)
    +ZeroLimitedNominalConcurrencyShares=true|false (BETA - default=false)
    (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
    --image-service-endpoint string
The endpoint of remote image service. If not specified, it will be the same with --container-runtime-endpoint by default. UNIX domain sockets are supported on Linux, while `npipe` and `tcp` endpoints are supported on Windows. Examples: unix:///path/to/runtime.sock, npipe:////./pipe/runtime. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
    --kube-api-burst int32     Default: 100
Burst to use while talking with the kubernetes API server. The number must be >= 0. If 0, the default burst (100) is used. Doesn't cover events and node heartbeat APIs, whose rate limiting is controlled by a different set of flags. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--kube-reserved strings     Default: <None>
A set of <resource name>=<resource quantity> (e.g. "cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'") pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local ephemeral-storage for root file system are supported. See here for more detail. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--local-storage-capacity-isolation     Default: true
If true, local ephemeral storage isolation is enabled. Otherwise, local storage isolation feature will be disabled. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--manifest-url-header strings
Comma-separated list of HTTP headers to use when accessing the URL provided to --manifest-url. Multiple headers with the same name will be added in the same order provided. This flag can be repeatedly invoked. For example: --manifest-url-header 'a:hello,b:again,c:world' --manifest-url-header 'b:beautiful' (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
--provider-id string
Unique identifier for identifying the node in a machine database, i.e. cloud provider.
--seccomp-default
Enable the use of RuntimeDefault as the default seccomp profile for all workloads.
--system-reserved string     Default: <none>
A set of <resource name>=<resource quantity> (e.g. "cpu=200m,memory=500Mi,ephemeral-storage=1Gi,pid='100'") pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory and local ephemeral storage for root file system are supported. See here for more detail. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
    --version version[=true]
Print version information and quit; --version=vX.Y.Z... sets the reported version.
    --volume-stats-agg-period duration     Default: 1m0s
Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to a negative number. (DEPRECATED: This parameter should be set via the config file specified by the kubelet's --config flag. See kubelet-config-file for more information.)
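Most of the deprecated flags in this block map directly onto KubeletConfiguration fields. A hedged sketch consolidating several of them into one config file (values illustrative, defaults taken from the flag descriptions above):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerRuntimeEndpoint: "unix:///run/containerd/containerd.sock"
kubeAPIBurst: 100
kubeReserved:              # resources set aside for kubernetes system components
  cpu: "200m"
  memory: "500Mi"
  ephemeral-storage: "1Gi"
systemReserved:            # resources set aside for non-kubernetes components
  cpu: "200m"
  memory: "500Mi"
seccompDefault: true       # RuntimeDefault seccomp profile for all workloads
volumeStatsAggPeriod: "1m0s"
```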
diff --git a/content/en/docs/reference/config-api/apiserver-admission.v1.md b/content/en/docs/reference/config-api/apiserver-admission.v1.md
index 5555e6f5c12b4..3deb8521b783b 100644
--- a/content/en/docs/reference/config-api/apiserver-admission.v1.md
+++ b/content/en/docs/reference/config-api/apiserver-admission.v1.md
@@ -11,7 +11,6 @@ auto_generated: true
 - [AdmissionReview](#admission-k8s-io-v1-AdmissionReview)
-
 ## `AdmissionReview` {#admission-k8s-io-v1-AdmissionReview}
@@ -153,7 +152,7 @@ requested. e.g. a patch can result in either a CREATE or UPDATE Operation.

    userInfo [Required]
    -authentication/v1.UserInfo +authentication/v1.UserInfo

    UserInfo is information about the requesting user

    @@ -227,7 +226,7 @@ This must be copied over from the corresponding AdmissionRequest.

    status
    -meta/v1.Status +meta/v1.Status

Result contains extra details into why an admission request was denied.
diff --git a/content/en/docs/reference/config-api/apiserver-audit.v1.md b/content/en/docs/reference/config-api/apiserver-audit.v1.md
index abab04f1bd2e1..7f9314292b94d 100644
--- a/content/en/docs/reference/config-api/apiserver-audit.v1.md
+++ b/content/en/docs/reference/config-api/apiserver-audit.v1.md
@@ -14,7 +14,6 @@ auto_generated: true
 - [Policy](#audit-k8s-io-v1-Policy)
 - [PolicyList](#audit-k8s-io-v1-PolicyList)
-
 ## `Event` {#audit-k8s-io-v1-Event}
@@ -72,14 +71,14 @@ For non-resource requests, this is the lower-cased HTTP method.

    user [Required]
    -authentication/v1.UserInfo +authentication/v1.UserInfo

    Authenticated user information.

    impersonatedUser
    -authentication/v1.UserInfo +authentication/v1.UserInfo

    Impersonated user information.

    @@ -117,7 +116,7 @@ Does not apply for List-type requests, or non-resource requests.

    responseStatus
    -meta/v1.Status +meta/v1.Status

    The response status, populated even when the ResponseObject is not a Status type. @@ -145,14 +144,14 @@ at Response Level.

    requestReceivedTimestamp
    -meta/v1.MicroTime +meta/v1.MicroTime

    Time the request reached the apiserver.

    stageTimestamp
    -meta/v1.MicroTime +meta/v1.MicroTime

    Time the request reached current audit stage.

    @@ -189,7 +188,7 @@ should be short. Annotations are included in the Metadata level.

    metadata
    -meta/v1.ListMeta +meta/v1.ListMeta No description provided. @@ -224,7 +223,7 @@ categories are logged.

    metadata
    -meta/v1.ObjectMeta +meta/v1.ObjectMeta

    ObjectMeta is included for interoperability with API infrastructure.

    @@ -279,7 +278,7 @@ in a rule will override the global default.

    metadata
    -meta/v1.ListMeta +meta/v1.ListMeta No description provided. @@ -322,12 +321,14 @@ The empty string represents the core API group.

    Resources is a list of resources this rule applies to.

    -

    For example: -'pods' matches pods. -'pods/log' matches the log subresource of pods. -'*' matches all resources and their subresources. -'pods/*' matches all subresources of pods. -'*/scale' matches all scale subresources.

    +

For example:
- 'pods' matches pods.
- 'pods/log' matches the log subresource of pods.
- '*' matches all resources and their subresources.
- 'pods/*' matches all subresources of pods.
- '*/scale' matches all scale subresources.

    If wildcard is present, the validation rule will ensure resources do not overlap with each other.

    An empty list implies all resources and subresources in this API groups apply.

    @@ -501,10 +502,12 @@ An empty list implies every namespace.

    NonResourceURLs is a set of URL paths that should be audited. -*s are allowed, but only as the full, final step in the path. -Examples: -"/metrics" - Log requests for apiserver metrics -"/healthz*" - Log all health checks

+*s are allowed, but only as the full, final step in the path.
+Examples:
+- '/metrics' - Log requests for apiserver metrics
+- '/healthz*' - Log all health checks
    omitStages
    @@ -552,4 +555,4 @@ Policy.OmitManagedFields will stand. - + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/apiserver-config.v1.md b/content/en/docs/reference/config-api/apiserver-config.v1.md index ec78a45da1a51..c133724ec70bd 100644 --- a/content/en/docs/reference/config-api/apiserver-config.v1.md +++ b/content/en/docs/reference/config-api/apiserver-config.v1.md @@ -12,7 +12,6 @@ auto_generated: true - [AdmissionConfiguration](#apiserver-config-k8s-io-v1-AdmissionConfiguration) - ## `AdmissionConfiguration` {#apiserver-config-k8s-io-v1-AdmissionConfiguration} diff --git a/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md b/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md index 0c85b397f61f7..4207670760275 100644 --- a/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/apiserver-config.v1alpha1.md @@ -11,10 +11,53 @@ auto_generated: true - [AdmissionConfiguration](#apiserver-k8s-io-v1alpha1-AdmissionConfiguration) +- [AuthenticationConfiguration](#apiserver-k8s-io-v1alpha1-AuthenticationConfiguration) +- [AuthorizationConfiguration](#apiserver-k8s-io-v1alpha1-AuthorizationConfiguration) - [EgressSelectorConfiguration](#apiserver-k8s-io-v1alpha1-EgressSelectorConfiguration) - [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) + + +## `TracingConfiguration` {#TracingConfiguration} + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + +- [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) + + +

    TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

    + + + + + + + + + + + + + + +
    FieldDescription
    endpoint
    +string +
    +

    Endpoint of the collector this component will report traces to. +The connection is insecure, and does not currently support TLS. +Recommended is unset, and endpoint is the otlp grpc default, localhost:4317.

    +
    samplingRatePerMillion
    +int32 +
    +

    SamplingRatePerMillion is the number of samples to collect per million spans. +Recommended is unset. If unset, sampler respects its parent span's sampling +rate, but otherwise never samples.

    +
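Put together, the two fields above form a complete tracing configuration file. A minimal sketch, assuming the apiserver.config.k8s.io group used for standalone component config files (endpoint and sampling rate illustrative):

```yaml
apiVersion: apiserver.config.k8s.io/v1alpha1
kind: TracingConfiguration
# Collector endpoint; leaving it unset falls back to the OTLP gRPC default localhost:4317.
endpoint: localhost:4317
# Sample 1% of spans; leaving it unset defers to the parent span's sampling decision.
samplingRatePerMillion: 10000
```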
    + ## `AdmissionConfiguration` {#apiserver-k8s-io-v1alpha1-AdmissionConfiguration} @@ -41,6 +84,67 @@ auto_generated: true +## `AuthenticationConfiguration` {#apiserver-k8s-io-v1alpha1-AuthenticationConfiguration} + + + +

    AuthenticationConfiguration provides versioned configuration for authentication.

    + + + + + + + + + + + + + + +
    FieldDescription
    apiVersion
    string
    apiserver.k8s.io/v1alpha1
    kind
    string
    AuthenticationConfiguration
    jwt [Required]
    +[]JWTAuthenticator +
    +

    jwt is a list of authenticator to authenticate Kubernetes users using +JWT compliant tokens. The authenticator will attempt to parse a raw ID token, +verify it's been signed by the configured issuer. The public key to verify the +signature is discovered from the issuer's public endpoint using OIDC discovery. +For an incoming token, each JWT authenticator will be attempted in +the order in which it is specified in this list. Note however that +other authenticators may run before or after the JWT authenticators. +The specific position of JWT authenticators in relation to other +authenticators is neither defined nor stable across releases. Since +each JWT authenticator must have a unique issuer URL, at most one +JWT authenticator will attempt to cryptographically validate the token.

    +
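A hedged sketch of a complete AuthenticationConfiguration with one JWT authenticator, combining the Issuer, ClaimMappings, and PrefixedClaimOrExpression fields documented later on this page (issuer URL, audience, and claim names are illustrative; the apiVersion follows the table above):

```yaml
apiVersion: apiserver.k8s.io/v1alpha1
kind: AuthenticationConfiguration
jwt:
- issuer:
    url: https://issuer.example.com   # must match the "iss" claim and be unique
    audiences:
    - my-cluster                      # at least one entry must match "aud"
  claimMappings:
    username:
      claim: sub                      # the flag-era default; no defaulting here
      prefix: "https://issuer.example.com#"
    groups:
      claim: groups
      prefix: ""                      # required when claim is set; may be empty
```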
    + +## `AuthorizationConfiguration` {#apiserver-k8s-io-v1alpha1-AuthorizationConfiguration} + + + + + + + + + + + + + + + + +
    FieldDescription
    apiVersion
    string
    apiserver.k8s.io/v1alpha1
    kind
    string
    AuthorizationConfiguration
    authorizers [Required]
    +[]AuthorizerConfiguration +
    +

    Authorizers is an ordered list of authorizers to +authorize requests against. +This is similar to the --authorization-modes kube-apiserver flag +Must be at least one.

    +
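A hedged sketch of an AuthorizationConfiguration with a single webhook authorizer, using the WebhookConfiguration fields documented further down (name, kubeconfig path, and match condition are illustrative):

```yaml
apiVersion: apiserver.k8s.io/v1alpha1
kind: AuthorizationConfiguration
authorizers:
- type: Webhook
  name: example-webhook              # DNS1123 label; surfaced in metrics
  webhook:
    authorizedTTL: 5m0s
    unauthorizedTTL: 30s
    timeout: 3s                      # must not exceed 30s
    subjectAccessReviewVersion: v1
    matchConditionSubjectAccessReviewVersion: v1
    failurePolicy: Deny
    connectionInfo:
      type: KubeConfigFile
      kubeConfigFile: /etc/kubernetes/authz-webhook.kubeconfig  # illustrative path
    matchConditions:
    # Only consult this webhook for resource requests (illustrative CEL).
    - expression: has(request.resourceAttributes)
```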
    + ## `EgressSelectorConfiguration` {#apiserver-k8s-io-v1alpha1-EgressSelectorConfiguration} @@ -134,6 +238,249 @@ configuration. If present, it will be used instead of the path to the configurat +## `AuthorizerConfiguration` {#apiserver-k8s-io-v1alpha1-AuthorizerConfiguration} + + +**Appears in:** + +- [AuthorizationConfiguration](#apiserver-k8s-io-v1alpha1-AuthorizationConfiguration) + + + + + + + + + + + + + + + + + + +
    FieldDescription
    type [Required]
    +string +
    +

    Type refers to the type of the authorizer +"Webhook" is supported in the generic API server +Other API servers may support additional authorizer +types like Node, RBAC, ABAC, etc.

    +
    name [Required]
    +string +
    +

    Name used to describe the webhook +This is explicitly used in monitoring machinery for metrics +Note: Names must be DNS1123 labels like myauthorizername or +subdomains like myauthorizer.example.domain +Required, with no default

    +
    webhook [Required]
    +WebhookConfiguration +
    +

    Webhook defines the configuration for a Webhook authorizer +Must be defined when Type=Webhook +Must not be defined when Type!=Webhook

    +
    + +## `ClaimMappings` {#apiserver-k8s-io-v1alpha1-ClaimMappings} + + +**Appears in:** + +- [JWTAuthenticator](#apiserver-k8s-io-v1alpha1-JWTAuthenticator) + + +

    ClaimMappings provides the configuration for claim mapping

    + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    username [Required]
    +PrefixedClaimOrExpression +
    +

    username represents an option for the username attribute. +The claim's value must be a singular string. +Same as the --oidc-username-claim and --oidc-username-prefix flags. +If username.expression is set, the expression must produce a string value.

    +

    In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, +the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. +For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. +For prefix: +(1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, +set username.prefix="" +(2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same +behavior using authentication config, set username.prefix="#" +(3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix=""

    +
    groups
    +PrefixedClaimOrExpression +
    +

    groups represents an option for the groups attribute. +The claim's value must be a string or string array claim. +If groups.claim is set, the prefix must be specified (and can be the empty string). +If groups.expression is set, the expression must produce a string or string array value. +"", [], and null values are treated as the group mapping not being present.

    +
    uid
    +ClaimOrExpression +
    +

    uid represents an option for the uid attribute. +Claim must be a singular string claim. +If uid.expression is set, the expression must produce a string value.

    +
    extra
    +[]ExtraMapping +
    +

    extra represents an option for the extra attribute. +expression must produce a string or string array value. +If the value is empty, the extra mapping will not be present.

    +

hard-coded extra key/value:
- key: "foo"
  valueExpression: "'bar'"
  This will result in an extra attribute - foo: ["bar"]

hard-coded key, value copying claim value:
- key: "foo"
  valueExpression: "claims.some_claim"
  This will result in an extra attribute - foo: [value of some_claim]

hard-coded key, value derived from claim value:
- key: "admin"
  valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""'
  This will result in:
  - if the is_admin claim is present and true, extra attribute - admin: ["true"]
  - if the is_admin claim is present and false, or the is_admin claim is not present, no extra attribute will be added
    + +## `ClaimOrExpression` {#apiserver-k8s-io-v1alpha1-ClaimOrExpression} + + +**Appears in:** + +- [ClaimMappings](#apiserver-k8s-io-v1alpha1-ClaimMappings) + + +

    ClaimOrExpression provides the configuration for a single claim or expression.

    + + + + + + + + + + + + + + +
    FieldDescription
    claim
    +string +
    +

    claim is the JWT claim to use. +Either claim or expression must be set. +Mutually exclusive with expression.

    +
    expression
    +string +
    +

    expression represents the expression which will be evaluated by CEL.

    +

    CEL expressions have access to the contents of the token claims, organized into CEL variable:

    +
      +
    • 'claims' is a map of claim names to claim values. +For example, a variable named 'sub' can be accessed as 'claims.sub'. +Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'.
    • +
    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +

    Mutually exclusive with claim.

    +
    + +## `ClaimValidationRule` {#apiserver-k8s-io-v1alpha1-ClaimValidationRule} + + +**Appears in:** + +- [JWTAuthenticator](#apiserver-k8s-io-v1alpha1-JWTAuthenticator) + + +

    ClaimValidationRule provides the configuration for a single claim validation rule.

    + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    claim
    +string +
    +

    claim is the name of a required claim. +Same as --oidc-required-claim flag. +Only string claim keys are supported. +Mutually exclusive with expression and message.

    +
    requiredValue
    +string +
    +

    requiredValue is the value of a required claim. +Same as --oidc-required-claim flag. +Only string claim values are supported. +If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. +Mutually exclusive with expression and message.

    +
    expression
    +string +
    +

    expression represents the expression which will be evaluated by CEL. +Must produce a boolean.

    +

    CEL expressions have access to the contents of the token claims, organized into CEL variable:

    +
      +
    • 'claims' is a map of claim names to claim values. +For example, a variable named 'sub' can be accessed as 'claims.sub'. +Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'. +Must return true for the validation to pass.
    • +
    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +

    Mutually exclusive with claim and requiredValue.

    +
    message
    +string +
    +

    message customizes the returned error message when expression returns false. +message is a literal string. +Mutually exclusive with claim and requiredValue.

    +
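The two styles can be mixed within a single authenticator; a small illustrative sketch of claimValidationRules (claim names and the lifetime bound are assumptions):

```yaml
claimValidationRules:
# Flag-style rule: the claim must be present with exactly this value.
- claim: hd
  requiredValue: example.com
# CEL-style rule: the expression must evaluate to true.
- expression: "claims.exp - claims.nbf <= 86400"
  message: "total token lifetime must not exceed 24 hours"
```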
    + ## `Connection` {#apiserver-k8s-io-v1alpha1-Connection} @@ -203,6 +550,205 @@ The "master" egress selector is deprecated in favor of "controlpl +## `ExtraMapping` {#apiserver-k8s-io-v1alpha1-ExtraMapping} + + +**Appears in:** + +- [ClaimMappings](#apiserver-k8s-io-v1alpha1-ClaimMappings) + + +

    ExtraMapping provides the configuration for a single extra mapping.

    + + + + + + + + + + + + + + +
    FieldDescription
    key [Required]
    +string +
    +

    key is a string to use as the extra attribute key. +key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid +subdomain as defined by RFC 1123. All characters trailing the first "/" must +be valid HTTP Path characters as defined by RFC 3986. +key must be lowercase.

    +
    valueExpression [Required]
    +string +
    +

    valueExpression is a CEL expression to extract extra attribute value. +valueExpression must produce a string or string array value. +"", [], and null values are treated as the extra mapping not being present. +Empty string values contained within a string array are filtered out.

    +

    CEL expressions have access to the contents of the token claims, organized into CEL variable:

    +
      +
    • 'claims' is a map of claim names to claim values. +For example, a variable named 'sub' can be accessed as 'claims.sub'. +Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'.
    • +
    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +
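An illustrative claimMappings.extra sketch applying the key constraints above (domain-prefixed, lowercase keys; names and claims are assumptions):

```yaml
claimMappings:
  extra:
  # Value copied verbatim from a claim.
  - key: example.org/team
    valueExpression: claims.team
  # Value derived from a claim; an empty string drops the mapping.
  - key: example.org/admin
    valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true" : ""'
```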
    + +## `Issuer` {#apiserver-k8s-io-v1alpha1-Issuer} + + +**Appears in:** + +- [JWTAuthenticator](#apiserver-k8s-io-v1alpha1-JWTAuthenticator) + + +

    Issuer provides the configuration for a external provider specific settings.

    + + + + + + + + + + + + + + + + + +
    FieldDescription
    url [Required]
    +string +
    +

    url points to the issuer URL in a format https://url or https://url/path. +This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. +Same value as the --oidc-issuer-url flag. +Used to fetch discovery information unless overridden by discoveryURL. +Required to be unique. +Note that egress selection configuration is not used for this network connection.

    +
    certificateAuthority
    +string +
    +

    certificateAuthority contains PEM-encoded certificate authority certificates +used to validate the connection when fetching discovery information. +If unset, the system verifier is used. +Same value as the content of the file referenced by the --oidc-ca-file flag.

    +
    audiences [Required]
    +[]string +
    +

    audiences is the set of acceptable audiences the JWT must be issued to. +At least one of the entries must match the "aud" claim in presented JWTs. +Same value as the --oidc-client-id flag (though this field supports an array). +Required to be non-empty.

    +
    + +## `JWTAuthenticator` {#apiserver-k8s-io-v1alpha1-JWTAuthenticator} + + +**Appears in:** + +- [AuthenticationConfiguration](#apiserver-k8s-io-v1alpha1-AuthenticationConfiguration) + + +

    JWTAuthenticator provides the configuration for a single JWT authenticator.

    + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    issuer [Required]
    +Issuer +
    +

    issuer contains the basic OIDC provider connection options.

    +
    claimValidationRules
    +[]ClaimValidationRule +
    +

    claimValidationRules are rules that are applied to validate token claims to authenticate users.

    +
    claimMappings [Required]
    +ClaimMappings +
    +

    claimMappings points claims of a token to be treated as user attributes.

    +
    userValidationRules
    +[]UserValidationRule +
    +

    userValidationRules are rules that are applied to final user before completing authentication. +These allow invariants to be applied to incoming identities such as preventing the +use of the system: prefix that is commonly used by Kubernetes components. +The validation rules are logically ANDed together and must all return true for the validation to pass.

    +
    + +## `PrefixedClaimOrExpression` {#apiserver-k8s-io-v1alpha1-PrefixedClaimOrExpression} + + +**Appears in:** + +- [ClaimMappings](#apiserver-k8s-io-v1alpha1-ClaimMappings) + + +

    PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.

    + + + + + + + + + + + + + + + + + +
    FieldDescription
    claim
    +string +
    +

    claim is the JWT claim to use. +Mutually exclusive with expression.

    +
    prefix
    +string +
    +

    prefix is prepended to claim's value to prevent clashes with existing names. +prefix needs to be set if claim is set and can be the empty string. +Mutually exclusive with expression.

    +
    expression
    +string +
    +

    expression represents the expression which will be evaluated by CEL.

    +

    CEL expressions have access to the contents of the token claims, organized into CEL variable:

    +
      +
    • 'claims' is a map of claim names to claim values. +For example, a variable named 'sub' can be accessed as 'claims.sub'. +Nested claims can be accessed using dot notation, e.g. 'claims.email.verified'.
    • +
    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +

    Mutually exclusive with claim and prefix.

    +
    + ## `ProtocolType` {#apiserver-k8s-io-v1alpha1-ProtocolType} (Alias of `string`) @@ -360,21 +906,166 @@ This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server - - + +## `UserValidationRule` {#apiserver-k8s-io-v1alpha1-UserValidationRule} -## `TracingConfiguration` {#TracingConfiguration} +**Appears in:** + +- [JWTAuthenticator](#apiserver-k8s-io-v1alpha1-JWTAuthenticator) + + +

    UserValidationRule provides the configuration for a single user info validation rule.

    + + + + + + + + + + + + + + +
    FieldDescription
    expression [Required]
    +string +
    +

    expression represents the expression which will be evaluated by CEL. +Must return true for the validation to pass.

    +

    CEL expressions have access to the contents of UserInfo, organized into CEL variable:

    +
      +
    • 'user' - authentication.k8s.io/v1, Kind=UserInfo object +Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. +API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io
    • +
    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +
    message
    +string +
    +

    message customizes the returned error message when rule returns false. +message is a literal string.

    +
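A short illustrative sketch of userValidationRules enforcing the system: prefix invariant described under JWTAuthenticator above:

```yaml
userValidationRules:
- expression: "!user.username.startsWith('system:')"
  message: "username cannot use the reserved system: prefix"
- expression: "user.groups.all(group, !group.startsWith('system:'))"
  message: "groups cannot use the reserved system: prefix"
```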
    + +## `WebhookConfiguration` {#apiserver-k8s-io-v1alpha1-WebhookConfiguration} **Appears in:** -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +- [AuthorizerConfiguration](#apiserver-k8s-io-v1alpha1-AuthorizerConfiguration) -- [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) -

    TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    authorizedTTL [Required]
    +meta/v1.Duration +
    +

    The duration to cache 'authorized' responses from the webhook +authorizer. +Same as setting --authorization-webhook-cache-authorized-ttl flag +Default: 5m0s

    +
    unauthorizedTTL [Required]
    +meta/v1.Duration +
    +

    The duration to cache 'unauthorized' responses from the webhook +authorizer. +Same as setting --authorization-webhook-cache-unauthorized-ttl flag +Default: 30s

    +
    timeout [Required]
    +meta/v1.Duration +
    +

    Timeout for the webhook request +Maximum allowed value is 30s. +Required, no default value.

    +
    subjectAccessReviewVersion [Required]
    +string +
    +

    The API version of the authorization.k8s.io SubjectAccessReview to +send to and expect from the webhook. +Same as setting --authorization-webhook-version flag +Valid values: v1beta1, v1 +Required, no default value

    +
    matchConditionSubjectAccessReviewVersion [Required]
    +string +
    +

    MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview +version the CEL expressions are evaluated against +Valid values: v1 +Required, no default value

    +
    failurePolicy [Required]
    +string +
    +

    Controls the authorization decision when a webhook request fails to +complete or returns a malformed response or errors evaluating +matchConditions. +Valid values:

    +
      +
    • NoOpinion: continue to subsequent authorizers to see if one of +them allows the request
    • +
    • Deny: reject the request without consulting subsequent authorizers +Required, with no default.
    • +
    +
    connectionInfo [Required]
    +WebhookConnectionInfo +
    +

    ConnectionInfo defines how we talk to the webhook

    +
    matchConditions [Required]
    +[]WebhookMatchCondition +
    +

    matchConditions is a list of conditions that must be met for a request to be sent to this +webhook. An empty list of matchConditions matches all requests. +There are a maximum of 64 match conditions allowed.

    +

    The exact matching logic is (in order):

    +
      +
    1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped.
    2. +
    3. If ALL matchConditions evaluate to TRUE, then the webhook is called.
    4. +
    5. If at least one matchCondition evaluates to an error (but none are FALSE): +
        +
      • If failurePolicy=Deny, then the webhook rejects the request
      • +
      • If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped
      • +
      +
    6. +
    +
    + +## `WebhookConnectionInfo` {#apiserver-k8s-io-v1alpha1-WebhookConnectionInfo} + + +**Appears in:** + +- [WebhookConfiguration](#apiserver-k8s-io-v1alpha1-WebhookConfiguration) + @@ -382,23 +1073,57 @@ This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server - - -
    endpoint
    +
    type [Required]
    string
    -

    Endpoint of the collector this component will report traces to. -The connection is insecure, and does not currently support TLS. -Recommended is unset, and endpoint is the otlp grpc default, localhost:4317.

    +

    Controls how the webhook should communicate with the server. +Valid values:

    +
      +
    • KubeConfigFile: use the file specified in kubeConfigFile to locate the +server.
    • +
    • InClusterConfig: use the in-cluster configuration to call the +SubjectAccessReview API hosted by kube-apiserver. This mode is not +allowed for kube-apiserver.
    • +
    samplingRatePerMillion
    -int32 +
    kubeConfigFile [Required]
    +string
    -

    SamplingRatePerMillion is the number of samples to collect per million spans. -Recommended is unset. If unset, sampler respects its parent span's sampling -rate, but otherwise never samples.

    +

    Path to KubeConfigFile for connection info +Required, if connectionInfo.Type is KubeConfig

    \ No newline at end of file + + +## `WebhookMatchCondition` {#apiserver-k8s-io-v1alpha1-WebhookMatchCondition} + + +**Appears in:** + +- [WebhookConfiguration](#apiserver-k8s-io-v1alpha1-WebhookConfiguration) + + + + + + + + + + + + +
    FieldDescription
    expression [Required]
    +string +
    +

    expression represents the expression which will be evaluated by CEL. Must evaluate to bool. +CEL expressions have access to the contents of the SubjectAccessReview in v1 version. +If version specified by subjectAccessReviewVersion in the request variable is v1beta1, +the contents would be converted to the v1 version before evaluating the CEL expression.

    +

    Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/

    +
    + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/apiserver-config.v1beta1.md b/content/en/docs/reference/config-api/apiserver-config.v1beta1.md index 6acb3540cd06f..06dfaab72291e 100644 --- a/content/en/docs/reference/config-api/apiserver-config.v1beta1.md +++ b/content/en/docs/reference/config-api/apiserver-config.v1beta1.md @@ -14,6 +14,49 @@ auto_generated: true - [TracingConfiguration](#apiserver-k8s-io-v1beta1-TracingConfiguration) + + +## `TracingConfiguration` {#TracingConfiguration} + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + +- [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) + +- [TracingConfiguration](#apiserver-k8s-io-v1beta1-TracingConfiguration) + + +

    TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

    + + + + + + + + + + + + + + +
    FieldDescription
    endpoint
    +string +
    +

    Endpoint of the collector this component will report traces to. +The connection is insecure, and does not currently support TLS. +Recommended is unset, and endpoint is the otlp grpc default, localhost:4317.

    +
    samplingRatePerMillion
    +int32 +
    +

    SamplingRatePerMillion is the number of samples to collect per million spans. +Recommended is unset. If unset, sampler respects its parent span's sampling +rate, but otherwise never samples.

    +
    + ## `EgressSelectorConfiguration` {#apiserver-k8s-io-v1beta1-EgressSelectorConfiguration} @@ -291,47 +334,4 @@ This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server - - - - -## `TracingConfiguration` {#TracingConfiguration} - - -**Appears in:** - -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - -- [TracingConfiguration](#apiserver-k8s-io-v1alpha1-TracingConfiguration) - -- [TracingConfiguration](#apiserver-k8s-io-v1beta1-TracingConfiguration) - - -

    TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

    - - - - - - - - - - - - - - -
-Field | Description
-endpoint
-string
-
-Endpoint of the collector this component will report traces to.
-The connection is insecure, and does not currently support TLS.
-Recommended is unset, and endpoint is the otlp grpc default, localhost:4317.
-
-samplingRatePerMillion
-int32
-
-SamplingRatePerMillion is the number of samples to collect per million spans.
-Recommended is unset. If unset, sampler respects its parent span's sampling
-rate, but otherwise never samples.
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/content/en/docs/reference/config-api/apiserver-encryption.v1.md b/content/en/docs/reference/config-api/apiserver-encryption.v1.md
index 148dc374e8cad..30d42277e6b29 100644
--- a/content/en/docs/reference/config-api/apiserver-encryption.v1.md
+++ b/content/en/docs/reference/config-api/apiserver-encryption.v1.md
@@ -12,7 +12,6 @@ auto_generated: true
 - [EncryptionConfiguration](#apiserver-config-k8s-io-v1-EncryptionConfiguration)
-
 ## `EncryptionConfiguration` {#apiserver-config-k8s-io-v1-EncryptionConfiguration}

@@ -20,8 +19,8 @@ auto_generated: true

 EncryptionConfiguration stores the complete configuration for encryption providers.
 It also allows the use of wildcards to specify the resources that should be encrypted.
-Use '*.<group>' to encrypt all resources within a group or '*.*' to encrypt all resources.
-'*.' can be used to encrypt all resource in the core group. '*.*' will encrypt all
+Use '*.<group>' to encrypt all resources within a group or '*.*' to encrypt all resources.
+'*.' can be used to encrypt all resources in the core group. '*.*' will encrypt all
 resources, even custom resources that are added after API server start.
 Use of wildcards that overlap within the same resource list or across multiple
 entries are not allowed since part of the configuration would be ineffective.

 resources is a list of kubernetes resources which have to be encrypted. The resource names
 are derived from resource or resource.group of the group/version/resource.
 eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas.
-Use '*.*' to encrypt all resources and '*.<group>' to encrypt all resources in a specific group.
-eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
-eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).
+Use '*.*' to encrypt all resources and '*.<group>' to encrypt all resources in a specific group.
+eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
+eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).

    providers [Required]
    @@ -325,4 +324,4 @@ Each key has to be 32 bytes long.
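As a concrete illustration of the wildcard rules above, a minimal EncryptionConfiguration; the group name comes from the example in the field description, and the key material is a placeholder:

```yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
      - '*.awesome.bears.example'   # every resource in that group
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <BASE64-ENCODED 32-BYTE KEY>   # placeholder; must be 32 bytes
      - identity: {}                # allows reading data that was stored unencrypted
```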

    - + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/apiserver-eventratelimit.v1alpha1.md b/content/en/docs/reference/config-api/apiserver-eventratelimit.v1alpha1.md index 2189c4910d277..60a5bcbedf9d2 100644 --- a/content/en/docs/reference/config-api/apiserver-eventratelimit.v1alpha1.md +++ b/content/en/docs/reference/config-api/apiserver-eventratelimit.v1alpha1.md @@ -11,7 +11,6 @@ auto_generated: true - [Configuration](#eventratelimit-admission-k8s-io-v1alpha1-Configuration) - ## `Configuration` {#eventratelimit-admission-k8s-io-v1alpha1-Configuration} diff --git a/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md index b806f3b6c6075..9520d2ce53768 100644 --- a/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md +++ b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md @@ -12,7 +12,6 @@ auto_generated: true - [WebhookAdmission](#apiserver-config-k8s-io-v1-WebhookAdmission) - ## `WebhookAdmission` {#apiserver-config-k8s-io-v1-WebhookAdmission} diff --git a/content/en/docs/reference/config-api/client-authentication.v1.md b/content/en/docs/reference/config-api/client-authentication.v1.md index 53e602d0f22a2..e49be08c93dc6 100644 --- a/content/en/docs/reference/config-api/client-authentication.v1.md +++ b/content/en/docs/reference/config-api/client-authentication.v1.md @@ -11,7 +11,6 @@ auto_generated: true - [ExecCredential](#client-authentication-k8s-io-v1-ExecCredential) - ## `ExecCredential` {#client-authentication-k8s-io-v1-ExecCredential} @@ -206,7 +205,7 @@ itself should at least be protected via file permissions.

    expirationTimestamp
    -meta/v1.Time +meta/v1.Time

    ExpirationTimestamp indicates a time when the provided credentials expire.

    diff --git a/content/en/docs/reference/config-api/client-authentication.v1beta1.md b/content/en/docs/reference/config-api/client-authentication.v1beta1.md index d9e55d0ee2beb..cd60d94c56480 100644 --- a/content/en/docs/reference/config-api/client-authentication.v1beta1.md +++ b/content/en/docs/reference/config-api/client-authentication.v1beta1.md @@ -11,7 +11,6 @@ auto_generated: true - [ExecCredential](#client-authentication-k8s-io-v1beta1-ExecCredential) - ## `ExecCredential` {#client-authentication-k8s-io-v1beta1-ExecCredential} @@ -206,7 +205,7 @@ itself should at least be protected via file permissions.

    expirationTimestamp
    -meta/v1.Time +meta/v1.Time

    ExpirationTimestamp indicates a time when the provided credentials expire.

    diff --git a/content/en/docs/reference/config-api/imagepolicy.v1alpha1.md b/content/en/docs/reference/config-api/imagepolicy.v1alpha1.md index f6eaa915a8b41..d03ab7479629e 100644 --- a/content/en/docs/reference/config-api/imagepolicy.v1alpha1.md +++ b/content/en/docs/reference/config-api/imagepolicy.v1alpha1.md @@ -11,7 +11,6 @@ auto_generated: true - [ImageReview](#imagepolicy-k8s-io-v1alpha1-ImageReview) - ## `ImageReview` {#imagepolicy-k8s-io-v1alpha1-ImageReview} @@ -29,7 +28,7 @@ auto_generated: true metadata
    -meta/v1.ObjectMeta +meta/v1.ObjectMeta

    Standard object's metadata. diff --git a/content/en/docs/reference/config-api/kube-controller-manager-config.v1alpha1.md b/content/en/docs/reference/config-api/kube-controller-manager-config.v1alpha1.md index 348c557807eed..d63e35f68a973 100644 --- a/content/en/docs/reference/config-api/kube-controller-manager-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/kube-controller-manager-config.v1alpha1.md @@ -9,301 +9,366 @@ auto_generated: true ## Resource Types -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) - [LeaderMigrationConfiguration](#controllermanager-config-k8s-io-v1alpha1-LeaderMigrationConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + -## `KubeControllerManagerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration} +## `NodeControllerConfiguration` {#NodeControllerConfiguration} +**Appears in:** -

KubeControllerManagerConfiguration contains elements describing the kube-controller-manager.

    +- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) + + +

    NodeControllerConfiguration contains elements describing NodeController.

    - - - - - +
    FieldDescription
    apiVersion
    string
    kubecontrollermanager.config.k8s.io/v1alpha1
    kind
    string
    KubeControllerManagerConfiguration
    Generic [Required]
    -GenericControllerManagerConfiguration +
    ConcurrentNodeSyncs [Required]
    +int32
    -

    Generic holds configuration for a generic controller-manager

    +

+ConcurrentNodeSyncs is the number of workers
+concurrently synchronizing nodes.

    KubeCloudShared [Required]
    -KubeCloudSharedConfiguration +
    + +## `ServiceControllerConfiguration` {#ServiceControllerConfiguration} + + +**Appears in:** + +- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    ServiceControllerConfiguration contains elements describing ServiceController.

    + + + + + + + + - +
    FieldDescription
    ConcurrentServiceSyncs [Required]
    +int32
    -

    KubeCloudSharedConfiguration holds configuration for shared related features -both in cloud controller manager and kube-controller manager.

    +

    concurrentServiceSyncs is the number of services that are +allowed to sync concurrently. Larger number = more responsive service +management, but more CPU (and network) load.

    AttachDetachController [Required]
    -AttachDetachControllerConfiguration +
    + + +## `CloudControllerManagerConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration} + + + +

CloudControllerManagerConfiguration contains elements describing the cloud-controller-manager.

    + + + + + + + + + + + - - - - - - +
    FieldDescription
    apiVersion
    string
    cloudcontrollermanager.config.k8s.io/v1alpha1
    kind
    string
    CloudControllerManagerConfiguration
    Generic [Required]
    +GenericControllerManagerConfiguration
    -

    AttachDetachControllerConfiguration holds configuration for -AttachDetachController related features.

    +

    Generic holds configuration for a generic controller-manager

    CSRSigningController [Required]
    -CSRSigningControllerConfiguration +
    KubeCloudShared [Required]
    +KubeCloudSharedConfiguration
    -

    CSRSigningControllerConfiguration holds configuration for -CSRSigningController related features.

    +

    KubeCloudSharedConfiguration holds configuration for shared related features +both in cloud controller manager and kube-controller manager.

    DaemonSetController [Required]
    -DaemonSetControllerConfiguration +
    NodeController [Required]
    +NodeControllerConfiguration
    -

    DaemonSetControllerConfiguration holds configuration for DaemonSetController +

    NodeController holds configuration for node controller related features.

    DeploymentController [Required]
    -DeploymentControllerConfiguration +
    ServiceController [Required]
    +ServiceControllerConfiguration
    -

    DeploymentControllerConfiguration holds configuration for -DeploymentController related features.

    +

    ServiceControllerConfiguration holds configuration for ServiceController +related features.

    StatefulSetController [Required]
    -StatefulSetControllerConfiguration +
    NodeStatusUpdateFrequency [Required]
    +meta/v1.Duration
    -

    StatefulSetControllerConfiguration holds configuration for -StatefulSetController related features.

    +

    NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status

    DeprecatedController [Required]
    -DeprecatedControllerConfiguration +
    Webhook [Required]
    +WebhookConfiguration
    -

    DeprecatedControllerConfiguration holds configuration for some deprecated -features.

    +

    Webhook is the configuration for cloud-controller-manager hosted webhooks

    EndpointController [Required]
    -EndpointControllerConfiguration +
    + +## `CloudProviderConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudProviderConfiguration} + + +**Appears in:** + +- [KubeCloudSharedConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-KubeCloudSharedConfiguration) + + +

CloudProviderConfiguration contains basic elements about the cloud provider.

    + + + + + + + + - - +
    FieldDescription
    Name [Required]
    +string
    -

    EndpointControllerConfiguration holds configuration for EndpointController -related features.

    +

    Name is the provider for cloud services.

    EndpointSliceController [Required]
    -EndpointSliceControllerConfiguration +
    CloudConfigFile [Required]
    +string
    -

    EndpointSliceControllerConfiguration holds configuration for -EndpointSliceController related features.

    +

    cloudConfigFile is the path to the cloud provider configuration file.
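A hedged sketch of how these two fields nest inside the shared configuration, assuming the capitalized Go field names above are used verbatim as YAML keys in this alpha API; the provider name and path are hypothetical:

```yaml
KubeCloudShared:
  CloudProvider:
    Name: my-provider                             # hypothetical provider name
    CloudConfigFile: /etc/kubernetes/cloud.conf   # hypothetical path
```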

    EndpointSliceMirroringController [Required]
    -EndpointSliceMirroringControllerConfiguration +
    + +## `KubeCloudSharedConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-KubeCloudSharedConfiguration} + + +**Appears in:** + +- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

+KubeCloudSharedConfiguration contains elements shared by both the kube-controller-manager
+and the cloud-controller-manager, but not the generic config.

    + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +
    FieldDescription
    CloudProvider [Required]
    +CloudProviderConfiguration
    -

    EndpointSliceMirroringControllerConfiguration holds configuration for -EndpointSliceMirroringController related features.

    +

    CloudProviderConfiguration holds configuration for CloudProvider related features.

    EphemeralVolumeController [Required]
    -EphemeralVolumeControllerConfiguration +
    ExternalCloudVolumePlugin [Required]
    +string
    -

    EphemeralVolumeControllerConfiguration holds configuration for EphemeralVolumeController -related features.

    +

+externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external".
+It is currently used by the in-repo cloud providers to handle node and volume control in the KCM.

    GarbageCollectorController [Required]
    -GarbageCollectorControllerConfiguration +
    UseServiceAccountCredentials [Required]
    +bool
    -

    GarbageCollectorControllerConfiguration holds configuration for -GarbageCollectorController related features.

    +

    useServiceAccountCredentials indicates whether controllers should be run with +individual service account credentials.

    HPAController [Required]
    -HPAControllerConfiguration +
    AllowUntaggedCloud [Required]
    +bool
    -

    HPAControllerConfiguration holds configuration for HPAController related features.

    +

    run with untagged cloud instances

    JobController [Required]
    -JobControllerConfiguration +
    RouteReconciliationPeriod [Required]
    +meta/v1.Duration
    -

    JobControllerConfiguration holds configuration for JobController related features.

    +

routeReconciliationPeriod is the period for reconciling routes created for Nodes by the cloud provider.

    CronJobController [Required]
    -CronJobControllerConfiguration +
    NodeMonitorPeriod [Required]
    +meta/v1.Duration
    -

    CronJobControllerConfiguration holds configuration for CronJobController related features.

    +

    nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.

    LegacySATokenCleaner [Required]
    -LegacySATokenCleanerConfiguration +
    ClusterName [Required]
    +string
    -

    LegacySATokenCleanerConfiguration holds configuration for LegacySATokenCleaner related features.

    +

    clusterName is the instance prefix for the cluster.

    NamespaceController [Required]
    -NamespaceControllerConfiguration +
    ClusterCIDR [Required]
    +string
    -

    NamespaceControllerConfiguration holds configuration for NamespaceController -related features.

    +

clusterCIDR is the CIDR range for Pods in the cluster.

    NodeIPAMController [Required]
    -NodeIPAMControllerConfiguration +
    AllocateNodeCIDRs [Required]
    +bool
    -

    NodeIPAMControllerConfiguration holds configuration for NodeIPAMController -related features.

    +

    AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if +ConfigureCloudRoutes is true, to be set on the cloud provider.

    NodeLifecycleController [Required]
    -NodeLifecycleControllerConfiguration +
    CIDRAllocatorType [Required]
    +string
    -

    NodeLifecycleControllerConfiguration holds configuration for -NodeLifecycleController related features.

    +

    CIDRAllocatorType determines what kind of pod CIDR allocator will be used.

    PersistentVolumeBinderController [Required]
    -PersistentVolumeBinderControllerConfiguration +
    ConfigureCloudRoutes [Required]
    +bool
    -

    PersistentVolumeBinderControllerConfiguration holds configuration for -PersistentVolumeBinderController related features.

    +

    configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs +to be configured on the cloud provider.

    PodGCController [Required]
    -PodGCControllerConfiguration +
    NodeSyncPeriod [Required]
    +meta/v1.Duration
    -

    PodGCControllerConfiguration holds configuration for PodGCController -related features.

    +

+nodeSyncPeriod is the period for syncing nodes from the cloud provider. Longer
+periods will result in fewer calls to the cloud provider, but may delay addition
+of new nodes to the cluster.

    ReplicaSetController [Required]
    -ReplicaSetControllerConfiguration -
    -

    ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features.

    -
    ReplicationController [Required]
    -ReplicationControllerConfiguration -
    -

    ReplicationControllerConfiguration holds configuration for -ReplicationController related features.

    -
    ResourceQuotaController [Required]
    -ResourceQuotaControllerConfiguration -
    -

    ResourceQuotaControllerConfiguration holds configuration for -ResourceQuotaController related features.

    -
    SAController [Required]
    -SAControllerConfiguration -
    -

    SAControllerConfiguration holds configuration for ServiceAccountController -related features.

    -
    ServiceController [Required]
    -ServiceControllerConfiguration -
    -

    ServiceControllerConfiguration holds configuration for ServiceController -related features.

    -
    TTLAfterFinishedController [Required]
    -TTLAfterFinishedControllerConfiguration -
    -

    TTLAfterFinishedControllerConfiguration holds configuration for -TTLAfterFinishedController related features.

    -
    ValidatingAdmissionPolicyStatusController [Required]
    -ValidatingAdmissionPolicyStatusControllerConfiguration +
    + +## `WebhookConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-WebhookConfiguration} + + +**Appears in:** + +- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) + + +

    WebhookConfiguration contains configuration related to +cloud-controller-manager hosted webhooks

    + + + + + + + +
    FieldDescription
    Webhooks [Required]
    +[]string
    -

    ValidatingAdmissionPolicyStatusControllerConfiguration holds configuration for -ValidatingAdmissionPolicyStatusController related features.

    +

+Webhooks is the list of webhooks to enable or disable:
+'*' means "all enabled-by-default webhooks",
+'foo' means "enable 'foo'",
+'-foo' means "disable 'foo'";
+the first item for a particular name wins.
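Because the first item for a name wins, order matters; see the sketch below. It is a hedged illustration of the list semantics only: the webhook name is hypothetical and the Go field casing is assumed to carry over to YAML in this alpha API.

```yaml
Webhook:
  Webhooks:
    - '-example-webhook'   # hypothetical: disable this one webhook...
    - '*'                  # ...while keeping every other default-enabled webhook on
```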

    + + -## `AttachDetachControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-AttachDetachControllerConfiguration} +## `LeaderMigrationConfiguration` {#controllermanager-config-k8s-io-v1alpha1-LeaderMigrationConfiguration} **Appears in:** -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) +- [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) -

    AttachDetachControllerConfiguration contains elements describing AttachDetachController.

    +

    LeaderMigrationConfiguration provides versioned configuration for all migrating leader locks.

    + + + - - + + +
    FieldDescription
    apiVersion
    string
    controllermanager.config.k8s.io/v1alpha1
    kind
    string
    LeaderMigrationConfiguration
    DisableAttachDetachReconcilerSync [Required]
    -bool +
    leaderName [Required]
    +string
    -

-Reconciler runs a periodic loop to reconcile the desired state of the world with
-the actual state of the world by triggering attach-detach operations.
-This flag disables the reconciler; it is false by default, so reconciliation is enabled.

    +

+LeaderName is the name of the leader election resource that protects the migration,
+e.g. 1-20-KCM-to-1-21-CCM.

    ReconcilerSyncLoopPeriod [Required]
    -meta/v1.Duration +
    resourceLock [Required]
    +string
    -

-ReconcilerSyncLoopPeriod is the amount of time the reconciler sync-states loop
-waits between successive executions. It is set to 5 sec by default.

    +

+ResourceLock indicates the resource object type that will be used to lock.
+Should be "leases" or "endpoints".

    +
    controllerLeaders [Required]
    +[]ControllerLeaderConfiguration +
    +

    ControllerLeaders contains a list of migrating leader lock configurations

    -## `CSRSigningConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningConfiguration} +## `ControllerLeaderConfiguration` {#controllermanager-config-k8s-io-v1alpha1-ControllerLeaderConfiguration} **Appears in:** -- [CSRSigningControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningControllerConfiguration) +- [LeaderMigrationConfiguration](#controllermanager-config-k8s-io-v1alpha1-LeaderMigrationConfiguration) -

    CSRSigningConfiguration holds information about a particular CSR signer

    +

    ControllerLeaderConfiguration provides the configuration for a migrating leader lock.

    @@ -311,34 +376,37 @@ wait between successive executions. Is set to 5 sec by default.

    - -
    CertFile [Required]
    +
    name [Required]
    string
    -

    certFile is the filename containing a PEM-encoded -X509 CA certificate used to issue certificates

    +

+Name is the name of the controller being migrated,
+e.g. service-controller, route-controller, cloud-node-controller, etc.

    KeyFile [Required]
    +
    component [Required]
    string
    -

    keyFile is the filename containing a PEM-encoded -RSA or ECDSA private key used to issue certificates

    +

+Component is the name of the component in which the controller should be running,
+e.g. kube-controller-manager, cloud-controller-manager, etc.,
+or '*' meaning the controller can run under any component that participates in the migration.
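Putting the three fields together, a minimal sketch of a migration configuration file, using only the names and values given in the descriptions above:

```yaml
apiVersion: controllermanager.config.k8s.io/v1alpha1
kind: LeaderMigrationConfiguration
leaderName: 1-20-KCM-to-1-21-CCM
resourceLock: leases
controllerLeaders:
  - name: route-controller
    component: '*'          # may run under any component participating in the migration
  - name: service-controller
    component: '*'
```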

    -## `CSRSigningControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningControllerConfiguration} +## `GenericControllerManagerConfiguration` {#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration} **Appears in:** +- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) + - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    CSRSigningControllerConfiguration contains elements describing CSRSigningController.

    +

    GenericControllerManagerConfiguration holds configuration for a generic controller-manager.

    @@ -346,534 +414,332 @@ RSA or ECDSA private key used to issue certificates

    - - - - - - - - -
    ClusterSigningCertFile [Required]
    -string +
    Port [Required]
    +int32
    -

    clusterSigningCertFile is the filename containing a PEM-encoded -X509 CA certificate used to issue cluster-scoped certificates

    +

    port is the port that the controller-manager's http service runs on.

    ClusterSigningKeyFile [Required]
    +
    Address [Required]
    string
    -

-clusterSigningKeyFile is the filename containing a PEM-encoded
-RSA or ECDSA private key used to issue cluster-scoped certificates

    +

    address is the IP address to serve on (set to 0.0.0.0 for all interfaces).

    KubeletServingSignerConfiguration [Required]
    -CSRSigningConfiguration +
    MinResyncPeriod [Required]
    +meta/v1.Duration
    -

    kubeletServingSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kubelet-serving signer

    +

    minResyncPeriod is the resync period in reflectors; will be random between +minResyncPeriod and 2*minResyncPeriod.

    KubeletClientSignerConfiguration [Required]
    -CSRSigningConfiguration +
    ClientConnection [Required]
    +ClientConnectionConfiguration
    -

    kubeletClientSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kube-apiserver-client-kubelet

    +

    ClientConnection specifies the kubeconfig file and client connection +settings for the proxy server to use when communicating with the apiserver.

    KubeAPIServerClientSignerConfiguration [Required]
    -CSRSigningConfiguration +
    ControllerStartInterval [Required]
    +meta/v1.Duration
    -

    kubeAPIServerClientSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kube-apiserver-client

    +

    How long to wait between starting controller managers

    LegacyUnknownSignerConfiguration [Required]
    -CSRSigningConfiguration +
    LeaderElection [Required]
    +LeaderElectionConfiguration
    -

    legacyUnknownSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/legacy-unknown

    +

    leaderElection defines the configuration of leader election client.

    ClusterSigningDuration [Required]
    -meta/v1.Duration +
    Controllers [Required]
    +[]string
    -

    clusterSigningDuration is the max length of duration signed certificates will be given. -Individual CSRs may request shorter certs by setting spec.expirationSeconds.

    +

+Controllers is the list of controllers to enable or disable:
+'*' means "all enabled-by-default controllers",
+'foo' means "enable 'foo'",
+'-foo' means "disable 'foo'";
+the first item for a particular name wins.

    - -## `CronJobControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CronJobControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

CronJobControllerConfiguration contains elements describing the CronJobController.

    - - - - - - - - + + + + + +
    FieldDescription
    ConcurrentCronJobSyncs [Required]
    -int32 +
    Debugging [Required]
    +DebuggingConfiguration
    -

-concurrentCronJobSyncs is the number of cron job objects that are
-allowed to sync concurrently. Larger number = more responsive jobs,
-but more CPU (and network) load.

    +

    DebuggingConfiguration holds configuration for Debugging related features.

    +
    LeaderMigrationEnabled [Required]
    +bool +
    +

    LeaderMigrationEnabled indicates whether Leader Migration should be enabled for the controller manager.

    +
    LeaderMigration [Required]
    +LeaderMigrationConfiguration +
    +

    LeaderMigration holds the configuration for Leader Migration.

    + + -## `DaemonSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DaemonSetControllerConfiguration} +## `KubeControllerManagerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration} -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - -

    DaemonSetControllerConfiguration contains elements describing DaemonSetController.

    +

KubeControllerManagerConfiguration contains elements describing the kube-controller-manager.

    + + + - - -
    FieldDescription
    apiVersion
    string
    kubecontrollermanager.config.k8s.io/v1alpha1
    kind
    string
    KubeControllerManagerConfiguration
    ConcurrentDaemonSetSyncs [Required]
    -int32 +
    Generic [Required]
    +GenericControllerManagerConfiguration
    -

    concurrentDaemonSetSyncs is the number of daemonset objects that are -allowed to sync concurrently. Larger number = more responsive daemonset, -but more CPU (and network) load.

    +

    Generic holds configuration for a generic controller-manager

    - -## `DeploymentControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DeploymentControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    DeploymentControllerConfiguration contains elements describing DeploymentController.

    - - - - - - - - - -
    FieldDescription
    ConcurrentDeploymentSyncs [Required]
    -int32 +
    KubeCloudShared [Required]
    +KubeCloudSharedConfiguration
    -

    concurrentDeploymentSyncs is the number of deployment objects that are -allowed to sync concurrently. Larger number = more responsive deployments, -but more CPU (and network) load.

    +

    KubeCloudSharedConfiguration holds configuration for shared related features +both in cloud controller manager and kube-controller manager.

    - -## `DeprecatedControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DeprecatedControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

DeprecatedControllerConfiguration contains elements that are deprecated.

    - - - - -## `EndpointControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    EndpointControllerConfiguration contains elements describing EndpointController.

    - - - - - - - - - - -
    FieldDescription
    ConcurrentEndpointSyncs [Required]
    -int32 +
    AttachDetachController [Required]
    +AttachDetachControllerConfiguration
    -

    concurrentEndpointSyncs is the number of endpoint syncing operations -that will be done concurrently. Larger number = faster endpoint updating, -but more CPU (and network) load.

    +

    AttachDetachControllerConfiguration holds configuration for +AttachDetachController related features.

    EndpointUpdatesBatchPeriod [Required]
    -meta/v1.Duration +
    CSRSigningController [Required]
    +CSRSigningControllerConfiguration
    -

    EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. -Processing of pod changes will be delayed by this duration to join them with potential -upcoming updates and reduce the overall number of endpoints updates.

    +

    CSRSigningControllerConfiguration holds configuration for +CSRSigningController related features.

    - -## `EndpointSliceControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointSliceControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    EndpointSliceControllerConfiguration contains elements describing -EndpointSliceController.

    - - - - - - - - - - - -
    FieldDescription
    ConcurrentServiceEndpointSyncs [Required]
    -int32 +
    DaemonSetController [Required]
    +DaemonSetControllerConfiguration
    -

    concurrentServiceEndpointSyncs is the number of service endpoint syncing -operations that will be done concurrently. Larger number = faster -endpoint slice updating, but more CPU (and network) load.

    +

    DaemonSetControllerConfiguration holds configuration for DaemonSetController +related features.

    MaxEndpointsPerSlice [Required]
    -int32 +
    DeploymentController [Required]
    +DeploymentControllerConfiguration
    -

    maxEndpointsPerSlice is the maximum number of endpoints that will be -added to an EndpointSlice. More endpoints per slice will result in fewer -and larger endpoint slices, but larger resources.

    +

    DeploymentControllerConfiguration holds configuration for +DeploymentController related features.

    EndpointUpdatesBatchPeriod [Required]
    -meta/v1.Duration +
    StatefulSetController [Required]
    +StatefulSetControllerConfiguration
    -

    EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. -Processing of pod changes will be delayed by this duration to join them with potential -upcoming updates and reduce the overall number of endpoints updates.

    +

    StatefulSetControllerConfiguration holds configuration for +StatefulSetController related features.

    - -## `EndpointSliceMirroringControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointSliceMirroringControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    EndpointSliceMirroringControllerConfiguration contains elements describing -EndpointSliceMirroringController.

    - - - - - - - - - - - -
    FieldDescription
    MirroringConcurrentServiceEndpointSyncs [Required]
    -int32 +
    DeprecatedController [Required]
    +DeprecatedControllerConfiguration
    -

    mirroringConcurrentServiceEndpointSyncs is the number of service endpoint -syncing operations that will be done concurrently. Larger number = faster -endpoint slice updating, but more CPU (and network) load.

    +

    DeprecatedControllerConfiguration holds configuration for some deprecated +features.

    MirroringMaxEndpointsPerSubset [Required]
    -int32 +
    EndpointController [Required]
    +EndpointControllerConfiguration
    -

    mirroringMaxEndpointsPerSubset is the maximum number of endpoints that -will be mirrored to an EndpointSlice for an EndpointSubset.

    +

    EndpointControllerConfiguration holds configuration for EndpointController +related features.

    MirroringEndpointUpdatesBatchPeriod [Required]
    -meta/v1.Duration +
    EndpointSliceController [Required]
    +EndpointSliceControllerConfiguration
    -

    mirroringEndpointUpdatesBatchPeriod can be used to batch EndpointSlice -updates. All updates triggered by EndpointSlice changes will be delayed -by up to 'mirroringEndpointUpdatesBatchPeriod'. If other addresses in the -same Endpoints resource change in that period, they will be batched to a -single EndpointSlice update. Default 0 value means that each Endpoints -update triggers an EndpointSlice update.

    +

    EndpointSliceControllerConfiguration holds configuration for +EndpointSliceController related features.

    - -## `EphemeralVolumeControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EphemeralVolumeControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    EphemeralVolumeControllerConfiguration contains elements describing EphemeralVolumeController.

    - - - - - - - - - -
    FieldDescription
    ConcurrentEphemeralVolumeSyncs [Required]
    -int32 +
    EndpointSliceMirroringController [Required]
    +EndpointSliceMirroringControllerConfiguration
    -

-ConcurrentEphemeralVolumeSyncs is the number of ephemeral volume syncing operations
-that will be done concurrently. Larger number = faster ephemeral volume updating,
-but more CPU (and network) load.

    +

    EndpointSliceMirroringControllerConfiguration holds configuration for +EndpointSliceMirroringController related features.

    - -## `GarbageCollectorControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-GarbageCollectorControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    GarbageCollectorControllerConfiguration contains elements describing GarbageCollectorController.

    - - - - - - - - - - - -
    FieldDescription
    EnableGarbageCollector [Required]
    -bool +
    EphemeralVolumeController [Required]
    +EphemeralVolumeControllerConfiguration
    -

    enables the generic garbage collector. MUST be synced with the -corresponding flag of the kube-apiserver. WARNING: the generic garbage -collector is an alpha feature.

    +

    EphemeralVolumeControllerConfiguration holds configuration for EphemeralVolumeController +related features.

    ConcurrentGCSyncs [Required]
    -int32 +
    GarbageCollectorController [Required]
    +GarbageCollectorControllerConfiguration
    -

    concurrentGCSyncs is the number of garbage collector workers that are -allowed to sync concurrently.

    +

    GarbageCollectorControllerConfiguration holds configuration for +GarbageCollectorController related features.

    GCIgnoredResources [Required]
    -[]GroupResource +
    HPAController [Required]
    +HPAControllerConfiguration
    -

    gcIgnoredResources is the list of GroupResources that garbage collection should ignore.

    +

    HPAControllerConfiguration holds configuration for HPAController related features.

    - -## `GroupResource` {#kubecontrollermanager-config-k8s-io-v1alpha1-GroupResource} - - -**Appears in:** - -- [GarbageCollectorControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-GarbageCollectorControllerConfiguration) - - -

GroupResource describes a group resource.

    - - - - - - - - - - -
    FieldDescription
    Group [Required]
    -string +
    JobController [Required]
    +JobControllerConfiguration
    -

    group is the group portion of the GroupResource.

    +

    JobControllerConfiguration holds configuration for JobController related features.

    Resource [Required]
    -string +
    CronJobController [Required]
    +CronJobControllerConfiguration
    -

    resource is the resource portion of the GroupResource.

    +

    CronJobControllerConfiguration holds configuration for CronJobController related features.

    - -## `HPAControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-HPAControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    HPAControllerConfiguration contains elements describing HPAController.

    - - - - - - - - - - - - - - - - -
    FieldDescription
    ConcurrentHorizontalPodAutoscalerSyncs [Required]
    -int32 +
    LegacySATokenCleaner [Required]
    +LegacySATokenCleanerConfiguration
    -

    ConcurrentHorizontalPodAutoscalerSyncs is the number of HPA objects that are allowed to sync concurrently. -Larger number = more responsive HPA processing, but more CPU (and network) load.

    +

    LegacySATokenCleanerConfiguration holds configuration for LegacySATokenCleaner related features.

    HorizontalPodAutoscalerSyncPeriod [Required]
    -meta/v1.Duration +
    NamespaceController [Required]
    +NamespaceControllerConfiguration
    -

    HorizontalPodAutoscalerSyncPeriod is the period for syncing the number of -pods in horizontal pod autoscaler.

    +

    NamespaceControllerConfiguration holds configuration for NamespaceController +related features.

    HorizontalPodAutoscalerUpscaleForbiddenWindow [Required]
    -meta/v1.Duration +
    NodeIPAMController [Required]
    +NodeIPAMControllerConfiguration
    -

HorizontalPodAutoscalerUpscaleForbiddenWindow is a period after which the next upscale is allowed.

    +

    NodeIPAMControllerConfiguration holds configuration for NodeIPAMController +related features.

    HorizontalPodAutoscalerDownscaleStabilizationWindow [Required]
    -meta/v1.Duration +
    NodeLifecycleController [Required]
    +NodeLifecycleControllerConfiguration
    -

-HorizontalPodAutoscalerDownscaleStabilizationWindow is a period for which the autoscaler will look
-backwards and not scale down below any recommendation it made during that period.

    +

    NodeLifecycleControllerConfiguration holds configuration for +NodeLifecycleController related features.

    HorizontalPodAutoscalerDownscaleForbiddenWindow [Required]
    -meta/v1.Duration +
    PersistentVolumeBinderController [Required]
    +PersistentVolumeBinderControllerConfiguration
    -

HorizontalPodAutoscalerDownscaleForbiddenWindow is a period after which the next downscale is allowed.

    +

    PersistentVolumeBinderControllerConfiguration holds configuration for +PersistentVolumeBinderController related features.

    HorizontalPodAutoscalerTolerance [Required]
    -float64 +
    PodGCController [Required]
    +PodGCControllerConfiguration
    -

-HorizontalPodAutoscalerTolerance is the tolerance for when
-resource usage suggests upscaling/downscaling.

    +

    PodGCControllerConfiguration holds configuration for PodGCController +related features.

    HorizontalPodAutoscalerCPUInitializationPeriod [Required]
    -meta/v1.Duration +
    ReplicaSetController [Required]
    +ReplicaSetControllerConfiguration
    -

    HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples -might be skipped.

    +

    ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features.

    HorizontalPodAutoscalerInitialReadinessDelay [Required]
    -meta/v1.Duration +
    ReplicationController [Required]
    +ReplicationControllerConfiguration
    -

-HorizontalPodAutoscalerInitialReadinessDelay is the period after pod start during which readiness
-changes are treated as readiness being set for the first time. The only effect of this is that
-HPA will disregard CPU samples from unready pods that had their last readiness change during that
-period.

    +

    ReplicationControllerConfiguration holds configuration for +ReplicationController related features.

    - -## `JobControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-JobControllerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    JobControllerConfiguration contains elements describing JobController.

    - - - - - - - - - -
    FieldDescription
    ConcurrentJobSyncs [Required]
    -int32 +
    ResourceQuotaController [Required]
    +ResourceQuotaControllerConfiguration
    -

    concurrentJobSyncs is the number of job objects that are -allowed to sync concurrently. Larger number = more responsive jobs, -but more CPU (and network) load.

    +

    ResourceQuotaControllerConfiguration holds configuration for +ResourceQuotaController related features.

    - -## `LegacySATokenCleanerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-LegacySATokenCleanerConfiguration} - - -**Appears in:** - -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) - - -

    LegacySATokenCleanerConfiguration contains elements describing LegacySATokenCleaner

    - - - - - - - - + + + + + + + + +
    FieldDescription
    CleanUpPeriod [Required]
    -meta/v1.Duration +
    SAController [Required]
    +SAControllerConfiguration +
    +

    SAControllerConfiguration holds configuration for ServiceAccountController +related features.

    +
    ServiceController [Required]
    +ServiceControllerConfiguration +
    +

    ServiceControllerConfiguration holds configuration for ServiceController +related features.

    +
    TTLAfterFinishedController [Required]
    +TTLAfterFinishedControllerConfiguration +
    +

    TTLAfterFinishedControllerConfiguration holds configuration for +TTLAfterFinishedController related features.

    +
    ValidatingAdmissionPolicyStatusController [Required]
    +ValidatingAdmissionPolicyStatusControllerConfiguration
    -

    CleanUpPeriod is the period of time since the last usage of an -auto-generated service account token before it can be deleted.

    +

    ValidatingAdmissionPolicyStatusControllerConfiguration holds configuration for +ValidatingAdmissionPolicyStatusController related features.

    -## `NamespaceControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NamespaceControllerConfiguration} +## `AttachDetachControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-AttachDetachControllerConfiguration} **Appears in:** @@ -881,7 +747,7 @@ auto-generated service account token before it can be deleted.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    NamespaceControllerConfiguration contains elements describing NamespaceController.

    +

    AttachDetachControllerConfiguration contains elements describing AttachDetachController.

    @@ -889,34 +755,35 @@ auto-generated service account token before it can be deleted.

    - -
    NamespaceSyncPeriod [Required]
    -meta/v1.Duration +
    DisableAttachDetachReconcilerSync [Required]
    +bool
    -

    namespaceSyncPeriod is the period for syncing namespace life-cycle -updates.

    +

+Reconciler runs a periodic loop to reconcile the desired state of the world with
+the actual state of the world by triggering attach-detach operations.
+This flag disables the reconciler; it is false by default, so reconciliation is enabled.

    ConcurrentNamespaceSyncs [Required]
    -int32 +
    ReconcilerSyncLoopPeriod [Required]
    +meta/v1.Duration
    -

    concurrentNamespaceSyncs is the number of namespace objects that are -allowed to sync concurrently.

    +

+ReconcilerSyncLoopPeriod is the amount of time the reconciler sync-states loop
+waits between successive executions. It is set to 5 sec by default.
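A hedged sketch of these two knobs in a KubeControllerManagerConfiguration file, assuming the Go field casing shown above carries over to YAML in this alpha API; the values restate the documented defaults:

```yaml
AttachDetachController:
  DisableAttachDetachReconcilerSync: false   # default: keep reconciliation enabled
  ReconcilerSyncLoopPeriod: 5s               # the documented 5 sec default
```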

    -## `NodeIPAMControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NodeIPAMControllerConfiguration} +## `CSRSigningConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningConfiguration} **Appears in:** -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) +- [CSRSigningControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningControllerConfiguration) -

    NodeIPAMControllerConfiguration contains elements describing NodeIpamController.

    +

    CSRSigningConfiguration holds information about a particular CSR signer

    @@ -924,45 +791,26 @@ allowed to sync concurrently.

    - - - - - - - - - - -
    ServiceCIDR [Required]
    +
    CertFile [Required]
    string
    -

    serviceCIDR is CIDR Range for Services in cluster.

    +

    certFile is the filename containing a PEM-encoded +X509 CA certificate used to issue certificates

    SecondaryServiceCIDR [Required]
    +
    KeyFile [Required]
    string
    -

    secondaryServiceCIDR is CIDR Range for Services in cluster. This is used in dual stack clusters. SecondaryServiceCIDR must be of different IP family than ServiceCIDR

    -
    NodeCIDRMaskSize [Required]
    -int32 -
    -

    NodeCIDRMaskSize is the mask size for node cidr in cluster.

    -
    NodeCIDRMaskSizeIPv4 [Required]
    -int32 -
    -

    NodeCIDRMaskSizeIPv4 is the mask size for node cidr in dual-stack cluster.

    -
    NodeCIDRMaskSizeIPv6 [Required]
    -int32 -
    -

    NodeCIDRMaskSizeIPv6 is the mask size for node cidr in dual-stack cluster.

    +

    keyFile is the filename containing a PEM-encoded +RSA or ECDSA private key used to issue certificates

    -## `NodeLifecycleControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NodeLifecycleControllerConfiguration} +## `CSRSigningControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CSRSigningControllerConfiguration} **Appears in:** @@ -970,7 +818,7 @@ allowed to sync concurrently.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController.

    +

    CSRSigningControllerConfiguration contains elements describing CSRSigningController.

    @@ -978,64 +826,62 @@ allowed to sync concurrently.

    - - - - - - -
    NodeEvictionRate [Required]
    -float32 +
    ClusterSigningCertFile [Required]
    +string
    -

    nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy

    +

    clusterSigningCertFile is the filename containing a PEM-encoded +X509 CA certificate used to issue cluster-scoped certificates

    SecondaryNodeEvictionRate [Required]
    -float32 +
    ClusterSigningKeyFile [Required]
    +string
    -

    secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy

    +

+clusterSigningKeyFile is the filename containing a PEM-encoded
+RSA or ECDSA private key used to issue cluster-scoped certificates

    NodeStartupGracePeriod [Required]
    -meta/v1.Duration +
    KubeletServingSignerConfiguration [Required]
    +CSRSigningConfiguration
    -

    nodeStartupGracePeriod is the amount of time which we allow starting a node to -be unresponsive before marking it unhealthy.

    +

    kubeletServingSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kubelet-serving signer

    NodeMonitorGracePeriod [Required]
    -meta/v1.Duration +
    KubeletClientSignerConfiguration [Required]
    +CSRSigningConfiguration
    -

-nodeMonitorGracePeriod is the amount of time which we allow a running node to be
-unresponsive before marking it unhealthy. Must be N times more than kubelet's
-nodeStatusUpdateFrequency, where N means the number of retries allowed for kubelet
-to post node status.

    +

    kubeletClientSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kube-apiserver-client-kubelet

    PodEvictionTimeout [Required]
    -meta/v1.Duration +
    KubeAPIServerClientSignerConfiguration [Required]
    +CSRSigningConfiguration
    -

    podEvictionTimeout is the grace period for deleting pods on failed nodes.

    +

    kubeAPIServerClientSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/kube-apiserver-client

    LargeClusterSizeThreshold [Required]
    -int32 +
    LegacyUnknownSignerConfiguration [Required]
    +CSRSigningConfiguration
    -

    secondaryNodeEvictionRate is implicitly overridden to 0 for clusters smaller than or equal to largeClusterSizeThreshold

    +

    legacyUnknownSignerConfiguration holds the certificate and key used to issue certificates for the kubernetes.io/legacy-unknown

    UnhealthyZoneThreshold [Required]
    -float32 +
    ClusterSigningDuration [Required]
    +meta/v1.Duration
    -

    Zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least -unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady

    +

+clusterSigningDuration is the max length of duration signed certificates will be given.
+Individual CSRs may request shorter certs by setting spec.expirationSeconds.
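A hedged sketch showing the cluster-wide signing pair plus one per-signer override, assuming the Go field casing shown above carries over to YAML in this alpha API; all file paths are hypothetical:

```yaml
CSRSigningController:
  ClusterSigningCertFile: /etc/kubernetes/pki/ca.crt
  ClusterSigningKeyFile: /etc/kubernetes/pki/ca.key
  KubeletServingSignerConfiguration:   # overrides the pair for the kubernetes.io/kubelet-serving signer
    CertFile: /etc/kubernetes/pki/kubelet-serving-ca.crt
    KeyFile: /etc/kubernetes/pki/kubelet-serving-ca.key
  ClusterSigningDuration: 8760h        # one year; individual CSRs may request shorter via spec.expirationSeconds
```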

    -## `PersistentVolumeBinderControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeBinderControllerConfiguration} +## `CronJobControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-CronJobControllerConfiguration} **Appears in:** @@ -1043,8 +889,7 @@ unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    PersistentVolumeBinderControllerConfiguration contains elements describing -PersistentVolumeBinderController.

    +

CronJobControllerConfiguration contains elements describing the CronJobController.

    @@ -1052,49 +897,27 @@ PersistentVolumeBinderController.

    - - - - - - - - - -
    PVClaimBinderSyncPeriod [Required]
    -meta/v1.Duration -
    -

    pvClaimBinderSyncPeriod is the period for syncing persistent volumes -and persistent volume claims.

    -
    VolumeConfiguration [Required]
    -VolumeConfiguration -
    -

    volumeConfiguration holds configuration for volume related features.

    -
    VolumeHostCIDRDenylist [Required]
    -[]string -
    -

    DEPRECATED: VolumeHostCIDRDenylist is a list of CIDRs that should not be reachable by the -controller from plugins.

    -
    VolumeHostAllowLocalLoopback [Required]
    -bool +
    ConcurrentCronJobSyncs [Required]
    +int32
    -

    DEPRECATED: VolumeHostAllowLocalLoopback indicates if local loopback hosts (127.0.0.1, etc) -should be allowed from plugins.

    +

+concurrentCronJobSyncs is the number of cron job objects that are
+allowed to sync concurrently. Larger number = more responsive jobs,
+but more CPU (and network) load.

    -## `PersistentVolumeRecyclerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeRecyclerConfiguration} +## `DaemonSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DaemonSetControllerConfiguration} **Appears in:** -- [VolumeConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-VolumeConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    PersistentVolumeRecyclerConfiguration contains elements describing persistent volume plugins.

    +

    DaemonSetControllerConfiguration contains elements describing DaemonSetController.

    @@ -1102,69 +925,19 @@ should be allowed from plugins.

    - - - - - - - - - - - - - - - - - - -
    MaximumRetry [Required]
    -int32 -
    -

    maximumRetry is number of retries the PV recycler will execute on failure to recycle -PV.

    -
    MinimumTimeoutNFS [Required]
    -int32 -
    -

    minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler -pod.

    -
    PodTemplateFilePathNFS [Required]
    -string -
    -

    podTemplateFilePathNFS is the file path to a pod definition used as a template for -NFS persistent volume recycling

    -
    IncrementTimeoutNFS [Required]
    -int32 -
    -

    incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds -for an NFS scrubber pod.

    -
    PodTemplateFilePathHostPath [Required]
    -string -
    -

    podTemplateFilePathHostPath is the file path to a pod definition used as a template for -HostPath persistent volume recycling. This is for development and testing only and -will not work in a multi-node cluster.

    -
    MinimumTimeoutHostPath [Required]
    -int32 -
    -

    minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath -Recycler pod. This is for development and testing only and will not work in a multi-node -cluster.

    -
    IncrementTimeoutHostPath [Required]
    +
    ConcurrentDaemonSetSyncs [Required]
    int32
    -

    incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds -for a HostPath scrubber pod. This is for development and testing only and will not work -in a multi-node cluster.

    +

    concurrentDaemonSetSyncs is the number of daemonset objects that are +allowed to sync concurrently. Larger number = more responsive daemonset, +but more CPU (and network) load.

    -## `PodGCControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PodGCControllerConfiguration} +## `DeploymentControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DeploymentControllerConfiguration} **Appears in:** @@ -1172,7 +945,7 @@ in a multi-node cluster.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    PodGCControllerConfiguration contains elements describing PodGCController.

    +

    DeploymentControllerConfiguration contains elements describing DeploymentController.

    @@ -1180,19 +953,19 @@ in a multi-node cluster.

    -
    TerminatedPodGCThreshold [Required]
    +
    ConcurrentDeploymentSyncs [Required]
    int32
    -

    terminatedPodGCThreshold is the number of terminated pods that can exist -before the terminated pod garbage collector starts deleting terminated pods. -If <= 0, the terminated pod garbage collector is disabled.

    +

    concurrentDeploymentSyncs is the number of deployment objects that are +allowed to sync concurrently. Larger number = more responsive deployments, +but more CPU (and network) load.

    -## `ReplicaSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ReplicaSetControllerConfiguration} +## `DeprecatedControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-DeprecatedControllerConfiguration} **Appears in:** @@ -1200,27 +973,12 @@ If <= 0, the terminated pod garbage collector is disabled.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    ReplicaSetControllerConfiguration contains elements describing ReplicaSetController.

    +

DeprecatedControllerConfiguration contains elements to be deprecated.

    - - - - - - - - - -
    FieldDescription
    ConcurrentRSSyncs [Required]
    -int32 -
    -

    concurrentRSSyncs is the number of replica sets that are allowed to sync -concurrently. Larger number = more responsive replica management, but more -CPU (and network) load.

    -
    -## `ReplicationControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ReplicationControllerConfiguration} + +## `EndpointControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointControllerConfiguration} **Appears in:** @@ -1228,7 +986,7 @@ CPU (and network) load.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    ReplicationControllerConfiguration contains elements describing ReplicationController.

    +

    EndpointControllerConfiguration contains elements describing EndpointController.

    @@ -1236,19 +994,28 @@ CPU (and network) load.

    - + + +
    ConcurrentRCSyncs [Required]
    +
    ConcurrentEndpointSyncs [Required]
    int32
    -

    concurrentRCSyncs is the number of replication controllers that are -allowed to sync concurrently. Larger number = more responsive replica -management, but more CPU (and network) load.

    +

    concurrentEndpointSyncs is the number of endpoint syncing operations +that will be done concurrently. Larger number = faster endpoint updating, +but more CPU (and network) load.

    +
    EndpointUpdatesBatchPeriod [Required]
    +meta/v1.Duration +
    +

    EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. +Processing of pod changes will be delayed by this duration to join them with potential +upcoming updates and reduce the overall number of endpoints updates.

    -## `ResourceQuotaControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ResourceQuotaControllerConfiguration} +## `EndpointSliceControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointSliceControllerConfiguration} **Appears in:** @@ -1256,7 +1023,8 @@ management, but more CPU (and network) load.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    ResourceQuotaControllerConfiguration contains elements describing ResourceQuotaController.

    +

    EndpointSliceControllerConfiguration contains elements describing +EndpointSliceController.

    @@ -1264,27 +1032,37 @@ management, but more CPU (and network) load.

    - - + + +
    ResourceQuotaSyncPeriod [Required]
    -meta/v1.Duration +
    ConcurrentServiceEndpointSyncs [Required]
    +int32
    -

    resourceQuotaSyncPeriod is the period for syncing quota usage status -in the system.

    +

    concurrentServiceEndpointSyncs is the number of service endpoint syncing +operations that will be done concurrently. Larger number = faster +endpoint slice updating, but more CPU (and network) load.

    ConcurrentResourceQuotaSyncs [Required]
    +
    MaxEndpointsPerSlice [Required]
    int32
    -

    concurrentResourceQuotaSyncs is the number of resource quotas that are -allowed to sync concurrently. Larger number = more responsive quota -management, but more CPU (and network) load.

    +

    maxEndpointsPerSlice is the maximum number of endpoints that will be +added to an EndpointSlice. More endpoints per slice will result in fewer +and larger endpoint slices, but larger resources.

    +
    EndpointUpdatesBatchPeriod [Required]
    +meta/v1.Duration +
    +

    EndpointUpdatesBatchPeriod describes the length of endpoint updates batching period. +Processing of pod changes will be delayed by this duration to join them with potential +upcoming updates and reduce the overall number of endpoints updates.
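`EndpointUpdatesBatchPeriod` trades update latency for fewer writes: pod changes that land within one batch window are folded into a single EndpointSlice update. A hedged sketch (same file-driven assumptions as the controller-manager example above; `meta/v1.Duration` values serialize as strings such as `500ms`):

```yaml
EndpointSliceController:
  ConcurrentServiceEndpointSyncs: 5
  # Fewer, larger slices: cap each EndpointSlice at 100 endpoints.
  MaxEndpointsPerSlice: 100
  # Pod churn within a 500ms window collapses into one slice update.
  EndpointUpdatesBatchPeriod: 500ms
```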

    -## `SAControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-SAControllerConfiguration} +## `EndpointSliceMirroringControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EndpointSliceMirroringControllerConfiguration} **Appears in:** @@ -1292,7 +1070,8 @@ management, but more CPU (and network) load.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    SAControllerConfiguration contains elements describing ServiceAccountController.

    +

    EndpointSliceMirroringControllerConfiguration contains elements describing +EndpointSliceMirroringController.

    @@ -1300,34 +1079,39 @@ management, but more CPU (and network) load.

    - - -
    ServiceAccountKeyFile [Required]
    -string +
    MirroringConcurrentServiceEndpointSyncs [Required]
    +int32
    -

    serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key -used to sign service account tokens.

    +

    mirroringConcurrentServiceEndpointSyncs is the number of service endpoint +syncing operations that will be done concurrently. Larger number = faster +endpoint slice updating, but more CPU (and network) load.

    ConcurrentSATokenSyncs [Required]
    +
    MirroringMaxEndpointsPerSubset [Required]
    int32
    -

    concurrentSATokenSyncs is the number of service account token syncing operations -that will be done concurrently.

    +

    mirroringMaxEndpointsPerSubset is the maximum number of endpoints that +will be mirrored to an EndpointSlice for an EndpointSubset.

    RootCAFile [Required]
    -string +
    MirroringEndpointUpdatesBatchPeriod [Required]
    +meta/v1.Duration
    -

    rootCAFile is the root certificate authority will be included in service -account's token secret. This must be a valid PEM-encoded CA bundle.

    +

    mirroringEndpointUpdatesBatchPeriod can be used to batch EndpointSlice +updates. All updates triggered by EndpointSlice changes will be delayed +by up to 'mirroringEndpointUpdatesBatchPeriod'. If other addresses in the +same Endpoints resource change in that period, they will be batched to a +single EndpointSlice update. Default 0 value means that each Endpoints +update triggers an EndpointSlice update.

    -## `StatefulSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-StatefulSetControllerConfiguration} +## `EphemeralVolumeControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-EphemeralVolumeControllerConfiguration} **Appears in:** @@ -1335,7 +1119,7 @@ account's token secret. This must be a valid PEM-encoded CA bundle.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    StatefulSetControllerConfiguration contains elements describing StatefulSetController.

    +

    EphemeralVolumeControllerConfiguration contains elements describing EphemeralVolumeController.

    @@ -1343,19 +1127,19 @@ account's token secret. This must be a valid PEM-encoded CA bundle.

    -
    ConcurrentStatefulSetSyncs [Required]
    +
    ConcurrentEphemeralVolumeSyncs [Required]
    int32
    -

    concurrentStatefulSetSyncs is the number of statefulset objects that are -allowed to sync concurrently. Larger number = more responsive statefulsets, +

concurrentEphemeralVolumeSyncs is the number of ephemeral volume syncing operations +that will be done concurrently. Larger number = faster ephemeral volume updating, but more CPU (and network) load.

    -## `TTLAfterFinishedControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-TTLAfterFinishedControllerConfiguration} +## `GarbageCollectorControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-GarbageCollectorControllerConfiguration} **Appears in:** @@ -1363,7 +1147,7 @@ but more CPU (and network) load.

    - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    TTLAfterFinishedControllerConfiguration contains elements describing TTLAfterFinishedController.

    +

    GarbageCollectorControllerConfiguration contains elements describing GarbageCollectorController.

    @@ -1371,26 +1155,42 @@ but more CPU (and network) load.

    - + + + + + +
    ConcurrentTTLSyncs [Required]
    +
    EnableGarbageCollector [Required]
    +bool +
    +

enableGarbageCollector enables the generic garbage collector. MUST be synced with the +corresponding flag of the kube-apiserver. WARNING: the generic garbage +collector is an alpha feature.

    +
    ConcurrentGCSyncs [Required]
    int32
    -

    concurrentTTLSyncs is the number of TTL-after-finished collector workers that are +

    concurrentGCSyncs is the number of garbage collector workers that are allowed to sync concurrently.

    GCIgnoredResources [Required]
    +[]GroupResource +
    +

    gcIgnoredResources is the list of GroupResources that garbage collection should ignore.

    +
    -## `ValidatingAdmissionPolicyStatusControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ValidatingAdmissionPolicyStatusControllerConfiguration} +## `GroupResource` {#kubecontrollermanager-config-k8s-io-v1alpha1-GroupResource} **Appears in:** -- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) +- [GarbageCollectorControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-GarbageCollectorControllerConfiguration) -

    ValidatingAdmissionPolicyStatusControllerConfiguration contains elements describing ValidatingAdmissionPolicyStatusController.

    +

GroupResource describes a group resource.

    @@ -1398,32 +1198,32 @@ allowed to sync concurrently.

    - + + +
    ConcurrentPolicySyncs [Required]
    -int32 +
    Group [Required]
    +string
    -

    ConcurrentPolicySyncs is the number of policy objects that are -allowed to sync concurrently. Larger number = quicker type checking, -but more CPU (and network) load. -The default value is 5.

    +

    group is the group portion of the GroupResource.

    +
    Resource [Required]
    +string +
    +

    resource is the resource portion of the GroupResource.
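As an illustration of how `GroupResource` pairs plug into `GCIgnoredResources` (the empty string selects the core API group; the `events` entry mirrors a commonly cited upstream default and is stated here as an assumption):

```yaml
GarbageCollectorController:
  EnableGarbageCollector: true
  ConcurrentGCSyncs: 20
  GCIgnoredResources:
    # Group "" is the core API group; skip garbage collecting Events.
    - Group: ""
      Resource: "events"
```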

    -## `VolumeConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-VolumeConfiguration} +## `HPAControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-HPAControllerConfiguration} **Appears in:** -- [PersistentVolumeBinderControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeBinderControllerConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    VolumeConfiguration contains all enumerated flags meant to configure all volume -plugins. From this config, the controller-manager binary will create many instances of -volume.VolumeConfig, each containing only the configuration needed for that plugin which -are then passed to the appropriate plugin. The ControllerManager binary is the only part -of the code which knows what plugins are supported and which flags correspond to each plugin.

    +

    HPAControllerConfiguration contains elements describing HPAController.

    @@ -1431,54 +1231,82 @@ of the code which knows what plugins are supported and which flags correspond to - - - - + + + + + + + + + + + +
    EnableHostPathProvisioning [Required]
    -bool +
    ConcurrentHorizontalPodAutoscalerSyncs [Required]
    +int32
    -

    enableHostPathProvisioning enables HostPath PV provisioning when running without a -cloud provider. This allows testing and development of provisioning features. HostPath -provisioning is not supported in any way, won't work in a multi-node cluster, and -should not be used for anything other than testing or development.

    +

    ConcurrentHorizontalPodAutoscalerSyncs is the number of HPA objects that are allowed to sync concurrently. +Larger number = more responsive HPA processing, but more CPU (and network) load.

    EnableDynamicProvisioning [Required]
    -bool +
    HorizontalPodAutoscalerSyncPeriod [Required]
    +meta/v1.Duration
    -

    enableDynamicProvisioning enables the provisioning of volumes when running within an environment -that supports dynamic provisioning. Defaults to true.

    +

    HorizontalPodAutoscalerSyncPeriod is the period for syncing the number of +pods in horizontal pod autoscaler.

    PersistentVolumeRecyclerConfiguration [Required]
    -PersistentVolumeRecyclerConfiguration +
    HorizontalPodAutoscalerUpscaleForbiddenWindow [Required]
    +meta/v1.Duration
    -

    persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.

    +

HorizontalPodAutoscalerUpscaleForbiddenWindow is a period after which the next upscale is allowed.

    FlexVolumePluginDir [Required]
    -string +
    HorizontalPodAutoscalerDownscaleStabilizationWindow [Required]
    +meta/v1.Duration
    -

    volumePluginDir is the full path of the directory in which the flex -volume plugin should search for additional third party volume plugins

    +

HorizontalPodAutoscalerDownscaleStabilizationWindow is a period for which the autoscaler will look +backwards and not scale down below any recommendation it made during that period.

    +
    HorizontalPodAutoscalerDownscaleForbiddenWindow [Required]
    +meta/v1.Duration +
    +

HorizontalPodAutoscalerDownscaleForbiddenWindow is a period after which the next downscale is allowed.

    +
    HorizontalPodAutoscalerTolerance [Required]
    +float64 +
    +

    HorizontalPodAutoscalerTolerance is the tolerance for when +resource usage suggests upscaling/downscaling

    +
    HorizontalPodAutoscalerCPUInitializationPeriod [Required]
    +meta/v1.Duration +
    +

    HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples +might be skipped.

    +
    HorizontalPodAutoscalerInitialReadinessDelay [Required]
    +meta/v1.Duration +
    +

HorizontalPodAutoscalerInitialReadinessDelay is the period after pod start during which readiness +changes are treated as readiness being set for the first time. The only effect of this is that +HPA will disregard CPU samples from unready pods that had their last readiness change during that +period.
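Most of the HPA fields above are stabilization windows expressed as `meta/v1.Duration` strings. A hedged sketch of how they compose (values are illustrative; some resemble commonly cited defaults, but verify against your release):

```yaml
HPAController:
  ConcurrentHorizontalPodAutoscalerSyncs: 5
  HorizontalPodAutoscalerSyncPeriod: 15s
  # Never scale down below a recommendation made in the last 5 minutes.
  HorizontalPodAutoscalerDownscaleStabilizationWindow: 5m0s
  # Ignore metric deltas within 10% of the target value.
  HorizontalPodAutoscalerTolerance: 0.1
  HorizontalPodAutoscalerCPUInitializationPeriod: 5m0s
  HorizontalPodAutoscalerInitialReadinessDelay: 30s
```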

    - - - -## `NodeControllerConfiguration` {#NodeControllerConfiguration} +## `JobControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-JobControllerConfiguration} **Appears in:** -- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    NodeControllerConfiguration contains elements describing NodeController.

    +

    JobControllerConfiguration contains elements describing JobController.

    @@ -1486,28 +1314,54 @@ volume plugin should search for additional third party volume plugins

    - + + +
    ConcurrentNodeSyncs [Required]
    +
    ConcurrentJobSyncs [Required]
    int32
    -

    ConcurrentNodeSyncs is the number of workers -concurrently synchronizing nodes

    +

    concurrentJobSyncs is the number of job objects that are +allowed to sync concurrently. Larger number = more responsive jobs, +but more CPU (and network) load.

    +
    + +## `LegacySATokenCleanerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-LegacySATokenCleanerConfiguration} + + +**Appears in:** + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    LegacySATokenCleanerConfiguration contains elements describing LegacySATokenCleaner

    + + + + + + + + +
    FieldDescription
    CleanUpPeriod [Required]
    +meta/v1.Duration +
    +

    CleanUpPeriod is the period of time since the last usage of an +auto-generated service account token before it can be deleted.

    -## `ServiceControllerConfiguration` {#ServiceControllerConfiguration} +## `NamespaceControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NamespaceControllerConfiguration} **Appears in:** -- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) - - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    ServiceControllerConfiguration contains elements describing ServiceController.

    +

    NamespaceControllerConfiguration contains elements describing NamespaceController.

    @@ -1515,92 +1369,88 @@ concurrently synchronizing nodes

    - + + +
    ConcurrentServiceSyncs [Required]
    +
    NamespaceSyncPeriod [Required]
    +meta/v1.Duration +
    +

    namespaceSyncPeriod is the period for syncing namespace life-cycle +updates.

    +
    ConcurrentNamespaceSyncs [Required]
    int32
    -

    concurrentServiceSyncs is the number of services that are -allowed to sync concurrently. Larger number = more responsive service -management, but more CPU (and network) load.

    +

    concurrentNamespaceSyncs is the number of namespace objects that are +allowed to sync concurrently.

    - - -## `CloudControllerManagerConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration} +## `NodeIPAMControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NodeIPAMControllerConfiguration} +**Appears in:** -

    CloudControllerManagerConfiguration contains elements describing cloud-controller manager.

    +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    NodeIPAMControllerConfiguration contains elements describing NodeIpamController.

    - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    cloudcontrollermanager.config.k8s.io/v1alpha1
    kind
    string
    CloudControllerManagerConfiguration
    Generic [Required]
    -GenericControllerManagerConfiguration -
    -

    Generic holds configuration for a generic controller-manager

    -
    KubeCloudShared [Required]
    -KubeCloudSharedConfiguration +
    ServiceCIDR [Required]
    +string
    -

    KubeCloudSharedConfiguration holds configuration for shared related features -both in cloud controller manager and kube-controller manager.

    +

serviceCIDR is the CIDR range for Services in the cluster.

    NodeController [Required]
    -NodeControllerConfiguration +
    SecondaryServiceCIDR [Required]
    +string
    -

    NodeController holds configuration for node controller -related features.

    +

secondaryServiceCIDR is the CIDR range for Services in the cluster. This is used in dual-stack clusters. SecondaryServiceCIDR must be of a different IP family than ServiceCIDR.

    ServiceController [Required]
    -ServiceControllerConfiguration +
    NodeCIDRMaskSize [Required]
    +int32
    -

    ServiceControllerConfiguration holds configuration for ServiceController -related features.

    +

NodeCIDRMaskSize is the mask size for the node CIDR in the cluster.

    NodeStatusUpdateFrequency [Required]
    -meta/v1.Duration +
    NodeCIDRMaskSizeIPv4 [Required]
    +int32
    -

    NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status

    +

NodeCIDRMaskSizeIPv4 is the mask size for the IPv4 node CIDR in a dual-stack cluster.

    Webhook [Required]
    -WebhookConfiguration +
    NodeCIDRMaskSizeIPv6 [Required]
    +int32
    -

    Webhook is the configuration for cloud-controller-manager hosted webhooks

    +

NodeCIDRMaskSizeIPv6 is the mask size for the IPv6 node CIDR in a dual-stack cluster.
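For dual-stack clusters, the per-family mask sizes replace the single `NodeCIDRMaskSize`. A sketch (the /24 and /64 values mirror commonly used defaults and are an assumption here):

```yaml
NodeIPAMController:
  ServiceCIDR: 10.96.0.0/12
  SecondaryServiceCIDR: fd00:10:96::/112
  # Dual-stack: set the per-family sizes and leave NodeCIDRMaskSize unset.
  NodeCIDRMaskSizeIPv4: 24
  NodeCIDRMaskSizeIPv6: 64
```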

    -## `CloudProviderConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudProviderConfiguration} +## `NodeLifecycleControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-NodeLifecycleControllerConfiguration} **Appears in:** -- [KubeCloudSharedConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-KubeCloudSharedConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    CloudProviderConfiguration contains basically elements about cloud provider.

    +

    NodeLifecycleControllerConfiguration contains elements describing NodeLifecycleController.

    @@ -1608,35 +1458,73 @@ related features.

    - - + + + + + + + + + + + + + + +
    Name [Required]
    -string +
    NodeEvictionRate [Required]
    +float32
    -

    Name is the provider for cloud services.

    +

    nodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is healthy

    CloudConfigFile [Required]
    -string +
    SecondaryNodeEvictionRate [Required]
    +float32
    -

    cloudConfigFile is the path to the cloud provider configuration file.

    +

    secondaryNodeEvictionRate is the number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy

    +
    NodeStartupGracePeriod [Required]
    +meta/v1.Duration +
    +

    nodeStartupGracePeriod is the amount of time which we allow starting a node to +be unresponsive before marking it unhealthy.

    +
    NodeMonitorGracePeriod [Required]
    +meta/v1.Duration +
    +

nodeMonitorGracePeriod is the amount of time which we allow a running node to be +unresponsive before marking it unhealthy. Must be N times more than kubelet's +nodeStatusUpdateFrequency, where N means number of retries allowed for kubelet +to post node status.

    +
    PodEvictionTimeout [Required]
    +meta/v1.Duration +
    +

    podEvictionTimeout is the grace period for deleting pods on failed nodes.

    +
    LargeClusterSizeThreshold [Required]
    +int32 +
    +

largeClusterSizeThreshold is the cluster size at or below which secondaryNodeEvictionRate is implicitly overridden to 0.

    +
    UnhealthyZoneThreshold [Required]
    +float32 +
    +

A zone is treated as unhealthy in nodeEvictionRate and secondaryNodeEvictionRate when at least +unhealthyZoneThreshold (no less than 3) of Nodes in the zone are NotReady.
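The two eviction rates are easiest to read as periods: a rate of 0.1 drains at most one node every 10 seconds while a zone is healthy, and 0.01 one node every 100 seconds once it is unhealthy. A hedged sketch (values resemble commonly cited defaults; treat them as assumptions):

```yaml
NodeLifecycleController:
  # One node's pods evicted per 10s in a healthy zone...
  NodeEvictionRate: 0.1
  # ...one per 100s once the zone is marked unhealthy.
  SecondaryNodeEvictionRate: 0.01
  NodeStartupGracePeriod: 1m0s
  NodeMonitorGracePeriod: 40s
  # At or below 50 nodes, secondary eviction is disabled outright.
  LargeClusterSizeThreshold: 50
  UnhealthyZoneThreshold: 0.55
```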

    -## `KubeCloudSharedConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-KubeCloudSharedConfiguration} +## `PersistentVolumeBinderControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeBinderControllerConfiguration} **Appears in:** -- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) - - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    KubeCloudSharedConfiguration contains elements shared by both kube-controller manager -and cloud-controller manager, but not genericconfig.

    +

    PersistentVolumeBinderControllerConfiguration contains elements describing +PersistentVolumeBinderController.

    @@ -1644,109 +1532,155 @@ and cloud-controller manager, but not genericconfig.

    - - - - - +
    CloudProvider [Required]
    -CloudProviderConfiguration +
    PVClaimBinderSyncPeriod [Required]
    +meta/v1.Duration
    -

    CloudProviderConfiguration holds configuration for CloudProvider related features.

    +

    pvClaimBinderSyncPeriod is the period for syncing persistent volumes +and persistent volume claims.

    ExternalCloudVolumePlugin [Required]
    -string +
    VolumeConfiguration [Required]
    +VolumeConfiguration
    -

    externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". -It is currently used by the in repo cloud providers to handle node and volume control in the KCM.

    +

    volumeConfiguration holds configuration for volume related features.

    UseServiceAccountCredentials [Required]
    -bool +
    VolumeHostCIDRDenylist [Required]
    +[]string
    -

    useServiceAccountCredentials indicates whether controllers should be run with -individual service account credentials.

    +

    DEPRECATED: VolumeHostCIDRDenylist is a list of CIDRs that should not be reachable by the +controller from plugins.

    AllowUntaggedCloud [Required]
    +
    VolumeHostAllowLocalLoopback [Required]
    bool
    -

    run with untagged cloud instances

    +

    DEPRECATED: VolumeHostAllowLocalLoopback indicates if local loopback hosts (127.0.0.1, etc) +should be allowed from plugins.

    RouteReconciliationPeriod [Required]
    -meta/v1.Duration +
    + +## `PersistentVolumeRecyclerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeRecyclerConfiguration} + + +**Appears in:** + +- [VolumeConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-VolumeConfiguration) + + +

    PersistentVolumeRecyclerConfiguration contains elements describing persistent volume plugins.

    + + + + + + + + - - - - - - - +
    FieldDescription
    MaximumRetry [Required]
    +int32
    -

    routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider..

    +

maximumRetry is the number of retries the PV recycler will execute on failure to recycle +PV.

    NodeMonitorPeriod [Required]
    -meta/v1.Duration +
    MinimumTimeoutNFS [Required]
    +int32
    -

    nodeMonitorPeriod is the period for syncing NodeStatus in NodeController.

    +

    minimumTimeoutNFS is the minimum ActiveDeadlineSeconds to use for an NFS Recycler +pod.

    ClusterName [Required]
    +
    PodTemplateFilePathNFS [Required]
    string
    -

    clusterName is the instance prefix for the cluster.

    +

    podTemplateFilePathNFS is the file path to a pod definition used as a template for +NFS persistent volume recycling

    ClusterCIDR [Required]
    -string +
    IncrementTimeoutNFS [Required]
    +int32
    -

    clusterCIDR is CIDR Range for Pods in cluster.

    +

    incrementTimeoutNFS is the increment of time added per Gi to ActiveDeadlineSeconds +for an NFS scrubber pod.

    AllocateNodeCIDRs [Required]
    -bool +
    PodTemplateFilePathHostPath [Required]
    +string
    -

    AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if -ConfigureCloudRoutes is true, to be set on the cloud provider.

    +

    podTemplateFilePathHostPath is the file path to a pod definition used as a template for +HostPath persistent volume recycling. This is for development and testing only and +will not work in a multi-node cluster.

    CIDRAllocatorType [Required]
    -string +
    MinimumTimeoutHostPath [Required]
    +int32
    -

    CIDRAllocatorType determines what kind of pod CIDR allocator will be used.

    +

    minimumTimeoutHostPath is the minimum ActiveDeadlineSeconds to use for a HostPath +Recycler pod. This is for development and testing only and will not work in a multi-node +cluster.

    ConfigureCloudRoutes [Required]
    -bool +
    IncrementTimeoutHostPath [Required]
    +int32
    -

    configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs -to be configured on the cloud provider.

    +

    incrementTimeoutHostPath is the increment of time added per Gi to ActiveDeadlineSeconds +for a HostPath scrubber pod. This is for development and testing only and will not work +in a multi-node cluster.

    NodeSyncPeriod [Required]
    -meta/v1.Duration +
    + +## `PodGCControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-PodGCControllerConfiguration} + + +**Appears in:** + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    PodGCControllerConfiguration contains elements describing PodGCController.

    + + + + + + + +
    FieldDescription
    TerminatedPodGCThreshold [Required]
    +int32
    -

    nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer -periods will result in fewer calls to cloud provider, but may delay addition -of new nodes to cluster.

    +

    terminatedPodGCThreshold is the number of terminated pods that can exist +before the terminated pod garbage collector starts deleting terminated pods. +If <= 0, the terminated pod garbage collector is disabled.

    -## `WebhookConfiguration` {#cloudcontrollermanager-config-k8s-io-v1alpha1-WebhookConfiguration} +## `ReplicaSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ReplicaSetControllerConfiguration} **Appears in:** -- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    WebhookConfiguration contains configuration related to -cloud-controller-manager hosted webhooks

    +

    ReplicaSetControllerConfiguration contains elements describing ReplicaSetController.

    @@ -1754,77 +1688,55 @@ cloud-controller-manager hosted webhooks

    -
    Webhooks [Required]
    -[]string +
    ConcurrentRSSyncs [Required]
    +int32
    -

    Webhooks is the list of webhooks to enable or disable -'*' means "all enabled by default webhooks" -'foo' means "enable 'foo'" -'-foo' means "disable 'foo'" -first item for a particular name wins

    +

    concurrentRSSyncs is the number of replica sets that are allowed to sync +concurrently. Larger number = more responsive replica management, but more +CPU (and network) load.

    - - - -## `LeaderMigrationConfiguration` {#controllermanager-config-k8s-io-v1alpha1-LeaderMigrationConfiguration} +## `ReplicationControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ReplicationControllerConfiguration} **Appears in:** -- [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    LeaderMigrationConfiguration provides versioned configuration for all migrating leader locks.

    +

    ReplicationControllerConfiguration contains elements describing ReplicationController.

    - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    controllermanager.config.k8s.io/v1alpha1
    kind
    string
    LeaderMigrationConfiguration
    leaderName [Required]
    -string -
    -

    LeaderName is the name of the leader election resource that protects the migration -E.g. 1-20-KCM-to-1-21-CCM

    -
    resourceLock [Required]
    -string -
    -

    ResourceLock indicates the resource object type that will be used to lock -Should be "leases" or "endpoints"

    -
    controllerLeaders [Required]
    -[]ControllerLeaderConfiguration +
    ConcurrentRCSyncs [Required]
    +int32
    -

    ControllerLeaders contains a list of migrating leader lock configurations

    +

    concurrentRCSyncs is the number of replication controllers that are +allowed to sync concurrently. Larger number = more responsive replica +management, but more CPU (and network) load.

    -## `ControllerLeaderConfiguration` {#controllermanager-config-k8s-io-v1alpha1-ControllerLeaderConfiguration} +## `ResourceQuotaControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ResourceQuotaControllerConfiguration} **Appears in:** -- [LeaderMigrationConfiguration](#controllermanager-config-k8s-io-v1alpha1-LeaderMigrationConfiguration) +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    ControllerLeaderConfiguration provides the configuration for a migrating leader lock.

    +

    ResourceQuotaControllerConfiguration contains elements describing ResourceQuotaController.

    @@ -1832,37 +1744,35 @@ Should be "leases" or "endpoints"

    - -
    name [Required]
    -string +
    ResourceQuotaSyncPeriod [Required]
    +meta/v1.Duration
    -

    Name is the name of the controller being migrated -E.g. service-controller, route-controller, cloud-node-controller, etc

    +

    resourceQuotaSyncPeriod is the period for syncing quota usage status +in the system.

    component [Required]
    -string +
    ConcurrentResourceQuotaSyncs [Required]
    +int32
    -

    Component is the name of the component in which the controller should be running. -E.g. kube-controller-manager, cloud-controller-manager, etc -Or '*' meaning the controller can be run under any component that participates in the migration

    +

    concurrentResourceQuotaSyncs is the number of resource quotas that are +allowed to sync concurrently. Larger number = more responsive quota +management, but more CPU (and network) load.

    -## `GenericControllerManagerConfiguration` {#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration} +## `SAControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-SAControllerConfiguration} **Appears in:** -- [CloudControllerManagerConfiguration](#cloudcontrollermanager-config-k8s-io-v1alpha1-CloudControllerManagerConfiguration) - - [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) -

    GenericControllerManagerConfiguration holds configuration for a generic controller-manager.

    +

    SAControllerConfiguration contains elements describing ServiceAccountController.

    @@ -1870,80 +1780,168 @@ Or '*' meaning the controller can be run under any component that participates i - - - - +
    Port [Required]
    -int32 +
    ServiceAccountKeyFile [Required]
    +string
    -

    port is the port that the controller-manager's http service runs on.

    +

    serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key +used to sign service account tokens.

    Address [Required]
    -string +
    ConcurrentSATokenSyncs [Required]
    +int32
    -

    address is the IP address to serve on (set to 0.0.0.0 for all interfaces).

    +

    concurrentSATokenSyncs is the number of service account token syncing operations +that will be done concurrently.

    MinResyncPeriod [Required]
    -meta/v1.Duration +
    RootCAFile [Required]
    +string
    -

    minResyncPeriod is the resync period in reflectors; will be random between -minResyncPeriod and 2*minResyncPeriod.

    +

rootCAFile is the root certificate authority that will be included in the service +account's token secret. This must be a valid PEM-encoded CA bundle.

    ClientConnection [Required]
    -ClientConnectionConfiguration +
    + +## `StatefulSetControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-StatefulSetControllerConfiguration} + + +**Appears in:** + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    StatefulSetControllerConfiguration contains elements describing StatefulSetController.

    + + + + + + + + - +
    FieldDescription
    ConcurrentStatefulSetSyncs [Required]
    +int32
    -

    ClientConnection specifies the kubeconfig file and client connection -settings for the proxy server to use when communicating with the apiserver.

    +

    concurrentStatefulSetSyncs is the number of statefulset objects that are +allowed to sync concurrently. Larger number = more responsive statefulsets, +but more CPU (and network) load.

    ControllerStartInterval [Required]
    -meta/v1.Duration +
    + +## `TTLAfterFinishedControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-TTLAfterFinishedControllerConfiguration} + + +**Appears in:** + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    TTLAfterFinishedControllerConfiguration contains elements describing TTLAfterFinishedController.

    + + + + + + + + - +
    FieldDescription
    ConcurrentTTLSyncs [Required]
    +int32
    -

    How long to wait between starting controller managers

    +

    concurrentTTLSyncs is the number of TTL-after-finished collector workers that are +allowed to sync concurrently.

    LeaderElection [Required]
    -LeaderElectionConfiguration +
    + +## `ValidatingAdmissionPolicyStatusControllerConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-ValidatingAdmissionPolicyStatusControllerConfiguration} + + +**Appears in:** + +- [KubeControllerManagerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-KubeControllerManagerConfiguration) + + +

    ValidatingAdmissionPolicyStatusControllerConfiguration contains elements describing ValidatingAdmissionPolicyStatusController.

    + + + + + + + + - +
    FieldDescription
    ConcurrentPolicySyncs [Required]
    +int32
    -

    leaderElection defines the configuration of leader election client.

    +

    ConcurrentPolicySyncs is the number of policy objects that are +allowed to sync concurrently. Larger number = quicker type checking, +but more CPU (and network) load. +The default value is 5.

    Controllers [Required]
    -[]string +
    + +## `VolumeConfiguration` {#kubecontrollermanager-config-k8s-io-v1alpha1-VolumeConfiguration} + + +**Appears in:** + +- [PersistentVolumeBinderControllerConfiguration](#kubecontrollermanager-config-k8s-io-v1alpha1-PersistentVolumeBinderControllerConfiguration) + + +

    VolumeConfiguration contains all enumerated flags meant to configure all volume +plugins. From this config, the controller-manager binary will create many instances of +volume.VolumeConfig, each containing only the configuration needed for that plugin which +are then passed to the appropriate plugin. The ControllerManager binary is the only part +of the code which knows what plugins are supported and which flags correspond to each plugin.

    + + + + + + + + - - - diff --git a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md index 328655b5d117e..e6a03cc7f02c5 100644 --- a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md @@ -12,6 +12,7 @@ auto_generated: true - [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + ## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} @@ -20,8 +21,6 @@ auto_generated: true - [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) - [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) @@ -82,8 +81,6 @@ client.

    - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) @@ -118,8 +115,6 @@ enableProfiling is true.

    **Appears in:** -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) - [GenericControllerManagerConfiguration](#controllermanager-config-k8s-io-v1alpha1-GenericControllerManagerConfiguration) @@ -201,7 +196,6 @@ during leader election cycles.

    FieldDescription
    EnableHostPathProvisioning [Required]
    +bool
    -

    Controllers is the list of controllers to enable or disable -'*' means "all enabled by default controllers" -'foo' means "enable 'foo'" -'-foo' means "disable 'foo'" -first item for a particular name wins

    +

    enableHostPathProvisioning enables HostPath PV provisioning when running without a +cloud provider. This allows testing and development of provisioning features. HostPath +provisioning is not supported in any way, won't work in a multi-node cluster, and +should not be used for anything other than testing or development.

    Debugging [Required]
    -DebuggingConfiguration +
    EnableDynamicProvisioning [Required]
    +bool
    -

    DebuggingConfiguration holds configuration for Debugging related features.

    +

    enableDynamicProvisioning enables the provisioning of volumes when running within an environment +that supports dynamic provisioning. Defaults to true.

    LeaderMigrationEnabled [Required]
    -bool +
    PersistentVolumeRecyclerConfiguration [Required]
    +PersistentVolumeRecyclerConfiguration
    -

    LeaderMigrationEnabled indicates whether Leader Migration should be enabled for the controller manager.

    +

    persistentVolumeRecyclerConfiguration holds configuration for persistent volume plugins.

    LeaderMigration [Required]
    -LeaderMigrationConfiguration +
    FlexVolumePluginDir [Required]
    +string
    -

    LeaderMigration holds the configuration for Leader Migration.

    +

    volumePluginDir is the full path of the directory in which the flex +volume plugin should search for additional third party volume plugins

    - ## `KubeProxyConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration} @@ -226,35 +220,66 @@ Kubernetes proxy server.

    featureGates is a map of feature names to bools that enable or disable alpha/experimental features.

    +clientConnection [Required]
    +ClientConnectionConfiguration + + +

    clientConnection specifies the kubeconfig file and client connection settings for the proxy +server to use when communicating with the apiserver.

    + + +logging [Required]
    +LoggingConfiguration + + +

    logging specifies the options of logging. +Refer to Logs Options +for more information.

    + + +hostnameOverride [Required]
    +string + + +

    hostnameOverride, if non-empty, will be used as the name of the Node that +kube-proxy is running on. If unset, the node name is assumed to be the same as +the node's hostname.

    + + bindAddress [Required]
    string -

    bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 -for all interfaces)

    +

    bindAddress can be used to override kube-proxy's idea of what its node's +primary IP is. Note that the name is a historical artifact, and kube-proxy does +not actually bind any sockets to this IP.

    healthzBindAddress [Required]
    string -

    healthzBindAddress is the IP address and port for the health check server to serve on, -defaulting to 0.0.0.0:10256

    +

    healthzBindAddress is the IP address and port for the health check server to +serve on, defaulting to "0.0.0.0:10256" (if bindAddress is unset or IPv4), or +"[::]:10256" (if bindAddress is IPv6).

    metricsBindAddress [Required]
    string -

    metricsBindAddress is the IP address and port for the metrics server to serve on, -defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)

    +

    metricsBindAddress is the IP address and port for the metrics server to serve +on, defaulting to "127.0.0.1:10249" (if bindAddress is unset or IPv4), or +"[::1]:10249" (if bindAddress is IPv6). (Set to "0.0.0.0:10249" / "[::]:10249" +to bind on all interfaces.)

    bindAddressHardFail [Required]
    bool -

    bindAddressHardFail, if true, kube-proxy will treat failure to bind to a port as fatal and exit

    +

    bindAddressHardFail, if true, tells kube-proxy to treat failure to bind to a +port as fatal and exit

    enableProfiling [Required]
    @@ -265,28 +290,18 @@ defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces)

    Profiling handlers will be handled by metrics server.

    -clusterCIDR [Required]
    -string - - -

    clusterCIDR is the CIDR range of the pods in the cluster. It is used to -bridge traffic coming from outside of the cluster. If not provided, -no off-cluster bridging will be performed.

    - - -hostnameOverride [Required]
    +showHiddenMetricsForVersion [Required]
    string -

    hostnameOverride, if non-empty, will be used as the identity instead of the actual hostname.

    +

    showHiddenMetricsForVersion is the version for which you want to show hidden metrics.

    -clientConnection [Required]
    -ClientConnectionConfiguration +mode [Required]
    +ProxyMode -

    clientConnection specifies the kubeconfig file and client connection settings for the proxy -server to use when communicating with the apiserver.

    +

    mode specifies which proxy mode to use.

    iptables [Required]
    @@ -303,92 +318,83 @@ server to use when communicating with the apiserver.

    ipvs contains ipvs-related configuration options.

    -oomScoreAdj [Required]
    -int32 +nftables [Required]
    +KubeProxyNFTablesConfiguration -

    oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within -the range [-1000, 1000]

    +

    nftables contains nftables-related configuration options.

    -mode [Required]
    -ProxyMode +winkernel [Required]
    +KubeProxyWinkernelConfiguration -

    mode specifies which proxy mode to use.

    +

    winkernel contains winkernel-related configuration options.

    -portRange [Required]
    -string +detectLocalMode [Required]
    +LocalMode -

    portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed -in order to proxy service traffic. If unspecified (0-0) then ports will be randomly chosen.

    +

detectLocalMode determines the mode to use for detecting local traffic; defaults to LocalModeClusterCIDR.

    -conntrack [Required]
    -KubeProxyConntrackConfiguration +detectLocal [Required]
    +DetectLocalConfiguration -

    conntrack contains conntrack-related configuration options.

    +

    detectLocal contains optional configuration settings related to DetectLocalMode.

    -configSyncPeriod [Required]
    -meta/v1.Duration +clusterCIDR [Required]
    +string -

    configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater -than 0.

    +

    clusterCIDR is the CIDR range of the pods in the cluster. (For dual-stack +clusters, this can be a comma-separated dual-stack pair of CIDR ranges.). When +DetectLocalMode is set to LocalModeClusterCIDR, kube-proxy will consider +traffic to be local if its source IP is in this range. (Otherwise it is not +used.)

    nodePortAddresses [Required]
    []string -

    nodePortAddresses is the --nodeport-addresses value for kube-proxy process. Values must be valid -IP blocks. These values are as a parameter to select the interfaces where nodeport works. -In case someone would like to expose a service on localhost for local visit and some other interfaces for -particular purpose, a list of IP blocks would do that. -If set it to "127.0.0.0/8", kube-proxy will only select the loopback interface for NodePort. -If set it to a non-zero IP block, kube-proxy will filter that down to just the IPs that applied to the node. -An empty string slice is meant to select all network interfaces.

    - - -winkernel [Required]
    -KubeProxyWinkernelConfiguration - - -

    winkernel contains winkernel-related configuration options.

    +

    nodePortAddresses is a list of CIDR ranges that contain valid node IPs. If set, +connections to NodePort services will only be accepted on node IPs in one of +the indicated ranges. If unset, NodePort connections will be accepted on all +local IPs.

    -showHiddenMetricsForVersion [Required]
    -string +oomScoreAdj [Required]
    +int32 -

    ShowHiddenMetricsForVersion is the version for which you want to show hidden metrics.

    +

    oomScoreAdj is the oom-score-adj value for kube-proxy process. Values must be within +the range [-1000, 1000]

    -detectLocalMode [Required]
    -LocalMode +conntrack [Required]
    +KubeProxyConntrackConfiguration -

    DetectLocalMode determines mode to use for detecting local traffic, defaults to LocalModeClusterCIDR

    +

    conntrack contains conntrack-related configuration options.

    -detectLocal [Required]
    -DetectLocalConfiguration +configSyncPeriod [Required]
    +meta/v1.Duration -

    DetectLocal contains optional configuration settings related to DetectLocalMode.

    +

    configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater +than 0.

    -logging [Required]
    -LoggingConfiguration +portRange [Required]
    +string -

    logging specifies the options of logging. -Refer to Logs Options -for more information.

    +

    portRange was previously used to configure the userspace proxy, but is now unused.
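Unlike the controller-manager types above, kube-proxy is routinely configured from a file of exactly this type (passed via `--config`). A minimal sketch pulling together the reworded fields from this hunk (values illustrative):

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "nftables"
# Only consulted when detectLocalMode is ClusterCIDR; dual-stack clusters
# may pass a comma-separated IPv4,IPv6 pair.
detectLocalMode: "ClusterCIDR"
clusterCIDR: "10.244.0.0/16"
# Accept NodePort connections only on node IPs inside these ranges.
nodePortAddresses:
  - "192.168.0.0/24"
healthzBindAddress: "0.0.0.0:10256"
metricsBindAddress: "127.0.0.1:10249"
```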

    @@ -414,18 +420,18 @@ for more information.

    string -

    BridgeInterface is a string argument which represents a single bridge interface name. -Kube-proxy considers traffic as local if originating from this given bridge. -This argument should be set if DetectLocalMode is set to LocalModeBridgeInterface.

    +

    bridgeInterface is a bridge interface name. When DetectLocalMode is set to +LocalModeBridgeInterface, kube-proxy will consider traffic to be local if +it originates from this bridge.

    interfaceNamePrefix [Required]
    string -

    InterfaceNamePrefix is a string argument which represents a single interface prefix name. -Kube-proxy considers traffic as local if originating from one or more interfaces which match -the given prefix. This argument should be set if DetectLocalMode is set to LocalModeInterfaceNamePrefix.

    +

    interfaceNamePrefix is an interface name prefix. When DetectLocalMode is set to +LocalModeInterfaceNamePrefix, kube-proxy will consider traffic to be local if +it originates from any interface whose name begins with this prefix.

    @@ -461,7 +467,7 @@ per CPU core (0 to leave the limit as-is and ignore min).

    min is the minimum value of connect-tracking records to allocate, -regardless of conntrackMaxPerCore (set maxPerCore=0 to leave the limit as-is).

    +regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is).

    tcpEstablishedTimeout [Required]
    @@ -481,6 +487,33 @@ in CLOSE_WAIT state will remain in the conntrack table. (e.g. '60s'). Must be greater than 0 to set.

    +tcpBeLiberal [Required]
    +bool + + +

tcpBeLiberal, if true, tells kube-proxy to configure conntrack +to run in liberal mode for TCP connections, so that packets with +out-of-window sequence numbers won't be marked INVALID.

    + + +udpTimeout [Required]
    +meta/v1.Duration + + +

    udpTimeout is how long an idle UDP conntrack entry in +UNREPLIED state will remain in the conntrack table +(e.g. '30s'). Must be greater than 0 to set.

    + + +udpStreamTimeout [Required]
    +meta/v1.Duration + + +

    udpStreamTimeout is how long an idle UDP conntrack entry in +ASSURED state will remain in the conntrack table +(e.g. '300s'). Must be greater than 0 to set.

    + + @@ -506,38 +539,44 @@ details for the Kubernetes proxy server.

    masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using -the pure iptables proxy mode. Values must be within the range [0, 31].

    +the iptables or ipvs proxy mode. Values must be within the range [0, 31].

    masqueradeAll [Required]
    bool -

    masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode.

    +

    masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs, +when using the iptables or ipvs proxy mode. This may be required with some CNI +plugins.

    localhostNodePorts [Required]
    bool -

    LocalhostNodePorts tells kube-proxy to allow service NodePorts to be accessed via -localhost (iptables mode only)

    +

    localhostNodePorts, if false, tells kube-proxy to disable the legacy behavior +of allowing NodePort services to be accessed via localhost. (Applies only to +iptables mode and IPv4; localhost NodePorts are never allowed with other proxy +modes or with IPv6.)

    syncPeriod [Required]
    meta/v1.Duration -

    syncPeriod is the period that iptables rules are refreshed (e.g. '5s', '1m', -'2h22m'). Must be greater than 0.

    +

    syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently +various re-synchronizing and cleanup operations are performed. Must be greater +than 0.

    minSyncPeriod [Required]
    meta/v1.Duration -

    minSyncPeriod is the minimum period that iptables rules are refreshed (e.g. '5s', '1m', -'2h22m').

    +

    minSyncPeriod is the minimum period between iptables rule resyncs (e.g. '5s', +'1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will +result in an immediate iptables resync.

    @@ -564,30 +603,32 @@ details for the Kubernetes proxy server.

    meta/v1.Duration -

    syncPeriod is the period that ipvs rules are refreshed (e.g. '5s', '1m', -'2h22m'). Must be greater than 0.

    +

    syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently +various re-synchronizing and cleanup operations are performed. Must be greater +than 0.

    minSyncPeriod [Required]
    meta/v1.Duration -

    minSyncPeriod is the minimum period that ipvs rules are refreshed (e.g. '5s', '1m', -'2h22m').

    +

    minSyncPeriod is the minimum period between IPVS rule resyncs (e.g. '5s', '1m', +'2h22m'). A value of 0 means every Service or EndpointSlice change will result +in an immediate IPVS resync.

    scheduler [Required]
    string -

    ipvs scheduler

    +

    scheduler is the IPVS scheduler to use

    excludeCIDRs [Required]
    []string -

    excludeCIDRs is a list of CIDR's which the ipvs proxier should not touch +

    excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch when cleaning up ipvs services.

    @@ -595,7 +636,7 @@ when cleaning up ipvs services.

    bool -

    strict ARP configure arp_ignore and arp_announce to avoid answering ARP queries +

    strictARP configures arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface

    @@ -626,6 +667,60 @@ The default value is 0, which preserves the current timeout value on the system. +## `KubeProxyNFTablesConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyNFTablesConfiguration} + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +

    KubeProxyNFTablesConfiguration contains nftables-related configuration +details for the Kubernetes proxy server.

    + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    masqueradeBit [Required]
    +int32 +
    +

    masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using +the nftables proxy mode. Values must be within the range [0, 31].

    +
    masqueradeAll [Required]
    +bool +
    +

    masqueradeAll tells kube-proxy to SNAT all traffic sent to Service cluster IPs, +when using the nftables mode. This may be required with some CNI plugins.

    +
    syncPeriod [Required]
    +meta/v1.Duration +
    +

    syncPeriod is an interval (e.g. '5s', '1m', '2h22m') indicating how frequently +various re-synchronizing and cleanup operations are performed. Must be greater +than 0.

    +
    minSyncPeriod [Required]
    +meta/v1.Duration +
    +

    minSyncPeriod is the minimum period between iptables rule resyncs (e.g. '5s', +'1m', '2h22m'). A value of 0 means every Service or EndpointSlice change will +result in an immediate iptables resync.
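A sketch of the new `nftables` block; it deliberately mirrors the shape of the `iptables` block (the masqueradeBit value matches the iptables default and is an assumption here):

```yaml
mode: "nftables"
nftables:
  masqueradeBit: 14
  masqueradeAll: false
  syncPeriod: 30s
  # 0 would resync on every Service/EndpointSlice change.
  minSyncPeriod: 1s
```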

    +
    + ## `KubeProxyWinkernelConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyWinkernelConfiguration} @@ -655,7 +750,7 @@ to create endpoints and policies

    string -

    sourceVip is the IP address of the source VIP endoint used for +

    sourceVip is the IP address of the source VIP endpoint used for NAT when loadbalancing

    @@ -671,7 +766,7 @@ with DSR

    string -

    RootHnsEndpointName is the name of hnsendpoint that is attached to +

    rootHnsEndpointName is the name of hnsendpoint that is attached to l2bridge for root network namespace

    @@ -679,7 +774,7 @@ l2bridge for root network namespace

    bool -

    ForwardHealthCheckVip forwards service VIP for health check port on +

    forwardHealthCheckVip forwards service VIP for health check port on Windows

    diff --git a/content/en/docs/reference/config-api/kube-scheduler-config.v1.md b/content/en/docs/reference/config-api/kube-scheduler-config.v1.md index cb07bc0654b23..886523e82e4b8 100644 --- a/content/en/docs/reference/config-api/kube-scheduler-config.v1.md +++ b/content/en/docs/reference/config-api/kube-scheduler-config.v1.md @@ -19,14 +19,13 @@ auto_generated: true - [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1-VolumeBindingArgs) + ## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} **Appears in:** -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) @@ -83,8 +82,6 @@ client.

    **Appears in:** -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) @@ -121,8 +118,6 @@ enableProfiling is true.

    - [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1-KubeSchedulerConfiguration) -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) -

    LeaderElectionConfiguration defines the configuration of leader election clients for components that can run with leader election enabled.

    @@ -200,7 +195,6 @@ during leader election cycles.

    - ## `DefaultPreemptionArgs` {#kubescheduler-config-k8s-io-v1-DefaultPreemptionArgs} @@ -402,7 +396,7 @@ Defaults to false.

    addedAffinity
    -core/v1.NodeAffinity +core/v1.NodeAffinity

    AddedAffinity is applied to all Pods additionally to the NodeAffinity @@ -501,7 +495,7 @@ The default strategy is LeastAllocated with an equal "cpu" and "m defaultConstraints
    -[]core/v1.TopologySpreadConstraint +[]core/v1.TopologySpreadConstraint

    DefaultConstraints defines topology spread constraints to be applied to diff --git a/content/en/docs/reference/config-api/kube-scheduler-config.v1beta3.md b/content/en/docs/reference/config-api/kube-scheduler-config.v1beta3.md deleted file mode 100644 index 6fc64f5bba2d4..0000000000000 --- a/content/en/docs/reference/config-api/kube-scheduler-config.v1beta3.md +++ /dev/null @@ -1,1253 +0,0 @@ ---- -title: kube-scheduler Configuration (v1beta3) -content_type: tool-reference -package: kubescheduler.config.k8s.io/v1beta3 -auto_generated: true ---- - - -## Resource Types - - -- [DefaultPreemptionArgs](#kubescheduler-config-k8s-io-v1beta3-DefaultPreemptionArgs) -- [InterPodAffinityArgs](#kubescheduler-config-k8s-io-v1beta3-InterPodAffinityArgs) -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) -- [NodeAffinityArgs](#kubescheduler-config-k8s-io-v1beta3-NodeAffinityArgs) -- [NodeResourcesBalancedAllocationArgs](#kubescheduler-config-k8s-io-v1beta3-NodeResourcesBalancedAllocationArgs) -- [NodeResourcesFitArgs](#kubescheduler-config-k8s-io-v1beta3-NodeResourcesFitArgs) -- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta3-PodTopologySpreadArgs) -- [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1beta3-VolumeBindingArgs) - - - -## `DefaultPreemptionArgs` {#kubescheduler-config-k8s-io-v1beta3-DefaultPreemptionArgs} - - - -

    DefaultPreemptionArgs holds arguments used to configure the -DefaultPreemption plugin.

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    DefaultPreemptionArgs
    minCandidateNodesPercentage [Required]
    -int32 -
    -

    MinCandidateNodesPercentage is the minimum number of candidates to -shortlist when dry running preemption as a percentage of number of nodes. -Must be in the range [0, 100]. Defaults to 10% of the cluster size if -unspecified.

    -
    minCandidateNodesAbsolute [Required]
    -int32 -
    -

    MinCandidateNodesAbsolute is the absolute minimum number of candidates to -shortlist. The likely number of candidates enumerated for dry running -preemption is given by the formula: -numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) -We say "likely" because there are other factors such as PDB violations -that play a role in the number of candidates shortlisted. Must be at least -0 nodes. Defaults to 100 nodes if unspecified.

    -
    - -## `InterPodAffinityArgs` {#kubescheduler-config-k8s-io-v1beta3-InterPodAffinityArgs} - - - -

    InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin.

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    InterPodAffinityArgs
    hardPodAffinityWeight [Required]
    -int32 -
    -

    HardPodAffinityWeight is the scoring weight for existing pods with a -matching hard affinity to the incoming pod.

    -
    ignorePreferredTermsOfExistingPods [Required]
    -bool -
    -

    IgnorePreferredTermsOfExistingPods configures the scheduler to ignore existing pods' preferred affinity -rules when scoring candidate nodes, unless the incoming pod has inter-pod affinities.

    -
    - -## `KubeSchedulerConfiguration` {#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration} - - - -

    KubeSchedulerConfiguration configures a scheduler

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    KubeSchedulerConfiguration
    parallelism [Required]
    -int32 -
    -

    Parallelism defines the amount of parallelism in algorithms for scheduling a Pods. Must be greater than 0. Defaults to 16

    -
    leaderElection [Required]
    -LeaderElectionConfiguration -
    -

    LeaderElection defines the configuration of leader election client.

    -
    clientConnection [Required]
    -ClientConnectionConfiguration -
    -

    ClientConnection specifies the kubeconfig file and client connection -settings for the proxy server to use when communicating with the apiserver.

    -
    DebuggingConfiguration [Required]
    -DebuggingConfiguration -
    (Members of DebuggingConfiguration are embedded into this type.) -

    DebuggingConfiguration holds configuration for Debugging related features -TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration

    -
    percentageOfNodesToScore [Required]
    -int32 -
    -

    PercentageOfNodesToScore is the percentage of all nodes that once found feasible -for running a pod, the scheduler stops its search for more feasible nodes in -the cluster. This helps improve scheduler's performance. Scheduler always tries to find -at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. -Example: if the cluster size is 500 nodes and the value of this flag is 30, -then scheduler stops finding further feasible nodes once it finds 150 feasible ones. -When the value is 0, default percentage (5%--50% based on the size of the cluster) of the -nodes will be scored.

    -
    podInitialBackoffSeconds [Required]
    -int64 -
    -

    PodInitialBackoffSeconds is the initial backoff for unschedulable pods. -If specified, it must be greater than 0. If this value is null, the default value (1s) -will be used.

    -
    podMaxBackoffSeconds [Required]
    -int64 -
    -

    PodMaxBackoffSeconds is the max backoff for unschedulable pods. -If specified, it must be greater than podInitialBackoffSeconds. If this value is null, -the default value (10s) will be used.

    -
    profiles [Required]
    -[]KubeSchedulerProfile -
    -

    Profiles are scheduling profiles that kube-scheduler supports. Pods can -choose to be scheduled under a particular profile by setting its associated -scheduler name. Pods that don't specify any scheduler name are scheduled -with the "default-scheduler" profile, if present here.

    -
    extenders [Required]
    -[]Extender -
    -

    Extenders are the list of scheduler extenders, each holding the values of how to communicate -with the extender. These extenders are shared by all scheduler profiles.

    -
    - -## `NodeAffinityArgs` {#kubescheduler-config-k8s-io-v1beta3-NodeAffinityArgs} - - - -

    NodeAffinityArgs holds arguments to configure the NodeAffinity plugin.

    - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    NodeAffinityArgs
    addedAffinity
    -core/v1.NodeAffinity -
    -

    AddedAffinity is applied to all Pods additionally to the NodeAffinity -specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity -AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes -match). -When AddedAffinity is used, some Pods with affinity requirements that match -a specific Node (such as Daemonset Pods) might remain unschedulable.

    -
    - -## `NodeResourcesBalancedAllocationArgs` {#kubescheduler-config-k8s-io-v1beta3-NodeResourcesBalancedAllocationArgs} - - - -

    NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin.

    - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    NodeResourcesBalancedAllocationArgs
    resources [Required]
    -[]ResourceSpec -
    -

    Resources to be managed, the default is "cpu" and "memory" if not specified.

    -
    - -## `NodeResourcesFitArgs` {#kubescheduler-config-k8s-io-v1beta3-NodeResourcesFitArgs} - - - -

    NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin.

    - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    NodeResourcesFitArgs
    ignoredResources [Required]
    -[]string -
    -

    IgnoredResources is the list of resources that NodeResources fit filter -should ignore. This doesn't apply to scoring.

    -
    ignoredResourceGroups [Required]
    -[]string -
    -

    IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. -e.g. if group is ["example.com"], it will ignore all resource names that begin -with "example.com", such as "example.com/aaa" and "example.com/bbb". -A resource group name can't contain '/'. This doesn't apply to scoring.

    -
    scoringStrategy [Required]
    -ScoringStrategy -
    -

    ScoringStrategy selects the node resource scoring strategy. -The default strategy is LeastAllocated with an equal "cpu" and "memory" weight.

    -
    - -## `PodTopologySpreadArgs` {#kubescheduler-config-k8s-io-v1beta3-PodTopologySpreadArgs} - - - -

    PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin.

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    PodTopologySpreadArgs
    defaultConstraints
    -[]core/v1.TopologySpreadConstraint -
    -

    DefaultConstraints defines topology spread constraints to be applied to -Pods that don't define any in pod.spec.topologySpreadConstraints. -.defaultConstraints[*].labelSelectors must be empty, as they are -deduced from the Pod's membership to Services, ReplicationControllers, -ReplicaSets or StatefulSets. -When not empty, .defaultingType must be "List".

    -
    defaultingType
    -PodTopologySpreadConstraintsDefaulting -
    -

    DefaultingType determines how .defaultConstraints are deduced. Can be one -of "System" or "List".

    -
      -
    • "System": Use kubernetes defined constraints that spread Pods among -Nodes and Zones.
    • -
    • "List": Use constraints defined in .defaultConstraints.
    • -
    -

    Defaults to "System".

    -
    - -## `VolumeBindingArgs` {#kubescheduler-config-k8s-io-v1beta3-VolumeBindingArgs} - - - -

    VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin.

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    apiVersion
    string
    kubescheduler.config.k8s.io/v1beta3
    kind
    string
    VolumeBindingArgs
    bindTimeoutSeconds [Required]
    -int64 -
    -

    BindTimeoutSeconds is the timeout in seconds in volume binding operation. -Value must be non-negative integer. The value zero indicates no waiting. -If this value is nil, the default value (600) will be used.

    -
    shape
    -[]UtilizationShapePoint -
    -

    Shape specifies the points defining the score function shape, which is -used to score nodes based on the utilization of statically provisioned -PVs. The utilization is calculated by dividing the total requested -storage of the pod by the total capacity of feasible PVs on each node. -Each point contains utilization (ranges from 0 to 100) and its -associated score (ranges from 0 to 10). You can turn the priority by -specifying different scores for different utilization numbers. -The default shape points are:

    -
      -
    1. 0 for 0 utilization
    2. -
    3. 10 for 100 utilization -All points must be sorted in increasing order by utilization.
    4. -
    -
    - -## `Extender` {#kubescheduler-config-k8s-io-v1beta3-Extender} - - -**Appears in:** - -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - -

    Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -it is assumed that the extender chose not to provide that extension.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    urlPrefix [Required]
    -string -
    -

    URLPrefix at which the extender is available

    -
    filterVerb [Required]
    -string -
    -

    Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.

    -
    preemptVerb [Required]
    -string -
    -

    Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.

    -
    prioritizeVerb [Required]
    -string -
    -

    Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.

    -
    weight [Required]
    -int64 -
    -

    The numeric multiplier for the node scores that the prioritize call generates. -The weight should be a positive integer

    -
    bindVerb [Required]
    -string -
    -

    Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. -If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender -can implement this function.

    -
    enableHTTPS [Required]
    -bool -
    -

    EnableHTTPS specifies whether https should be used to communicate with the extender

    -
    tlsConfig [Required]
    -ExtenderTLSConfig -
    -

    TLSConfig specifies the transport layer security config

    -
    httpTimeout [Required]
    -meta/v1.Duration -
    -

    HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize -timeout is ignored, k8s/other extenders priorities are used to select the node.

    -
    nodeCacheCapable [Required]
    -bool -
    -

    NodeCacheCapable specifies that the extender is capable of caching node information, -so the scheduler should only send minimal information about the eligible nodes -assuming that the extender already cached full details of all nodes in the cluster

    -
    managedResources
    -[]ExtenderManagedResource -
    -

    ManagedResources is a list of extended resources that are managed by -this extender.

    -
      -
    • A pod will be sent to the extender on the Filter, Prioritize and Bind -(if the extender is the binder) phases iff the pod requests at least -one of the extended resources in this list. If empty or unspecified, -all pods will be sent to this extender.
    • -
    • If IgnoredByScheduler is set to true for a resource, kube-scheduler -will skip checking the resource in predicates.
    • -
    -
    ignorable [Required]
    -bool -
    -

    Ignorable specifies if the extender is ignorable, i.e. scheduling should not -fail when the extender returns an error or is not reachable.

    -
    - -## `ExtenderManagedResource` {#kubescheduler-config-k8s-io-v1beta3-ExtenderManagedResource} - - -**Appears in:** - -- [Extender](#kubescheduler-config-k8s-io-v1beta3-Extender) - - -

    ExtenderManagedResource describes the arguments of extended resources -managed by an extender.

    - - - - - - - - - - - - - - -
    FieldDescription
    name [Required]
    -string -
    -

    Name is the extended resource name.

    -
    ignoredByScheduler [Required]
    -bool -
    -

    IgnoredByScheduler indicates whether kube-scheduler should ignore this -resource when applying predicates.

    -
    - -## `ExtenderTLSConfig` {#kubescheduler-config-k8s-io-v1beta3-ExtenderTLSConfig} - - -**Appears in:** - -- [Extender](#kubescheduler-config-k8s-io-v1beta3-Extender) - - -

    ExtenderTLSConfig contains settings to enable TLS with extender

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    insecure [Required]
    -bool -
    -

    Server should be accessed without verifying the TLS certificate. For testing only.

    -
    serverName [Required]
    -string -
    -

    ServerName is passed to the server for SNI and is used in the client to check server -certificates against. If ServerName is empty, the hostname used to contact the -server is used.

    -
    certFile [Required]
    -string -
    -

    Server requires TLS client certificate authentication

    -
    keyFile [Required]
    -string -
    -

    Server requires TLS client certificate authentication

    -
    caFile [Required]
    -string -
    -

    Trusted root certificates for server

    -
    certData [Required]
    -[]byte -
    -

    CertData holds PEM-encoded bytes (typically read from a client certificate file). -CertData takes precedence over CertFile

    -
    keyData [Required]
    -[]byte -
    -

    KeyData holds PEM-encoded bytes (typically read from a client certificate key file). -KeyData takes precedence over KeyFile

    -
    caData [Required]
    -[]byte -
    -

    CAData holds PEM-encoded bytes (typically read from a root certificates bundle). -CAData takes precedence over CAFile

    -
    - -## `KubeSchedulerProfile` {#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerProfile} - - -**Appears in:** - -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - -

    KubeSchedulerProfile is a scheduling profile.

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    schedulerName [Required]
    -string -
    -

    SchedulerName is the name of the scheduler associated to this profile. -If SchedulerName matches with the pod's "spec.schedulerName", then the pod -is scheduled with this profile.

    -
    plugins [Required]
    -Plugins -
    -

    Plugins specify the set of plugins that should be enabled or disabled. -Enabled plugins are the ones that should be enabled in addition to the -default plugins. Disabled plugins are any of the default plugins that -should be disabled. -When no enabled or disabled plugin is specified for an extension point, -default plugins for that extension point will be used if there is any. -If a QueueSort plugin is specified, the same QueueSort Plugin and -PluginConfig must be specified for all profiles.

    -
    pluginConfig [Required]
    -[]PluginConfig -
    -

    PluginConfig is an optional set of custom plugin arguments for each plugin. -Omitting config args for a plugin is equivalent to using the default config -for that plugin.

    -
    - -## `Plugin` {#kubescheduler-config-k8s-io-v1beta3-Plugin} - - -**Appears in:** - -- [PluginSet](#kubescheduler-config-k8s-io-v1beta3-PluginSet) - - -

    Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins.

    - - - - - - - - - - - - - - -
    FieldDescription
    name [Required]
    -string -
    -

    Name defines the name of plugin

    -
    weight [Required]
    -int32 -
    -

    Weight defines the weight of plugin, only used for Score plugins.

    -
    - -## `PluginConfig` {#kubescheduler-config-k8s-io-v1beta3-PluginConfig} - - -**Appears in:** - -- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerProfile) - - -

    PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -It is up to the plugin to process these Args.

    - - - - - - - - - - - - - - -
    FieldDescription
    name [Required]
    -string -
    -

    Name defines the name of plugin being configured

    -
    args [Required]
    -k8s.io/apimachinery/pkg/runtime.RawExtension -
    -

    Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.

    -
    - -## `PluginSet` {#kubescheduler-config-k8s-io-v1beta3-PluginSet} - - -**Appears in:** - -- [Plugins](#kubescheduler-config-k8s-io-v1beta3-Plugins) - - -

    PluginSet specifies enabled and disabled plugins for an extension point. -If an array is empty, missing, or nil, default plugins at that extension point will be used.

    - - - - - - - - - - - - - - -
    FieldDescription
    enabled [Required]
    -[]Plugin -
    -

    Enabled specifies plugins that should be enabled in addition to default plugins. -If the default plugin is also configured in the scheduler config file, the weight of plugin will -be overridden accordingly. -These are called after default plugins and in the same order specified here.

    -
    disabled [Required]
    -[]Plugin -
    -

    Disabled specifies default plugins that should be disabled. -When all default plugins need to be disabled, an array containing only one "*" should be provided.

    -
    - -## `Plugins` {#kubescheduler-config-k8s-io-v1beta3-Plugins} - - -**Appears in:** - -- [KubeSchedulerProfile](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerProfile) - - -

    Plugins include multiple extension points. When specified, the list of plugins for -a particular extension point are the only ones enabled. If an extension point is -omitted from the config, then the default set of plugins is used for that extension point. -Enabled plugins are called in the order specified here, after default plugins. If they need to -be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    preEnqueue [Required]
    -PluginSet -
    -

    PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue.

    -
    queueSort [Required]
    -PluginSet -
    -

    QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue.

    -
    preFilter [Required]
    -PluginSet -
    -

    PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework.

    -
    filter [Required]
    -PluginSet -
    -

    Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod.

    -
    postFilter [Required]
    -PluginSet -
    -

    PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod.

    -
    preScore [Required]
    -PluginSet -
    -

    PreScore is a list of plugins that are invoked before scoring.

    -
    score [Required]
    -PluginSet -
    -

    Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase.

    -
    reserve [Required]
    -PluginSet -
    -

    Reserve is a list of plugins invoked when reserving/unreserving resources -after a node is assigned to run the pod.

    -
    permit [Required]
    -PluginSet -
    -

    Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod.

    -
    preBind [Required]
    -PluginSet -
    -

    PreBind is a list of plugins that should be invoked before a pod is bound.

    -
    bind [Required]
    -PluginSet -
    -

    Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. -The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success.

    -
    postBind [Required]
    -PluginSet -
    -

    PostBind is a list of plugins that should be invoked after a pod is successfully bound.

    -
    multiPoint [Required]
    -PluginSet -
    -

    MultiPoint is a simplified config section to enable plugins for all valid extension points. -Plugins enabled through MultiPoint will automatically register for every individual extension -point the plugin has implemented. Disabling a plugin through MultiPoint disables that behavior. -The same is true for disabling "*" through MultiPoint (no default plugins will be automatically registered). -Plugins can still be disabled through their individual extension points.

    -

    In terms of precedence, plugin config follows this basic hierarchy

    -
      -
    1. Specific extension points
    2. -
    3. Explicitly configured MultiPoint plugins
    4. -
    5. The set of default plugins, as MultiPoint plugins -This implies that a higher precedence plugin will run first and overwrite any settings within MultiPoint. -Explicitly user-configured plugins also take a higher precedence over default plugins. -Within this hierarchy, an Enabled setting takes precedence over Disabled. For example, if a plugin is -set in both multiPoint.Enabled and multiPoint.Disabled, the plugin will be enabled. Similarly, -including multiPoint.Disabled = '*' and multiPoint.Enabled = pluginA will still register that specific -plugin through MultiPoint. This follows the same behavior as all other extension point configurations.
    6. -
    -
    - -## `PodTopologySpreadConstraintsDefaulting` {#kubescheduler-config-k8s-io-v1beta3-PodTopologySpreadConstraintsDefaulting} - -(Alias of `string`) - -**Appears in:** - -- [PodTopologySpreadArgs](#kubescheduler-config-k8s-io-v1beta3-PodTopologySpreadArgs) - - -

    PodTopologySpreadConstraintsDefaulting defines how to set default constraints -for the PodTopologySpread plugin.

    - - - - -## `RequestedToCapacityRatioParam` {#kubescheduler-config-k8s-io-v1beta3-RequestedToCapacityRatioParam} - - -**Appears in:** - -- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta3-ScoringStrategy) - - -

    RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters

    - - - - - - - - - - - -
    FieldDescription
    shape [Required]
    -[]UtilizationShapePoint -
    -

    Shape is a list of points defining the scoring function shape.

    -
    - -## `ResourceSpec` {#kubescheduler-config-k8s-io-v1beta3-ResourceSpec} - - -**Appears in:** - -- [NodeResourcesBalancedAllocationArgs](#kubescheduler-config-k8s-io-v1beta3-NodeResourcesBalancedAllocationArgs) - -- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta3-ScoringStrategy) - - -

    ResourceSpec represents a single resource.

    - - - - - - - - - - - - - - -
    FieldDescription
    name [Required]
    -string -
    -

    Name of the resource.

    -
    weight [Required]
    -int64 -
    -

    Weight of the resource.

    -
    - -## `ScoringStrategy` {#kubescheduler-config-k8s-io-v1beta3-ScoringStrategy} - - -**Appears in:** - -- [NodeResourcesFitArgs](#kubescheduler-config-k8s-io-v1beta3-NodeResourcesFitArgs) - - -

    ScoringStrategy define ScoringStrategyType for node resource plugin

    - - - - - - - - - - - - - - - - - -
    FieldDescription
    type [Required]
    -ScoringStrategyType -
    -

    Type selects which strategy to run.

    -
    resources [Required]
    -[]ResourceSpec -
    -

    Resources to consider when scoring. -The default resource set includes "cpu" and "memory" with an equal weight. -Allowed weights go from 1 to 100. -Weight defaults to 1 if not specified or explicitly set to 0.

    -
    requestedToCapacityRatio [Required]
    -RequestedToCapacityRatioParam -
    -

    Arguments specific to RequestedToCapacityRatio strategy.

    -
    - -## `ScoringStrategyType` {#kubescheduler-config-k8s-io-v1beta3-ScoringStrategyType} - -(Alias of `string`) - -**Appears in:** - -- [ScoringStrategy](#kubescheduler-config-k8s-io-v1beta3-ScoringStrategy) - - -

    ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin.

    - - - - -## `UtilizationShapePoint` {#kubescheduler-config-k8s-io-v1beta3-UtilizationShapePoint} - - -**Appears in:** - -- [VolumeBindingArgs](#kubescheduler-config-k8s-io-v1beta3-VolumeBindingArgs) - -- [RequestedToCapacityRatioParam](#kubescheduler-config-k8s-io-v1beta3-RequestedToCapacityRatioParam) - - -

    UtilizationShapePoint represents single point of priority function shape.

    - - - - - - - - - - - - - - -
    FieldDescription
    utilization [Required]
    -int32 -
    -

    Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.

    -
    score [Required]
    -int32 -
    -

    Score assigned to given utilization (y axis). Valid values are 0 to 10.

    -
    - - - - -## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} - - -**Appears in:** - -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - -

    ClientConnectionConfiguration contains details for constructing a client.

    - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    kubeconfig [Required]
    -string -
    -

    kubeconfig is the path to a KubeConfig file.

    -
    acceptContentTypes [Required]
    -string -
    -

    acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the -default value of 'application/json'. This field will control all connections to the server used by a particular -client.

    -
    contentType [Required]
    -string -
    -

    contentType is the content type used when sending data to the server from this client.

    -
    qps [Required]
    -float32 -
    -

    qps controls the number of queries per second allowed for this connection.

    -
    burst [Required]
    -int32 -
    -

    burst allows extra queries to accumulate when a client is exceeding its rate.

    -
    - -## `DebuggingConfiguration` {#DebuggingConfiguration} - - -**Appears in:** - -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - -

    DebuggingConfiguration holds configuration for Debugging related features.

    - - - - - - - - - - - - - - -
    FieldDescription
    enableProfiling [Required]
    -bool -
    -

    enableProfiling enables profiling via web interface host:port/debug/pprof/

    -
    enableContentionProfiling [Required]
    -bool -
    -

    enableContentionProfiling enables block profiling, if -enableProfiling is true.

    -
    - -## `LeaderElectionConfiguration` {#LeaderElectionConfiguration} - - -**Appears in:** - -- [KubeSchedulerConfiguration](#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerConfiguration) - - -

    LeaderElectionConfiguration defines the configuration of leader election -clients for components that can run with leader election enabled.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    leaderElect [Required]
    -bool -
    -

    leaderElect enables a leader election client to gain leadership -before executing the main loop. Enable this when running replicated -components for high availability.

    -
    leaseDuration [Required]
    -meta/v1.Duration -
    -

    leaseDuration is the duration that non-leader candidates will wait -after observing a leadership renewal until attempting to acquire -leadership of a led but unrenewed leader slot. This is effectively the -maximum duration that a leader can be stopped before it is replaced -by another candidate. This is only applicable if leader election is -enabled.

    -
    renewDeadline [Required]
    -meta/v1.Duration -
    -

    renewDeadline is the interval between attempts by the acting master to -renew a leadership slot before it stops leading. This must be less -than or equal to the lease duration. This is only applicable if leader -election is enabled.

    -
    retryPeriod [Required]
    -meta/v1.Duration -
    -

    retryPeriod is the duration the clients should wait between attempting -acquisition and renewal of a leadership. This is only applicable if -leader election is enabled.

    -
    resourceLock [Required]
    -string -
    -

    resourceLock indicates the resource object type that will be used to lock -during leader election cycles.

    -
    resourceName [Required]
    -string -
    -

    resourceName indicates the name of resource object that will be used to lock -during leader election cycles.

    -
    resourceNamespace [Required]
    -string -
    -

    resourceName indicates the namespace of resource object that will be used to lock -during leader election cycles.

    -
    \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md b/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md index 3972691620bb0..bb4cec56503a7 100644 --- a/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md +++ b/content/en/docs/reference/config-api/kubeadm-config.v1beta3.md @@ -264,12 +264,115 @@ node only (e.g. the node ip).

    - [JoinConfiguration](#kubeadm-k8s-io-v1beta3-JoinConfiguration) + + +## `BootstrapToken` {#BootstrapToken} + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + + +

BootstrapToken describes one bootstrap token, stored as a Secret in the cluster.

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    token [Required]
    +BootstrapTokenString +
    +

    token is used for establishing bidirectional trust between nodes and control-planes. +Used for joining nodes in the cluster.

    +
    description
    +string +
    +

    description sets a human-friendly message why this token exists and what it's used +for, so other administrators can know its purpose.

    +
    ttl
    +meta/v1.Duration +
    +

    ttl defines the time to live for this token. Defaults to 24h. +expires and ttl are mutually exclusive.

    +
    expires
    +meta/v1.Time +
    +

    expires specifies the timestamp when this token expires. Defaults to being set +dynamically at runtime based on the ttl. expires and ttl are mutually exclusive.

    +
    usages
    +[]string +
    +

usages describes the ways in which this token can be used. By default, it can be used +for establishing bidirectional trust, but that can be changed here.

    +
    groups
    +[]string +
    +

groups specifies the extra groups that this token will authenticate as when/if +used for authentication.

    +
    + +## `BootstrapTokenString` {#BootstrapTokenString} + + +**Appears in:** + +- [BootstrapToken](#BootstrapToken) + + +

BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used +for both validation of the practical availability of the API server from a joining node's +point of view and as an authentication method for the node in the bootstrap phase of +"kubeadm join". This token is, and should be, short-lived.

    + + + + + + + + + + + + + + +
    FieldDescription
    - [Required]
    +string +
    + No description provided.
    - [Required]
    +string +
    + No description provided.
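As a hedged sketch (field names taken from the tables above, every value a placeholder), a bootstrap token might be declared in an InitConfiguration like this:

```yaml
# Hypothetical example; the token value is a placeholder in the
# documented abcdef.abcdef0123456789 format.
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
bootstrapTokens:
  - token: "abcdef.0123456789abcdef"
    description: "token for joining worker nodes"
    ttl: "24h"                             # mutually exclusive with expires
    usages: ["signing", "authentication"]  # assumed usages, for illustration
    groups: ["system:bootstrappers:kubeadm:default-node-token"]  # assumed group
```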
    + ## `ClusterConfiguration` {#kubeadm-k8s-io-v1beta3-ClusterConfiguration} -

    ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster

    +

    ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster.

    @@ -436,7 +539,8 @@ interface and use that, but in case that process fails you may set the desired v @@ -925,14 +1177,14 @@ Kubeadm has no knowledge of where certificate files live and they must be suppli []string @@ -940,7 +1192,7 @@ Required if using a TLS connection.

    string @@ -948,7 +1200,7 @@ Required if using a TLS connection.

    string @@ -963,7 +1215,8 @@ Required if using a TLS connection.

    - [Discovery](#kubeadm-k8s-io-v1beta4-Discovery) -

    FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load cluster information

    +

    FileDiscovery is used to specify a file or URL to a kubeconfig file from which to load +cluster information.

    certificateKey sets the key with which certificates and keys are encrypted prior to being -uploaded in a Secret in the cluster during the uploadcerts init phase.

    +uploaded in a Secret in the cluster during the uploadcerts init phase. +The certificate key is a hex encoded string that is an AES key of size 32 bytes.

    skipPhases
    @@ -611,7 +715,7 @@ certificate.

    - [Discovery](#kubeadm-k8s-io-v1beta3-Discovery) -

    BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery

    +

    BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery.

    @@ -934,7 +1038,7 @@ file from which to load cluster information.

    @@ -1027,7 +1132,7 @@ The corresponding encryption key is in the InitConfiguration.

    - [Etcd](#kubeadm-k8s-io-v1beta3-Etcd) -

    LocalEtcd describes that kubeadm should run an etcd cluster locally

    +

    LocalEtcd describes that kubeadm should run an etcd cluster locally.

    pathType
    -core/v1.HostPathType +core/v1.HostPathType

    pathType is the type of the hostPath.

    @@ -1013,7 +1117,8 @@ deployed on this node.

    certificateKey is the key that is used for decryption of certificates after they are downloaded from the secret upon joining a new control plane node. -The corresponding encryption key is in the InitConfiguration.

    +The corresponding encryption key is in the InitConfiguration. +The certificate key is a hex encoded string that is an AES key of size 32 bytes.

    @@ -1086,7 +1191,7 @@ signing certificate.

    - [ClusterConfiguration](#kubeadm-k8s-io-v1beta3-ClusterConfiguration) -

    Networking contains elements describing cluster's networking configuration

    +

    Networking contains elements describing cluster's networking configuration.

    @@ -1129,7 +1234,7 @@ signing certificate.

    NodeRegistrationOptions holds fields that relate to registering a new control-plane or -node to the cluster, either via "kubeadm init" or "kubeadm join"

    +node to the cluster, either via kubeadm init or kubeadm join.

    @@ -1157,7 +1262,7 @@ This information will be annotated to the Node API object, for later re-use.

    taints [Required]
    -[]core/v1.Taint +[]core/v1.Taint

    taints specifies the taints the Node API object should be registered with. @@ -1189,7 +1294,7 @@ Value all ignores errors from all checks.

    imagePullPolicy
    -core/v1.PullPolicy +core/v1.PullPolicy

    imagePullPolicy specifies the policy for image pulling during kubeadm "init" and @@ -1237,107 +1342,4 @@ first alpha-numerically.

    - - - - -## `BootstrapToken` {#BootstrapToken} - - -**Appears in:** - -- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) - - -

    BootstrapToken describes one bootstrap token, stored as a Secret in the cluster

    - - - - - - - - - - - - - - - - - - - - - - - - - - -
    FieldDescription
    token [Required]
    -BootstrapTokenString -
    -

    token is used for establishing bidirectional trust between nodes and control-planes. -Used for joining nodes in the cluster.

    -
    description
    -string -
    -

    description sets a human-friendly message why this token exists and what it's used -for, so other administrators can know its purpose.

    -
    ttl
    -meta/v1.Duration -
    -

    ttl defines the time to live for this token. Defaults to 24h. -expires and ttl are mutually exclusive.

    -
    expires
    -meta/v1.Time -
    -

    expires specifies the timestamp when this token expires. Defaults to being set -dynamically at runtime based on the ttl. expires and ttl are mutually exclusive.

    -
    usages
    -[]string -
    -

    usages describes the ways in which this token can be used. Can by default be used -for establishing bidirectional trust, but that can be changed here.

    -
    groups
    -[]string -
    -

    groups specifies the extra groups that this token will authenticate as when/if -used for authentication

    -
    - -## `BootstrapTokenString` {#BootstrapTokenString} - - -**Appears in:** - -- [BootstrapToken](#BootstrapToken) - - -

    BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used -for both validation of the practically of the API server from a joining node's point -of view and as an authentication method for the node in the bootstrap phase of -"kubeadm join". This token is and should be short-lived.

    - - - - - - - - - - - - - - -
    FieldDescription
    - [Required]
    -string -
    - No description provided.
    - [Required]
    -string -
    - No description provided.
    \ No newline at end of file + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubeadm-config.v1beta4.md b/content/en/docs/reference/config-api/kubeadm-config.v1beta4.md index f7349db30c606..c194ccacd1280 100644 --- a/content/en/docs/reference/config-api/kubeadm-config.v1beta4.md +++ b/content/en/docs/reference/config-api/kubeadm-config.v1beta4.md @@ -16,6 +16,17 @@ Use APIServer.ExtraEnvs, ControllerManager.ExtraEnvs, Etcd.Local.ExtraEnvs.
  • The ResetConfiguration API type is now supported in v1beta4. Users are able to reset a node by passing a --config file to kubeadm reset.
  • +
• dry-run mode is now configurable in InitConfiguration and JoinConfiguration config files.
  • +
  • Replace the existing string/string extra argument maps with structured extra arguments +that support duplicates. The change applies to ClusterConfiguration - APIServer.ExtraArgs, +ControllerManager.ExtraArgs, Scheduler.ExtraArgs. Also to NodeRegistrationOptions.KubeletExtraArgs.
  • +
  • Add ClusterConfiguration.EncryptionAlgorithm that can be used to set the asymmetric +encryption algorithm used for this cluster's keys and certificates. Can be "RSA" +(default algorithm, key size is 2048) or "ECDSA" (uses the P-256 elliptic curve).
  • +
• Add ClusterConfiguration.DNS.Disabled and ClusterConfiguration.Proxy.Disabled +that can be used to disable the CoreDNS and kube-proxy addons during cluster +initialization. Skipping the related addon phases during cluster creation will +set the same fields to false.
  • Migration from old kubeadm config versions

      @@ -291,12 +302,117 @@ node only (e.g. the node ip).

      - [ResetConfiguration](#kubeadm-k8s-io-v1beta4-ResetConfiguration) + + +## `BootstrapToken` {#BootstrapToken} + + +**Appears in:** + +- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) + +- [InitConfiguration](#kubeadm-k8s-io-v1beta4-InitConfiguration) + + +

BootstrapToken describes one bootstrap token, stored as a Secret in the cluster.

      + + + + + + + + + + + + + + + + + + + + + + + + + + +
      FieldDescription
      token [Required]
      +BootstrapTokenString +
      +

      token is used for establishing bidirectional trust between nodes and control-planes. +Used for joining nodes in the cluster.

      +
      description
      +string +
      +

      description sets a human-friendly message why this token exists and what it's used +for, so other administrators can know its purpose.

      +
      ttl
      +meta/v1.Duration +
      +

      ttl defines the time to live for this token. Defaults to 24h. +expires and ttl are mutually exclusive.

      +
      expires
      +meta/v1.Time +
      +

      expires specifies the timestamp when this token expires. Defaults to being set +dynamically at runtime based on the ttl. expires and ttl are mutually exclusive.

      +
      usages
      +[]string +
      +

usages describes the ways in which this token can be used. By default, it can be used +for establishing bidirectional trust, but that can be changed here.

      +
      groups
      +[]string +
      +

groups specifies the extra groups that this token will authenticate as when/if +used for authentication.

      +
      + +## `BootstrapTokenString` {#BootstrapTokenString} + + +**Appears in:** + +- [BootstrapToken](#BootstrapToken) + + +

BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used +for both validation of the practical availability of the API server from a joining node's +point of view and as an authentication method for the node in the bootstrap phase of +"kubeadm join". This token is, and should be, short-lived.

      + + + + + + + + + + + + + + +
      FieldDescription
      - [Required]
      +string +
      + No description provided.
      - [Required]
      +string +
      + No description provided.
      + ## `ClusterConfiguration` {#kubeadm-k8s-io-v1beta4-ClusterConfiguration} -

      ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster

      +

      ClusterConfiguration contains cluster-wide configuration for a kubeadm cluster.

      @@ -311,97 +427,116 @@ node only (e.g. the node ip).

      Etcd + + + + + + @@ -412,7 +547,10 @@ will be used for all the other images.

      InitConfiguration contains a list of elements that is specific "kubeadm init"-only runtime -information.

+information. These fields are solely used the first time kubeadm init runs. +After that, the information in the fields IS NOT uploaded to the kubeadm-config ConfigMap +that is used by kubeadm upgrade for instance. These fields must be omitempty.

      -

      Etcd holds configuration for etcd.

      +

      etcd holds the configuration for etcd.

      networking
      Networking
      -

      Networking holds configuration for the networking topology of the cluster.

      +

      networking holds configuration for the networking topology of the cluster.

      kubernetesVersion
      string
      -

      KubernetesVersion is the target version of the control plane.

      +

      kubernetesVersion is the target version of the control plane.

      controlPlaneEndpoint
      string
      -

      ControlPlaneEndpoint sets a stable IP address or DNS name for the control plane; it -can be a valid IP address or a RFC-1123 DNS subdomain, both with optional TCP port. -In case the ControlPlaneEndpoint is not specified, the AdvertiseAddress + BindPort -are used; in case the ControlPlaneEndpoint is specified but without a TCP port, -the BindPort is used. -Possible usages are: -e.g. In a cluster with more than one control plane instances, this field should be +

controlPlaneEndpoint sets a stable IP address or DNS name for the control plane; +it can be a valid IP address or an RFC-1123 DNS subdomain, both with optional TCP port. +In case the controlPlaneEndpoint is not specified, the advertiseAddress + bindPort +are used; in case the controlPlaneEndpoint is specified but without a TCP port, +the bindPort is used. +Possible usages are:

      +
        +
      • In a cluster with more than one control plane instances, this field should be assigned the address of the external load balancer in front of the -control plane instances. -e.g. in environments with enforced node recycling, the ControlPlaneEndpoint -could be used for assigning a stable DNS to the control plane.

        +control plane instances.
      • +
      • In environments with enforced node recycling, the controlPlaneEndpoint +could be used for assigning a stable DNS to the control plane.
      • +
      apiServer
      APIServer
      -

      APIServer contains extra settings for the API server control plane component

      +

      apiServer contains extra settings for the API server.

      controllerManager
      ControlPlaneComponent
      -

      ControllerManager contains extra settings for the controller manager control plane component

      +

      controllerManager contains extra settings for the controller manager.

      scheduler
      ControlPlaneComponent
      -

      Scheduler contains extra settings for the scheduler control plane component

      +

      scheduler contains extra settings for the scheduler.

      dns
      DNS
      -

      DNS defines the options for the DNS add-on installed in the cluster.

      +

      dns defines the options for the DNS add-on installed in the cluster.

      +
      proxy [Required]
      +Proxy +
      +

      proxy defines the options for the proxy add-on installed in the cluster.

      certificatesDir
      string
      -

      CertificatesDir specifies where to store or look for all required certificates.

      +

      certificatesDir specifies where to store or look for all required certificates.

      imageRepository
      string
      -

      ImageRepository sets the container registry to pull images from. -If empty, registry.k8s.io will be used by default; in case of kubernetes version is a CI build (kubernetes version starts with ci/) -gcr.io/k8s-staging-ci-images will be used as a default for control plane components and for kube-proxy, while registry.k8s.io -will be used for all the other images.

      +

imageRepository sets the container registry to pull images from. +If empty, registry.k8s.io will be used by default. +In case the kubernetes version is a CI build (the version starts with ci/), +gcr.io/k8s-staging-ci-images will be used as a default for control plane components +and for kube-proxy, while registry.k8s.io will be used for all the other images.

      featureGates
      map[string]bool
      -

      FeatureGates enabled by the user.

      +

      featureGates contains the feature gates enabled by the user.

      clusterName
      string
      -

      The cluster name

      +

      The cluster name.

      +
      encryptionAlgorithm
      +EncryptionAlgorithmType +
      +

      encryptionAlgorithm holds the type of asymmetric encryption algorithm used for keys and +certificates. Can be "RSA" (default algorithm, key size is 2048) or "ECDSA" (uses the +P-256 elliptic curve).
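Taken together, a hedged ClusterConfiguration sketch using the fields described above could read as follows; the version and endpoint values are placeholders and the field set is deliberately incomplete:

```yaml
# Hypothetical sketch; values are placeholders, not recommendations.
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
kubernetesVersion: "v1.29.0"
controlPlaneEndpoint: "cp.example.com:6443"  # stable endpoint for an HA control plane
certificatesDir: "/etc/kubernetes/pki"
clusterName: "example"
encryptionAlgorithm: "ECDSA"                 # or "RSA" (default, 2048-bit keys)
```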

      @@ -424,55 +562,66 @@ information.

      + + + @@ -493,19 +642,28 @@ The flag "--skip-phases" takes precedence over this field.

      + + + @@ -513,32 +671,32 @@ Defaults to "/etc/kubernetes/pki/ca.crt".

      Discovery @@ -548,7 +706,8 @@ The flag "--skip-phases" takes precedence over this field.

      -

      ResetConfiguration contains a list of fields that are specifically "kubeadm reset"-only runtime information.

      +

      ResetConfiguration contains a list of fields that are specifically kubeadm reset-only +runtime information.

      bootstrapTokens
      -[]invalid type +[]BootstrapToken
      -

      BootstrapTokens is respected at kubeadm init time and describes a set of Bootstrap Tokens to create. +

bootstrapTokens is respected at kubeadm init time and describes a set of Bootstrap Tokens to create. This information IS NOT uploaded to the kubeadm cluster configmap, partly because of its sensitive nature.

      dryRun [Required]
      +bool +
      +

dryRun tells if the dry run mode is enabled; don't apply any change in dry run mode, +just output what would be done.

      +
      nodeRegistration
      NodeRegistrationOptions
      -

      NodeRegistration holds fields that relate to registering the new control-plane node to the cluster

      +

      nodeRegistration holds fields that relate to registering the new control-plane node +to the cluster.

      localAPIEndpoint
      APIEndpoint
      -

      LocalAPIEndpoint represents the endpoint of the API server instance that's deployed on this control plane node -In HA setups, this differs from ClusterConfiguration.ControlPlaneEndpoint in the sense that ControlPlaneEndpoint -is the global endpoint for the cluster, which then loadbalances the requests to each individual API server. This -configuration object lets you customize what IP/DNS name and port the local API server advertises it's accessible -on. By default, kubeadm tries to auto-detect the IP of the default interface and use that, but in case that process -fails you may set the desired value here.

      +

localAPIEndpoint represents the endpoint of the API server instance that's deployed on this +control plane node. In HA setups, this differs from ClusterConfiguration.controlPlaneEndpoint +in the sense that controlPlaneEndpoint is the global endpoint for the cluster, which then +loadbalances the requests to each individual API server. +This configuration object lets you customize what IP/DNS name and port the local API server +advertises it's accessible on. By default, kubeadm tries to auto-detect the IP of the default +interface and use that, but in case that process fails you may set the desired value here.

      certificateKey
      string
      -

      CertificateKey sets the key with which certificates and keys are encrypted prior to being uploaded in -a secret in the cluster during the uploadcerts init phase.

      +

      certificateKey sets the key with which certificates and keys are encrypted prior to being +uploaded in a Secret in the cluster during the uploadcerts init phase. +The certificate key is a hex encoded string that is an AES key of size 32 bytes.

      skipPhases
      []string
      -

      SkipPhases is a list of phases to skip during command execution. -The list of phases can be obtained with the "kubeadm init --help" command. -The flag "--skip-phases" takes precedence over this field.

      +

      skipPhases is a list of phases to skip during command execution. +The list of phases can be obtained with the kubeadm init --help command. +The flag --skip-phases takes precedence over this field.

      patches
      Patches
      -

      Patches contains options related to applying patches to components deployed by kubeadm during -"kubeadm init".

      +

      patches contains options related to applying patches to components deployed by kubeadm during +kubeadm init.
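A hedged InitConfiguration sketch tying the v1beta4 fields above together; the certificate key is a placeholder standing in for a hex-encoded 32-byte AES key, and the phase name is assumed purely for illustration:

```yaml
# Hypothetical example; do not reuse the placeholder certificateKey.
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
dryRun: true                 # only output what would be done
certificateKey: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"  # placeholder
skipPhases:
  - "addon/kube-proxy"       # assumed phase name, for illustration
```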

      kind
      string
      JoinConfiguration
      dryRun
      +bool +
      +

      dryRun tells if the dry run mode is enabled, don't apply any change if it is set, +just output what would be done.

      +
      nodeRegistration
      NodeRegistrationOptions
      -

      NodeRegistration holds fields that relate to registering the new control-plane node to the cluster

      +

      nodeRegistration holds fields that relate to registering the new control-plane +node to the cluster

      caCertPath
      string
      -

      CACertPath is the path to the SSL certificate authority used to -secure comunications between node and control-plane. +

      caCertPath is the path to the SSL certificate authority used to secure comunications +between node and control-plane. Defaults to "/etc/kubernetes/pki/ca.crt".

      -

      Discovery specifies the options for the kubelet to use during the TLS Bootstrap process

      +

      discovery specifies the options for the kubelet to use during the TLS bootstrap process.

      controlPlane
      JoinControlPlane
      -

      ControlPlane defines the additional control plane instance to be deployed on the joining node. -If nil, no additional control plane instance will be deployed.

      +

      controlPlane defines the additional control plane instance to be deployed on the +joining node. If nil, no additional control plane instance will be deployed.

      skipPhases
      []string
      -

      SkipPhases is a list of phases to skip during command execution. -The list of phases can be obtained with the "kubeadm join --help" command. -The flag "--skip-phases" takes precedence over this field.

      +

      skipPhases is a list of phases to skip during command execution. +The list of phases can be obtained with the kubeadm join --help command. +The flag --skip-phases takes precedence over this field.

      patches
      Patches
      -

      Patches contains options related to applying patches to components deployed by kubeadm during -"kubeadm join".

      +

      patches contains options related to applying patches to components deployed +by kubeadm during kubeadm join.
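For JoinConfiguration, a hedged sketch of the fields above; the token, endpoint, and hash are placeholders, and the discovery subfields follow the Discovery/BootstrapTokenDiscovery types referenced in this document:

```yaml
# Hypothetical example; all concrete values are placeholders.
apiVersion: kubeadm.k8s.io/v1beta4
kind: JoinConfiguration
dryRun: false
caCertPath: "/etc/kubernetes/pki/ca.crt"      # documented default
discovery:
  bootstrapToken:
    token: "abcdef.0123456789abcdef"          # placeholder token
    apiServerEndpoint: "cp.example.com:6443"  # placeholder endpoint
    caCertHashes:
      - "sha256:0123...cdef"                  # placeholder hash
```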

      @@ -563,52 +722,58 @@ The flag "--skip-phases" takes precedence over this field.

      bool @@ -636,14 +801,14 @@ The list of phases can be obtained with the "kubeadm reset phase --help&quo string @@ -676,14 +841,52 @@ Defaults to 6443.

      []string + + +
      -

      CleanupTmpDir specifies whether the "/etc/kubernetes/tmp" directory should be cleaned during the reset process.

      +

      cleanupTmpDir specifies whether the "/etc/kubernetes/tmp" directory should be cleaned +during the reset process.

      certificatesDir
      string
      -

      CertificatesDir specifies the directory where the certificates are stored. If specified, it will be cleaned during the reset process.

      +

      certificatesDir specifies the directory where the certificates are stored. +If specified, it will be cleaned during the reset process.

      criSocket
      string
      -

      CRISocket is used to retrieve container runtime info and used for the removal of the containers. -If CRISocket is not specified by flag or config file, kubeadm will try to detect one valid CRISocket instead.

      +

criSocket is used to retrieve container runtime information and for the +removal of the containers. +If criSocket is not specified by flag or config file, kubeadm will try to detect +one valid CRI socket instead.

      dryRun
      bool
      -

      DryRun tells if the dry run mode is enabled, don't apply any change if it is and just output what would be done.

      +

dryRun tells whether dry run mode is enabled; if it is set, no changes are applied +and kubeadm just outputs what would be done.

      force
      bool
      -

      Force flag instructs kubeadm to reset the node without prompting for confirmation.

      +

      The force flag instructs kubeadm to reset the node without prompting for confirmation.

      ignorePreflightErrors
      []string
      -

      IgnorePreflightErrors provides a slice of pre-flight errors to be ignored during the reset process, e.g. 'IsPrivilegedUser,Swap'. -Value 'all' ignores errors from all checks.

      +

      ignorePreflightErrors provides a list of pre-flight errors to be ignored during +the reset process, e.g. IsPrivilegedUser,Swap. +Value all ignores errors from all checks.

      skipPhases
      []string
      -

      SkipPhases is a list of phases to skip during command execution. -The list of phases can be obtained with the "kubeadm reset phase --help" command.

      +

      skipPhases is a list of phases to skip during command execution. +The list of phases can be obtained with the kubeadm reset phase --help command.
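Putting the reset fields together, a hedged sketch of a ResetConfiguration could read as follows; the phase name and preflight error are illustrative values.

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: ResetConfiguration
force: true                          # skip the confirmation prompt
cleanupTmpDir: true                  # also remove /etc/kubernetes/tmp
certificatesDir: /etc/kubernetes/pki # cleaned because it is explicitly set
ignorePreflightErrors:
  - IsPrivilegedUser                 # illustrative; "all" ignores every check
skipPhases:
  - remove-etcd-member               # verify names with `kubeadm reset phase --help`
```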

      -

      AdvertiseAddress sets the IP address for the API server to advertise.

      +

advertiseAddress sets the IP address for the API server to advertise.

      bindPort
      int32
      -

      BindPort sets the secure port for the API Server to bind to. +

      bindPort sets the secure port for the API Server to bind to. Defaults to 6443.

      -

      CertSANs sets extra Subject Alternative Names for the API Server signing cert.

      +

      certSANs sets extra Subject Alternative Names (SANs) for the API Server signing +certificate.

      timeoutForControlPlane
      meta/v1.Duration
      -

      TimeoutForControlPlane controls the timeout that we use for API server to appear

      +

timeoutForControlPlane controls the timeout that we wait for the API server to appear.

      +
      + +## `Arg` {#kubeadm-k8s-io-v1beta4-Arg} + + +**Appears in:** + +- [ControlPlaneComponent](#kubeadm-k8s-io-v1beta4-ControlPlaneComponent) + +- [LocalEtcd](#kubeadm-k8s-io-v1beta4-LocalEtcd) + +- [NodeRegistrationOptions](#kubeadm-k8s-io-v1beta4-NodeRegistrationOptions) + + +

      Arg represents an argument with a name and a value.

      + + + + + + + + + + + + @@ -697,7 +900,7 @@ Defaults to 6443.

      - [Discovery](#kubeadm-k8s-io-v1beta4-Discovery) -

      BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery

      +

      BootstrapTokenDiscovery is used to set the options for bootstrap token based discovery.

      FieldDescription
      name [Required]
      +string +
      +

      The name of the argument.

      +
      value [Required]
      +string +
      +

      The value of the argument.
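To make the migration from the v1beta3 map form concrete, the fragment below (part of an apiServer stanza; the flag names are illustrative) contrasts the two shapes. Because Arg is a list, ordering is preserved and the same flag may appear more than once.

```yaml
# v1beta3 (map form): one value per flag name.
# extraArgs:
#   authorization-mode: "Node,RBAC"
#
# v1beta4 (list of Arg): ordered name/value pairs; duplicates allowed.
extraArgs:
  - name: authorization-mode
    value: "Node,RBAC"
  - name: enable-admission-plugins   # illustrative flag
    value: NodeRestriction
```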

      @@ -709,37 +912,37 @@ Defaults to 6443.

      string @@ -764,29 +967,29 @@ the security of kubeadm since other nodes can impersonate the control-plane.

      @@ -812,7 +1015,14 @@ Environment variables passed using ExtraEnvs will override any existing environm ImageMeta + + + @@ -838,33 +1048,75 @@ Environment variables passed using ExtraEnvs will override any existing environm BootstrapTokenDiscovery + + +
      -

      Token is a token used to validate cluster information -fetched from the control-plane.

      +

      token is a token used to validate cluster information fetched from the +control-plane.

      apiServerEndpoint
      string
      -

      APIServerEndpoint is an IP or domain name to the API server from which info will be fetched.

      +

      apiServerEndpoint is an IP or domain name to the API server from which +information will be fetched.

      caCertHashes
      []string
      -

      CACertHashes specifies a set of public key pins to verify -when token-based discovery is used. The root CA found during discovery -must match one of these values. Specifying an empty set disables root CA -pinning, which can be unsafe. Each hash is specified as ":", -where the only currently supported type is "sha256". This is a hex-encoded -SHA-256 hash of the Subject Public Key Info (SPKI) object in DER-encoded -ASN.1. These hashes can be calculated using, for example, OpenSSL.

      +

caCertHashes specifies a set of public key pins to verify when token-based discovery +is used. The root CA found during discovery must match one of these values. +Specifying an empty set disables root CA pinning, which can be unsafe. +Each hash is specified as <type>:<value>, where the only currently supported type is +"sha256". This is a hex-encoded SHA-256 hash of the Subject Public Key Info (SPKI) +object in DER-encoded ASN.1. These hashes can be calculated using, for example, OpenSSL.

      unsafeSkipCAVerification
      bool
      -

      UnsafeSkipCAVerification allows token-based discovery -without CA verification via CACertHashes. This can weaken -the security of kubeadm since other nodes can impersonate the control-plane.

      +

      unsafeSkipCAVerification allows token-based discovery without CA verification +via caCertHashes. This can weaken the security of kubeadm since other nodes can +impersonate the control-plane.

      extraArgs
      -map[string]string +[]Arg
      -

      ExtraArgs is an extra set of flags to pass to the control plane component. -A key in this map is the flag name as it appears on the -command line except without leading dash(es). -TODO: This is temporary and ideally we would like to switch all components to -use ComponentConfig + ConfigMaps.

      +

      extraArgs is an extra set of flags to pass to the control plane component. +An argument name in this list is the flag name as it appears on the +command line except without leading dash(es). Extra arguments will override existing +default arguments. Duplicate extra arguments are allowed.

      extraVolumes
      []HostPathMount
      -

      ExtraVolumes is an extra set of host volumes, mounted to the control plane component.

      +

      extraVolumes is an extra set of host volumes, mounted to the control plane component.

      extraEnvs
      -[]core/v1.EnvVar +[]EnvVar
      -

      ExtraEnvs is an extra set of environment variables to pass to the control plane component. -Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default.

      +

      extraEnvs is an extra set of environment variables to pass to the control plane component. +Environment variables passed using extraEnvs will override any existing environment variables, +or *_proxy environment variables that kubeadm adds by default.
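A hedged sketch of how these three fields combine on the API server component, assuming illustrative paths and proxy values:

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
apiServer:
  extraArgs:                           # overrides default flags; duplicates allowed
    - name: audit-log-path
      value: /var/log/kubernetes/audit.log
  extraVolumes:
    - name: audit-log
      hostPath: /var/log/kubernetes    # illustrative host directory
      mountPath: /var/log/kubernetes
      pathType: DirectoryOrCreate
  extraEnvs:                           # overrides existing and *_proxy variables
    - name: HTTPS_PROXY
      value: https://proxy.example.com:3128
```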

      (Members of ImageMeta are embedded into this type.) -

      ImageMeta allows to customize the image used for the DNS component

      +

imageMeta allows customizing the image used for the DNS addon.

      +
      disabled [Required]
      +bool +
      +

      disabled specifies whether to disable this addon in the cluster.

      -

      BootstrapToken is used to set the options for bootstrap token based discovery -BootstrapToken and File are mutually exclusive

      +

      bootstrapToken is used to set the options for bootstrap token based discovery. +bootstrapToken and file are mutually exclusive.

      file
      FileDiscovery
      -

      File is used to specify a file or URL to a kubeconfig file from which to load cluster information -BootstrapToken and File are mutually exclusive

      +

      file is used to specify a file or URL to a kubeconfig file from which to load +cluster information. bootstrapToken and file are mutually exclusive.

      tlsBootstrapToken
      string
      -

      TLSBootstrapToken is a token used for TLS bootstrapping. -If .BootstrapToken is set, this field is defaulted to .BootstrapToken.Token, but can be overridden. -If .File is set, this field must be set in case the KubeConfigFile does not contain any other authentication information

      +

      tlsBootstrapToken is a token used for TLS bootstrapping. +If bootstrapToken is set, this field is defaulted to bootstrapToken.token, but +can be overridden. If file is set, this field must be set in case the KubeConfigFile +does not contain any other authentication information.

      timeout
      meta/v1.Duration
      -

      Timeout modifies the discovery timeout

      +

      timeout modifies the discovery timeout.
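For example, a JoinConfiguration using bootstrap-token discovery might look like the sketch below; the token, endpoint, and hash are placeholders.

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: JoinConfiguration
discovery:
  # bootstrapToken and file are mutually exclusive; set exactly one.
  bootstrapToken:
    token: abcdef.0123456789abcdef     # placeholder token
    apiServerEndpoint: 192.0.2.10:6443
    caCertHashes:                      # pin the root CA found during discovery
      - sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
  timeout: 5m0s
```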

      +
      + +## `EncryptionAlgorithmType` {#kubeadm-k8s-io-v1beta4-EncryptionAlgorithmType} + +(Alias of `string`) + +**Appears in:** + +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta4-ClusterConfiguration) + + +

      EncryptionAlgorithmType can define an asymmetric encryption algorithm type.

      + + + + +## `EnvVar` {#kubeadm-k8s-io-v1beta4-EnvVar} + + +**Appears in:** + +- [ControlPlaneComponent](#kubeadm-k8s-io-v1beta4-ControlPlaneComponent) + +- [LocalEtcd](#kubeadm-k8s-io-v1beta4-LocalEtcd) + + +

      EnvVar represents an environment variable present in a Container.

      + + + + + + + + +
      FieldDescription
      EnvVar [Required]
      +core/v1.EnvVar
      (Members of EnvVar are embedded into this type.) + No description provided.
      @@ -889,16 +1141,16 @@ If .File is set, this field must be set in case the KubeConfigF LocalEtcd
    -

    Local provides configuration knobs for configuring the local etcd instance -Local and External are mutually exclusive

    +

    local provides configuration knobs for configuring the local etcd instance. +local and external are mutually exclusive.

    external
    ExternalEtcd
    -

    External describes how to connect to an external etcd cluster -Local and External are mutually exclusive

    +

    external describes how to connect to an external etcd cluster. +local and external are mutually exclusive.

    -

    Endpoints of etcd members. Required for ExternalEtcd.

    +

    endpoints contains the list of etcd members.

    caFile [Required]
    string
    -

    CAFile is an SSL Certificate Authority file used to secure etcd communication. +

    caFile is an SSL Certificate Authority (CA) file used to secure etcd communication. Required if using a TLS connection.

    -

    CertFile is an SSL certification file used to secure etcd communication. +

    certFile is an SSL certification file used to secure etcd communication. Required if using a TLS connection.

    -

    KeyFile is an SSL key file used to secure etcd communication. +

    keyFile is an SSL key file used to secure etcd communication. Required if using a TLS connection.
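A sketch of an external etcd stanza under these assumptions (hostnames and paths are illustrative):

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
etcd:
  external:                            # local and external are mutually exclusive
    endpoints:
      - https://etcd-0.example.com:2379
      - https://etcd-1.example.com:2379
    caFile: /etc/etcd/pki/ca.crt       # all three files required for TLS
    certFile: /etc/etcd/pki/client.crt
    keyFile: /etc/etcd/pki/client.key
```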

    @@ -975,7 +1228,8 @@ Required if using a TLS connection.

    string @@ -989,8 +1243,7 @@ Required if using a TLS connection.

    - [ControlPlaneComponent](#kubeadm-k8s-io-v1beta4-ControlPlaneComponent) -

    HostPathMount contains elements describing volumes that are mounted from the -host.

    +

    HostPathMount contains elements describing volumes that are mounted from the host.

    -

    KubeConfigPath is used to specify the actual file path or URL to the kubeconfig file from which to load cluster information

    +

    kubeConfigPath is used to specify the actual file path or URL to the kubeconfig +file from which to load cluster information.

    @@ -1002,36 +1255,35 @@ host.

    string @@ -1060,16 +1312,17 @@ originated from the Kubernetes/Kubernetes release process

    string @@ -1095,15 +1348,18 @@ In case this value is set, kubeadm does not change automatically the version of APIEndpoint @@ -1117,7 +1373,7 @@ upon joining a new control plane node. The corresponding encryption key is in th - [Etcd](#kubeadm-k8s-io-v1beta4-Etcd) -

    LocalEtcd describes that kubeadm should run an etcd cluster locally

    +

    LocalEtcd describes that kubeadm should run an etcd cluster locally.

    -

    Name of the volume inside the pod template.

    +

    name is the name of the volume inside the Pod template.

    hostPath [Required]
    string
    -

    HostPath is the path in the host that will be mounted inside -the pod.

    +

hostPath is the path on the host that will be mounted inside the Pod.

    mountPath [Required]
    string
    -

    MountPath is the path inside the pod where hostPath will be mounted.

    +

    mountPath is the path inside the Pod where hostPath will be mounted.

    readOnly
    bool
    -

    ReadOnly controls write access to the volume

    +

    readOnly controls write access to the volume.

    pathType
    -core/v1.HostPathType +core/v1.HostPathType
    -

    PathType is the type of the HostPath.

    +

    pathType is the type of the hostPath.

    -

    ImageRepository sets the container registry to pull images from. -if not set, the ImageRepository defined in ClusterConfiguration will be used instead.

    +

imageRepository sets the container registry to pull images from. +If not set, the imageRepository defined in ClusterConfiguration will be used instead.

    imageTag
    string
    -

    ImageTag allows to specify a tag for the image. -In case this value is set, kubeadm does not change automatically the version of the above components during upgrades.

    +

imageTag allows specifying a tag for the image. +If this value is set, kubeadm does not automatically change the version of +the above components during upgrades.

    -

    LocalAPIEndpoint represents the endpoint of the API server instance to be deployed on this node.

    +

    localAPIEndpoint represents the endpoint of the API server instance to be +deployed on this node.

    certificateKey
    string
    -

    CertificateKey is the key that is used for decryption of certificates after they are downloaded from the secret -upon joining a new control plane node. The corresponding encryption key is in the InitConfiguration.

    +

    certificateKey is the key that is used for decryption of certificates after +they are downloaded from the Secret upon joining a new control plane node. +The corresponding encryption key is in the InitConfiguration. +The certificate key is a hex encoded string that is an AES key of size 32 bytes.

    @@ -1136,40 +1392,45 @@ upon joining a new control plane node. The corresponding encryption key is in th string - @@ -1183,7 +1444,7 @@ Environment variables passed using ExtraEnvs will override any existing environm - [ClusterConfiguration](#kubeadm-k8s-io-v1beta4-ClusterConfiguration) -

    Networking contains elements describing cluster's networking configuration

    +

Networking contains elements describing the cluster's networking configuration.

    -

    DataDir is the directory etcd will place its data. +

    dataDir is the directory etcd will place its data. Defaults to "/var/lib/etcd".

    extraArgs
    -map[string]string +
    extraArgs [Required]
    +[]Arg
    -

    ExtraArgs are extra arguments provided to the etcd binary -when run inside a static pod. -A key in this map is the flag name as it appears on the -command line except without leading dash(es).

    +

    extraArgs are extra arguments provided to the etcd binary when run +inside a static Pod. An argument name in this list is the flag name as +it appears on the command line except without leading dash(es). +Extra arguments will override existing default arguments. +Duplicate extra arguments are allowed.

    extraEnvs
    -[]core/v1.EnvVar +[]EnvVar
    -

    ExtraEnvs is an extra set of environment variables to pass to the control plane component. -Environment variables passed using ExtraEnvs will override any existing environment variables, or *_proxy environment variables that kubeadm adds by default.

    +

    extraEnvs is an extra set of environment variables to pass to the +control plane component. Environment variables passed using extraEnvs +will override any existing environment variables, or *_proxy environment +variables that kubeadm adds by default.

    serverCertSANs
    []string
    -

    ServerCertSANs sets extra Subject Alternative Names for the etcd server signing cert.

    +

    serverCertSANs sets extra Subject Alternative Names (SANs) for the etcd +server signing certificate.

    peerCertSANs
    []string
    -

    PeerCertSANs sets extra Subject Alternative Names for the etcd peer signing cert.

    +

    peerCertSANs sets extra Subject Alternative Names (SANs) for the etcd peer +signing certificate.
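The local variant, with the v1beta4 []Arg form of extraArgs, might be sketched as follows; the etcd flag and SANs are illustrative.

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
etcd:
  local:
    dataDir: /var/lib/etcd             # the default
    extraArgs:
      - name: quota-backend-bytes      # illustrative etcd flag
        value: "8589934592"
    serverCertSANs:
      - etcd.example.com
    peerCertSANs:
      - etcd-peer.example.com
```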

    @@ -1195,21 +1456,21 @@ Environment variables passed using ExtraEnvs will override any existing environm string @@ -1225,7 +1486,8 @@ Environment variables passed using ExtraEnvs will override any existing environm - [JoinConfiguration](#kubeadm-k8s-io-v1beta4-JoinConfiguration) -

    NodeRegistrationOptions holds fields that relate to registering a new control-plane or node to the cluster, either via "kubeadm init" or "kubeadm join"

    +

    NodeRegistrationOptions holds fields that relate to registering a new control-plane or +node to the cluster, either via kubeadm init or kubeadm join.

    -

    ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12".

    +

    serviceSubnet is the subnet used by Kubernetes Services. Defaults to "10.96.0.0/12".

    podSubnet
    string
    -

    PodSubnet is the subnet used by pods.

    +

    podSubnet is the subnet used by Pods.

    dnsDomain
    string
    -

    DNSDomain is the dns domain used by k8s services. Defaults to "cluster.local".

    +

dnsDomain is the DNS domain used by Kubernetes Services. Defaults to "cluster.local".
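Together, the three networking fields form a stanza like this sketch; the Pod subnet is illustrative and must agree with the CNI plugin's configuration.

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
networking:
  serviceSubnet: 10.96.0.0/12   # default
  podSubnet: 192.168.0.0/16     # illustrative; must match the CNI configuration
  dnsDomain: cluster.local      # default
```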

    @@ -1237,8 +1499,10 @@ Environment variables passed using ExtraEnvs will override any existing environm string @@ -1246,44 +1510,52 @@ Defaults to the hostname of the node if not provided.

    string @@ -1311,10 +1583,12 @@ If this field is unset kubeadm will default it to "IfNotPresent", or p string
    -

    Name is the .Metadata.Name field of the Node API object that will be created in this kubeadm init or kubeadm join operation. -This field is also used in the CommonName field of the kubelet's client certificate to the API server. +

    name is the .Metadata.Name field of the Node API object that will be created in this +kubeadm init or kubeadm join operation. +This field is also used in the CommonName field of the kubelet's client certificate to +the API server. Defaults to the hostname of the node if not provided.

    -

    CRISocket is used to retrieve container runtime info. This information will be annotated to the Node API object, for later re-use

    +

    criSocket is used to retrieve container runtime info. +This information will be annotated to the Node API object, for later re-use.

    taints [Required]
    -[]core/v1.Taint +[]core/v1.Taint
    -

    Taints specifies the taints the Node API object should be registered with. If this field is unset, i.e. nil, -it will be defaulted with a control-plane taint for control-plane nodes. If you don't want to taint your control-plane -node, set this field to an empty slice, i.e. taints: [] in the YAML file. This field is solely used for Node registration.

    +

    taints specifies the taints the Node API object should be registered with. +If this field is unset, i.e. nil, it will be defaulted with a control-plane taint for control-plane nodes. +If you don't want to taint your control-plane node, set this field to an empty list, +i.e. taints: [] in the YAML file. This field is solely used for Node registration.

    kubeletExtraArgs
    -map[string]string +[]Arg
    -

    KubeletExtraArgs passes through extra arguments to the kubelet. The arguments here are passed to the kubelet command line via the environment file -kubeadm writes at runtime for the kubelet to source. This overrides the generic base-level configuration in the kubelet-config ConfigMap -Flags have higher priority when parsing. These values are local and specific to the node kubeadm is executing on. -A key in this map is the flag name as it appears on the -command line except without leading dash(es).

    +

    kubeletExtraArgs passes through extra arguments to the kubelet. +The arguments here are passed to the kubelet command line via the environment file +kubeadm writes at runtime for the kubelet to source. +This overrides the generic base-level configuration in the kubelet-config ConfigMap. +Flags have higher priority when parsing. These values are local and specific to the node +kubeadm is executing on. An argument name in this list is the flag name as it appears on the +command line except without leading dash(es). Extra arguments will override existing +default arguments. Duplicate extra arguments are allowed.

    ignorePreflightErrors
    []string
    -

    IgnorePreflightErrors provides a slice of pre-flight errors to be ignored when the current node is registered, e.g. 'IsPrivilegedUser,Swap'. +

    ignorePreflightErrors provides a slice of pre-flight errors to be ignored when +the current node is registered, e.g. 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

    imagePullPolicy
    -core/v1.PullPolicy +core/v1.PullPolicy
    -

    ImagePullPolicy specifies the policy for image pulling during kubeadm "init" and "join" operations. +

    imagePullPolicy specifies the policy for image pulling during kubeadm init and +join operations. The value of this field must be one of "Always", "IfNotPresent" or "Never". -If this field is unset kubeadm will default it to "IfNotPresent", or pull the required images if not present on the host.

    +If this field is unset kubeadm will default it to "IfNotPresent", or pull the required +images if not present on the host.
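A hedged nodeRegistration sketch combining these fields (the socket path, label, and node name are illustrative); note `taints: []`, which keeps the control-plane node untainted.

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
nodeRegistration:
  name: node-1                          # defaults to the host name if omitted
  criSocket: unix:///var/run/containerd/containerd.sock
  taints: []                            # empty list: do not apply the control-plane taint
  kubeletExtraArgs:                     # []Arg in v1beta4
    - name: node-labels                 # illustrative kubelet flag
      value: topology.kubernetes.io/zone=zone-a
  imagePullPolicy: IfNotPresent
```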

    -

    Directory is a path to a directory that contains files named "target[suffix][+patchtype].extension". +

    directory is a path to a directory that contains files named +"target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "target" can be one of "kube-apiserver", "kube-controller-manager", "kube-scheduler", "etcd", "kubeletconfiguration". -"patchtype" can be one of "strategic" "merge" or "json" and they match the patch formats supported by kubectl. +"patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats +supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.
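For instance, pointing patches at a directory and naming the files per the convention above might look like this sketch (the path and file names are illustrative):

```yaml
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
patches:
  directory: /etc/kubernetes/patches   # illustrative path
# The directory could then contain, for example:
#   kube-apiserver0+merge.yaml  - merge patch, suffix "0" orders it first
#   kube-apiserver1+json.json   - JSON patch, applied after suffix "0"
#   etcd.yaml                   - strategic patch (the default patchtype)
```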

    @@ -1322,19 +1596,16 @@ first alpha-numerically.

    - - - -## `BootstrapToken` {#BootstrapToken} +## `Proxy` {#kubeadm-k8s-io-v1beta4-Proxy} **Appears in:** -- [InitConfiguration](#kubeadm-k8s-io-v1beta3-InitConfiguration) +- [ClusterConfiguration](#kubeadm-k8s-io-v1beta4-ClusterConfiguration) -

    BootstrapToken describes one bootstrap token, stored as a Secret in the cluster

    +

    Proxy defines the proxy addon that should be used in the cluster.

    @@ -1342,87 +1613,13 @@ first alpha-numerically.

    - - - - - - - - - - - - - - - -
    token [Required]
    -BootstrapTokenString -
    -

    token is used for establishing bidirectional trust between nodes and control-planes. -Used for joining nodes in the cluster.

    -
    description
    -string -
    -

    description sets a human-friendly message why this token exists and what it's used -for, so other administrators can know its purpose.

    -
    ttl
    -meta/v1.Duration -
    -

    ttl defines the time to live for this token. Defaults to 24h. -expires and ttl are mutually exclusive.

    -
    expires
    -meta/v1.Time -
    -

    expires specifies the timestamp when this token expires. Defaults to being set -dynamically at runtime based on the ttl. expires and ttl are mutually exclusive.

    -
    usages
    -[]string -
    -

    usages describes the ways in which this token can be used. Can by default be used -for establishing bidirectional trust, but that can be changed here.

    -
    groups
    -[]string +
    disabled [Required]
    +bool
    -

    groups specifies the extra groups that this token will authenticate as when/if -used for authentication

    +

    disabled specifies whether to disable this addon in the cluster.

    - -## `BootstrapTokenString` {#BootstrapTokenString} - - -**Appears in:** - -- [BootstrapToken](#BootstrapToken) - - -

    BootstrapTokenString is a token of the format abcdef.abcdef0123456789 that is used -for both validation of the practically of the API server from a joining node's point -of view and as an authentication method for the node in the bootstrap phase of -"kubeadm join". This token is and should be short-lived.

    - - - - - - - - - - - - - - -
    FieldDescription
    - [Required]
    -string -
    - No description provided.
    - [Required]
    -string -
    - No description provided.
    \ No newline at end of file + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubeconfig.v1.md b/content/en/docs/reference/config-api/kubeconfig.v1.md index 42cf3bd7cc9c6..72a5c63358ce8 100644 --- a/content/en/docs/reference/config-api/kubeconfig.v1.md +++ b/content/en/docs/reference/config-api/kubeconfig.v1.md @@ -11,6 +11,83 @@ auto_generated: true - [Config](#Config) + + +## `Config` {#Config} + + + +

Config holds the information needed to connect to remote Kubernetes clusters as a given user.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    FieldDescription
    apiVersion
    string
    /v1
    kind
    string
    Config
    kind
    +string +
    +

    Legacy field from pkg/api/types.go TypeMeta. +TODO(jlowdermilk): remove this after eliminating downstream dependencies.

    +
    apiVersion
    +string +
    +

    Legacy field from pkg/api/types.go TypeMeta. +TODO(jlowdermilk): remove this after eliminating downstream dependencies.

    +
    preferences [Required]
    +Preferences +
    +

    Preferences holds general information to be use for cli interactions

    +
    clusters [Required]
    +[]NamedCluster +
    +

    Clusters is a map of referencable names to cluster configs

    +
    users [Required]
    +[]NamedAuthInfo +
    +

    AuthInfos is a map of referencable names to user configs

    +
    contexts [Required]
    +[]NamedContext +
    +

    Contexts is a map of referencable names to context configs

    +
    current-context [Required]
    +string +
    +

    CurrentContext is the name of the context that you would like to use by default

    +
    extensions
    +[]NamedExtension +
    +

    Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields

    +
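A minimal kubeconfig exercising these fields might be sketched as follows; the server address, names, and file paths are placeholders.

```yaml
apiVersion: v1
kind: Config
preferences: {}
clusters:
  - name: example                 # a referencable cluster name
    cluster:
      server: https://192.0.2.10:6443
      certificate-authority: /etc/kubernetes/pki/ca.crt
users:
  - name: admin
    user:
      client-certificate: /etc/kubernetes/pki/admin.crt   # placeholder paths
      client-key: /etc/kubernetes/pki/admin.key
contexts:
  - name: admin@example           # binds a cluster to a user
    context:
      cluster: example
      user: admin
current-context: admin@example
```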
    ## `AuthInfo` {#AuthInfo} diff --git a/content/en/docs/reference/config-api/kubelet-config.v1.md b/content/en/docs/reference/config-api/kubelet-config.v1.md index cd7d676e072db..83dbc993f1993 100644 --- a/content/en/docs/reference/config-api/kubelet-config.v1.md +++ b/content/en/docs/reference/config-api/kubelet-config.v1.md @@ -11,7 +11,6 @@ auto_generated: true - [CredentialProviderConfig](#kubelet-config-k8s-io-v1-CredentialProviderConfig) - ## `CredentialProviderConfig` {#kubelet-config-k8s-io-v1-CredentialProviderConfig} @@ -81,9 +80,9 @@ to provide credentials. Images are expected to contain the registry domain and URL path.

    Each entry in matchImages is a pattern which can optionally contain a port and a path. Globs can be used in the domain, but not in the port or the path. Globs are supported -as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. -Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match -a single subdomain segment, so *.io does not match *.k8s.io.

+as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. +Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match +a single subdomain segment, so *.io does not match *.k8s.io.

    A match exists between an image and a matchImage when all of the below are true:

    • Both contain the same number of domain parts and each part matches.
    • @@ -93,9 +92,9 @@ a single subdomain segment, so *.io does not match *.k8s.io.

      Example values of matchImages:

      • 123456789.dkr.ecr.us-east-1.amazonaws.com
      • -
      • *.azurecr.io
      • +
      • *.azurecr.io
      • gcr.io
      • -
      • *.*.registry.io
      • +
• *.*.registry.io
      • registry.io:8080/path
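Putting the matching rules to work, a hedged CredentialProviderConfig sketch could look like this; the plugin name and patterns are illustrative, and the apiVersion must be a version the plugin supports.

```yaml
apiVersion: kubelet.config.k8s.io/v1
kind: CredentialProviderConfig
providers:
  - name: ecr-credential-provider           # must match the plugin binary name
    apiVersion: credentialprovider.kubelet.k8s.io/v1
    defaultCacheDuration: 12h
    matchImages:
      - "*.dkr.ecr.*.amazonaws.com"         # each glob matches one subdomain segment
      - "registry.io:8080/path"             # port and path must match exactly
```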
      @@ -169,4 +168,4 @@ credential plugin.

      - + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubelet-config.v1alpha1.md b/content/en/docs/reference/config-api/kubelet-config.v1alpha1.md index 6082c2f7ecfe1..99602ebceef6f 100644 --- a/content/en/docs/reference/config-api/kubelet-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/kubelet-config.v1alpha1.md @@ -11,7 +11,6 @@ auto_generated: true - [CredentialProviderConfig](#kubelet-config-k8s-io-v1alpha1-CredentialProviderConfig) - ## `CredentialProviderConfig` {#kubelet-config-k8s-io-v1alpha1-CredentialProviderConfig} diff --git a/content/en/docs/reference/config-api/kubelet-config.v1beta1.md b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md index 877e3c2240468..5751831a8393a 100644 --- a/content/en/docs/reference/config-api/kubelet-config.v1beta1.md +++ b/content/en/docs/reference/config-api/kubelet-config.v1beta1.md @@ -14,6 +14,279 @@ auto_generated: true - [SerializedNodeConfigSource](#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource) + + +## `FormatOptions` {#FormatOptions} + + +**Appears in:** + +- [LoggingConfiguration](#LoggingConfiguration) + + +

      FormatOptions contains options for the different logging formats.

      + + + + + + + + + + + +
      FieldDescription
      json [Required]
      +JSONOptions +
      +

      [Alpha] JSON contains options for logging format "json". +Only available when the LoggingAlphaOptions feature gate is enabled.

      +
      + +## `JSONOptions` {#JSONOptions} + + +**Appears in:** + +- [FormatOptions](#FormatOptions) + + +

      JSONOptions contains options for logging format "json".

      + + + + + + + + + + + + + + +
      FieldDescription
      splitStream [Required]
      +bool +
      +

      [Alpha] SplitStream redirects error messages to stderr while +info messages go to stdout, with buffering. The default is to write +both to stdout, without buffering. Only available when +the LoggingAlphaOptions feature gate is enabled.

      +
      infoBufferSize [Required]
      +k8s.io/apimachinery/pkg/api/resource.QuantityValue +
      +

      [Alpha] InfoBufferSize sets the size of the info stream when +using split streams. The default is zero, which disables buffering. +Only available when the LoggingAlphaOptions feature gate is enabled.

      +
      + +## `LogFormatFactory` {#LogFormatFactory} + + + +

      LogFormatFactory provides support for a certain additional, +non-default log format.

      + + + + +## `LoggingConfiguration` {#LoggingConfiguration} + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +

      LoggingConfiguration contains logging options.

      + + + + + + + + + + + + + + + + + + + + + + + +
      FieldDescription
      format [Required]
      +string +
      +

Format flag specifies the structure of log messages. +The default value of format is text.

      +
      flushFrequency [Required]
      +TimeOrMetaDuration +
      +

      Maximum time between log flushes. +If a string, parsed as a duration (i.e. "1s") +If an int, the maximum number of nanoseconds (i.e. 1s = 1000000000). +Ignored if the selected logging backend writes log messages without buffering.

      +
      verbosity [Required]
      +VerbosityLevel +
      +

      Verbosity is the threshold that determines which log messages are +logged. Default is zero which logs only the most important +messages. Higher values enable additional messages. Error messages +are always logged.

      +
      vmodule [Required]
      +VModuleConfiguration +
      +

      VModule overrides the verbosity threshold for individual files. +Only supported for "text" log format.

      +
      options [Required]
      +FormatOptions +
      +

      [Alpha] Options holds additional parameters that are specific +to the different logging formats. Only the options for the selected +format get used, but all of them get validated. +Only available when the LoggingAlphaOptions feature gate is enabled.

      +
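As a sketch, a KubeletConfiguration enabling JSON logging with the alpha split-stream options (which require the LoggingAlphaOptions feature gate) might read:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
logging:
  format: json            # "text" is the default
  flushFrequency: 5s      # parsed as a duration when given as a string
  verbosity: 3
  options:
    json:
      splitStream: true       # errors to stderr, info to stdout
      infoBufferSize: "64Ki"
featureGates:
  LoggingAlphaOptions: true   # required for the options above
```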
      + +## `LoggingOptions` {#LoggingOptions} + + + +

      LoggingOptions can be used with ValidateAndApplyWithOptions to override +certain global defaults.

      + + + + + + + + + + + + + + +
      FieldDescription
      ErrorStream [Required]
      +io.Writer +
      +

      ErrorStream can be used to override the os.Stderr default.

      +
      InfoStream [Required]
      +io.Writer +
      +

      InfoStream can be used to override the os.Stdout default.

      +
      + +## `TimeOrMetaDuration` {#TimeOrMetaDuration} + + +**Appears in:** + +- [LoggingConfiguration](#LoggingConfiguration) + + +

      TimeOrMetaDuration is present only for backwards compatibility for the +flushFrequency field, and new fields should use metav1.Duration.

      + + + + + + + + + + + + + + +
      FieldDescription
      Duration [Required]
      +meta/v1.Duration +
      +

      Duration holds the duration

      +
      - [Required]
      +bool +
      +

      SerializeAsString controls whether the value is serialized as a string or an integer

      +
      + +## `TracingConfiguration` {#TracingConfiguration} + + +**Appears in:** + +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) + + +

      TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

      + + + + + + + + + + + + + + +
      FieldDescription
      endpoint
      +string +
      +

Endpoint of the collector this component will report traces to. +The connection is insecure, and does not currently support TLS. +Recommended is unset, and endpoint is the OTLP gRPC default, localhost:4317.

      +
      samplingRatePerMillion
      +int32 +
      +

      SamplingRatePerMillion is the number of samples to collect per million spans. +Recommended is unset. If unset, sampler respects its parent span's sampling +rate, but otherwise never samples.

      +
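A hedged tracing stanza under these defaults; sampling a thousand spans per million is purely illustrative, and enabling kubelet tracing may additionally require the KubeletTracing feature gate.

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
tracing:
  endpoint: localhost:4317        # the OTLP gRPC default
  samplingRatePerMillion: 1000    # illustrative; unset inherits the parent span's rate
```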
      + +## `VModuleConfiguration` {#VModuleConfiguration} + +(Alias of `[]k8s.io/component-base/logs/api/v1.VModuleItem`) + +**Appears in:** + +- [LoggingConfiguration](#LoggingConfiguration) + + +

      VModuleConfiguration is a collection of individual file names or patterns +and the corresponding verbosity threshold.

      + + + + +## `VerbosityLevel` {#VerbosityLevel} + +(Alias of `uint32`) + +**Appears in:** + +- [LoggingConfiguration](#LoggingConfiguration) + + + +

      VerbosityLevel represents a klog or logr verbosity threshold.

      + + + + ## `CredentialProviderConfig` {#kubelet-config-k8s-io-v1beta1-CredentialProviderConfig} @@ -399,6 +672,16 @@ garbage collected. Default: "2m"

      +imageMaximumGCAge
      +meta/v1.Duration + + +

      imageMaximumGCAge is the maximum age an image can be unused before it is garbage collected. +The default of this field is "0s", which disables this field--meaning images won't be garbage +collected based on being unused for too long. +Default: "0s" (disabled)

      + + imageGCHighThresholdPercent
      int32 @@ -1175,7 +1458,7 @@ Default: 0.9

      registerWithTaints
      -[]core/v1.Taint +[]core/v1.Taint

      registerWithTaints are an array of taints to add to a node object when @@ -1193,321 +1476,87 @@ Default: true

      tracing
      -TracingConfiguration - - -

      Tracing specifies the versioned configuration for OpenTelemetry tracing clients. -See https://kep.k8s.io/2832 for more details. -Default: nil

      - - -localStorageCapacityIsolation
      -bool - - -

      LocalStorageCapacityIsolation enables local ephemeral storage isolation feature. The default setting is true. -This feature allows users to set request/limit for container's ephemeral storage and manage it in a similar way -as cpu and memory. It also allows setting sizeLimit for emptyDir volume, which will trigger pod eviction if disk -usage from the volume exceeds the limit. -This feature depends on the capability of detecting correct root file system disk usage. For certain systems, -such as kind rootless, if this capability cannot be supported, the feature LocalStorageCapacityIsolation should be -disabled. Once disabled, user should not set request/limit for container's ephemeral storage, or sizeLimit for emptyDir. -Default: true

      - - -containerRuntimeEndpoint [Required]
      -string - - -

      ContainerRuntimeEndpoint is the endpoint of container runtime. -Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. -Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'

      - - -imageServiceEndpoint
      -string - - -

      ImageServiceEndpoint is the endpoint of container image service. -Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. -Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'. -If not specified, the value in containerRuntimeEndpoint is used.

      - - - - - -## `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} - - - -

      SerializedNodeConfigSource allows us to serialize v1.NodeConfigSource. -This type is used internally by the Kubelet for tracking checkpointed dynamic configs. -It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet.

      - - - - - - - - - - - - - - -
      FieldDescription
      apiVersion
      string
      kubelet.config.k8s.io/v1beta1
      kind
      string
      SerializedNodeConfigSource
      source
      -core/v1.NodeConfigSource -
      -

      source is the source that we are serializing.

      -
      - -## `CredentialProvider` {#kubelet-config-k8s-io-v1beta1-CredentialProvider} - - -**Appears in:** - -- [CredentialProviderConfig](#kubelet-config-k8s-io-v1beta1-CredentialProviderConfig) - - -

      CredentialProvider represents an exec plugin to be invoked by the kubelet. The plugin is only -invoked when an image being pulled matches the images handled by the plugin (see matchImages).

      - - - - - - - - - - - - - - - - - - - - - - - - - - -
      FieldDescription
      name [Required]
      -string -
      -

      name is the required name of the credential provider. It must match the name of the -provider executable as seen by the kubelet. The executable must be in the kubelet's -bin directory (set by the --image-credential-provider-bin-dir flag).

      -
      matchImages [Required]
      -[]string -
      -

      matchImages is a required list of strings used to match against images in order to -determine if this provider should be invoked. If one of the strings matches the -requested image from the kubelet, the plugin will be invoked and given a chance -to provide credentials. Images are expected to contain the registry domain -and URL path.

      -

      Each entry in matchImages is a pattern which can optionally contain a port and a path. -Globs can be used in the domain, but not in the port or the path. Globs are supported -as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. -Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match -a single subdomain segment, so *.io does not match *.k8s.io.

      -

      A match exists between an image and a matchImage when all of the below are true:

      -
        -
      • Both contain the same number of domain parts and each part matches.
      • -
      • The URL path of an imageMatch must be a prefix of the target image URL path.
      • -
      • If the imageMatch contains a port, then the port must match in the image as well.
      • -
      -

      Example values of matchImages:

      -
        -
      • 123456789.dkr.ecr.us-east-1.amazonaws.com
      • -
      • *.azurecr.io
      • -
      • gcr.io
      • -
      • *.*.registry.io
      • -
      • registry.io:8080/path
      • -
      -
      defaultCacheDuration [Required]
      -meta/v1.Duration -
      -

      defaultCacheDuration is the default duration the plugin will cache credentials in-memory -if a cache duration is not provided in the plugin response. This field is required.

      -
      apiVersion [Required]
      -string -
      -

      Required input version of the exec CredentialProviderRequest. The returned CredentialProviderResponse -MUST use the same encoding version as the input. Current supported values are:

      -
        -
      • credentialprovider.kubelet.k8s.io/v1beta1
      • -
      -
      args
      -[]string -
      -

      Arguments to pass to the command when executing it.

      -
      env
      -[]ExecEnvVar -
      -

      Env defines additional environment variables to expose to the process. These -are unioned with the host's environment, as well as variables client-go uses -to pass argument to the plugin.

      -
      - -## `ExecEnvVar` {#kubelet-config-k8s-io-v1beta1-ExecEnvVar} - - -**Appears in:** - -- [CredentialProvider](#kubelet-config-k8s-io-v1beta1-CredentialProvider) - - -

      ExecEnvVar is used for setting environment variables when executing an exec-based -credential plugin.

      - - - - - - - - - - - - - - -
      FieldDescription
      name [Required]
      -string -
      - No description provided.
      value [Required]
      -string -
      - No description provided.
      - -## `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} - - -**Appears in:** - -- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) - - - - - - - - - - -
      FieldDescription
      enabled
      -bool +TracingConfiguration
      -

      enabled allows anonymous requests to the kubelet server. -Requests that are not rejected by another authentication method are treated as -anonymous requests. -Anonymous requests have a username of system:anonymous, and a group name of -system:unauthenticated.

      +

      Tracing specifies the versioned configuration for OpenTelemetry tracing clients. +See https://kep.k8s.io/2832 for more details. +Default: nil

      - -## `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} - - -**Appears in:** - -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - - - - - - - - - - -
      FieldDescription
      x509
      -KubeletX509Authentication +
      localStorageCapacityIsolation
      +bool
      -

      x509 contains settings related to x509 client certificate authentication.

      +

      LocalStorageCapacityIsolation enables local ephemeral storage isolation feature. The default setting is true. +This feature allows users to set request/limit for container's ephemeral storage and manage it in a similar way +as cpu and memory. It also allows setting sizeLimit for emptyDir volume, which will trigger pod eviction if disk +usage from the volume exceeds the limit. +This feature depends on the capability of detecting correct root file system disk usage. For certain systems, +such as kind rootless, if this capability cannot be supported, the feature LocalStorageCapacityIsolation should be +disabled. Once disabled, user should not set request/limit for container's ephemeral storage, or sizeLimit for emptyDir. +Default: true

      webhook
      -KubeletWebhookAuthentication +
      containerRuntimeEndpoint [Required]
      +string
      -

      webhook contains settings related to webhook bearer token authentication.

      +

      ContainerRuntimeEndpoint is the endpoint of container runtime. +Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. +Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'

      anonymous
      -KubeletAnonymousAuthentication +
      imageServiceEndpoint
      +string
      -

      anonymous contains settings related to anonymous authentication.

      +

      ImageServiceEndpoint is the endpoint of container image service. +Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. +Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'. +If not specified, the value in containerRuntimeEndpoint is used.

      -## `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} +## `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} -**Appears in:** - -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +

      SerializedNodeConfigSource allows us to serialize v1.NodeConfigSource. +This type is used internally by the Kubelet for tracking checkpointed dynamic configs. +It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet.

      + + + - - - -
      FieldDescription
      apiVersion
      string
      kubelet.config.k8s.io/v1beta1
      kind
      string
      SerializedNodeConfigSource
      mode
      -KubeletAuthorizationMode -
      -

      mode is the authorization mode to apply to requests to the kubelet server. -Valid values are AlwaysAllow and Webhook. -Webhook mode uses the SubjectAccessReview API to determine authorization.

      -
      webhook
      -KubeletWebhookAuthorization +
      source
      +core/v1.NodeConfigSource
      -

      webhook contains settings related to Webhook authorization.

      +

      source is the source that we are serializing.

      -## `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} +## `CredentialProvider` {#kubelet-config-k8s-io-v1beta1-CredentialProvider} -(Alias of `string`) **Appears in:** -- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) - - - - - -## `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} - - -**Appears in:** +- [CredentialProviderConfig](#kubelet-config-k8s-io-v1beta1-CredentialProviderConfig) -- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) +

      CredentialProvider represents an exec plugin to be invoked by the kubelet. The plugin is only +invoked when an image being pulled matches the images handled by the plugin (see matchImages).

      @@ -1515,165 +1564,122 @@ Webhook mode uses the SubjectAccessReview API to determine authorization.

      - - - -
      enabled
      -bool +
      name [Required]
      +string
      -

      enabled allows bearer token authentication backed by the -tokenreviews.authentication.k8s.io API.

      +

      name is the required name of the credential provider. It must match the name of the +provider executable as seen by the kubelet. The executable must be in the kubelet's +bin directory (set by the --image-credential-provider-bin-dir flag).

      cacheTTL
      -meta/v1.Duration +
      matchImages [Required]
      +[]string
      -

      cacheTTL enables caching of authentication results

      +

      matchImages is a required list of strings used to match against images in order to +determine if this provider should be invoked. If one of the strings matches the +requested image from the kubelet, the plugin will be invoked and given a chance +to provide credentials. Images are expected to contain the registry domain +and URL path.

      +

Each entry in matchImages is a pattern which can optionally contain a port and a path. +Globs can be used in the domain, but not in the port or the path. Globs are supported +as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. +Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match +a single subdomain segment, so *.io does not match *.k8s.io.

      +

      A match exists between an image and a matchImage when all of the below are true:

      +
        +
      • Both contain the same number of domain parts and each part matches.
      • +
      • The URL path of an imageMatch must be a prefix of the target image URL path.
      • +
      • If the imageMatch contains a port, then the port must match in the image as well.
      • +
      +

      Example values of matchImages:

      +
        +
      • 123456789.dkr.ecr.us-east-1.amazonaws.com
      • +
      • *.azurecr.io
      • +
      • gcr.io
      • +
• *.*.registry.io
      • +
      • registry.io:8080/path
      • +
      - -## `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} - - -**Appears in:** - -- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) - - - - - - - - - - - -
      FieldDescription
      cacheAuthorizedTTL
      +
      defaultCacheDuration [Required]
      meta/v1.Duration
      -

      cacheAuthorizedTTL is the duration to cache 'authorized' responses from the -webhook authorizer.

      +

      defaultCacheDuration is the default duration the plugin will cache credentials in-memory +if a cache duration is not provided in the plugin response. This field is required.

      cacheUnauthorizedTTL
      -meta/v1.Duration +
      apiVersion [Required]
      +string
      -

      cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from -the webhook authorizer.

      +

      Required input version of the exec CredentialProviderRequest. The returned CredentialProviderResponse +MUST use the same encoding version as the input. Current supported values are:

      +
        +
      • credentialprovider.kubelet.k8s.io/v1beta1
      • +
      - -## `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} - - -**Appears in:** - -- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) - - - - - - - - - - -
      FieldDescription
      clientCAFile
      -string +
      args
      +[]string
      -

      clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request -presenting a client certificate signed by one of the authorities in the bundle -is authenticated with a username corresponding to the CommonName, -and groups corresponding to the Organization in the client certificate.

      +

      Arguments to pass to the command when executing it.

      - -## `MemoryReservation` {#kubelet-config-k8s-io-v1beta1-MemoryReservation} - - -**Appears in:** - -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - - -

      MemoryReservation specifies the memory reservation of different types for each NUMA node

      - - - - - - - - - - -
      FieldDescription
      numaNode [Required]
      -int32 +
      env
      +[]ExecEnvVar
      - No description provided.
      limits [Required]
      -core/v1.ResourceList +

      Env defines additional environment variables to expose to the process. These +are unioned with the host's environment, as well as variables client-go uses +to pass argument to the plugin.

      - No description provided.
      -## `MemorySwapConfiguration` {#kubelet-config-k8s-io-v1beta1-MemorySwapConfiguration} +## `ExecEnvVar` {#kubelet-config-k8s-io-v1beta1-ExecEnvVar} **Appears in:** -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) +- [CredentialProvider](#kubelet-config-k8s-io-v1beta1-CredentialProvider) +

      ExecEnvVar is used for setting environment variables when executing an exec-based +credential plugin.

      + - + + +
      FieldDescription
      swapBehavior
      +
      name [Required]
      string
      -

      swapBehavior configures swap memory available to container workloads. May be one of -"", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit -"UnlimitedSwap": workloads can use unlimited swap, up to the allocatable limit.

      + No description provided.
      value [Required]
      +string
      + No description provided.
      -## `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} - -(Alias of `string`) - -**Appears in:** - -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - - -

      ResourceChangeDetectionStrategy denotes a mode in which internal -managers (secret, configmap) are discovering object changes.

      - - - - -## `ShutdownGracePeriodByPodPriority` {#kubelet-config-k8s-io-v1beta1-ShutdownGracePeriodByPodPriority} +## `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} **Appears in:** -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) -

      ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value

      @@ -1681,35 +1687,27 @@ managers (secret, configmap) are discovering object changes.

      - - - -
      priority [Required]
      -int32 -
      -

      priority is the priority value associated with the shutdown grace period

      -
      shutdownGracePeriodSeconds [Required]
      -int64 +
      enabled
      +bool
      -

      shutdownGracePeriodSeconds is the shutdown grace period in seconds

      +

      enabled allows anonymous requests to the kubelet server. +Requests that are not rejected by another authentication method are treated as +anonymous requests. +Anonymous requests have a username of system:anonymous, and a group name of +system:unauthenticated.

      - - - -## `FormatOptions` {#FormatOptions} +## `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} **Appears in:** -- [LoggingConfiguration](#LoggingConfiguration) - +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      FormatOptions contains options for the different logging formats.

      @@ -1717,26 +1715,37 @@ managers (secret, configmap) are discovering object changes.

      - + + + + + +
      json [Required]
      -JSONOptions +
      x509
      +KubeletX509Authentication
      -

      [Alpha] JSON contains options for logging format "json". -Only available when the LoggingAlphaOptions feature gate is enabled.

      +

      x509 contains settings related to x509 client certificate authentication.

      +
      webhook
      +KubeletWebhookAuthentication +
      +

      webhook contains settings related to webhook bearer token authentication.

      +
      anonymous
      +KubeletAnonymousAuthentication +
      +

      anonymous contains settings related to anonymous authentication.

      -## `JSONOptions` {#JSONOptions} +## `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} **Appears in:** -- [FormatOptions](#FormatOptions) - +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      JSONOptions contains options for logging format "json".

      @@ -1744,47 +1753,44 @@ Only available when the LoggingAlphaOptions feature gate is enabled.

      - -
      splitStream [Required]
      -bool +
      mode
      +KubeletAuthorizationMode
      -

      [Alpha] SplitStream redirects error messages to stderr while -info messages go to stdout, with buffering. The default is to write -both to stdout, without buffering. Only available when -the LoggingAlphaOptions feature gate is enabled.

      +

      mode is the authorization mode to apply to requests to the kubelet server. +Valid values are AlwaysAllow and Webhook. +Webhook mode uses the SubjectAccessReview API to determine authorization.

      infoBufferSize [Required]
      -k8s.io/apimachinery/pkg/api/resource.QuantityValue +
      webhook
      +KubeletWebhookAuthorization
      -

      [Alpha] InfoBufferSize sets the size of the info stream when -using split streams. The default is zero, which disables buffering. -Only available when the LoggingAlphaOptions feature gate is enabled.

      +

      webhook contains settings related to Webhook authorization.

      -## `LogFormatFactory` {#LogFormatFactory} +## `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} +(Alias of `string`) +**Appears in:** -

      LogFormatFactory provides support for a certain additional, -non-default log format.

      +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) -## `LoggingConfiguration` {#LoggingConfiguration} + +## `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} **Appears in:** -- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) - +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) -

      LoggingConfiguration contains logging options.

      @@ -1792,61 +1798,64 @@ non-default log format.

      - - - - - - +
      format [Required]
      -string -
      -

      Format Flag specifies the structure of log messages. -default value of format is text

      -
      flushFrequency [Required]
      -TimeOrMetaDuration +
      enabled
      +bool
      -

      Maximum time between log flushes. -If a string, parsed as a duration (i.e. "1s") -If an int, the maximum number of nanoseconds (i.e. 1s = 1000000000). -Ignored if the selected logging backend writes log messages without buffering.

      +

      enabled allows bearer token authentication backed by the +tokenreviews.authentication.k8s.io API.

      verbosity [Required]
      -VerbosityLevel +
      cacheTTL
      +meta/v1.Duration
      -

      Verbosity is the threshold that determines which log messages are -logged. Default is zero which logs only the most important -messages. Higher values enable additional messages. Error messages -are always logged.

      +

      cacheTTL enables caching of authentication results

      vmodule [Required]
      -VModuleConfiguration +
      + +## `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} + + +**Appears in:** + +- [KubeletAuthorization](#kubelet-config-k8s-io-v1beta1-KubeletAuthorization) + + + + + + + + + -
      FieldDescription
      cacheAuthorizedTTL
      +meta/v1.Duration
      -

      VModule overrides the verbosity threshold for individual files. -Only supported for "text" log format.

      +

      cacheAuthorizedTTL is the duration to cache 'authorized' responses from the +webhook authorizer.

      options [Required]
      -FormatOptions +
      cacheUnauthorizedTTL
      +meta/v1.Duration
      -

      [Alpha] Options holds additional parameters that are specific -to the different logging formats. Only the options for the selected -format get used, but all of them get validated. -Only available when the LoggingAlphaOptions feature gate is enabled.

      +

      cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from +the webhook authorizer.
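Combined with the `mode` field described above, the authorization settings might be wired up like this; a sketch with illustrative TTL values:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authorization:
  # Webhook mode checks each request with a SubjectAccessReview
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
```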

      -## `LoggingOptions` {#LoggingOptions} +## `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} +**Appears in:** + +- [KubeletAuthentication](#kubelet-config-k8s-io-v1beta1-KubeletAuthentication) -

      LoggingOptions can be used with ValidateAndApplyWithOptions to override -certain global defaults.

      @@ -1854,33 +1863,28 @@ certain global defaults.

      - - - -
      ErrorStream [Required]
      -io.Writer -
      -

      ErrorStream can be used to override the os.Stderr default.

      -
      InfoStream [Required]
      -io.Writer +
      clientCAFile
      +string
      -

      InfoStream can be used to override the os.Stdout default.

      +

      clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request +presenting a client certificate signed by one of the authorities in the bundle +is authenticated with a username corresponding to the CommonName, +and groups corresponding to the Organization in the client certificate.

      -## `TimeOrMetaDuration` {#TimeOrMetaDuration} +## `MemoryReservation` {#kubelet-config-k8s-io-v1beta1-MemoryReservation} **Appears in:** -- [LoggingConfiguration](#LoggingConfiguration) +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      TimeOrMetaDuration is present only for backwards compatibility for the -flushFrequency field, and new fields should use metav1.Duration.

      +

      MemoryReservation specifies the memory reservation of different types for each NUMA node

      @@ -1888,24 +1892,22 @@ flushFrequency field, and new fields should use metav1.Duration.

      - + No description provided. - + No description provided.
      Duration [Required]
      -meta/v1.Duration +
      numaNode [Required]
      +int32
      -

      Duration holds the duration

      -
      - [Required]
      -bool +
      limits [Required]
      +core/v1.ResourceList
      -

      SerializeAsString controls whether the value is serialized as a string or an integer

      -
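`MemoryReservation` entries are supplied as a list under the `reservedMemory` field of `KubeletConfiguration`; a minimal sketch, assuming a single NUMA node, an illustrative 1Gi reservation, and a Memory Manager configured to consume it:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# one MemoryReservation entry per NUMA node; the amount is illustrative
reservedMemory:
- numaNode: 0
  limits:
    memory: 1Gi
```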
      -## `TracingConfiguration` {#TracingConfiguration} +## `MemorySwapConfiguration` {#kubelet-config-k8s-io-v1beta1-MemorySwapConfiguration} **Appears in:** @@ -1913,60 +1915,69 @@ flushFrequency field, and new fields should use metav1.Duration.

      - [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.

      - - - - -
      FieldDescription
      endpoint
      +
      swapBehavior
      string
      -

      Endpoint of the collector this component will report traces to. -The connection is insecure, and does not currently support TLS. -Recommended is unset, and endpoint is the otlp grpc default, localhost:4317.

      -
      samplingRatePerMillion
      -int32 -
      -

      SamplingRatePerMillion is the number of samples to collect per million spans. -Recommended is unset. If unset, sampler respects its parent span's sampling -rate, but otherwise never samples.

      +

      swapBehavior configures swap memory available to container workloads. May be one of +"", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit +"UnlimitedSwap": workloads can use unlimited swap, up to the allocatable limit.
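A hedged sketch of how this looks in a `KubeletConfiguration` file; it assumes the `NodeSwap` feature gate and a node where the kubelet is allowed to run with swap on (`failSwapOn: false`):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  NodeSwap: true
# let the kubelet start on a node that has swap enabled
failSwapOn: false
memorySwap:
  # LimitedSwap caps combined memory plus swap usage at the pod memory limit
  swapBehavior: LimitedSwap
```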

      -## `VModuleConfiguration` {#VModuleConfiguration} +## `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} -(Alias of `[]k8s.io/component-base/logs/api/v1.VModuleItem`) +(Alias of `string`) **Appears in:** -- [LoggingConfiguration](#LoggingConfiguration) +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      VModuleConfiguration is a collection of individual file names or patterns -and the corresponding verbosity threshold.

      +

      ResourceChangeDetectionStrategy denotes a mode in which internal +managers (secret, configmap) are discovering object changes.
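This strategy is set through the `configMapAndSecretChangeDetectionStrategy` field of `KubeletConfiguration`; a minimal sketch (the accepted values include `Get`, `Cache`, and `Watch`):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Watch uses API server watches; Get and Cache are the other accepted values
configMapAndSecretChangeDetectionStrategy: Watch
```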

      -## `VerbosityLevel` {#VerbosityLevel} +## `ShutdownGracePeriodByPodPriority` {#kubelet-config-k8s-io-v1beta1-ShutdownGracePeriodByPodPriority} -(Alias of `uint32`) **Appears in:** -- [LoggingConfiguration](#LoggingConfiguration) - +- [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration) -

      VerbosityLevel represents a klog or logr verbosity threshold.

      +

      ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value

      + + + + + + + + + + + + +
      FieldDescription
      priority [Required]
      +int32 +
      +

      priority is the priority value associated with the shutdown grace period

      +
      shutdownGracePeriodSeconds [Required]
      +int64 +
      +

      shutdownGracePeriodSeconds is the shutdown grace period in seconds

      +
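These entries form the `shutdownGracePeriodByPodPriority` list in `KubeletConfiguration`, used by graceful node shutdown; a sketch with illustrative priority bands:

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriodByPodPriority:
# illustrative bands: pods at or above a listed priority use that grace period
- priority: 100000
  shutdownGracePeriodSeconds: 30
- priority: 0
  shutdownGracePeriodSeconds: 120
```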
      + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1.md b/content/en/docs/reference/config-api/kubelet-credentialprovider.v1.md index 9c8b754443e5a..8575b92303461 100644 --- a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1.md +++ b/content/en/docs/reference/config-api/kubelet-credentialprovider.v1.md @@ -12,7 +12,6 @@ auto_generated: true - [CredentialProviderRequest](#credentialprovider-kubelet-k8s-io-v1-CredentialProviderRequest) - [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1-CredentialProviderResponse) - ## `CredentialProviderRequest` {#credentialprovider-kubelet-k8s-io-v1-CredentialProviderRequest} @@ -80,7 +79,7 @@ CredentialProviderConfig. If set to 0, the kubelet will not cache the provided A auth
      -map[string]k8s.io/kubelet/pkg/apis/credentialprovider/v1.AuthConfig +map[string]AuthConfig

      auth is a map containing authentication information passed into the kubelet. @@ -89,9 +88,9 @@ should be valid for all images that match against this key. A plugin should set this field to null if no valid credentials can be returned for the requested image.

      Each key in the map is a pattern which can optionally contain a port and a path. Globs can be used in the domain, but not in the port or the path. Globs are supported -as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. -Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match -a single subdomain segment, so *.io does not match *.k8s.io.

+as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. +Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match +a single subdomain segment, so *.io does not match *.k8s.io.

      The kubelet will match images against the key when all of the below are true:

      • Both contain the same number of domain parts and each part matches.
      • @@ -108,9 +107,9 @@ stopping after the first successfully authenticated pull.

        Example keys:

        • 123456789.dkr.ecr.us-east-1.amazonaws.com
        • -
        • *.azurecr.io
        • +
        • *.azurecr.io
        • gcr.io
        • -
        • *.*.registry.io
        • +
• *.*.registry.io
        • registry.io:8080/path
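For concreteness, a sketch of a `CredentialProviderResponse` whose `auth` map uses key patterns like the examples above. The kubelet reads the response from the plugin's stdout as JSON; it is shown as YAML here purely for readability, and every credential value is a placeholder:

```yaml
apiVersion: credentialprovider.kubelet.k8s.io/v1
kind: CredentialProviderResponse
cacheKeyType: Registry
cacheDuration: 5m0s
auth:
  # an exact host key; non-wildcard keys are tried before wildcard keys
  "123456789.dkr.ecr.us-east-1.amazonaws.com":
    username: "<placeholder-user>"
    password: "<placeholder-token>"
  # a wildcard key; each glob matches exactly one subdomain segment
  "*.azurecr.io":
    username: "<placeholder-user>"
    password: "<placeholder-token>"
```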
        @@ -166,4 +165,4 @@ An empty password is valid.

        - + \ No newline at end of file diff --git a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1alpha1.md b/content/en/docs/reference/config-api/kubelet-credentialprovider.v1alpha1.md deleted file mode 100644 index c8a7bd682e60a..0000000000000 --- a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1alpha1.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Kubelet CredentialProvider (v1alpha1) -content_type: tool-reference -package: credentialprovider.kubelet.k8s.io/v1alpha1 -auto_generated: true ---- - - -## Resource Types - - -- [CredentialProviderRequest](#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderRequest) -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderResponse) - - - -## `CredentialProviderRequest` {#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderRequest} - - - -

        CredentialProviderRequest includes the image that the kubelet requires authentication for. -Kubelet will pass this request object to the plugin via stdin. In general, plugins should -prefer responding with the same apiVersion they were sent.

        - - - - - - - - - - - - - - -
        FieldDescription
        apiVersion
        string
        credentialprovider.kubelet.k8s.io/v1alpha1
        kind
        string
        CredentialProviderRequest
        image [Required]
        -string -
        -

        image is the container image that is being pulled as part of the -credential provider plugin request. Plugins may optionally parse the image -to extract any information required to fetch credentials.

        -
        - -## `CredentialProviderResponse` {#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderResponse} - - - -

        CredentialProviderResponse holds credentials that the kubelet should use for the specified -image provided in the original request. Kubelet will read the response from the plugin via stdout. -This response should be set to the same apiVersion as CredentialProviderRequest.

        - - - - - - - - - - - - - - - - - - - - -
        FieldDescription
        apiVersion
        string
        credentialprovider.kubelet.k8s.io/v1alpha1
        kind
        string
        CredentialProviderResponse
        cacheKeyType [Required]
        -PluginCacheKeyType -
        -

        cacheKeyType indiciates the type of caching key to use based on the image provided -in the request. There are three valid values for the cache key type: Image, Registry, and -Global. If an invalid value is specified, the response will NOT be used by the kubelet.

        -
        cacheDuration
        -meta/v1.Duration -
        -

        cacheDuration indicates the duration the provided credentials should be cached for. -The kubelet will use this field to set the in-memory cache duration for credentials -in the AuthConfig. If null, the kubelet will use defaultCacheDuration provided in -CredentialProviderConfig. If set to 0, the kubelet will not cache the provided AuthConfig.

        -
        auth
        -map[string]k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1.AuthConfig -
        -

        auth is a map containing authentication information passed into the kubelet. -Each key is a match image string (more on this below). The corresponding authConfig value -should be valid for all images that match against this key. A plugin should set -this field to null if no valid credentials can be returned for the requested image.

        -

        Each key in the map is a pattern which can optionally contain a port and a path. -Globs can be used in the domain, but not in the port or the path. Globs are supported -as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. -Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match -a single subdomain segment, so *.io does not match *.k8s.io.

        -

        The kubelet will match images against the key when all of the below are true:

        -
          -
        • Both contain the same number of domain parts and each part matches.
        • -
        • The URL path of an imageMatch must be a prefix of the target image URL path.
        • -
        • If the imageMatch contains a port, then the port must match in the image as well.
        • -
        -

        When multiple keys are returned, the kubelet will traverse all keys in reverse order so that:

        -
          -
        • longer keys come before shorter keys with the same prefix
        • -
        • non-wildcard keys come before wildcard keys with the same prefix.
        • -
        -

        For any given match, the kubelet will attempt an image pull with the provided credentials, -stopping after the first successfully authenticated pull.

        -

        Example keys:

        -
          -
        • 123456789.dkr.ecr.us-east-1.amazonaws.com
        • -
        • *.azurecr.io
        • -
        • gcr.io
        • -
        • *.*.registry.io
        • -
        • registry.io:8080/path
        • -
        -
        - -## `AuthConfig` {#credentialprovider-kubelet-k8s-io-v1alpha1-AuthConfig} - - -**Appears in:** - -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderResponse) - - -

        AuthConfig contains authentication information for a container registry. -Only username/password based authentication is supported today, but more authentication -mechanisms may be added in the future.

        - - - - - - - - - - - - - - -
        FieldDescription
        username [Required]
        -string -
        -

        username is the username used for authenticating to the container registry -An empty username is valid.

        -
        password [Required]
        -string -
        -

        password is the password used for authenticating to the container registry -An empty password is valid.

        -
        - -## `PluginCacheKeyType` {#credentialprovider-kubelet-k8s-io-v1alpha1-PluginCacheKeyType} - -(Alias of `string`) - -**Appears in:** - -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1alpha1-CredentialProviderResponse) - - - - - diff --git a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1beta1.md b/content/en/docs/reference/config-api/kubelet-credentialprovider.v1beta1.md deleted file mode 100644 index 7384939b5f35b..0000000000000 --- a/content/en/docs/reference/config-api/kubelet-credentialprovider.v1beta1.md +++ /dev/null @@ -1,169 +0,0 @@ ---- -title: Kubelet CredentialProvider (v1beta1) -content_type: tool-reference -package: credentialprovider.kubelet.k8s.io/v1beta1 -auto_generated: true ---- - - -## Resource Types - - -- [CredentialProviderRequest](#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderRequest) -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderResponse) - - - -## `CredentialProviderRequest` {#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderRequest} - - - -

        CredentialProviderRequest includes the image that the kubelet requires authentication for. -Kubelet will pass this request object to the plugin via stdin. In general, plugins should -prefer responding with the same apiVersion they were sent.

        - - - - - - - - - - - - - - -
        FieldDescription
        apiVersion
        string
        credentialprovider.kubelet.k8s.io/v1beta1
        kind
        string
        CredentialProviderRequest
        image [Required]
        -string -
        -

        image is the container image that is being pulled as part of the -credential provider plugin request. Plugins may optionally parse the image -to extract any information required to fetch credentials.

        -
        - -## `CredentialProviderResponse` {#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderResponse} - - - -

        CredentialProviderResponse holds credentials that the kubelet should use for the specified -image provided in the original request. Kubelet will read the response from the plugin via stdout. -This response should be set to the same apiVersion as CredentialProviderRequest.

        - - - - - - - - - - - - - - - - - - - - -
        FieldDescription
        apiVersion
        string
        credentialprovider.kubelet.k8s.io/v1beta1
        kind
        string
        CredentialProviderResponse
        cacheKeyType [Required]
        -PluginCacheKeyType -
        -

        cacheKeyType indiciates the type of caching key to use based on the image provided -in the request. There are three valid values for the cache key type: Image, Registry, and -Global. If an invalid value is specified, the response will NOT be used by the kubelet.

        -
        cacheDuration
        -meta/v1.Duration -
        -

        cacheDuration indicates the duration the provided credentials should be cached for. -The kubelet will use this field to set the in-memory cache duration for credentials -in the AuthConfig. If null, the kubelet will use defaultCacheDuration provided in -CredentialProviderConfig. If set to 0, the kubelet will not cache the provided AuthConfig.

        -
        auth
        -map[string]k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1.AuthConfig -
        -

        auth is a map containing authentication information passed into the kubelet. -Each key is a match image string (more on this below). The corresponding authConfig value -should be valid for all images that match against this key. A plugin should set -this field to null if no valid credentials can be returned for the requested image.

        -

        Each key in the map is a pattern which can optionally contain a port and a path. -Globs can be used in the domain, but not in the port or the path. Globs are supported -as subdomains like '*.k8s.io' or 'k8s.*.io', and top-level-domains such as 'k8s.*'. -Matching partial subdomains like 'app*.k8s.io' is also supported. Each glob can only match -a single subdomain segment, so *.io does not match *.k8s.io.

        -

        The kubelet will match images against the key when all of the below are true:

        -
          -
        • Both contain the same number of domain parts and each part matches.
        • -
        • The URL path of an imageMatch must be a prefix of the target image URL path.
        • -
        • If the imageMatch contains a port, then the port must match in the image as well.
        • -
        -

        When multiple keys are returned, the kubelet will traverse all keys in reverse order so that:

        -
          -
        • longer keys come before shorter keys with the same prefix
        • -
        • non-wildcard keys come before wildcard keys with the same prefix.
        • -
        -

        For any given match, the kubelet will attempt an image pull with the provided credentials, -stopping after the first successfully authenticated pull.

        -

        Example keys:

        -
          -
        • 123456789.dkr.ecr.us-east-1.amazonaws.com
        • -
        • *.azurecr.io
        • -
        • gcr.io
        • -
        • *.*registry.io
        • -
        • registry.io:8080/path
        • -
        -
        - -## `AuthConfig` {#credentialprovider-kubelet-k8s-io-v1beta1-AuthConfig} - - -**Appears in:** - -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderResponse) - - -

        AuthConfig contains authentication information for a container registry. -Only username/password based authentication is supported today, but more authentication -mechanisms may be added in the future.

        - - - - - - - - - - - - - - -
        FieldDescription
        username [Required]
        -string -
        -

        username is the username used for authenticating to the container registry -An empty username is valid.

        -
        password [Required]
        -string -
        -

        password is the password used for authenticating to the container registry -An empty password is valid.

        -
        - -## `PluginCacheKeyType` {#credentialprovider-kubelet-k8s-io-v1beta1-PluginCacheKeyType} - -(Alias of `string`) - -**Appears in:** - -- [CredentialProviderResponse](#credentialprovider-kubelet-k8s-io-v1beta1-CredentialProviderResponse) - - - - - diff --git a/content/en/docs/reference/glossary/container-runtime-interface.md b/content/en/docs/reference/glossary/container-runtime-interface.md index 28a67cbbb40be..c2dab628efb04 100644 --- a/content/en/docs/reference/glossary/container-runtime-interface.md +++ b/content/en/docs/reference/glossary/container-runtime-interface.md @@ -11,12 +11,12 @@ tags: - cri --- -The main protocol for the communication between the kubelet and Container Runtime. +The main protocol for the communication between the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} and Container Runtime. The Kubernetes Container Runtime Interface (CRI) defines the main [gRPC](https://grpc.io) protocol for the communication between the -[cluster components](/docs/concepts/overview/components/#node-components) +[node components](/docs/concepts/overview/components/#node-components) {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} and {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}. diff --git a/content/en/docs/reference/glossary/cri.md b/content/en/docs/reference/glossary/cri.md index a5c0319f5ad0e..55861df7a6889 100644 --- a/content/en/docs/reference/glossary/cri.md +++ b/content/en/docs/reference/glossary/cri.md @@ -12,7 +12,7 @@ tags: - fundamental --- The container runtime interface (CRI) is an API for container runtimes -to integrate with kubelet on a node. +to integrate with the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} on a node. For more information, see the [CRI](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md) API and specifications. diff --git a/content/en/docs/reference/glossary/dockershim.md b/content/en/docs/reference/glossary/dockershim.md index d726679064b2b..446e6b5ae71e1 100644 --- a/content/en/docs/reference/glossary/dockershim.md +++ b/content/en/docs/reference/glossary/dockershim.md @@ -10,7 +10,7 @@ aka: tags: - fundamental --- -The dockershim is a component of Kubernetes version 1.23 and earlier. It allows the kubelet +The dockershim is a component of Kubernetes version 1.23 and earlier. It allows the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} to communicate with {{< glossary_tooltip text="Docker Engine" term_id="docker" >}}. diff --git a/content/en/docs/reference/glossary/gateway.md b/content/en/docs/reference/glossary/gateway.md new file mode 100644 index 0000000000000..1b8b647671e1e --- /dev/null +++ b/content/en/docs/reference/glossary/gateway.md @@ -0,0 +1,20 @@ +--- +title: Gateway API +id: gateway-api +date: 2023-10-19 +full_link: /docs/concepts/services-networking/gateway/ +short_description: > + An API for modeling service networking in Kubernetes. + +aka: +tags: +- networking +- architecture +- extension +--- + A family of API kinds for modeling service networking in Kubernetes. + + + +Gateway API provides a family of extensible, role-oriented, protocol-aware +API kinds for modeling service networking in Kubernetes. 
diff --git a/content/en/docs/reference/glossary/group-version-resource.md b/content/en/docs/reference/glossary/group-version-resource.md new file mode 100644 index 0000000000000..cdd208fd5ed8e --- /dev/null +++ b/content/en/docs/reference/glossary/group-version-resource.md @@ -0,0 +1,18 @@ +--- +title: Group Version Resource +id: gvr +date: 2023-07-24 +short_description: > + The API group, API version and name of a Kubernetes API. + +aka: ["GVR"] +tags: +- architecture +--- +A means of representing a unique Kubernetes API resource. + + + +Group Version Resources (GVRs) specify the API group, API version, and resource (name for the object kind as it appears in the URI) associated with accessing a particular type of object in Kubernetes. +GVRs let you define and distinguish different Kubernetes objects, and specify a way of accessing +objects that is stable even as APIs change. \ No newline at end of file diff --git a/content/en/docs/reference/glossary/mirror-pod.md b/content/en/docs/reference/glossary/mirror-pod.md index c925e4d67219e..7f5c60c6b370f 100644 --- a/content/en/docs/reference/glossary/mirror-pod.md +++ b/content/en/docs/reference/glossary/mirror-pod.md @@ -9,7 +9,7 @@ aka: tags: - fundamental --- - A {{< glossary_tooltip text="pod" term_id="pod" >}} object that a kubelet uses + A {{< glossary_tooltip text="pod" term_id="pod" >}} object that a {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} uses to represent a {{< glossary_tooltip text="static pod" term_id="static-pod" >}} diff --git a/content/en/docs/reference/glossary/object.md b/content/en/docs/reference/glossary/object.md index 6365c7839fb1a..b1b7346254b80 100644 --- a/content/en/docs/reference/glossary/object.md +++ b/content/en/docs/reference/glossary/object.md @@ -4,7 +4,7 @@ id: object date: 2020-10-12 full_link: /docs/concepts/overview/working-with-objects/#kubernetes-objects short_description: > - A entity in the Kubernetes system, representing part of the state of your cluster. + An entity in the Kubernetes system, representing part of the state of your cluster. aka: tags: - fundamental diff --git a/content/en/docs/reference/glossary/probe.md b/content/en/docs/reference/glossary/probe.md index ed93da61a5b61..76908dbe6904e 100644 --- a/content/en/docs/reference/glossary/probe.md +++ b/content/en/docs/reference/glossary/probe.md @@ -10,7 +10,7 @@ short_description: > tags: - tool --- -A check that the kubelet periodically performs against a container that is +A check that the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} periodically performs against a container that is running in a pod, that will define container's state and health and informing container's lifecycle. diff --git a/content/en/docs/reference/glossary/spec.md b/content/en/docs/reference/glossary/spec.md new file mode 100644 index 0000000000000..3757b63430ab2 --- /dev/null +++ b/content/en/docs/reference/glossary/spec.md @@ -0,0 +1,21 @@ +--- +title: Spec +id: spec +date: 2023-12-17 +full_link: /docs/concepts/overview/working-with-objects/#object-spec-and-status +short_description: > + This field in Kubernetes manifests defines the desired state or configuration for specific Kubernetes objects. + +aka: +tags: +- fundamental +- architecture +--- + Defines how each object, like Pods or Services, should be configured and its desired state. + + +Almost every Kubernetes object includes two nested object fields that govern the object's configuration: the object spec and the object status.
For objects that have a spec, you have to set this when you create the object, providing a description of the characteristics you want the resource to have: its desired state. + +It varies for different objects like Pods, StatefulSets, and Services, detailing settings such as containers, volumes, replicas, ports, +and other specifications unique to each object type. This field encapsulates what state Kubernetes should maintain for the defined +object. diff --git a/content/en/docs/reference/glossary/static-pod.md b/content/en/docs/reference/glossary/static-pod.md index 565bc02a881f7..6d7ac3784ab98 100644 --- a/content/en/docs/reference/glossary/static-pod.md +++ b/content/en/docs/reference/glossary/static-pod.md @@ -11,7 +11,7 @@ tags: - fundamental --- -A {{< glossary_tooltip text="pod" term_id="pod" >}} managed directly by the kubelet +A {{< glossary_tooltip text="pod" term_id="pod" >}} managed directly by the {{< glossary_tooltip text="kubelet" term_id="kubelet" >}} daemon on a specific node, diff --git a/content/en/docs/reference/glossary/workload.md b/content/en/docs/reference/glossary/workload.md index f2fabbab77ed6..6b4f88ded70ae 100644 --- a/content/en/docs/reference/glossary/workload.md +++ b/content/en/docs/reference/glossary/workload.md @@ -1,6 +1,6 @@ --- title: Workload -id: workloads +id: workload date: 2019-02-13 full_link: /docs/concepts/workloads/ short_description: > diff --git a/content/en/docs/reference/instrumentation/metrics.md b/content/en/docs/reference/instrumentation/metrics.md index a94953a5655e8..0bc4338b60431 100644 --- a/content/en/docs/reference/instrumentation/metrics.md +++ b/content/en/docs/reference/instrumentation/metrics.md @@ -8,7 +8,7 @@ description: >- ## Metrics (v1.29) - + This page details the metrics that different Kubernetes components export. You can query the metrics endpoint for these components using an HTTP scrape, and fetch the current metrics data in Prometheus format. @@ -635,25 +635,25 @@ Alpha metrics do not have any API guarantees. These metrics must be used at your
      • protocoltransport
    apiserver_encryption_config_controller_automatic_reload_failures_total
    -
    Total number of failed automatic reloads of encryption configuration.
    +
    Total number of failed automatic reloads of encryption configuration split by apiserver identity.
    • ALPHA
    • Counter
    • -
    +
  • apiserver_id_hash
  • apiserver_encryption_config_controller_automatic_reload_last_timestamp_seconds
    -
    Timestamp of the last successful or failed automatic reload of encryption configuration.
    +
    Timestamp of the last successful or failed automatic reload of encryption configuration split by apiserver identity.
    • ALPHA
    • Gauge
    • -
    • status
    +
  • apiserver_id_hashstatus
  • apiserver_encryption_config_controller_automatic_reload_success_total
    -
    Total number of successful automatic reloads of encryption configuration.
    +
    Total number of successful automatic reloads of encryption configuration split by apiserver identity.
    • ALPHA
    • Counter
    • -
    +
  • apiserver_id_hash
  • apiserver_envelope_encryption_dek_cache_fill_percent
    Percent of the cache slots currently occupied by cached DEKs.
    @@ -688,21 +688,21 @@ Alpha metrics do not have any API guarantees. These metrics must be used at your
    • ALPHA
    • Gauge
    • -
    • key_id_hashprovider_nametransformation_type
    +
  • apiserver_id_hashkey_id_hashprovider_nametransformation_type
  • apiserver_envelope_encryption_key_id_hash_status_last_timestamp_seconds
    The last time in seconds when a keyID was returned by the Status RPC call.
    • ALPHA
    • Gauge
    • -
    • key_id_hashprovider_name
    +
  • apiserver_id_hashkey_id_hashprovider_name
  • apiserver_envelope_encryption_key_id_hash_total
    -
    Number of times a keyID is used split by transformation type and provider.
    +
    Number of times a keyID is used split by transformation type, provider, and apiserver identity.
    • ALPHA
    • Counter
    • -
    • key_id_hashprovider_nametransformation_type
    +
  • apiserver_id_hashkey_id_hashprovider_nametransformation_type
  • apiserver_envelope_encryption_kms_operations_latency_seconds
    KMS operation duration with gRPC error code status total.
    diff --git a/content/en/docs/reference/instrumentation/slis.md b/content/en/docs/reference/instrumentation/slis.md index 3b559a398c914..e520d0a9344b8 100644 --- a/content/en/docs/reference/instrumentation/slis.md +++ b/content/en/docs/reference/instrumentation/slis.md @@ -9,7 +9,7 @@ weight: 20 -{{< feature-state for_k8s_version="v1.27" state="beta" >}} +{{< feature-state for_k8s_version="v1.29" state="stable" >}} By default, Kubernetes {{< skew currentVersion >}} publishes Service Level Indicator (SLI) metrics for each Kubernetes component binary. This metric endpoint is exposed on the serving diff --git a/content/en/docs/reference/issues-security/security.md b/content/en/docs/reference/issues-security/security.md index e5d2a565ddfdc..64333620ee3a2 100644 --- a/content/en/docs/reference/issues-security/security.md +++ b/content/en/docs/reference/issues-security/security.md @@ -13,21 +13,27 @@ weight: 20 This page describes Kubernetes security and disclosure information. - ## Security Announcements -Join the [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) group for emails about security and major API announcements. +Join the [kubernetes-security-announce](https://groups.google.com/forum/#!forum/kubernetes-security-announce) +group for emails about security and major API announcements. ## Report a Vulnerability -We're extremely grateful for security researchers and users that report vulnerabilities to the Kubernetes Open Source Community. All reports are thoroughly investigated by a set of community volunteers. +We're extremely grateful for security researchers and users that report vulnerabilities to +the Kubernetes Open Source Community. All reports are thoroughly investigated by a set of community volunteers. -To make a report, submit your vulnerability to the [Kubernetes bug bounty program](https://hackerone.com/kubernetes). This allows triage and handling of the vulnerability with standardized response times. +To make a report, submit your vulnerability to the [Kubernetes bug bounty program](https://hackerone.com/kubernetes). +This allows triage and handling of the vulnerability with standardized response times. -You can also email the private [security@kubernetes.io](mailto:security@kubernetes.io) list with the security details and the details expected for [all Kubernetes bug reports](https://github.com/kubernetes/kubernetes/blob/master/.github/ISSUE_TEMPLATE/bug-report.yaml). +You can also email the private [security@kubernetes.io](mailto:security@kubernetes.io) +list with the security details and the details expected for +[all Kubernetes bug reports](https://github.com/kubernetes/kubernetes/blob/master/.github/ISSUE_TEMPLATE/bug-report.yaml). -You may encrypt your email to this list using the GPG keys of the [Security Response Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). Encryption using GPG is NOT required to make a disclosure. +You may encrypt your email to this list using the GPG keys of the +[Security Response Committee members](https://git.k8s.io/security/README.md#product-security-committee-psc). +Encryption using GPG is NOT required to make a disclosure. ### When Should I Report a Vulnerability? 
@@ -36,7 +42,6 @@ You may encrypt your email to this list using the GPG keys of the [Security Resp - You think you discovered a vulnerability in another project that Kubernetes depends on - For projects with their own vulnerability reporting and disclosure process, please report it directly there - ### When Should I NOT Report a Vulnerability? - You need help tuning Kubernetes components for security @@ -45,13 +50,19 @@ You may encrypt your email to this list using the GPG keys of the [Security Resp ## Security Vulnerability Response -Each report is acknowledged and analyzed by Security Response Committee members within 3 working days. This will set off the [Security Release Process](https://git.k8s.io/security/security-release-process.md#disclosures). +Each report is acknowledged and analyzed by Security Response Committee members within 3 working days. +This will set off the [Security Release Process](https://git.k8s.io/security/security-release-process.md#disclosures). -Any vulnerability information shared with Security Response Committee stays within Kubernetes project and will not be disseminated to other projects unless it is necessary to get the issue fixed. +Any vulnerability information shared with the Security Response Committee stays within the Kubernetes project +and will not be disseminated to other projects unless it is necessary to get the issue fixed. As the security issue moves from triage, to identified fix, to release planning we will keep the reporter updated. ## Public Disclosure Timing -A public disclosure date is negotiated by the Kubernetes Security Response Committee and the bug submitter. We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) to a few weeks. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date to be on the order of 7 days. The Kubernetes Security Response Committee holds the final say when setting a disclosure date. - +A public disclosure date is negotiated by the Kubernetes Security Response Committee and the bug submitter. +We prefer to fully disclose the bug as soon as possible once a user mitigation is available. It is reasonable +to delay disclosure when the bug or the fix is not yet fully understood, the solution is not well-tested, +or for vendor coordination. The timeframe for disclosure is from immediate (especially if it's already publicly known) +to a few weeks. For a vulnerability with a straightforward mitigation, we expect report date to disclosure date +to be on the order of 7 days. The Kubernetes Security Response Committee holds the final say when setting a disclosure date. diff --git a/content/en/docs/reference/kubectl/_index.md b/content/en/docs/reference/kubectl/_index.md index aefaae1fcc1cf..f04d5edcf9d98 100644 --- a/content/en/docs/reference/kubectl/_index.md +++ b/content/en/docs/reference/kubectl/_index.md @@ -25,7 +25,8 @@ For details about each command, including all the supported flags and subcommand For installation instructions, see [Installing kubectl](/docs/tasks/tools/#kubectl); for a quick guide, see the [cheat sheet](/docs/reference/kubectl/cheatsheet/).
-If you're used to using the `docker` command-line tool, [`kubectl` for Docker Users](/docs/reference/kubectl/docker-cli-to-kubectl/) explains some equivalent commands for Kubernetes. +If you're used to using the `docker` command-line tool, +[`kubectl` for Docker Users](/docs/reference/kubectl/docker-cli-to-kubectl/) explains some equivalent commands for Kubernetes. @@ -39,37 +40,41 @@ kubectl [command] [TYPE] [NAME] [flags] where `command`, `TYPE`, `NAME`, and `flags` are: -* `command`: Specifies the operation that you want to perform on one or more resources, -for example `create`, `get`, `describe`, `delete`. +* `command`: Specifies the operation that you want to perform on one or more resources, + for example `create`, `get`, `describe`, `delete`. * `TYPE`: Specifies the [resource type](#resource-types). Resource types are case-insensitive and you can specify the singular, plural, or abbreviated forms. For example, the following commands produce the same output: - ```shell - kubectl get pod pod1 - kubectl get pods pod1 - kubectl get po pod1 - ``` + ```shell + kubectl get pod pod1 + kubectl get pods pod1 + kubectl get po pod1 + ``` -* `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, details for all resources are displayed, for example `kubectl get pods`. +* `NAME`: Specifies the name of the resource. Names are case-sensitive. If the name is omitted, + details for all resources are displayed, for example `kubectl get pods`. - When performing an operation on multiple resources, you can specify each resource by type and name or specify one or more files: + When performing an operation on multiple resources, you can specify each resource by + type and name or specify one or more files: - * To specify resources by type and name: + * To specify resources by type and name: - * To group resources if they are all the same type: `TYPE1 name1 name2 name<#>`.
    + * To group resources if they are all the same type: `TYPE1 name1 name2 name<#>`.
    Example: `kubectl get pod example-pod1 example-pod2` - * To specify multiple resource types individually: `TYPE1/name1 TYPE1/name2 TYPE2/name3 TYPE<#>/name<#>`.
    + * To specify multiple resource types individually: `TYPE1/name1 TYPE1/name2 TYPE2/name3 TYPE<#>/name<#>`.
    Example: `kubectl get pod/example-pod1 replicationcontroller/example-rc1` - * To specify resources with one or more files: `-f file1 -f file2 -f file<#>` + * To specify resources with one or more files: `-f file1 -f file2 -f file<#>` - * [Use YAML rather than JSON](/docs/concepts/configuration/overview/#general-configuration-tips) since YAML tends to be more user-friendly, especially for configuration files.
    - Example: `kubectl get -f ./pod.yaml` + * [Use YAML rather than JSON](/docs/concepts/configuration/overview/#general-configuration-tips) + since YAML tends to be more user-friendly, especially for configuration files.
    + Example: `kubectl get -f ./pod.yaml` -* `flags`: Specifies optional flags. For example, you can use the `-s` or `--server` flags to specify the address and port of the Kubernetes API server.
    +* `flags`: Specifies optional flags. For example, you can use the `-s` or `--server` flags + to specify the address and port of the Kubernetes API server.
    {{< caution >}} Flags that you specify from the command line override default values and any corresponding environment variables. @@ -79,19 +84,29 @@ If you need help, run `kubectl help` from the terminal window. ## In-cluster authentication and namespace overrides -By default `kubectl` will first determine if it is running within a pod, and thus in a cluster. It starts by checking for the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables and the existence of a service account token file at `/var/run/secrets/kubernetes.io/serviceaccount/token`. If all three are found in-cluster authentication is assumed. +By default `kubectl` will first determine if it is running within a pod, and thus in a cluster. +It starts by checking for the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment +variables and the existence of a service account token file at `/var/run/secrets/kubernetes.io/serviceaccount/token`. +If all three are found in-cluster authentication is assumed. -To maintain backwards compatibility, if the `POD_NAMESPACE` environment variable is set during in-cluster authentication it will override the default namespace from the service account token. Any manifests or tools relying on namespace defaulting will be affected by this. +To maintain backwards compatibility, if the `POD_NAMESPACE` environment variable is set +during in-cluster authentication it will override the default namespace from the +service account token. Any manifests or tools relying on namespace defaulting will be affected by this. **`POD_NAMESPACE` environment variable** -If the `POD_NAMESPACE` environment variable is set, cli operations on namespaced resources will default to the variable value. For example, if the variable is set to `seattle`, `kubectl get pods` would return pods in the `seattle` namespace. This is because pods are a namespaced resource, and no namespace was provided in the command. Review the output of `kubectl api-resources` to determine if a resource is namespaced. +If the `POD_NAMESPACE` environment variable is set, cli operations on namespaced resources +will default to the variable value. For example, if the variable is set to `seattle`, +`kubectl get pods` would return pods in the `seattle` namespace. This is because pods are +a namespaced resource, and no namespace was provided in the command. Review the output +of `kubectl api-resources` to determine if a resource is namespaced. -Explicit use of `--namespace ` overrides this behavior. +Explicit use of `--namespace ` overrides this behavior. **How kubectl handles ServiceAccount tokens** If: + * there is Kubernetes service account token file mounted at `/var/run/secrets/kubernetes.io/serviceaccount/token`, and * the `KUBERNETES_SERVICE_HOST` environment variable is set, and @@ -156,7 +171,7 @@ Operation | Syntax | Description `scale` | kubectl scale (-f FILENAME | TYPE NAME | TYPE/NAME) --replicas=COUNT [--resource-version=version] [--current-replicas=count] [flags] | Update the size of the specified replication controller. `set` | `kubectl set SUBCOMMAND [options]` | Configure application resources. `taint` | `kubectl taint NODE NAME KEY_1=VAL_1:TAINT_EFFECT_1 ... KEY_N=VAL_N:TAINT_EFFECT_N [options]` | Update the taints on one or more nodes. -`top` | `kubectl top (POD | NODE) [flags] [options]` | Display Resource (CPU/Memory/Storage) usage of pod or node. +`top` | kubectl top (POD | NODE) [flags] [options] | Display Resource (CPU/Memory/Storage) usage of pod or node. 
`uncordon` | `kubectl uncordon NODE [options]` | Mark node as schedulable. `version` | `kubectl version [--client] [flags]` | Display the Kubernetes version running on the client and server. `wait` | kubectl wait ([-f FILENAME] | resource.group/resource.name | resource.group [(-l label | --all)]) [--for=delete|--for condition=available] [options] | Experimental: Wait for a specific condition on one or many resources. @@ -230,11 +245,15 @@ The following table includes a list of all the supported resource types and thei ## Output options -Use the following sections for information about how you can format or sort the output of certain commands. For details about which commands support the various output options, see the [kubectl](/docs/reference/kubectl/kubectl/) reference documentation. +Use the following sections for information about how you can format or sort the output +of certain commands. For details about which commands support the various output options, +see the [kubectl](/docs/reference/kubectl/kubectl/) reference documentation. ### Formatting output -The default output format for all `kubectl` commands is the human readable plain-text format. To output details to your terminal window in a specific format, you can add either the `-o` or `--output` flags to a supported `kubectl` command. +The default output format for all `kubectl` commands is the human readable plain-text format. +To output details to your terminal window in a specific format, you can add either the `-o` +or `--output` flags to a supported `kubectl` command. #### Syntax @@ -324,7 +343,9 @@ pod-name 1m ### Sorting list objects -To output objects to a sorted list in your terminal window, you can add the `--sort-by` flag to a supported `kubectl` command. Sort your objects by specifying any numeric or string field with the `--sort-by` flag. To specify a field, use a [jsonpath](/docs/reference/kubectl/jsonpath/) expression. +To output objects to a sorted list in your terminal window, you can add the `--sort-by` flag +to a supported `kubectl` command. Sort your objects by specifying any numeric or string field +with the `--sort-by` flag. To specify a field, use a [jsonpath](/docs/reference/kubectl/jsonpath/) expression. 
#### Syntax @@ -508,10 +529,12 @@ The following kubectl-compatible plugins are available: `kubectl plugin list` also warns you about plugins that are not executable, or that are shadowed by other plugins; for example: + ```shell sudo chmod -x /usr/local/bin/kubectl-foo # remove execute permission kubectl plugin list ``` + ``` The following kubectl-compatible plugins are available: @@ -529,8 +552,10 @@ of the existing kubectl commands: ```shell cat ./kubectl-whoami ``` + The next few examples assume that you already made `kubectl-whoami` have the following contents: + ```shell #!/bin/bash diff --git a/content/en/docs/reference/kubectl/jsonpath.md b/content/en/docs/reference/kubectl/jsonpath.md index 230ded2640523..9b9b270f71f64 100644 --- a/content/en/docs/reference/kubectl/jsonpath.md +++ b/content/en/docs/reference/kubectl/jsonpath.md @@ -34,7 +34,12 @@ Given the JSON input: "items":[ { "kind":"None", - "metadata":{"name":"127.0.0.1"}, + "metadata":{ + "name":"127.0.0.1", + "labels":{ + "kubernetes.io/hostname":"127.0.0.1" + } + }, "status":{ "capacity":{"cpu":"4"}, "addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}] @@ -65,18 +70,19 @@ Given the JSON input: } ``` -Function | Description | Example | Result ---------------------|---------------------------|-----------------------------------------------------------------|------------------ -`text` | the plain text | `kind is {.kind}` | `kind is List` -`@` | the current object | `{@}` | the same as input -`.` or `[]` | child operator | `{.kind}`, `{['kind']}` or `{['name\.type']}` | `List` -`..` | recursive descent | `{..name}` | `127.0.0.1 127.0.0.2 myself e2e` -`*` | wildcard. Get all objects | `{.items[*].metadata.name}` | `[127.0.0.1 127.0.0.2]` -`[start:end:step]` | subscript operator | `{.users[0].name}` | `myself` -`[,]` | union operator | `{.items[*]['metadata.name', 'status.capacity']}` | `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]` -`?()` | filter | `{.users[?(@.name=="e2e")].user.password}` | `secret` -`range`, `end` | iterate list | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]` -`''` | quote interpreted string | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2` +Function | Description | Example | Result +--------------------|------------------------------|-----------------------------------------------------------------|------------------ +`text` | the plain text | `kind is {.kind}` | `kind is List` +`@` | the current object | `{@}` | the same as input +`.` or `[]` | child operator | `{.kind}`, `{['kind']}` or `{['name\.type']}` | `List` +`..` | recursive descent | `{..name}` | `127.0.0.1 127.0.0.2 myself e2e` +`*` | wildcard. 
Get all objects | `{.items[*].metadata.name}` | `[127.0.0.1 127.0.0.2]` +`[start:end:step]` | subscript operator | `{.users[0].name}` | `myself` +`[,]` | union operator | `{.items[*]['metadata.name', 'status.capacity']}` | `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]` +`?()` | filter | `{.users[?(@.name=="e2e")].user.password}` | `secret` +`range`, `end` | iterate list | `{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}` | `[127.0.0.1, map[cpu:4]] [127.0.0.2, map[cpu:8]]` +`''` | quote interpreted string | `{range .items[*]}{.metadata.name}{'\t'}{end}` | `127.0.0.1 127.0.0.2` +`\` | escape termination character | `{.items[0].metadata.labels.kubernetes\.io/hostname}` | `127.0.0.1` Examples using `kubectl` and JSONPath expressions: @@ -87,6 +93,7 @@ kubectl get pods -o=jsonpath='{.items[0]}' kubectl get pods -o=jsonpath='{.items[0].metadata.name}' kubectl get pods -o=jsonpath="{.items[*]['metadata.name', 'status.capacity']}" kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.startTime}{"\n"}{end}' +kubectl get pods -o=jsonpath='{.items[0].metadata.labels.kubernetes\.io/hostname}' ``` {{< note >}} diff --git a/content/en/docs/reference/kubectl/kubectl.md b/content/en/docs/reference/kubectl/kubectl.md index 8d6e8aae0c3c0..80377dce33e70 100644 --- a/content/en/docs/reference/kubectl/kubectl.md +++ b/content/en/docs/reference/kubectl/kubectl.md @@ -370,10 +370,10 @@ kubectl [flags] -KUBECTL_INTERACTIVE_DELETE +KUBECTL_REMOTE_COMMAND_WEBSOCKETS -When set to true, the --interactive flag in the kubectl delete command will be activated, allowing users to preview and confirm resources before proceeding to delete by passing this flag. +When set to true, the kubectl exec, cp, and attach commands will attempt to stream using the websockets protocol. If the upgrade to websockets fails, the commands will fallback to use the current SPDY protocol. 
diff --git a/content/en/docs/reference/kubectl/cheatsheet.md b/content/en/docs/reference/kubectl/quick-reference.md similarity index 98% rename from content/en/docs/reference/kubectl/cheatsheet.md rename to content/en/docs/reference/kubectl/quick-reference.md index b933c6eaeecd8..d0140aebf2aa0 100644 --- a/content/en/docs/reference/kubectl/cheatsheet.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -1,5 +1,5 @@ --- -title: kubectl Cheat Sheet +title: kubectl Quick Reference reviewers: - erictune - krousey @@ -213,7 +213,7 @@ kubectl get pods --field-selector=status.phase=Running kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}' # List Names of Pods that belong to Particular RC -# "jq" command useful for transformations that are too complex for jsonpath, it can be found at https://stedolan.github.io/jq/ +# "jq" command useful for transformations that are too complex for jsonpath, it can be found at https://jqlang.github.io/jq/ sel=${$(kubectl get rc my-rc --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')%?} echo $(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name}) @@ -224,6 +224,9 @@ kubectl get pods --show-labels JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}' \ && kubectl get nodes -o jsonpath="$JSONPATH" | grep "Ready=True" +# Check which nodes are ready with custom-columns +kubectl get node -o custom-columns='NODE_NAME:.metadata.name,STATUS:.status.conditions[?(@.type=="Ready")].status' + # Output decoded secrets without external tools kubectl get secret my-secret -o go-template='{{range $k,$v := .data}}{{"### "}}{{$k}}{{"\n"}}{{$v|base64decode}}{{"\n\n"}}{{end}}' @@ -347,7 +350,7 @@ kubectl logs my-pod # dump pod logs (stdout) kubectl logs -l name=myLabel # dump pod logs, with label name=myLabel (stdout) kubectl logs my-pod --previous # dump pod logs (stdout) for a previous instantiation of a container kubectl logs my-pod -c my-container # dump pod container logs (stdout, multi-container case) -kubectl logs -l name=myLabel -c my-container # dump pod logs, with label name=myLabel (stdout) +kubectl logs -l name=myLabel -c my-container # dump pod container logs, with label name=myLabel (stdout) kubectl logs my-pod -c my-container --previous # dump pod container logs (stdout, multi-container case) for a previous instantiation of a container kubectl logs -f my-pod # stream pod logs (stdout) kubectl logs -f my-pod -c my-container # stream pod container logs (stdout, multi-container case) diff --git a/content/en/docs/reference/labels-annotations-taints/_index.md b/content/en/docs/reference/labels-annotations-taints/_index.md index bcdb3b4f0393d..04cb002f47bfa 100644 --- a/content/en/docs/reference/labels-annotations-taints/_index.md +++ b/content/en/docs/reference/labels-annotations-taints/_index.md @@ -28,7 +28,7 @@ Type: Annotation Example: `apf.kubernetes.io/autoupdate-spec: "true"` -Used on: [`FlowSchema` and `PriorityLevelConfiguration` Objects](/concepts/cluster-administration/flow-control/#defaults) +Used on: [`FlowSchema` and `PriorityLevelConfiguration` Objects](/docs/concepts/cluster-administration/flow-control/#defaults) If this annotation is set to true on a FlowSchema or PriorityLevelConfiguration, the `spec` for that object is managed by the kube-apiserver. 
If the API server does not recognize an APF object, and you annotate it @@ -299,6 +299,23 @@ This annotation is part of the Kubernetes Resource Model (KRM) Functions Specifi which is used by Kustomize and similar third-party tools. For example, Kustomize removes objects with this annotation from its final build output. + +### container.apparmor.security.beta.kubernetes.io/* (beta) {#container-apparmor-security-beta-kubernetes-io} + +Type: Annotation + +Example: `container.apparmor.security.beta.kubernetes.io/my-container: my-custom-profile` + +Used on: Pods + +This annotation allows you to specify the AppArmor security profile for a container within a +Kubernetes pod. +To learn more, see the [AppArmor](/docs/tutorials/security/apparmor/) tutorial. +The tutorial illustrates using AppArmor to restrict a container's abilities and access. + +The profile specified dictates the set of rules and restrictions that the containerized process must +adhere to. This helps enforce security policies and isolation for your containers. + ### internal.config.kubernetes.io/* (reserved prefix) {#internal.config.kubernetes.io-reserved-wildcard} Type: Annotation @@ -417,6 +434,59 @@ The annotation `kubernetes.io/limit-ranger` records that resource defaults were and they were applied successfully. For more details, read about [LimitRanges](/docs/concepts/policy/limit-range). +### kubernetes.io/config.hash + +Type: Annotation + +Example: `kubernetes.io/config.hash: "df7cc47f8477b6b1226d7d23a904867b"` + +Used on: Pod + +When the kubelet creates a static Pod based on a given manifest, it attaches this annotation +to the static Pod. The value of the annotation is the UID of the Pod. +Note that the kubelet also sets the `.spec.nodeName` to the current node name as if the Pod +was scheduled to the node. + +### kubernetes.io/config.mirror + +Type: Annotation + +Example: `kubernetes.io/config.mirror: "df7cc47f8477b6b1226d7d23a904867b"` + +Used on: Pod + +For a static Pod created by the kubelet on a node, a {{< glossary_tooltip text="mirror Pod" term_id="mirror-pod" >}} +is created on the API server. The kubelet adds this annotation to indicate that the Pod is +actually a mirror Pod. The annotation value is copied from the [`kubernetes.io/config.hash`](#kubernetes-io-config-hash) +annotation, which is the UID of the Pod. + +When updating a Pod with this annotation set, the annotation cannot be changed or removed. +If a Pod doesn't have this annotation, it cannot be added during a Pod update. + +### kubernetes.io/config.source + +Type: Annotation + +Example: `kubernetes.io/config.source: "file"` + +Used on: Pod + +This annotation is added by the kubelet to indicate where the Pod comes from. +For static Pods, the annotation value could be one of `file` or `http` depending +on where the Pod manifest is located. For a Pod created on the API server and then +scheduled to the current node, the annotation value is `api`. + +### kubernetes.io/config.seen + +Type: Annotation + +Example: `kubernetes.io/config.seen: "2023-10-27T04:04:56.011314488Z"` + +Used on: Pod + +When the kubelet sees a Pod for the first time, it may add this annotation to +the Pod with a value of the current timestamp in RFC3339 format. + ### addonmanager.kubernetes.io/mode Type: Label @@ -531,8 +601,23 @@ Example: `kubernetes.io/enforce-mountable-secrets: "true"` Used on: ServiceAccount The value for this annotation must be **true** to take effect.
-This annotation indicates that Pods running as this ServiceAccount may only reference -Secret API objects specified in the ServiceAccount's `secrets` field. +When you set this annotation to "true", Kubernetes enforces the following rules for +Pods running as this ServiceAccount: + +1. Secrets mounted as volumes must be listed in the ServiceAccount's `secrets` field. +1. Secrets referenced in `envFrom` for containers (including sidecar containers and init containers) + must also be listed in the ServiceAccount's `secrets` field. + If any container in a Pod references a Secret not listed in the ServiceAccount's `secrets` field + (and even if the reference is marked as `optional`), then the Pod will fail to start, + and an error indicating the non-compliant secret reference will be generated. +1. Secrets referenced in a Pod's `imagePullSecrets` must be present in the + ServiceAccount's `imagePullSecrets` field; otherwise, the Pod will fail to start, + and an error indicating the non-compliant image pull secret reference will be generated. + +When you create or update a Pod, these rules are checked. If a Pod doesn't follow them, it won't start and you'll see an error message. +If a Pod is already running and you change the `kubernetes.io/enforce-mountable-secrets` annotation +to true, or you edit the associated ServiceAccount to remove the reference to a Secret +that the Pod is already using, the Pod continues to run. ### node.kubernetes.io/exclude-from-external-load-balancers @@ -940,6 +1025,22 @@ works in that release. There are no other valid values for this annotation. If you don't want topology aware hints for a Service, don't add this annotation. +### service.kubernetes.io/topology-mode + +Type: Annotation + +Example: `service.kubernetes.io/topology-mode: Auto` + +Used on: Service + +This annotation provides a way to define how Services handle network topology; +for example, you can configure a Service so that Kubernetes prefers keeping traffic between +a client and server within a single topology zone. +In some cases this can help reduce costs or improve network performance. + +See [Topology Aware Routing](/docs/concepts/services-networking/topology-aware-routing/) +for more details. + ### kubernetes.io/service-name {#kubernetesioservice-name} Type: Label @@ -995,6 +1096,23 @@ last saw a request where the client authenticated using the service account toke If a legacy token was last used before the cluster gained the feature (added in Kubernetes v1.26), then the label isn't set. +### kubernetes.io/legacy-token-invalid-since + +Type: Label + +Example: `kubernetes.io/legacy-token-invalid-since: 2023-10-27` + +Used on: Secret + +The control plane automatically adds this label to auto-generated Secrets that +have the type `kubernetes.io/service-account-token`, provided that you have the +`LegacyServiceAccountTokenCleanUp` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +enabled. Kubernetes {{< skew currentVersion >}} enables that behavior by default. +This label marks the Secret-based token as invalid for authentication. The value +of this label records the date (ISO 8601 format, UTC time zone) when the control +plane detects that the auto-generated Secret has not been used for a specified +duration (defaults to one year).
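To illustrate the `kubernetes.io/legacy-token-invalid-since` label described above, here is a sketch of how a cluster administrator might list the Secret-based tokens already marked invalid; it uses only built-in selectors:

```shell
# A sketch: list auto-generated ServiceAccount token Secrets that the control plane
# has marked invalid for authentication (matches on label presence).
kubectl get secrets --all-namespaces \
  --field-selector type=kubernetes.io/service-account-token \
  -l kubernetes.io/legacy-token-invalid-since
```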
+ ### endpointslice.kubernetes.io/managed-by {#endpointslicekubernetesiomanaged-by} Type: Label @@ -1083,7 +1201,7 @@ Example: `alpha.kubernetes.io/provided-node-ip: "10.0.0.1"` Used on: Node -The kubelet can set this annotation on a Node to denote its configured IPv4 address. +The kubelet can set this annotation on a Node to denote its configured IPv4 and/or IPv6 address. When kubelet is started with the `--cloud-provider` flag set to any value (includes both external and legacy in-tree cloud providers), it sets this annotation on the Node to denote an IP address @@ -1176,6 +1294,27 @@ has been truncated to 1000. If the number of backend endpoints falls below 1000, the control plane removes this annotation. +### control-plane.alpha.kubernetes.io/leader (deprecated) {#control-plane-alpha-kubernetes-io-leader} + +Type: Annotation + +Example: `control-plane.alpha.kubernetes.io/leader={"holderIdentity":"controller-0","leaseDurationSeconds":15,"acquireTime":"2023-01-19T13:12:57Z","renewTime":"2023-01-19T13:13:54Z","leaderTransitions":1}` + +Used on: Endpoints + +The {{< glossary_tooltip text="control plane" term_id="control-plane" >}} previously set this annotation on +an [Endpoints](/docs/concepts/services-networking/service/#endpoints) object. This annotation provided +the following details: + +- Who is the current leader. +- The time when the current leadership was acquired. +- The duration of the lease (of the leadership) in seconds. +- The time the current lease (the current leadership) should be renewed. +- The number of leadership transitions that happened in the past. + +Kubernetes now uses [Leases](/docs/concepts/architecture/leases/) to +manage leader assignment for the Kubernetes control plane. + ### batch.kubernetes.io/job-tracking (deprecated) {#batch-kubernetes-io-job-tracking} Type: Annotation @@ -1466,10 +1605,23 @@ This annotation records a comma-separated list of managed by [Node Feature Discovery](https://kubernetes-sigs.github.io/node-feature-discovery/) (NFD). NFD uses this for an internal mechanism. You should not edit this annotation yourself. +### nfd.node.kubernetes.io/node-name + +Type: Label + +Example: `nfd.node.kubernetes.io/node-name: node-1` + +Used on: Nodes + +This label specifies which node the NodeFeature object is targeting. +Creators of NodeFeature objects must set this label, and +consumers of the objects are expected to use the label to +filter features designated for a certain node. + {{< note >}} -These annotations only applies to nodes where NFD is running. -To learn more about NFD and its components go to its official -[documentation](https://kubernetes-sigs.github.io/node-feature-discovery/stable/get-started/). +These Node Feature Discovery (NFD) labels or annotations only apply to +the nodes where NFD is running. To learn more about NFD and +its components go to its official [documentation](https://kubernetes-sigs.github.io/node-feature-discovery/stable/get-started/). {{< /note >}} ### service.beta.kubernetes.io/aws-load-balancer-access-log-emit-interval (beta) {#service-beta-kubernetes-io-aws-load-balancer-access-log-emit-interval} @@ -1790,6 +1942,26 @@ uses this annotation. See [annotations](https://kubernetes-sigs.github.io/aws-load-balancer-controller/latest/guide/service/annotations/) in the AWS load balancer controller documentation.
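As a sketch of how the `nfd.node.kubernetes.io/node-name` label described above can be used for filtering, assuming Node Feature Discovery is installed (NodeFeature is an NFD CRD, not a built-in Kubernetes API) and a hypothetical node named `node-1`:

```shell
# List NodeFeature objects targeting one node via the NFD node-name label.
kubectl get nodefeatures --all-namespaces \
  -l nfd.node.kubernetes.io/node-name=node-1
```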
+### service.beta.kubernetes.io/aws-load-balancer-security-groups (deprecated) {#service-beta-kubernetes-io-aws-load-balancer-security-groups} + +Example: `service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-53fae93f,sg-8725gr62r"` + +Used on: Service + +The AWS load balancer controller uses this annotation to specify a comma-separated list +of security groups you want to attach to an AWS load balancer. Both names and IDs of security groups +are supported, where a name matches a `Name` tag, not the `groupName` attribute. + +When this annotation is added to a Service, the load-balancer controller attaches the security groups +referenced by the annotation to the load balancer. If you omit this annotation, the AWS load balancer +controller automatically creates a new security group and attaches it to the load balancer. + +{{< note >}} +Kubernetes v1.27 and later do not directly set or read this annotation. However, the AWS +load balancer controller (part of the Kubernetes project) does still use the +`service.beta.kubernetes.io/aws-load-balancer-security-groups` annotation. +{{< /note >}} + ### service.beta.kubernetes.io/load-balancer-source-ranges (deprecated) {#service-beta-kubernetes-io-load-balancer-source-ranges} Example: `service.beta.kubernetes.io/load-balancer-source-ranges: "192.0.2.0/25"` diff --git a/content/en/docs/reference/networking/virtual-ips.md b/content/en/docs/reference/networking/virtual-ips.md index 1595834ee5cc3..862458009f81d 100644 --- a/content/en/docs/reference/networking/virtual-ips.md +++ b/content/en/docs/reference/networking/virtual-ips.md @@ -14,6 +14,18 @@ The `kube-proxy` component is responsible for implementing a _virtual IP_ mechanism for {{< glossary_tooltip term_id="service" text="Services">}} of `type` other than [`ExternalName`](/docs/concepts/services-networking/service/#externalname). +Each instance of kube-proxy watches the Kubernetes {{< glossary_tooltip +term_id="control-plane" text="control plane" >}} for the addition and +removal of Service and EndpointSlice {{< glossary_tooltip +term_id="object" text="objects" >}}. For each Service, kube-proxy +calls appropriate APIs (depending on the kube-proxy mode) to configure +the node to capture traffic to the Service's `clusterIP` and `port`, +and redirect that traffic to one of the Service's endpoints +(usually a Pod, but possibly an arbitrary user-provided IP address). A control +loop ensures that the rules on each node are reliably synchronized with +the Service and EndpointSlice state as indicated by the API server. + +{{< figure src="/images/docs/services-iptables-overview.svg" title="Virtual IP mechanism for Services, using iptables mode" class="diagram-medium" >}} A question that pops up every now and then is why Kubernetes relies on proxying to forward inbound traffic to backends. What about other @@ -57,11 +69,14 @@ The kube-proxy starts up in different modes, which are determined by its configu On Linux nodes, the available modes for kube-proxy are: [`iptables`](#proxy-mode-iptables) -: A mode where the kube-proxy configures packet forwarding rules using iptables, on Linux. +: A mode where the kube-proxy configures packet forwarding rules using iptables. [`ipvs`](#proxy-mode-ipvs) : a mode where the kube-proxy configures packet forwarding rules using ipvs. +[`nftables`](#proxy-mode-nftables) +: a mode where the kube-proxy configures packet forwarding rules using nftables.
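To make the mode selection above concrete, here is a minimal sketch of a kube-proxy configuration file that selects the alpha nftables mode; it assumes Kubernetes v1.29, and the feature gate name `NFTablesProxyMode` is an assumption based on that release:

```shell
# Write a minimal KubeProxyConfiguration selecting the alpha nftables mode (a sketch).
cat <<'EOF' > kube-proxy-config.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "nftables"
featureGates:
  NFTablesProxyMode: true
EOF
```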
+ There is only one mode available for kube-proxy on Windows: [`kernelspace`](#proxy-mode-kernelspace) @@ -71,32 +86,10 @@ There is only one mode available for kube-proxy on Windows: _This proxy mode is only available on Linux nodes._ -In this mode, kube-proxy watches the Kubernetes -{{< glossary_tooltip term_id="control-plane" text="control plane" >}} for the addition and -removal of Service and EndpointSlice {{< glossary_tooltip term_id="object" text="objects." >}} -For each Service, it installs -iptables rules, which capture traffic to the Service's `clusterIP` and `port`, -and redirect that traffic to one of the Service's -backend sets. For each endpoint, it installs iptables rules which -select a backend Pod. - -By default, kube-proxy in iptables mode chooses a backend at random. - -Using iptables to handle traffic has a lower system overhead, because traffic -is handled by Linux netfilter without the need to switch between userspace and the -kernel space. This approach is also likely to be more reliable. - -If kube-proxy is running in iptables mode and the first Pod that's selected -does not respond, the connection fails. This is different from the old `userspace` -mode: in that scenario, kube-proxy would detect that the connection to the first -Pod had failed and would automatically retry with a different backend Pod. - -You can use Pod [readiness probes](/docs/concepts/workloads/pods/pod-lifecycle/#container-probes) -to verify that backend Pods are working OK, so that kube-proxy in iptables mode -only sees backends that test out as healthy. Doing this means you avoid -having traffic sent via kube-proxy to a Pod that's known to have failed. - -{{< figure src="/images/docs/services-iptables-overview.svg" title="Virtual IP mechanism for Services, using iptables mode" class="diagram-medium" >}} +In this mode, kube-proxy configures packet forwarding rules using the +iptables API of the kernel netfilter subsystem. For each endpoint, it +installs iptables rules which, by default, select a backend Pod at +random. #### Example {#packet-processing-iptables} @@ -122,8 +115,10 @@ through a load-balancer, though in those cases the client IP address does get al #### Optimizing iptables mode performance -In large clusters (with tens of thousands of Pods and Services), the -iptables mode of kube-proxy may take a long time to update the rules +In iptables mode, kube-proxy creates a few iptables rules for every +Service, and a few iptables rules for each endpoint IP address. In +clusters with tens of thousands of Pods and Services, this means tens +of thousands of iptables rules, and kube-proxy may take a long time to update the rules in the kernel when Services (or their EndpointSlices) change. You can adjust the syncing behavior of kube-proxy via options in the [`iptables` section](/docs/reference/config-api/kube-proxy-config.v1alpha1/#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration) of the @@ -204,18 +199,15 @@ and is likely to hurt functionality more than it improves performance. _This proxy mode is only available on Linux nodes._ -In `ipvs` mode, kube-proxy watches Kubernetes Services and EndpointSlices, -calls `netlink` interface to create IPVS rules accordingly and synchronizes -IPVS rules with Kubernetes Services and EndpointSlices periodically. -This control loop ensures that IPVS status matches the desired state. -When accessing a Service, IPVS directs traffic to one of the backend Pods. 
+In `ipvs` mode, kube-proxy uses the kernel IPVS and iptables APIs to +create rules to redirect traffic from Service IPs to endpoint IPs. The IPVS proxy mode is based on netfilter hook function that is similar to iptables mode, but uses a hash table as the underlying data structure and works in the kernel space. That means kube-proxy in IPVS mode redirects traffic with lower latency than kube-proxy in iptables mode, with much better performance when synchronizing -proxy rules. Compared to the other proxy modes, IPVS mode also supports a +proxy rules. Compared to the iptables proxy mode, IPVS mode also supports a higher throughput of network traffic. IPVS provides more options for balancing traffic to backend Pods; @@ -263,11 +255,28 @@ the node before starting kube-proxy. When kube-proxy starts in IPVS proxy mode, it verifies whether IPVS kernel modules are available. If the IPVS kernel modules are not detected, then kube-proxy -falls back to running in iptables proxy mode. +exits with an error. {{< /note >}} {{< figure src="/images/docs/services-ipvs-overview.svg" title="Virtual IP address mechanism for Services, using IPVS mode" class="diagram-medium" >}} +### `nftables` proxy mode {#proxy-mode-nftables} + +{{< feature-state for_k8s_version="v1.29" state="alpha" >}} + +_This proxy mode is only available on Linux nodes._ + +In this mode, kube-proxy configures packet forwarding rules using the +nftables API of the kernel netfilter subsystem. For each endpoint, it +installs nftables rules which, by default, select a backend Pod at +random. + +The nftables API is the successor to the iptables API, and although it +is designed to provide better performance and scalability than +iptables, the kube-proxy nftables mode is still under heavy +development as of {{< skew currentVersion >}} and is not necessarily +expected to outperform the other Linux modes at this time. + ### `kernelspace` proxy mode {#proxy-mode-kernelspace} _This proxy mode is only available on Windows nodes._ @@ -344,9 +353,9 @@ ensure that no two Services can collide. Kubernetes does that by allocating each Service its own IP address from within the `service-cluster-ip-range` CIDR range that is configured for the {{< glossary_tooltip term_id="kube-apiserver" text="API Server" >}}. -#### IP address allocation tracking +### IP address allocation tracking -To ensure each Service receives a unique IP, an internal allocator atomically +To ensure each Service receives a unique IP address, an internal allocator atomically updates a global allocation map in {{< glossary_tooltip term_id="etcd" >}} prior to creating each Service. The map object must exist in the registry for Services to get IP address assignments, otherwise creations will @@ -355,28 +364,37 @@ fail with a message indicating an IP address could not be allocated. In the control plane, a background controller is responsible for creating that map (needed to support migrating from older versions of Kubernetes that used in-memory locking). Kubernetes also uses controllers to check for invalid -assignments (e.g. due to administrator intervention) and for cleaning up allocated +assignments (for example: due to administrator intervention) and for cleaning up allocated IP addresses that are no longer used by any Services. 
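As a small aside to the allocation discussion above, you can check which `service-cluster-ip-range` the API server was started with; this sketch assumes a kubeadm-provisioned control plane node, where kube-apiserver runs as a static Pod at the kubeadm default manifest path:

```shell
# Inspect the configured Service cluster IP range (other installers may place
# the kube-apiserver manifest elsewhere).
grep -- --service-cluster-ip-range /etc/kubernetes/manifests/kube-apiserver.yaml
```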
+#### IP address allocation tracking using the Kubernetes API {#ip-address-objects} + {{< feature-state for_k8s_version="v1.27" state="alpha" >}} + If you enable the `MultiCIDRServiceAllocator` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and the [`networking.k8s.io/v1alpha1` API group](/docs/tasks/administer-cluster/enable-disable-api/), -the control plane replaces the existing etcd allocator with a new one, using IPAddress -objects instead of an internal global allocation map. The ClusterIP address -associated to each Service will have a referenced IPAddress object. +the control plane replaces the existing etcd allocator with a revised implementation +that uses IPAddress and ServiceCIDR objects instead of an internal global allocation map. +Each cluster IP address associated with a Service then references an IPAddress object. + +Enabling the feature gate also replaces a background controller with an alternative +that handles the IPAddress objects and supports migration from the old allocator model. +Kubernetes {{< skew currentVersion >}} does not support migrating from IPAddress +objects to the internal allocation map. -The background controller is also replaced by a new one to handle the new IPAddress -objects and the migration from the old allocator model. +One of the main benefits of the revised allocator is that it removes the size limitations +for the IP address range that can be used for the cluster IP address of Services. +With `MultiCIDRServiceAllocator` enabled, there are no limitations for IPv4, and for IPv6 +you can use IP address netmasks that are /64 or smaller (as opposed to /108 with the +legacy implementation). -One of the main benefits of the new allocator is that it removes the size limitations +for the `service-cluster-ip-range`, there is no limitations for IPv4 and for IPv6 -users can use masks equal or larger than /64 (previously it was /108). +Making IP address allocations available via the API means that you as a cluster administrator +can allow users to inspect the IP addresses assigned to their Services. +Kubernetes extensions, such as the [Gateway API](/docs/concepts/services-networking/gateway/), +can use the IPAddress API to extend Kubernetes' inherent networking capabilities. -Users now will be able to inspect the IP addresses assigned to their Services, and -Kubernetes extensions such as the [Gateway](https://gateway-api.sigs.k8s.io/) API, can use this new -IPAddress object kind to enhance the Kubernetes networking capabilities, going beyond the limitations of -the built-in Service API. +Here is a brief example of a user querying for IP addresses: ```shell kubectl get services @@ -394,7 +412,45 @@ NAME PARENTREF 2001:db8:1:2::a services/kube-system/kube-dns ``` -#### IP address ranges for Service virtual IP addresses {#service-ip-static-sub-range} +Kubernetes also allows users to dynamically define the available IP ranges for Services using +ServiceCIDR objects.
During bootstrap, a default ServiceCIDR object named `kubernetes` is created +from the value of the `--service-cluster-ip-range` command line argument to kube-apiserver: + +```shell +kubectl get servicecidrs +``` +``` +NAME CIDRS AGE +kubernetes 10.96.0.0/28 17m +``` + +Users can create or delete ServiceCIDR objects to manage the available IP ranges for Services: + +```shell +cat <<'EOF' | kubectl apply -f - +apiVersion: networking.k8s.io/v1alpha1 +kind: ServiceCIDR +metadata: + name: newservicecidr +spec: + cidrs: + - 10.96.0.0/24 +EOF +``` +``` +servicecidr.networking.k8s.io/newservicecidr created +``` + +```shell +kubectl get servicecidrs +``` +``` +NAME CIDRS AGE +kubernetes 10.96.0.0/28 17m +newservicecidr 10.96.0.0/24 7m +``` + +### IP address ranges for Service virtual IP addresses {#service-ip-static-sub-range} {{< feature-state for_k8s_version="v1.26" state="stable" >}} @@ -408,11 +464,6 @@ which means that if you want to assign a specific IP address to a `type: Cluster Service, you should manually assign an IP address from the **lower** band. That approach reduces the risk of a conflict over allocation. -If you disable the `ServiceIPStaticSubrange` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) then Kubernetes -uses a single shared pool for both manually and dynamically assigned IP addresses, -that are used for `type: ClusterIP` Services. - ## Traffic policies You can set the `.spec.internalTrafficPolicy` and `.spec.externalTrafficPolicy` fields diff --git a/content/en/docs/reference/scheduling/config.md b/content/en/docs/reference/scheduling/config.md index 99808dbf8497e..a0a34cb83a1e1 100644 --- a/content/en/docs/reference/scheduling/config.md +++ b/content/en/docs/reference/scheduling/config.md @@ -33,8 +33,7 @@ clientConnection: ``` {{< note >}} -KubeSchedulerConfiguration [v1beta3](/docs/reference/config-api/kube-scheduler-config.v1beta3/) -is deprecated in v1.26 and will be removed in v1.29. +KubeSchedulerConfiguration v1beta3 is deprecated in v1.26 and is removed in v1.29. Please migrate KubeSchedulerConfiguration to [v1](/docs/reference/config-api/kube-scheduler-config.v1/). {{< /note >}} @@ -375,6 +374,7 @@ Besides keeping most of the config in one spot, this sample does a few things: * Reorders `DefaultPlugin2` to run first in `score` (even before the custom plugins) In versions of the config before `v1beta3`, without `multiPoint`, the above snippet would equate to this: + ```yaml apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration @@ -463,6 +463,4 @@ to achieve similar behavior.
* Read the [kube-scheduler reference](/docs/reference/command-line-tools-reference/kube-scheduler/) * Learn about [scheduling](/docs/concepts/scheduling-eviction/kube-scheduler/) -* Read the [kube-scheduler configuration (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) reference -* Read the [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) reference * Read the [kube-scheduler configuration (v1)](/docs/reference/config-api/kube-scheduler-config.v1/) reference diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md index bdcce7cdb0d5c..7d9370d8c0368 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md index db92db3f73189..5ceca77416ef4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md index e0e736136692d..9590089860e81 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
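Returning to the scheduler configuration changes above: with the v1beta2 and v1beta3 reference pages removed, only the GA API remains. A minimal sketch of an equivalent v1 configuration follows; the kubeconfig path is an assumption (the kubeadm default):

```shell
# Write a minimal KubeSchedulerConfiguration using the GA v1 API (a sketch).
cat <<'EOF' > kube-scheduler-config.yaml
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf
EOF
```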
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md index 3e860170baf2d..a96fb3648aafc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md index f43f4d24a73a0..ff8f1f621ccc9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md index daff674e980d5..7630fa9cdd33a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md index 818f4e33784f7..81a937a47ab89 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md index 7c6486706aefb..219cdbd23865d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md index ce306b673d277..14d6ed17d7dc0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md index af7f6709ebfd6..858b5d1845bcb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md index 166f96000fc1a..ae846f8d239b6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md index 7d1716ef257e6..19d7816e31eae 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md index 4bd2a7b7fb749..5dd94c77e86ef 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md index 6c4b011acb54a..7ad9ed5f6885e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md index eafb4f5aa3db1..05dcdd79205e2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md index 6e3f828dc7dfd..4cd61043f795a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md index 3bdeb4cf93346..ce81cbd8c0993 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_super-admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_super-admin.conf.md new file mode 100644 index 0000000000000..df7fedd75b881 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_super-admin.conf.md @@ -0,0 +1,92 @@ + + + +Renew the certificate embedded in the kubeconfig file for the super-admin + +### Synopsis + + +Renew the certificate embedded in the kubeconfig file for the super-admin. + +Renewals run unconditionally, regardless of certificate expiration date; extra attributes such as SANs will be based on the existing file/certificates; there is no need to resupply them. + +Renewal by default tries to use the certificate authority in the local PKI managed by kubeadm; as an alternative, it is possible to use the Kubernetes certificate API for certificate renewal or, as a last option, to generate a CSR request. + +After renewal, in order to make the changes effective, you must restart the control-plane components and, if the file is used elsewhere, redistribute the renewed certificate. + +``` +kubeadm certs renew super-admin.conf [flags] +``` + +### Options
| Option | Description |
| ------ | ----------- |
| `--cert-dir string` Default: `"/etc/kubernetes/pki"` | The path where to save the certificates |
| `--config string` | Path to a kubeadm configuration file. |
| `-h, --help` | help for super-admin.conf |
| `--kubeconfig string` Default: `"/etc/kubernetes/admin.conf"` | The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. |
### Options inherited from parent commands
| Option | Description |
| ------ | ----------- |
| `--rootfs string` | [EXPERIMENTAL] The path to the 'real' host root filesystem. |
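A brief usage sketch for the new page above, run as root on a control plane node with a kubeadm version that ships this subcommand:

```shell
# Renew the super-admin kubeconfig certificate, then confirm expiration dates.
kubeadm certs renew super-admin.conf
kubeadm certs check-expiration
```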
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md index b7e9ac15977a0..e64b7375e24e8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md index 6e96780c8897a..df55d3bfe6094 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md index d76d7e492a005..dcd2ebf339433 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project.
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md index 0b093d4133875..416d92bc86ec9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -55,7 +55,7 @@ kubeadm config images list [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md index 68c44dd1d4456..9ef9b05e8f523 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -48,7 +48,7 @@ kubeadm config images pull [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

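The feature gates listed above are passed as a comma-separated `key=value` string; for example, a sketch of previewing the image list while toggling one of the documented gates:

```shell
# Preview the images kubeadm would use, with one feature gate toggled (a sketch).
kubeadm config images list --feature-gates=RootlessControlPlane=true
```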
    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md index 9e37f62946939..b428c383c22ff 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md index cc423f6c8e309..c8d1df365646d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md index c58f0b04e0dee..ef5dd5c8b1db1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
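The `init-defaults` page above (and the `join-defaults` page that follows) documents commands that are handy for reviewing the available configuration fields before editing; a sketch:

```shell
# Print the default init and join configurations to stdout for review.
kubeadm config print init-defaults
kubeadm config print join-defaults
```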
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md index faacd3485cec0..c48848a119cf1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -35,13 +35,6 @@ kubeadm config print join-defaults [flags] - ---component-configs strings - - -

    A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.

    - - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_reset-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_reset-defaults.md index 67744c5e9a972..46f3c948c4edd 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_reset-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_reset-defaults.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -35,13 +35,6 @@ kubeadm config print reset-defaults [flags] - ---component-configs strings - - -

    A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.
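The two hunks above drop the `--component-configs` rows from the `join-defaults` and `reset-defaults` pages. As a usage sketch only, and assuming the flag remains accepted by `kubeadm config print init-defaults` (the init-defaults page is not changed here beyond whitespace):

```
# Hypothetical invocation: print the cluster defaults plus the default
# KubeletConfiguration component config.
kubeadm config print init-defaults --component-configs KubeletConfiguration
```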

    - - -h, --help diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_validate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_validate.md index 27580b90b56ac..9a65b7e6930da 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_validate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_validate.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md index 4a3d576229cb0..d554cdd0d7103 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -34,6 +34,7 @@ certs Certificate generation /sa Generate a private key for signing service account tokens along with its public key kubeconfig Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file /admin Generate a kubeconfig file for the admin to use and for kubeadm itself + /super-admin Generate a kubeconfig file for the super-admin /kubelet Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes /controller-manager Generate a kubeconfig file for the controller manager to use /scheduler Generate a kubeconfig file for the scheduler to use @@ -104,7 +105,7 @@ kubeadm init [flags] --certificate-key string -

    Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.

    +

    Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. The certificate key is a hex encoded string that is an AES key of size 32 bytes.
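A usage sketch for the clarified `--certificate-key` flag; the key below is the illustrative 64-hex-character (32-byte) value used in the kubeadm documentation, not a real secret:

```
# Start a control plane and upload its certificates to the kubeadm-certs
# Secret, encrypted with a caller-supplied 32-byte AES key (hex encoded).
sudo kubeadm init --upload-certs \
  --certificate-key "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204"
```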

    @@ -139,7 +140,7 @@ kubeadm init [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)
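Since `EtcdLearnerMode` is now beta and enabled by default, a minimal sketch of opting back out via the same flag this hunk documents:

```
# Disable etcd learner mode explicitly if the previous join behavior
# is required; other gates keep their defaults.
sudo kubeadm init --feature-gates "EtcdLearnerMode=false"
```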

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md index 2e30edead2352..564ed50b187c7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md index ecd1f4d6ecf96..a6082dddd2426 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md index 760c907993345..9856fbb338ee4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -69,7 +69,7 @@ kubeadm init phase addon all [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md index d66b771a14b5a..aa9b25aa74635 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -48,7 +48,7 @@ kubeadm init phase addon coredns [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)
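A usage sketch for the CoreDNS addon phase whose feature-gate table is updated above, assuming the usual admin kubeconfig default:

```
# Hypothetical maintenance step: re-apply the CoreDNS addon manifests after
# editing the ClusterConfiguration stored in the kubeadm-config ConfigMap.
sudo kubeadm init phase addon coredns --kubeconfig /etc/kubernetes/admin.conf
```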

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md index bfc3a74a5b91a..4f61b572d28f6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md index 27b722f289b80..145c1c246a333 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md index 0399a71c464f0..a263260100f27 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
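The hunks that follow remove the stale "Alpha Disclaimer" lines from the `kubeadm init phase certs` subcommand pages. As a usage sketch under the default layout (the directory value is the documented default, not specific to this change):

```
# Generate any missing certificates in the default certificate directory;
# files that already exist are kept, as the pages below describe.
sudo kubeadm init phase certs all --cert-dir /etc/kubernetes/pki
```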
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md index a6723674affc4..0336919ea6e1a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md index 416c59933ee12..bb5a59dca59f9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the certificate the apiserver uses to access etcd, and save them into a If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs apiserver-etcd-client [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md index e4128aedead89..b1ee579e34464 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. 
You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the certificate for the API server to connect to kubelet, and save them If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs apiserver-kubelet-client [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md index ff6d9adc00586..95dc5b984831c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the certificate for serving the Kubernetes API, and save them into apis If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs apiserver [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md index 7f333a5da4766..e92f9c2e95ba7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the self-signed Kubernetes CA to provision identities for other Kuberne If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. 
- ``` kubeadm init phase certs ca [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md index 3c72fcdf6a52c..3f9672800546b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the self-signed CA to provision identities for etcd, and save them into If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs etcd-ca [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md index 708e244f2bbb3..ff8db9ede5c8c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the certificate for liveness probes to healthcheck etcd, and save them If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs etcd-healthcheck-client [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md index 54c17d5196124..b08540b49e5e1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). 
To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -21,8 +21,6 @@ Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs etcd-peer [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md index 96eeba4003f5a..a00377dc9f395 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -21,8 +21,6 @@ Default SANs are localhost, 127.0.0.1, 127.0.0.1, ::1 If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs etcd-server [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md index 1c425e7a2f000..0f0dfb46e8921 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the self-signed CA to provision identities for front proxy, and save th If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. 
- ``` kubeadm init phase certs front-proxy-ca [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md index 12867c61a67f5..52b2e30f82244 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -19,8 +19,6 @@ Generate the certificate for the front proxy client, and save them into front-pr If both files already exist, kubeadm skips the generation step and existing files will be used. -Alpha Disclaimer: this command is currently alpha. - ``` kubeadm init phase certs front-proxy-client [flags] ``` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md index 41c989018dfe2..28b5cd93095e5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -15,9 +15,9 @@ Generate a private key for signing service account tokens along with its public ### Synopsis -Generate the private key for signing service account tokens along with its public key, and save them into sa.key and sa.pub files. If both files already exist, kubeadm skips the generation step and existing files will be used. +Generate the private key for signing service account tokens along with its public key, and save them into sa.key and sa.pub files. -Alpha Disclaimer: this command is currently alpha. +If both files already exist, kubeadm skips the generation step and existing files will be used. 
``` kubeadm init phase certs sa [flags] diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md index a5167eeb8f7c1..cb73db5164f23 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md index fafe05803a600..2c86f8622c4ba 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -101,7 +101,7 @@ kubeadm init phase control-plane all [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)
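A sketch of the `control-plane all` phase documented here, with an illustrative advertise address:

```
# Hypothetical invocation: write static Pod manifests for all control-plane
# components without running the rest of `kubeadm init`.
sudo kubeadm init phase control-plane all --apiserver-advertise-address 10.0.0.10
```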

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md index b1a0381e5d4e9..c38bd787a8542 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -83,7 +83,7 @@ kubeadm init phase control-plane apiserver [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md index 4aff8b021d380..f99791eaf7388 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md index 062e96f8e25e2..63cac55bd4105 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md index 0360af7aefa97..5e58ababb8f9b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md index d5c168024f4da..bdb9b3c8c07bd 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md index d23c2e5cbe2b0..cc026ae4ada29 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md index dc6264e2abe60..c19aaaa37d2ec 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md index 28be5441026d6..0d2e1792a1841 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md index 5c1563f6375fb..c2c9fe26e45d5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md index 19b8cfe6728d8..196b5d78fbd30 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md index 580f99d255ac2..cdd9337dea2f6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_super-admin.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_super-admin.md new file mode 100644 index 0000000000000..f266096148123 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_super-admin.md @@ -0,0 +1,121 @@ + + + +Generate a kubeconfig file for the super-admin + +### Synopsis + + +Generate a kubeconfig file for the super-admin, and save it to super-admin.conf file. + +``` +kubeadm init phase kubeconfig super-admin [flags] +``` + +### Options + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    --apiserver-advertise-address string

The IP address the API Server will advertise it's listening on. If not set, the default network interface will be used.

    --apiserver-bind-port int32     Default: 6443

    Port for the API Server to bind to.

    --cert-dir string     Default: "/etc/kubernetes/pki"

    The path where to save and store the certificates.

    --config string

    Path to a kubeadm configuration file.

    --control-plane-endpoint string

    Specify a stable IP address or DNS name for the control plane.

    --dry-run

    Don't apply any changes; just output what would be done.

    -h, --help

    help for super-admin

    --kubeconfig-dir string     Default: "/etc/kubernetes"

    The path where to save the kubeconfig file.

    --kubernetes-version string     Default: "stable-1"

    Choose a specific Kubernetes version for the control plane.

    + + + +### Options inherited from parent commands + + ++++ + + + + + + + + + + +
    --rootfs string

    [EXPERIMENTAL] The path to the 'real' host root filesystem.

    + + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md index 92a48a215a1e7..dfa1b52522967 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md index 62278d5c12319..3e7f64cd63af6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md index 93c521157bb5a..9decae2f14092 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
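The new page above documents the `super-admin` kubeconfig phase added in this release. A minimal sketch, using the defaults the page lists:

```
# (Re)create the super-admin credential; with the default --kubeconfig-dir
# the file is written to /etc/kubernetes/super-admin.conf.
sudo kubeadm init phase kubeconfig super-admin --kubeconfig-dir /etc/kubernetes
```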
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md index 2dd93c707d6be..04f89bf380ff8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -65,6 +65,13 @@ kubeadm init phase kubelet-start [flags]

    help for kubelet-start

    + +--image-repository string     Default: "registry.k8s.io" + + +

    Choose a container registry to pull control plane images from

    + + --node-name string diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md index 685dfdcab50e3..11c5f2b21eca1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md index 23013ec7e3ade..b7d43d9e1f232 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -72,6 +72,13 @@ kubeadm init phase preflight [flags]

    A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

    + +--image-repository string     Default: "registry.k8s.io" + + +

    Choose a container registry to pull control plane images from
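The `--image-repository` flag is added to both the `kubelet-start` and `preflight` phases in these hunks. A usage sketch with an illustrative mirror registry (the registry name below is hypothetical):

```
# Run preflight checks, pulling control plane images from a mirror
# instead of the default registry.k8s.io.
sudo kubeadm init phase preflight --image-repository registry.example.com/kubernetes
```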

    + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_show-join-command.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_show-join-command.md index 23abc5671cdc6..355e432a90e4b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_show-join-command.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_show-join-command.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md index 9915f522ab954..9e596329d614f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -34,7 +34,7 @@ kubeadm init phase upload-certs [flags] --certificate-key string -

    Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.

    +

Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. The certificate key is a hex-encoded string that is a 32-byte AES key.
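Because the flag expects a hex-encoded 32-byte key, a valid value can be generated with `kubeadm certs certificate-key` or, as a sketch with standard tooling:

```bash
# Emit 32 random bytes as 64 hexadecimal characters, suitable for --certificate-key
openssl rand -hex 32
```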

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md index f74c243390ffb..594cfed952dde 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md index 3feed3b189244..620b801069586 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md index 4ef3f88e7e8fb..6dc7d875672f5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md index c83a86129e58a..26eb4178aa180 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md index c78cd9c9cccce..d1bff7d0c1824 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -107,7 +107,7 @@ kubeadm join [api-server-endpoint] [flags] --certificate-key string -

    Use this key to decrypt the certificate secrets uploaded by init.

    +

Use this key to decrypt the certificate secrets uploaded by init. The certificate key is a hex-encoded string that is a 32-byte AES key.
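For context, a sketch of this flag when joining an additional control plane node; the endpoint, token, hash, and key below are all placeholders:

```bash
# Join as a control plane member and decrypt the certificates uploaded by init
kubeadm join 192.168.0.10:6443 --control-plane \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --certificate-key <64-character-hex-key>
```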

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md index bbab5b26997ba..de074378f60e2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md index 7a8b17decd6b6..47bb65a233c17 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md index 4a94a75f49d67..d9ec5fe25f0c3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md index 637e909c3b914..d97d7a540dce8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md index 888b17e17ee42..3e55ebbe8057b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md index c2e387505ca60..9fb71b6c02b2b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md index 918c4385819dd..395357a202484 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md index 266906c653d36..e17152349dcbb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -48,7 +48,7 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] --certificate-key string -

    Use this key to decrypt the certificate secrets uploaded by init.

    +

Use this key to decrypt the certificate secrets uploaded by init. The certificate key is a hex-encoded string that is a 32-byte AES key.

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md index ee1234e5a134e..f488eecfcc8d8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md index b6f19a68a2304..6ecdb632d9a15 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md index 019ea5cb5cfae..1e0131256b195 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
@@ -34,7 +34,7 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f --certificate-key string -

    Use this key to decrypt the certificate secrets uploaded by init.

    +

Use this key to decrypt the certificate secrets uploaded by init. The certificate key is a hex-encoded string that is a 32-byte AES key.

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md index d12f102bbb985..387ca757e7d9b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -34,7 +34,7 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags --certificate-key string -

    Use this key to decrypt the certificate secrets uploaded by init.

    +

Use this key to decrypt the certificate secrets uploaded by init. The certificate key is a hex-encoded string that is a 32-byte AES key.

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md index 1902c7cc2c0b4..55befe834c9ad 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md index ecfb735e7efc2..10dc19022290d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -55,7 +55,7 @@ kubeadm join phase preflight [api-server-endpoint] [flags] --certificate-key string -

    Use this key to decrypt the certificate secrets uploaded by init.

    +

Use this key to decrypt the certificate secrets uploaded by init. The certificate key is a hex-encoded string that is a 32-byte AES key.

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md index 62ba2f0b953de..34a496927e6c5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md index 25edb297c20e6..2f9a2095c8b0b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md index 9c4b0576c57d7..fc95b377c3dd4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md index 8df03b6bdb6b7..b7015907dff2e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md index f62de85dd351a..3f10d13b4ab2a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md index dd074f8ecfa60..03be0ef5acd33 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md index 54e7cf0e1b2d0..383728f8f8844 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md index 7cb160a9e714d..a763641ee0157 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md index 47faf7af634bc..3857d95857646 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md index 125a8c62dd867..4a5c1ab126b5a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md index a34aeceb10cfa..b16ad1a5a5c3f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md index 96f2908f6a844..e803b5947f5bc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md index c94aacfb84ed8..7311869440ae5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index cc7fb32b6fe6e..8d257715149a8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -76,7 +76,7 @@ kubeadm upgrade apply [version] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)
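A sketch of the `--feature-gates` syntax listed above; the target version is a placeholder:

```bash
# Explicitly set a kubeadm feature gate while applying an upgrade
kubeadm upgrade apply v1.29.0 --feature-gates=EtcdLearnerMode=true
```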

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md index c99c4723ca366..0eae71711dea1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index e929b0db21ade..1ee54c4ee3721 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md index 418cb37af9a3e..53c0054b7c1f3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md index c007a9efe3677..0290456ec51e3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md index 4e3a6bbb4f1d5..c057e331ab632 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md index 4e1bdac5cd82f..2b52274ff9c32 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. 
diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md index 286c3d5d1c590..a2f1de2ee3efa 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. @@ -55,7 +55,7 @@ kubeadm upgrade plan [version] [flags] --feature-gates string -

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (ALPHA - default=false)
    PublicKeysECDSA=true|false (ALPHA - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    +

    A set of key=value pairs that describe feature gates for various features. Options are:
    EtcdLearnerMode=true|false (BETA - default=true)
    PublicKeysECDSA=true|false (DEPRECATED - default=false)
    RootlessControlPlane=true|false (ALPHA - default=false)
    UpgradeAddonsBeforeControlPlane=true|false (DEPRECATED - default=false)

    diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md index 62ff8895c8475..e210885018a53 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md @@ -3,7 +3,7 @@ The file is auto-generated from the Go source code of the component using a gene [generator](https://github.com/kubernetes-sigs/reference-docs/). To learn how to generate the reference documentation, please read [Contributing to the reference documentation](/docs/contribute/generate-ref-docs/). -To update the reference content, please follow the +To update the reference content, please follow the [Contributing upstream](/docs/contribute/generate-ref-docs/contribute-upstream/) guide. You can file document formatting bugs against the [reference-docs](https://github.com/kubernetes-sigs/reference-docs/) project. diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index 7a0d5b3bf11a6..33463afb8cad8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -64,6 +64,7 @@ in a majority of cases, and the most intuitive location; other constants paths a - `controller-manager.conf` - `scheduler.conf` - `admin.conf` for the cluster admin and kubeadm itself + - `super-admin.conf` for the cluster super-admin that can bypass RBAC - Names of certificates and key files : @@ -209,12 +210,21 @@ Kubeadm generates kubeconfig files with identities for control plane components: This client cert should have the CN `system:kube-scheduler`, as defined by default [RBAC core components roles](/docs/reference/access-authn-authz/rbac/#core-component-roles) -Additionally, a kubeconfig file for kubeadm itself and the admin is generated and saved into the -`/etc/kubernetes/admin.conf` file. The "admin" here is defined as the actual person(s) that is -administering the cluster and wants to have full control (**root**) over the cluster. The -embedded client certificate for admin should be in the `system:masters` organization, as defined -by default [RBAC user facing role bindings](/docs/reference/access-authn-authz/rbac/#user-facing-roles). -It should also include a CN. Kubeadm uses the `kubernetes-admin` CN. +Additionally, a kubeconfig file for kubeadm as an administrative entity is generated and stored +in `/etc/kubernetes/admin.conf`. This file includes a certificate with +`Subject: O = kubeadm:cluster-admins, CN = kubernetes-admin`. `kubeadm:cluster-admins` +is a group managed by kubeadm. It is bound to the `cluster-admin` ClusterRole during `kubeadm init`, +by using the `super-admin.conf` file, which does not require RBAC. +This `admin.conf` file must remain on control plane nodes and not be shared with additional users. + +During `kubeadm init` another kubeconfig file is generated and stored in `/etc/kubernetes/super-admin.conf`. +This file includes a certificate with `Subject: O = system:masters, CN = kubernetes-super-admin`. +`system:masters` is a super user group that bypasses RBAC and makes `super-admin.conf` useful in case +of an emergency where a cluster is locked due to RBAC misconfiguration. +The `super-admin.conf` file can be stored in a safe location and not shared with additional users. 
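The certificate subjects described above can be verified directly on a control plane node. A minimal sketch, assuming the default kubeadm file locations and standard `kubectl`, `base64`, and `openssl` tooling:

```bash
# Print the subject of the client certificate embedded in admin.conf;
# expected output resembles: subject=O = kubeadm:cluster-admins, CN = kubernetes-admin
kubectl config view --raw --kubeconfig /etc/kubernetes/admin.conf \
  -o jsonpath='{.users[0].user.client-certificate-data}' \
  | base64 -d | openssl x509 -noout -subject
```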
+ +See [RBAC user facing role bindings](/docs/reference/access-authn-authz/rbac/#user-facing-roles) +for additional information on RBAC and built-in ClusterRoles and groups. Please note that: diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-certs.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-certs.md index 3bce10ccf0b67..e05ea06b8707f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-certs.md @@ -34,6 +34,7 @@ For more details see [Manual certificate renewal](/docs/tasks/administer-cluster {{< tab name="etcd-server" include="generated/kubeadm_certs_renew_etcd-server.md" />}} {{< tab name="front-proxy-client" include="generated/kubeadm_certs_renew_front-proxy-client.md" />}} {{< tab name="scheduler.conf" include="generated/kubeadm_certs_renew_scheduler.conf.md" />}} +{{< tab name="super-admin.conf" include="generated/kubeadm_certs_renew_super-admin.conf.md" />}} {{< /tabs >}} ## kubeadm certs certificate-key {#cmd-certs-certificate-key} @@ -60,7 +61,9 @@ For more details see ## kubeadm certs generate-csr {#cmd-certs-generate-csr} This command can be used to generate keys and CSRs for all control-plane certificates and kubeconfig files. -The user can then sign the CSRs with a CA of their choice. +The user can then sign the CSRs with a CA of their choice. For more information +on how to use the command, see +[Signing certificate signing requests (CSR) generated by kubeadm](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs#signing-csr). {{< tabs name="tab-certs-generate-csr" >}} {{< tab name="generate-csr" include="generated/kubeadm_certs_generate-csr.md" />}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 2bab24f74d7d7..c08427d4b675e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -58,6 +58,7 @@ You can create all required kubeconfig files by calling the `all` subcommand or {{< tab name="kubelet" include="generated/kubeadm_init_phase_kubeconfig_kubelet.md" />}} {{< tab name="controller-manager" include="generated/kubeadm_init_phase_kubeconfig_controller-manager.md" />}} {{< tab name="scheduler" include="generated/kubeadm_init_phase_kubeconfig_scheduler.md" />}} +{{< tab name="super-admin" include="generated/kubeadm_init_phase_kubeconfig_super-admin.md" />}} {{< /tabs >}} ## kubeadm init phase control-plane {#cmd-phase-control-plane} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md index a766250ef1f41..0fbeb13e93040 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -32,8 +32,9 @@ following steps: arguments, lowercased if necessary. 1. Writes kubeconfig files in `/etc/kubernetes/` for the kubelet, the controller-manager and the - scheduler to use to connect to the API server, each with its own identity, as well as an - additional kubeconfig file for administration named `admin.conf`. + scheduler to use to connect to the API server, each with its own identity. Also + additional kubeconfig files are written, for kubeadm as an administrative entity (`admin.conf`) + and for a super-admin user that can bypass RBAC (`super-admin.conf`). 1.
Generates static Pod manifests for the API server, controller-manager and scheduler. In case an external etcd is not provided, @@ -135,7 +136,7 @@ If your configuration is not using the latest version it is **recommended** that the [kubeadm config migrate](/docs/reference/setup-tools/kubeadm/kubeadm-config/) command. For more information on the fields and usage of the configuration you can navigate to our -[API reference page](/docs/reference/config-api/kubeadm-config.v1beta4/). +[API reference page](/docs/reference/config-api/kubeadm-config.v1beta3/). ### Using kubeadm init with feature gates {#feature-gates} @@ -145,7 +146,7 @@ of the cluster. Feature gates are removed after a feature graduates to GA. To pass a feature gate you can either use the `--feature-gates` flag for `kubeadm init`, or you can add items into the `featureGates` field when you pass -a [configuration file](/docs/reference/config-api/kubeadm-config.v1beta4/#kubeadm-k8s-io-v1beta4-ClusterConfiguration) +a [configuration file](/docs/reference/config-api/kubeadm-config.v1beta3/#kubeadm-k8s-io-v1beta3-ClusterConfiguration) using `--config`. Passing [feature gates for core Kubernetes components](/docs/reference/command-line-tools-reference/feature-gates) @@ -157,9 +158,9 @@ List of feature gates: {{< table caption="kubeadm feature gates" >}} Feature | Default | Alpha | Beta | GA :-------|:--------|:------|:-----|:---- +`EtcdLearnerMode` | `true` | 1.27 | 1.29 | - `PublicKeysECDSA` | `false` | 1.19 | - | - `RootlessControlPlane` | `false` | 1.22 | - | - -`EtcdLearnerMode` | `false` | 1.27 | - | - {{< /table >}} {{< note >}} @@ -168,6 +169,10 @@ Once a feature gate goes GA its value becomes locked to `true` by default. Feature gate descriptions: +`EtcdLearnerMode` +: With this feature gate enabled, when joining a new control plane node, a new etcd member will be created +as a learner and promoted to a voting member only after the etcd data are fully aligned. + `PublicKeysECDSA` : Can be used to create a cluster that uses ECDSA certificates instead of the default RSA algorithm. Renewal of existing ECDSA certificates is also supported using `kubeadm certs renew`, but you cannot @@ -179,14 +184,10 @@ for `kube-apiserver`, `kube-controller-manager`, `kube-scheduler` and `etcd` to If the flag is not set, those components run as root. You can change the value of this feature gate before you upgrade to a newer version of Kubernetes. -`EtcdLearnerMode` -: With this feature gate enabled, when joining a new control plane node, a new etcd member will be created -as a learner and promoted to a voting member only after the etcd data are fully aligned. - List of deprecated feature gates: {{< table caption="kubeadm deprecated feature gates" >}} -Feature | Default +Feature | Default :-------|:-------- `UpgradeAddonsBeforeControlPlane` | `false` {{< /table >}} @@ -212,12 +213,16 @@ List of removed feature gates: {{< table caption="kubeadm removed feature gates" >}} Feature | Alpha | Beta | GA | Removed :-------|:------|:-----|:---|:------- -`UnversionedKubeletConfigMap` | 1.22 | 1.23 | 1.25 | 1.26 `IPv6DualStack` | 1.16 | 1.21 | 1.23 | 1.24 +`UnversionedKubeletConfigMap` | 1.22 | 1.23 | 1.25 | 1.26 {{< /table >}} Feature gate descriptions: +`IPv6DualStack` +: This flag helps to configure components dual stack when the feature is in progress. For more details on Kubernetes +dual-stack support see [Dual-stack support with kubeadm](/docs/setup/production-environment/tools/kubeadm/dual-stack-support/). 
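The `featureGates` field mentioned above belongs to `ClusterConfiguration`. A minimal sketch of a file passed via `--config`, following the kubeadm-config v1beta3 API referenced on this page:

```yaml
# kubeadm-config.yaml: enable a feature gate through the configuration file
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
featureGates:
  RootlessControlPlane: true
```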
+ `UnversionedKubeletConfigMap` : This flag controls the name of the {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} where kubeadm stores kubelet configuration data. With this flag not specified or set to `true`, the ConfigMap is named `kubelet-config`. @@ -228,10 +233,6 @@ or `kubeadm upgrade apply`), kubeadm respects the value of `UnversionedKubeletCo (during `kubeadm join`, `kubeadm reset`, `kubeadm upgrade ...`), kubeadm attempts to use unversioned ConfigMap name first; if that does not succeed, kubeadm falls back to using the legacy (versioned) name for that ConfigMap. -`IPv6DualStack` -: This flag helps to configure components dual stack when the feature is in progress. For more details on Kubernetes -dual-stack support see [Dual-stack support with kubeadm](/docs/setup/production-environment/tools/kubeadm/dual-stack-support/). - ### Adding kube-proxy parameters {#kube-proxy} For information about kube-proxy parameters in the kubeadm configuration see: @@ -291,7 +292,7 @@ for etcd and CoreDNS. #### Custom sandbox (pause) images {#custom-pause-image} -To set a custom image for these you need to configure this in your +To set a custom image for these you need to configure this in your {{< glossary_tooltip text="container runtime" term_id="container-runtime" >}} to use the image. Consult the documentation for your container runtime to find out how to change this setting; @@ -314,7 +315,7 @@ kubeadm init phase upload-certs --upload-certs --config=SOME_YAML_FILE ``` {{< note >}} A predefined `certificateKey` can be provided in `InitConfiguration` when passing the -[configuration file](/docs/reference/config-api/kubeadm-config.v1beta4/) with `--config`. +[configuration file](/docs/reference/config-api/kubeadm-config.v1beta3/) with `--config`. {{< /note >}} If a predefined certificate key is not passed to `kubeadm init` and @@ -386,8 +387,9 @@ DNS name or an address of a load balancer. kubeadm certs certificate-key ``` -Once the cluster is up, you can grab the admin credentials from the control-plane node -at `/etc/kubernetes/admin.conf` and use that to talk to the cluster. +Once the cluster is up, you can use the `/etc/kubernetes/admin.conf` file from +a control-plane node to talk to the cluster with administrator credentials or +[Generating kubeconfig files for additional users](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs#kubeconfig-additional-users). Note that this style of bootstrap has some relaxed security guarantees because it does not allow the root CA hash to be validated with diff --git a/content/en/docs/reference/using-api/api-concepts.md b/content/en/docs/reference/using-api/api-concepts.md index 9d8a55d66c011..3e50fabb94313 100644 --- a/content/en/docs/reference/using-api/api-concepts.md +++ b/content/en/docs/reference/using-api/api-concepts.md @@ -34,7 +34,7 @@ API concepts: * A *resource type* is the name used in the URL (`pods`, `namespaces`, `services`) * All resource types have a concrete representation (their object schema) which is called a *kind* -* A list of instances of a resource is known as a *collection* +* A list of instances of a resource type is known as a *collection* * A single instance of a resource type is called a *resource*, and also usually represents an *object* * For some resource types, the API includes one or more *sub-resources*, which are represented as URI paths below the resource @@ -148,7 +148,7 @@ For example: 1. List all of the pods in a given namespace. 
- ```console + ``` GET /api/v1/namespaces/test/pods --- 200 OK @@ -204,7 +204,7 @@ to a given `resourceVersion` the client is requesting have already been sent. Th document representing the `BOOKMARK` event is of the type requested by the request, but only includes a `.metadata.resourceVersion` field. For example: -```console +``` GET /api/v1/namespaces/test/pods?watch=1&resourceVersion=10245&allowWatchBookmarks=true --- 200 OK @@ -262,7 +262,7 @@ is 10245 and there are two pods: `foo` and `bar`. Then sending the following req _consistent read_ by setting empty resource version using `resourceVersion=`) could result in the following sequence of events: -```console +``` GET /api/v1/namespaces/test/pods?watch=1&sendInitialEvents=true&allowWatchBookmarks=true&resourceVersion=&resourceVersionMatch=NotOlderThan --- 200 OK @@ -303,7 +303,7 @@ can be saved and the latency can be reduced. To verify if `APIResponseCompression` is working, you can send a **get** or **list** request to the API server with an `Accept-Encoding` header, and check the response size and headers. For example: -```console +``` GET /api/v1/pods Accept-Encoding: gzip --- @@ -317,7 +317,7 @@ The `content-encoding` header indicates that the response is compressed with `gz ## Retrieving large results sets in chunks -{{< feature-state for_k8s_version="v1.9" state="beta" >}} +{{< feature-state for_k8s_version="v1.29" state="stable" >}} On large clusters, retrieving the collection of some resource types may result in very large responses that can impact the server and client. For instance, a cluster @@ -325,9 +325,7 @@ may have tens of thousands of Pods, each of which is equivalent to roughly 2 KiB encoded JSON. Retrieving all pods across all namespaces may result in a very large response (10-20MB) and consume a large amount of server resources. -Provided that you don't explicitly disable the `APIListChunking` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), the -Kubernetes API server supports the ability to break a single large collection request +The Kubernetes API server supports the ability to break a single large collection request into many smaller chunks while preserving the consistency of the total request. Each chunk can be returned sequentially which reduces both the total size of the request and allows user-oriented clients to display results incrementally to improve responsiveness. @@ -356,7 +354,7 @@ of 500 pods at a time, request those chunks as follows: 1. List all of the pods on a cluster, retrieving up to 500 pods each time. - ```console + ``` GET /api/v1/pods?limit=500 --- 200 OK @@ -377,7 +375,7 @@ of 500 pods at a time, request those chunks as follows: 2. Continue the previous call, retrieving the next set of 500 pods. - ```console + ``` GET /api/v1/pods?limit=500&continue=ENCODED_CONTINUE_TOKEN --- 200 OK @@ -398,7 +396,7 @@ of 500 pods at a time, request those chunks as follows: 3. Continue the previous call, retrieving the last 253 pods. - ```console + ``` GET /api/v1/pods?limit=500&continue=ENCODED_CONTINUE_TOKEN_2 --- 200 OK @@ -542,7 +540,7 @@ type. For example, list all of the pods on a cluster in the Table format. -```console +``` GET /api/v1/pods Accept: application/json;as=Table;g=meta.k8s.io;v=v1 --- @@ -563,7 +561,7 @@ For API resource types that do not have a custom Table definition known to the c plane, the API server returns a default Table response that consists of the resource's `name` and `creationTimestamp` fields. 
-```console +``` GET /apis/crd.example.com/v1alpha1/namespaces/default/resources --- 200 OK @@ -598,7 +596,7 @@ uses the Table information and must work against all resource types, including extensions, you should make requests that specify multiple content types in the `Accept` header. For example: -```console +``` Accept: application/json;as=Table;g=meta.k8s.io;v=v1, application/json ``` @@ -626,7 +624,7 @@ For example: 1. List all of the pods on a cluster in Protobuf format. - ```console + ``` GET /api/v1/pods Accept: application/vnd.kubernetes.protobuf --- @@ -639,7 +637,7 @@ For example: 1. Create a pod by sending Protobuf encoded data to the server, but request a response in JSON. - ```console + ``` POST /api/v1/namespaces/test/pods Content-Type: application/vnd.kubernetes.protobuf Accept: application/json @@ -664,7 +662,7 @@ As a client, if you might need to work with extension types you should specify m content types in the request `Accept` header to support fallback to JSON. For example: -```console +``` Accept: application/vnd.kubernetes.protobuf, application/json ``` @@ -677,7 +675,7 @@ describes the encoding and type of the underlying object and then contains the o The wrapper format is: -```console +``` A four byte magic number prefix: Bytes 0-3: "k8s\x00" [0x6b, 0x38, 0x73, 0x00] @@ -724,13 +722,13 @@ When you **delete** a resource this takes place in two phases. "kind": "ConfigMap", "apiVersion": "v1", "metadata": { - "finalizers": {"url.io/neat-finalization", "other-url.io/my-finalizer"}, + "finalizers": ["url.io/neat-finalization", "other-url.io/my-finalizer"], "deletionTimestamp": nil, } } ``` -When a client first sends a **delete** to request removal of a resource, the `.metadata.deletionTimestamp` is set to the current time. +When a client first sends a **delete** to request the removal of a resource, the `.metadata.deletionTimestamp` is set to the current time. Once the `.metadata.deletionTimestamp` is set, external controllers that act on finalizers may start performing their cleanup work at any time, in any order. @@ -895,7 +893,7 @@ effects on any request marked as dry runs. 
Here is an example dry-run request that uses `?dryRun=All`: -```console +``` POST /api/v1/namespaces/test/pods?dryRun=All Content-Type: application/json Accept: application/json diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index dfbc740ec6c2c..168aeea70ce8b 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -31,7 +31,7 @@ The following client libraries are officially maintained by | Language | Client Library | Sample Programs | |------------|----------------|-----------------| | C | [github.com/kubernetes-client/c](https://github.com/kubernetes-client/c/) | [browse](https://github.com/kubernetes-client/c/tree/master/examples) -| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) +| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples) | Go | [github.com/kubernetes/client-go/](https://github.com/kubernetes/client-go/) | [browse](https://github.com/kubernetes/client-go/tree/master/examples) | Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [browse](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) | Java | [github.com/kubernetes-client/java](https://github.com/kubernetes-client/java/) | [browse](https://github.com/kubernetes-client/java/tree/master/examples) @@ -54,7 +54,6 @@ their authors, not the Kubernetes team. | DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | | Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | | Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | -| Go | [github.com/ericchiang/k8s](https://github.com/ericchiang/k8s) | | Java (OSGi) | [bitbucket.org/amdatulabs/amdatu-kubernetes](https://bitbucket.org/amdatulabs/amdatu-kubernetes) | | Java (Fabric8, OSGi) | [github.com/fabric8io/kubernetes-client](https://github.com/fabric8io/kubernetes-client) | | Java | [github.com/manusa/yakc](https://github.com/manusa/yakc) | diff --git a/content/en/docs/reference/using-api/deprecation-guide.md b/content/en/docs/reference/using-api/deprecation-guide.md index 69f32f9eebc95..80da749edc696 100644 --- a/content/en/docs/reference/using-api/deprecation-guide.md +++ b/content/en/docs/reference/using-api/deprecation-guide.md @@ -20,6 +20,19 @@ deprecated API versions to newer and more stable API versions. ## Removed APIs by release +### v1.32 + +The **v1.32** release will stop serving the following deprecated API versions: + +#### Flow control resources {#flowcontrol-resources-v132} + +The **flowcontrol.apiserver.k8s.io/v1beta3** API version of FlowSchema and PriorityLevelConfiguration will no longer be served in v1.32. + +* Migrate manifests and API clients to use the **flowcontrol.apiserver.k8s.io/v1** API version, available since v1.29. +* All existing persisted objects are accessible via the new API +* Notable changes in **flowcontrol.apiserver.k8s.io/v1**: + * The PriorityLevelConfiguration `spec.limited.nominalConcurrencyShares` field only defaults to 30 when unspecified, and an explicit value of 0 is not changed to 30. 
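As a quick migration check, you can confirm that your persisted objects really are reachable through the replacement API version by querying it explicitly with `kubectl`; the fully-qualified `resource.version.group` form below is one way to pin the version (cluster and context are whatever you normally use):

```shell
# Read FlowSchema and PriorityLevelConfiguration objects through the
# replacement flowcontrol.apiserver.k8s.io/v1 API to confirm they are served there
kubectl get flowschemas.v1.flowcontrol.apiserver.k8s.io
kubectl get prioritylevelconfigurations.v1.flowcontrol.apiserver.k8s.io
```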
+
### v1.29

The **v1.29** release will stop serving the following deprecated API versions:

@@ -28,14 +41,16 @@ The **v1.29** release will stop serving the following deprecated API versions:

The **flowcontrol.apiserver.k8s.io/v1beta2** API version of FlowSchema and PriorityLevelConfiguration will no longer be served in v1.29.

-* Migrate manifests and API clients to use the **flowcontrol.apiserver.k8s.io/v1beta3** API version, available since v1.26.
+* Migrate manifests and API clients to use the **flowcontrol.apiserver.k8s.io/v1** API version, available since v1.29, or the **flowcontrol.apiserver.k8s.io/v1beta3** API version, available since v1.26.
* All existing persisted objects are accessible via the new API
+* Notable changes in **flowcontrol.apiserver.k8s.io/v1**:
+  * The PriorityLevelConfiguration `spec.limited.assuredConcurrencyShares` field is renamed to `spec.limited.nominalConcurrencyShares` and only defaults to 30 when unspecified, and an explicit value of 0 is not changed to 30.
* Notable changes in **flowcontrol.apiserver.k8s.io/v1beta3**:
  * The PriorityLevelConfiguration `spec.limited.assuredConcurrencyShares` field is renamed to `spec.limited.nominalConcurrencyShares`

### v1.27

-The **v1.27** release will stop serving the following deprecated API versions:
+The **v1.27** release stopped serving the following deprecated API versions:

#### CSIStorageCapacity {#csistoragecapacity-v127}

@@ -53,7 +68,7 @@ The **v1.26** release stopped serving the following deprecated API versions:

The **flowcontrol.apiserver.k8s.io/v1beta1** API version of FlowSchema and PriorityLevelConfiguration is no longer served as of v1.26.

-* Migrate manifests and API clients to use the **flowcontrol.apiserver.k8s.io/v1beta3** API version, available since v1.26.
+* Migrate manifests and API clients to use the **flowcontrol.apiserver.k8s.io/v1beta2** API version.
* All existing persisted objects are accessible via the new API
* No notable changes

diff --git a/content/en/docs/reference/using-api/deprecation-policy.md b/content/en/docs/reference/using-api/deprecation-policy.md
index ad8b0291b2733..6d1259b365246 100644
--- a/content/en/docs/reference/using-api/deprecation-policy.md
+++ b/content/en/docs/reference/using-api/deprecation-policy.md
@@ -349,6 +349,15 @@ after their announced deprecation for no less than:**

* **Beta: 3 months or 1 release (whichever is longer)**
* **Alpha: 0 releases**

+**Rule #5c: Command line interface (CLI) elements cannot be deprecated in favor of
+less stable CLI elements**
+
+Similar to Rule #3 for APIs, if an element of a command line interface is being replaced with an
+alternative implementation, such as by renaming an existing element, or by switching to
+use configuration sourced from a file
+instead of a command line argument, that recommended alternative must be of
+the same or higher stability level.
+
**Rule #6: Deprecated CLI elements must emit warnings (optionally disable)
when used.**

@@ -361,8 +370,31 @@ rules for deprecation are as follows:

**Rule #7: Deprecated behaviors must function for no less than 1 year after their
announced deprecation.**

-This does not imply that all changes to the system are governed by this policy.
-This applies only to significant, user-visible behaviors which impact the
+If the feature or behavior is being replaced with an alternative implementation
+that requires work to adopt the change, there should be an effort to simplify
+the transition whenever possible.
If an alternative implementation is under
+Kubernetes organization control, the following rules apply:
+
+**Rule #8: The feature or behavior must not be deprecated in favor of an alternative
+implementation that is less stable**
+
+For example, a generally available feature cannot be deprecated in favor of a Beta
+replacement.
+The Kubernetes project does, however, encourage users to adopt and transition to alternative
+implementations even before they reach the same maturity level. This is particularly important
+for exploring new use cases of a feature or getting early feedback on the replacement.
+
+Alternative implementations may sometimes be external tools or products,
+for example, a feature may move from the kubelet to a container runtime
+that is not under Kubernetes project control. In such cases, the rule cannot be
+applied, but there must be an effort to ensure that there is a transition path
+that does not compromise on components' maturity levels. In the example with
+container runtimes, the effort may involve trying to ensure that popular container runtimes
+have versions that offer the same level of stability while implementing that replacement behavior.
+
+Deprecation rules for features and behaviors do not imply that all changes
+to the system are governed by this policy.
+These rules apply only to significant, user-visible behaviors which impact the
correctness of applications running on Kubernetes or that impact the
administration of Kubernetes clusters, and which are being removed entirely.

@@ -405,14 +437,14 @@ feature in the associated feature gate.

Versioning for feature gates is different from the previously discussed components,
therefore the rules for deprecation are as follows:

-**Rule #8: Feature gates must be deprecated when the corresponding feature they control
+**Rule #9: Feature gates must be deprecated when the corresponding feature they control
transitions a lifecycle stage as follows. Feature gates must function for no less than:**

* **Beta feature to GA: 6 months or 2 releases (whichever is longer)**
* **Beta feature to EOL: 3 months or 1 release (whichever is longer)**
* **Alpha feature to EOL: 0 releases**

-**Rule #9: Deprecated feature gates must respond with a warning when used. When a feature gate
+**Rule #10: Deprecated feature gates must respond with a warning when used. When a feature gate
is deprecated it must be documented in both in the release notes and the corresponding CLI help.
Both warnings and documentation must indicate whether a feature gate is non-operational.**

@@ -430,13 +462,13 @@ this impacts removal of a metric during a Kubernetes release. These classes are
determined by the perceived importance of the metric.
The rules for deprecating and removing a metric are as follows:

-**Rule #9a: Metrics, for the corresponding stability class, must function for no less than:**
+**Rule #11a: Metrics, for the corresponding stability class, must function for no less than:**

* **STABLE: 4 releases or 12 months (whichever is longer)**
* **BETA: 2 releases or 8 months (whichever is longer)**
* **ALPHA: 0 releases**

-**Rule #9b: Metrics, after their _announced deprecation_, must function for no less than:**
+**Rule #11b: Metrics, after their _announced deprecation_, must function for no less than:**

* **STABLE: 3 releases or 9 months (whichever is longer)**
* **BETA: 1 releases or 4 months (whichever is longer)**
diff --git a/content/en/docs/reference/using-api/health-checks.md b/content/en/docs/reference/using-api/health-checks.md
index b13793a07b608..200d44a5e26c9 100644
--- a/content/en/docs/reference/using-api/health-checks.md
+++ b/content/en/docs/reference/using-api/health-checks.md
@@ -94,7 +94,7 @@ The output show that the `etcd` check is excluded:

{{< feature-state state="alpha" >}}

Each individual health check exposes an HTTP endpoint and can be checked individually.
-The schema for the individual health checks is `/livez/<healthcheck>` where `livez` and `readyz` and be used to indicate if you want to check the liveness or the readiness of the API server.
+The schema for the individual health checks is `/livez/<healthcheck>` or `/readyz/<healthcheck>`, where `livez` and `readyz` can be used to indicate if you want to check the liveness or the readiness of the API server, respectively.
The `<healthcheck>` path can be discovered using the `verbose` flag from above and take the path between `[+]` and `ok`.
These individual health checks should not be consumed by machines but can be helpful for a human operator to debug a system:

diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md
index c48dfdb10963a..830a09133466f 100644
--- a/content/en/docs/reference/using-api/server-side-apply.md
+++ b/content/en/docs/reference/using-api/server-side-apply.md
@@ -180,7 +180,7 @@ Managers identify distinct workflows that are modifying the object (especially

useful on conflicts!), and can be specified through the
[`fieldManager`](/docs/reference/kubernetes-api/common-parameters/common-parameters/#fieldManager)
query parameter as part of a modifying request. When you Apply to a resource,
-the `fieldManager` parameter is required
+the `fieldManager` parameter is required.
For other updates, the API server infers a field manager identity from the
"User-Agent:" HTTP header (if present).

@@ -214,17 +214,6 @@ Here's an example of a Server-Side Apply message body (fully specified intent):

of a **patch** request to a valid `v1/configmaps` resource, and with the
appropriate request `Content-Type`).

-## Server-Side Apply for custom resources {#custom-resources}
-
-By default, Server-Side Apply treats
-{{< glossary_tooltip term_id="CustomResourceDefinition" text="custom resources" >}}
-as unstructured data. All keys are treated the same as if they were struct fields for
-a built-in API, and all lists are considered atomic.
-
-If the CustomResourceDefinition defines a
-[schema](/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1/#JSONSchemaProps)
-that contains annotations as defined in [Merge strategy](#merge-strategy),
-then these annotations will be used when merging objects of this type.
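As a rough illustration of the field manager requirement described above, this is one way to perform a Server-Side Apply from the command line; the manager name `my-deploy-tool` and the file name are placeholders, not anything this page prescribes:

```shell
# Server-Side Apply with an explicit field manager; any conflicts
# will be attributed to the "my-deploy-tool" workflow
kubectl apply --server-side --field-manager=my-deploy-tool -f configmap.yaml
```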
## Operations in scope for field management {#apply-and-update}

@@ -394,7 +383,7 @@ read-modify-write and/or patch are the following:

to be specified.

It is strongly recommended for controllers to always force conflicts on objects that
-the own and manage, since they might not be able to resolve or act on these conflicts.
+they own and manage, since they might not be able to resolve or act on these conflicts.

## Transferring ownership

diff --git a/content/en/docs/setup/best-practices/certificates.md b/content/en/docs/setup/best-practices/certificates.md
index 8bcf5f7e1ecc4..f8af369c80412 100644
--- a/content/en/docs/setup/best-practices/certificates.md
+++ b/content/en/docs/setup/best-practices/certificates.md
@@ -95,6 +95,12 @@ Required certificates:

| kube-apiserver-kubelet-client | kubernetes-ca | system:masters | client | |
| front-proxy-client | kubernetes-front-proxy-ca | | client | |

+{{< note >}}
+Instead of using the super-user group `system:masters` for `kube-apiserver-kubelet-client`
+a less privileged group can be used. kubeadm uses the `kubeadm:cluster-admins` group for
+that purpose.
+{{< /note >}}
+
[1]: any other IP or DNS name you contact your cluster on (as used by [kubeadm](/docs/reference/setup-tools/kubeadm/)
the load balancer stable IP and/or DNS name, `kubernetes`, `kubernetes.default`, `kubernetes.default.svc`,
`kubernetes.default.svc.cluster`, `kubernetes.default.svc.cluster.local`)

@@ -184,12 +190,13 @@ you need to provide if you are generating all of your own keys and certificates:

You must manually configure these administrator account and service accounts:

-| filename                | credential name            | Default CN                          | O (in Subject) |
-|-------------------------|----------------------------|-------------------------------------|----------------|
-| admin.conf              | default-admin              | kubernetes-admin                    | system:masters |
-| kubelet.conf            | default-auth               | system:node:`<nodeName>` (see note) | system:nodes   |
-| controller-manager.conf | default-controller-manager | system:kube-controller-manager      |                |
-| scheduler.conf          | default-scheduler          | system:kube-scheduler               |                |
+| filename                | credential name            | Default CN                          | O (in Subject)         |
+|-------------------------|----------------------------|-------------------------------------|------------------------|
+| admin.conf              | default-admin              | kubernetes-admin                    | `<admin-group>`        |
+| super-admin.conf        | default-super-admin        | kubernetes-super-admin              | system:masters         |
+| kubelet.conf            | default-auth               | system:node:`<nodeName>` (see note) | system:nodes           |
+| controller-manager.conf | default-controller-manager | system:kube-controller-manager      |                        |
+| scheduler.conf          | default-scheduler          | system:kube-scheduler               |                        |

{{< note >}}
The value of `<nodeName>` for `kubelet.conf` **must** match precisely the value of the node name
@@ -197,6 +204,22 @@ provided by the kubelet as it registers with the apiserver. For further details,
[Node Authorization](/docs/reference/access-authn-authz/node/).
{{< /note >}}

+{{< note >}}
+In the above example `<admin-group>` is implementation specific. Some tools sign the
+certificate in the default `admin.conf` to be part of the `system:masters` group.
+`system:masters` is a break-glass, super user group that can bypass the authorization
+layer of Kubernetes, such as RBAC. Also, some tools do not generate a separate
+`super-admin.conf` with a certificate bound to this super user group.
+
+kubeadm generates two separate administrator certificates in kubeconfig files.
+One is in `admin.conf` and has `Subject: O = kubeadm:cluster-admins, CN = kubernetes-admin`.
+`kubeadm:cluster-admins` is a custom group bound to the `cluster-admin` ClusterRole.
+This file is generated on all kubeadm managed control plane machines.
+
+The other is in `super-admin.conf` and has `Subject: O = system:masters, CN = kubernetes-super-admin`.
+This file is generated only on the node where `kubeadm init` was called.
+{{< /note >}}
+
1. For each config, generate an x509 cert/key pair with the given CN and O.

1. Run `kubectl` as follows for each config:

@@ -213,6 +236,7 @@ These files are used as follows:

| filename                | command                 | comment                                                                |
|-------------------------|-------------------------|------------------------------------------------------------------------|
| admin.conf              | kubectl                 | Configures administrator user for the cluster                          |
+| super-admin.conf        | kubectl                 | Configures super administrator user for the cluster                    |
| kubelet.conf            | kubelet                 | One required for each node in the cluster.                             |
| controller-manager.conf | kube-controller-manager | Must be added to manifest in `manifests/kube-controller-manager.yaml`  |
| scheduler.conf          | kube-scheduler          | Must be added to manifest in `manifests/kube-scheduler.yaml`           |

The following files illustrate full paths to the files listed in the previous table:

```
/etc/kubernetes/admin.conf
+/etc/kubernetes/super-admin.conf
/etc/kubernetes/kubelet.conf
/etc/kubernetes/controller-manager.conf
/etc/kubernetes/scheduler.conf
diff --git a/content/en/docs/setup/production-environment/_index.md b/content/en/docs/setup/production-environment/_index.md
index 054c788088c2a..7aeb4eb1919bf 100644
--- a/content/en/docs/setup/production-environment/_index.md
+++ b/content/en/docs/setup/production-environment/_index.md
@@ -296,9 +296,8 @@ needs of your cluster's workloads:

and the [API server](/docs/setup/production-environment/tools/kubeadm/ha-topology/).
- Choose from [kubeadm](/docs/setup/production-environment/tools/kubeadm/),
-  [kops](/docs/setup/production-environment/tools/kops/) or
-  [Kubespray](/docs/setup/production-environment/tools/kubespray/)
-  deployment methods.
+  [kops](https://kops.sigs.k8s.io/) or
+  [Kubespray](https://kubespray.io/) deployment methods.
- Configure user management by determining your
[Authentication](/docs/reference/access-authn-authz/authentication/) and
[Authorization](/docs/reference/access-authn-authz/authorization/) methods.

diff --git a/content/en/docs/setup/production-environment/tools/_index.md b/content/en/docs/setup/production-environment/tools/_index.md
index 5beb1d5a9da43..c48dda146c4af 100644
--- a/content/en/docs/setup/production-environment/tools/_index.md
+++ b/content/en/docs/setup/production-environment/tools/_index.md
@@ -1,4 +1,23 @@
---
title: Installing Kubernetes with deployment tools
weight: 30
+no_list: true
---
+
+There are many methods and tools for setting up your own production Kubernetes cluster.
+For example:
+
+- [kubeadm](/docs/setup/production-environment/tools/kubeadm/)
+
+- [kops](https://kops.sigs.k8s.io/): An automated cluster provisioning tool.
+  For tutorials, best practices, configuration options and information on
+  reaching out to the community, please check the
+  [`kOps` website](https://kops.sigs.k8s.io/) for details.
+
+- [kubespray](https://kubespray.io/):
+  A composition of [Ansible](https://docs.ansible.com/) playbooks,
+  [inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md#inventory),
+  provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration
+  management tasks.
You can reach out to the community on Slack channel + [#kubespray](https://kubernetes.slack.com/messages/kubespray/). + diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md deleted file mode 100644 index 1e52de6fa94bf..0000000000000 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ /dev/null @@ -1,237 +0,0 @@ ---- -title: Installing Kubernetes with kOps -content_type: task -weight: 20 ---- - - - -This quickstart shows you how to easily install a Kubernetes cluster on AWS. -It uses a tool called [`kOps`](https://github.com/kubernetes/kops). - -`kOps` is an automated provisioning system: - -* Fully automated installation -* Uses DNS to identify clusters -* Self-healing: everything runs in Auto-Scaling Groups -* Multiple OS support (Amazon Linux, Debian, Flatcar, RHEL, Rocky and Ubuntu) - see the - [images.md](https://github.com/kubernetes/kops/blob/master/docs/operations/images.md) -* High-Availability support - see the - [high_availability.md](https://github.com/kubernetes/kops/blob/master/docs/operations/high_availability.md) -* Can directly provision, or generate terraform manifests - see the - [terraform.md](https://github.com/kubernetes/kops/blob/master/docs/terraform.md) - -## {{% heading "prerequisites" %}} - -* You must have [kubectl](/docs/tasks/tools/) installed. - -* You must [install](https://github.com/kubernetes/kops#installing) `kops` on a 64-bit (AMD64 and Intel 64) device architecture. - -* You must have an [AWS account](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), - generate [IAM keys](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) - and [configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) them. - The IAM user will need [adequate permissions](https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md#setup-iam-user). - - - -## Creating a cluster - -### (1/5) Install kops - -#### Installation - -Download kops from the [releases page](https://github.com/kubernetes/kops/releases) -(it is also convenient to build from source): - -{{< tabs name="kops_installation" >}} -{{% tab name="macOS" %}} - -Download the latest release with the command: - -```shell -curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64 -``` - -To download a specific version, replace the following portion of the command with the specific kops version. - -```shell -$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) -``` - -For example, to download kops version v1.20.0 type: - -```shell -curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-darwin-amd64 -``` - -Make the kops binary executable. - -```shell -chmod +x kops-darwin-amd64 -``` - -Move the kops binary in to your PATH. - -```shell -sudo mv kops-darwin-amd64 /usr/local/bin/kops -``` - -You can also install kops using [Homebrew](https://brew.sh/). 
- -```shell -brew update && brew install kops -``` -{{% /tab %}} -{{% tab name="Linux" %}} - -Download the latest release with the command: - -```shell -curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64 -``` - -To download a specific version of kops, replace the following portion of the command with the specific kops version. - -```shell -$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) -``` - -For example, to download kops version v1.20.0 type: - -```shell -curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-linux-amd64 -``` - -Make the kops binary executable - -```shell -chmod +x kops-linux-amd64 -``` - -Move the kops binary in to your PATH. - -```shell -sudo mv kops-linux-amd64 /usr/local/bin/kops -``` - -You can also install kops using [Homebrew](https://docs.brew.sh/Homebrew-on-Linux). - -```shell -brew update && brew install kops -``` - -{{% /tab %}} -{{< /tabs >}} - -### (2/5) Create a route53 domain for your cluster - -kops uses DNS for discovery, both inside the cluster and outside, so that you can reach the kubernetes API server -from clients. - -kops has a strong opinion on the cluster name: it should be a valid DNS name. By doing so you will -no longer get your clusters confused, you can share clusters with your colleagues unambiguously, -and you can reach them without relying on remembering an IP address. - -You can, and probably should, use subdomains to divide your clusters. As our example we will use -`useast1.dev.example.com`. The API server endpoint will then be `api.useast1.dev.example.com`. - -A Route53 hosted zone can serve subdomains. Your hosted zone could be `useast1.dev.example.com`, -but also `dev.example.com` or even `example.com`. kops works with any of these, so typically -you choose for organization reasons (e.g. you are allowed to create records under `dev.example.com`, -but not under `example.com`). - -Let's assume you're using `dev.example.com` as your hosted zone. You create that hosted zone using -the [normal process](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingNewSubdomain.html), or -with a command such as `aws route53 create-hosted-zone --name dev.example.com --caller-reference 1`. - -You must then set up your NS records in the parent domain, so that records in the domain will resolve. Here, -you would create NS records in `example.com` for `dev`. If it is a root domain name you would configure the NS -records at your domain registrar (e.g. `example.com` would need to be configured where you bought `example.com`). - -Verify your route53 domain setup (it is the #1 cause of problems!). You can double-check that -your cluster is configured correctly if you have the dig tool by running: - -`dig NS dev.example.com` - -You should see the 4 NS records that Route53 assigned your hosted zone. - -### (3/5) Create an S3 bucket to store your clusters state - -kops lets you manage your clusters even after installation. To do this, it must keep track of the clusters -that you have created, along with their configuration, the keys they are using etc. This information is stored -in an S3 bucket. S3 permissions are used to control access to the bucket. 
- -Multiple clusters can use the same S3 bucket, and you can share an S3 bucket between your colleagues that -administer the same clusters - this is much easier than passing around kubecfg files. But anyone with access -to the S3 bucket will have administrative access to all your clusters, so you don't want to share it beyond -the operations team. - -So typically you have one S3 bucket for each ops team (and often the name will correspond -to the name of the hosted zone above!) - -In our example, we chose `dev.example.com` as our hosted zone, so let's pick `clusters.dev.example.com` as -the S3 bucket name. - -* Export `AWS_PROFILE` (if you need to select a profile for the AWS CLI to work) - -* Create the S3 bucket using `aws s3 mb s3://clusters.dev.example.com` - -* You can `export KOPS_STATE_STORE=s3://clusters.dev.example.com` and then kops will use this location by default. - We suggest putting this in your bash profile or similar. - -### (4/5) Build your cluster configuration - -Run `kops create cluster` to create your cluster configuration: - -`kops create cluster --zones=us-east-1c useast1.dev.example.com` - -kops will create the configuration for your cluster. Note that it _only_ creates the configuration, it does -not actually create the cloud resources - you'll do that in the next step with a `kops update cluster`. This -give you an opportunity to review the configuration or change it. - -It prints commands you can use to explore further: - -* List your clusters with: `kops get cluster` -* Edit this cluster with: `kops edit cluster useast1.dev.example.com` -* Edit your node instance group: `kops edit ig --name=useast1.dev.example.com nodes` -* Edit your master instance group: `kops edit ig --name=useast1.dev.example.com master-us-east-1c` - -If this is your first time using kops, do spend a few minutes to try those out! An instance group is a -set of instances, which will be registered as kubernetes nodes. On AWS this is implemented via auto-scaling-groups. -You can have several instance groups, for example if you wanted nodes that are a mix of spot and on-demand instances, or -GPU and non-GPU instances. - -### (5/5) Create the cluster in AWS - -Run `kops update cluster` to create your cluster in AWS: - -`kops update cluster useast1.dev.example.com --yes` - -That takes a few seconds to run, but then your cluster will likely take a few minutes to actually be ready. -`kops update cluster` will be the tool you'll use whenever you change the configuration of your cluster; it -applies the changes you have made to the configuration to your cluster - reconfiguring AWS or kubernetes as needed. - -For example, after you `kops edit ig nodes`, then `kops update cluster --yes` to apply your configuration, and -sometimes you will also have to `kops rolling-update cluster` to roll out the configuration immediately. - -Without `--yes`, `kops update cluster` will show you a preview of what it is going to do. This is handy -for production clusters! - -### Explore other add-ons - -See the [list of add-ons](/docs/concepts/cluster-administration/addons/) to explore other add-ons, -including tools for logging, monitoring, network policy, visualization, and control of your Kubernetes cluster. - -## Cleanup - -* To delete your cluster: `kops delete cluster useast1.dev.example.com --yes` - -## {{% heading "whatsnext" %}} - -* Learn more about Kubernetes [concepts](/docs/concepts/) and [`kubectl`](/docs/reference/kubectl/). 
-
-* Learn more about `kOps` [advanced usage](https://kops.sigs.k8s.io/) for tutorials,
-  best practices and advanced configuration options.
-* Follow `kOps` community discussions on Slack:
-  [community discussions](https://kops.sigs.k8s.io/contributing/#other-ways-to-communicate-with-the-contributors).
-  (visit https://slack.k8s.io/ for an invitation to this Slack workspace).
-* Contribute to `kOps` by addressing or raising an issue [GitHub Issues](https://github.com/kubernetes/kops/issues).
\ No newline at end of file
diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
index 5db35089784f0..61b62893288c4 100644
--- a/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
+++ b/content/en/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md
@@ -72,6 +72,8 @@ Any commands under `kubeadm alpha` are, by definition, supported on an alpha lev

### Preparing the hosts

+#### Component installation
+
Install a {{< glossary_tooltip term_id="container-runtime" text="container runtime" >}} and kubeadm on all the hosts.
For detailed instructions and other prerequisites, see
[Installing kubeadm](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/).

@@ -84,6 +86,63 @@ kubeadm to tell it what to do. This crashloop is expected and normal.
After you initialize your control-plane, the kubelet runs normally.
{{< /note >}}

+#### Network setup
+
+kubeadm, similarly to other Kubernetes components, tries to find a usable IP on
+the network interfaces associated with a default gateway on a host. Such
+an IP is then used for the advertising and/or listening performed by a component.
+
+To find out what this IP is on a Linux host, you can use:
+
+```shell
+ip route show # Look for a line starting with "default via"
+```
+
+{{< note >}}
+If two or more default gateways are present on the host, a Kubernetes component will
+try to use the first one it encounters that has a suitable global unicast IP address.
+While making this choice, the exact ordering of gateways might vary between different
+operating systems and kernel versions.
+{{< /note >}}
+
+Kubernetes components do not accept a custom network interface as an option;
+therefore, a custom IP address must be passed as a flag to all component instances
+that need such a custom configuration.
+
+{{< note >}}
+If the host does not have a default gateway and if a custom IP address is not passed
+to a Kubernetes component, the component may exit with an error.
+{{< /note >}}
+
+To configure the API server advertise address for control plane nodes created with both
+`init` and `join`, the flag `--apiserver-advertise-address` can be used.
+Preferably, this option can be set in the [kubeadm API](/docs/reference/config-api/kubeadm-config.v1beta3)
+as `InitConfiguration.localAPIEndpoint` and `JoinConfiguration.controlPlane.localAPIEndpoint`.
+
+For kubelets on all nodes, the `--node-ip` option can be passed in
+`.nodeRegistration.kubeletExtraArgs` inside a kubeadm configuration file
+(`InitConfiguration` or `JoinConfiguration`).
+
+For dual-stack, see
+[Dual-stack support with kubeadm](/docs/setup/production-environment/tools/kubeadm/dual-stack-support).
+
+The IP addresses that you assign to control plane components become part of their X.509 certificates'
+subject alternative name fields.
Changing these IP addresses would require
+signing new certificates and restarting the affected components, so that the change in
+certificate files is reflected. See
+[Manual certificate renewal](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#manual-certificate-renewal)
+for more details on this topic.
+
+{{< warning >}}
+The Kubernetes project recommends against this approach (configuring all component instances
+with custom IP addresses). Instead, the Kubernetes maintainers recommend setting up the host network
+so that the default gateway IP is the one that Kubernetes components auto-detect and use.
+On Linux nodes, you can use commands such as `ip route` to configure networking; your operating
+system might also provide higher level network management tools. If your node's default gateway
+is a public IP address, you should configure packet filtering or other security measures that
+protect the nodes and your cluster.
+{{< /warning >}}
+
### Preparing the required container images

This step is optional and only applies in case you wish `kubeadm init` and `kubeadm join`
@@ -117,11 +176,6 @@ a provider-specific value. See [Installing a Pod network add-on](#pod-network).

known endpoints. To use different container runtime or if there are more than one installed
on the provisioned node, specify the `--cri-socket` argument to `kubeadm`. See
[Installing a runtime](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-runtime).
-1. (Optional) Unless otherwise specified, `kubeadm` uses the network interface associated
-with the default gateway to set the advertise address for this particular control-plane node's API server.
-To use a different network interface, specify the `--apiserver-advertise-address=<ip-address>` argument
-to `kubeadm init`. To deploy an IPv6 Kubernetes cluster using IPv6 addressing, you
-must specify an IPv6 address, for example `--apiserver-advertise-address=2001:db8::101`

To initialize the control-plane node run:

@@ -211,11 +265,19 @@ export KUBECONFIG=/etc/kubernetes/admin.conf
```

{{< warning >}}
-Kubeadm signs the certificate in the `admin.conf` to have `Subject: O = system:masters, CN = kubernetes-admin`.
-`system:masters` is a break-glass, super user group that bypasses the authorization layer (e.g. RBAC).
-Do not share the `admin.conf` file with anyone and instead grant users custom permissions by generating
-them a kubeconfig file using the `kubeadm kubeconfig user` command. For more details see
-[Generating kubeconfig files for additional users](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs#kubeconfig-additional-users).
+The kubeconfig file `admin.conf` that `kubeadm init` generates contains a certificate with
+`Subject: O = kubeadm:cluster-admins, CN = kubernetes-admin`. The group `kubeadm:cluster-admins`
+is bound to the built-in `cluster-admin` ClusterRole.
+Do not share the `admin.conf` file with anyone.
+
+`kubeadm init` generates another kubeconfig file `super-admin.conf` that contains a certificate with
+`Subject: O = system:masters, CN = kubernetes-super-admin`.
+`system:masters` is a break-glass, super user group that bypasses the authorization layer (for example RBAC).
+Do not share the `super-admin.conf` file with anyone. It is recommended to move the file to a safe location.
+
+See
+[Generating kubeconfig files for additional users](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs#kubeconfig-additional-users)
+for how to use `kubeadm kubeconfig user` to generate kubeconfig files for additional users.
{{< /warning >}} Make a record of the `kubeadm join` command that `kubeadm init` outputs. You @@ -551,7 +613,7 @@ version as kubeadm or one version older. Example: * kubeadm is at {{< skew currentVersion >}} -* kubelet on the host must be at {{< skew currentVersion >}} or {{< skew currentVersionAddMinor -1 >}} +* kubelet on the host must be at {{< skew currentVersion >}}, {{< skew currentVersionAddMinor -1 >}}, {{< skew currentVersionAddMinor -2 >}} or {{< skew currentVersionAddMinor -3 >}} ### kubeadm's skew against kubeadm diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md index c82a84d616823..98ba9069ea324 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/high-availability.md @@ -218,8 +218,10 @@ option. Your cluster requirements may need a different configuration. kubeadm certs certificate-key ``` + The certificate key is a hex encoded string that is an AES key of size 32 bytes. + {{< note >}} - The `kubeadm-certs` Secret and decryption key expire after two hours. + The `kubeadm-certs` Secret and the decryption key expire after two hours. {{< /note >}} {{< caution >}} diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 8125e3857f96c..07265d0fac62b 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -15,10 +15,10 @@ This page shows how to install the `kubeadm` toolbox. For information on how to create a cluster with kubeadm once you have performed this installation process, see the [Creating a cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) page. +{{< doc-versions-list "installation guide" >}} ## {{% heading "prerequisites" %}} - * A compatible Linux host. The Kubernetes project provides generic instructions for Linux distributions based on Debian and Red Hat, and those distributions without a package manager. * 2 GB or more of RAM per machine (any less will leave little room for your apps). @@ -33,6 +33,14 @@ see the [Creating a cluster with kubeadm](/docs/setup/production-environment/too will disable swapping temporarily. To make this change persistent across reboots, make sure swap is disabled in config files like `/etc/fstab`, `systemd.swap`, depending how it was configured on your system. +{{< note >}} +The `kubeadm` installation is done via binaries that use dynamic linking and assumes that your target system provides `glibc`. +This is a reasonable assumption on many Linux distributions (including Debian, Ubuntu, Fedora, CentOS, etc.) +but it is not always the case with custom and lightweight distributions which don't include `glibc` by default, such as Alpine Linux. +The expectation is that the distribution either includes `glibc` or a [compatibility layer](https://wiki.alpinelinux.org/wiki/Running_glibc_programs) +that provides the expected symbols. +{{< /note >}} + ## Verify the MAC address and product_uuid are unique for every node {#verify-mac-address} @@ -51,6 +59,7 @@ If you have more than one network adapter, and your Kubernetes components are no route, we recommend you add IP route(s) so Kubernetes cluster addresses go via the appropriate adapter. 
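As an illustration only, a route such as the following sends traffic for one cluster subnet through a specific adapter; the CIDR `10.96.0.0/12` and the interface name `eth1` are hypothetical values that you would replace with your own:

```shell
# Route traffic for a (hypothetical) cluster service subnet via the second adapter
sudo ip route add 10.96.0.0/12 dev eth1
```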
## Check required ports + These [required ports](/docs/reference/networking/ports-and-protocols/) need to be open in order for Kubernetes components to communicate with each other. You can use tools like netcat to check if a port is open. For example: @@ -123,7 +132,7 @@ You will install these packages on all of your machines: * `kubeadm`: the command to bootstrap the cluster. * `kubelet`: the component that runs on all of the machines in your cluster - and does things like starting pods and containers. + and does things like starting pods and containers. * `kubectl`: the command line util to talk to your cluster. @@ -148,30 +157,17 @@ For more information on version skews, see: * Kubernetes [version and version-skew policy](/docs/setup/release/version-skew-policy/) * Kubeadm-specific [version skew policy](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#version-skew-policy) -{{< note >}} -Kubernetes has two different package repositories starting from August 2023. -The Google-hosted repository is deprecated and it's being replaced with the -Kubernetes (community-owned) package repositories. The Kubernetes project strongly -recommends using the Kubernetes community-owned package repositories, because the -project plans to stop publishing packages to the Google-hosted repository in the future. - -There are some important considerations for the Kubernetes package repositories: - -- The Kubernetes package repositories contain packages beginning with those - Kubernetes versions that were still under support when the community took - over the package builds. This means that anything before v1.24.0 will only be - available in the Google-hosted repository. -- There's a dedicated package repository for each Kubernetes minor version. - When upgrading to a different minor release, you must bear in mind that - the package repository details also change. +{{% legacy-repos-deprecation %}} +{{< note >}} +There's a dedicated package repository for each Kubernetes minor version. If you want to install +a minor version other than {{< skew currentVersion >}}, please see the installation guide for +your desired minor version. {{< /note >}} {{< tabs name="k8s_install" >}} {{% tab name="Debian-based distributions" %}} -### Kubernetes package repositories {#dpkg-k8s-package-repo} - These instructions are for Kubernetes {{< skew currentVersion >}}. 1. Update the `apt` package index and install packages needed to use the Kubernetes `apt` repository: @@ -179,16 +175,21 @@ These instructions are for Kubernetes {{< skew currentVersion >}}. ```shell sudo apt-get update # apt-transport-https may be a dummy package; if so, you can skip that package - sudo apt-get install -y apt-transport-https ca-certificates curl + sudo apt-get install -y apt-transport-https ca-certificates curl gpg ``` -2. Download the public signing key for the Kubernetes package repositories. The same signing key is used for all repositories so you can disregard the version in the URL: +2. Download the public signing key for the Kubernetes package repositories. + The same signing key is used for all repositories so you can disregard the version in the URL: ```shell curl -fsSL https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg ``` -3. Add the appropriate Kubernetes `apt` repository: +3. Add the appropriate Kubernetes `apt` repository. 
Please note that this repository has packages
   only for Kubernetes {{< skew currentVersion >}}; for other Kubernetes minor versions, you need to
   change the Kubernetes minor version in the URL to match your desired minor version
   (you should also check that you are reading the documentation for the version of Kubernetes
   that you plan to install).

   ```shell
   # This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list
   echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
   ```

4. Update the `apt` package index, install kubelet, kubeadm and kubectl, and pin their version:

   ```shell
   sudo apt-get update
   sudo apt-get install -y kubelet kubeadm kubectl
   sudo apt-mark hold kubelet kubeadm kubectl
   ```

{{< note >}}
In releases older than Debian 12 and Ubuntu 22.04, `/etc/apt/keyrings` does not exist by default;
you can create it by running `sudo mkdir -m 755 /etc/apt/keyrings`
{{< /note >}}

-### Google-hosted package repository (deprecated) {#dpkg-google-package-repo}
-
-These instructions are for Kubernetes {{< skew currentVersion >}}.
-
-1. Update the `apt` package index and install packages needed to use the Kubernetes `apt` repository:
-
-   ```shell
-   sudo apt-get update
-   # apt-transport-https may be a dummy package; if so, you can skip that package
-   sudo apt-get install -y apt-transport-https ca-certificates curl
-   ```
-
-2. Download the Google Cloud public signing key:
-
-   ```shell
-   curl -fsSL https://dl.k8s.io/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
-   ```
-
-3. Add the Google-hosted `apt` repository:
-
-   ```shell
-   # This overwrites any existing configuration in /etc/apt/sources.list.d/kubernetes.list
-   echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
-   ```
-
-4. Update the `apt` package index, install kubelet, kubeadm and kubectl, and pin their version:
-
-   ```shell
-   sudo apt-get update
-   sudo apt-get install -y kubelet kubeadm kubectl
-   sudo apt-mark hold kubelet kubeadm kubectl
-   ```
-
-{{< note >}}
-In releases older than Debian 12 and Ubuntu 22.04, `/etc/apt/keyrings` does not exist by default;
-you can create it by running `sudo mkdir -m 755 /etc/apt/keyrings`
-{{< /note >}}
-
{{% /tab %}}
{{% tab name="Red Hat-based distributions" %}}

1. Set SELinux to `permissive` mode:

-```shell
-# Set SELinux in permissive mode (effectively disabling it)
-sudo setenforce 0
-sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
-```
+   These instructions are for Kubernetes {{< skew currentVersion >}}.
+
+   ```shell
+   # Set SELinux in permissive mode (effectively disabling it)
+   sudo setenforce 0
+   sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+   ```

{{< caution >}}
-   Setting SELinux in permissive mode by running `setenforce 0` and `sed ...`
-   effectively disables it. This is required to allow containers to access the host
-   filesystem; for example, some cluster network plugins require that. You have to
-   do this until SELinux support is improved in the kubelet.
+Setting SELinux in permissive mode by running `setenforce 0` and `sed ...`
+effectively disables it. This is required to allow containers to access the host
+filesystem; for example, some cluster network plugins require that. You have to
+do this until SELinux support is improved in the kubelet.

-   You can leave SELinux enabled if you know how to configure it but it may require
-   settings that are not supported by kubeadm.
+You can leave SELinux enabled if you know how to configure it but it may require
+settings that are not supported by kubeadm.
{{< /caution >}}

-### Kubernetes package repositories {#rpm-k8s-package-repo}
-
-These instructions are for Kubernetes {{< skew currentVersion >}}.
-
2. Add the Kubernetes `yum` repository.
The `exclude` parameter in the
   repository definition ensures that the packages related to Kubernetes are
   not upgraded upon running `yum update` as there's a special procedure that
-   must be followed for upgrading Kubernetes.
+   must be followed for upgrading Kubernetes. Please note that this repository
+   has packages only for Kubernetes {{< skew currentVersion >}}; for other
+   Kubernetes minor versions, you need to change the Kubernetes minor version
+   in the URL to match your desired minor version (you should also check that
+   you are reading the documentation for the version of Kubernetes that you
+   plan to install).

-```shell
-# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
-cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
-[kubernetes]
-name=Kubernetes
-baseurl=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/
-enabled=1
-gpgcheck=1
-gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key
-exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
-EOF
-```
-
-3. Install kubelet, kubeadm and kubectl, and enable kubelet to ensure it's automatically started on startup:
-
-```shell
-sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
-sudo systemctl enable --now kubelet
-```
-
-### Google-hosted package repository (deprecated) {#rpm-google-package-repo}
-
-These instructions are for Kubernetes {{< skew currentVersion >}}.
-
-2. Add the Google-hosted `yum` repository. The `exclude` parameter in the
-   repository definition ensures that the packages related to Kubernetes are
-   not upgraded upon running `yum update` as there's a special procedure that
-   must be followed for upgrading Kubernetes.

-```shell
-# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
-cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
-[kubernetes]
-name=Kubernetes
-baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
-enabled=1
-gpgcheck=1
-gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
-exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
-EOF
-```
+   ```shell
+   # This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
+   cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+   [kubernetes]
+   name=Kubernetes
+   baseurl=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/
+   enabled=1
+   gpgcheck=1
+   gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key
+   exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
+   EOF
+   ```

3. Install kubelet, kubeadm and kubectl, and enable kubelet to ensure it's automatically started on startup:

-```shell
-sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
-sudo systemctl enable --now kubelet
-```
-
-{{< note >}}
-If the `baseurl` fails because your RPM-based distribution cannot interpret `$basearch`, replace `\$basearch` with your computer's architecture.
-Type `uname -m` to see that value.
-For example, the `baseurl` URL for `x86_64` could be: `https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64`.
-{{< /note >}}
+   ```shell
+   sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
+   sudo systemctl enable --now kubelet
+   ```

{{% /tab %}}
{{% tab name="Without a package manager" %}}
@@ -342,7 +273,7 @@ sudo mkdir -p "$DEST"

curl -L "https://github.com/containernetworking/plugins/releases/download/${CNI_PLUGINS_VERSION}/cni-plugins-linux-${ARCH}-${CNI_PLUGINS_VERSION}.tgz" | sudo tar -C "$DEST" -xz
```

-Define the directory to download command files
+Define the directory to download command files:

{{< note >}}
The `DOWNLOAD_DIR` variable must be set to a writable directory.
@@ -354,7 +285,7 @@ DOWNLOAD_DIR="/usr/local/bin" sudo mkdir -p "$DOWNLOAD_DIR" ``` -Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI)) +Install crictl (required for kubeadm / Kubelet Container Runtime Interface (CRI)): ```bash CRICTL_VERSION="v1.28.0" @@ -371,12 +302,17 @@ cd $DOWNLOAD_DIR sudo curl -L --remote-name-all https://dl.k8s.io/release/${RELEASE}/bin/linux/${ARCH}/{kubeadm,kubelet} sudo chmod +x {kubeadm,kubelet} -RELEASE_VERSION="v0.15.1" -curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service +RELEASE_VERSION="v0.16.2" +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubelet/kubelet.service" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service sudo mkdir -p /etc/systemd/system/kubelet.service.d -curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf +curl -sSL "https://raw.githubusercontent.com/kubernetes/release/${RELEASE_VERSION}/cmd/krel/templates/latest/kubeadm/10-kubeadm.conf" | sed "s:/usr/bin:${DOWNLOAD_DIR}:g" | sudo tee /etc/systemd/system/kubelet.service.d/10-kubeadm.conf ``` +{{< note >}} +Please refer to the note in the [Before you begin](#before-you-begin) section for Linux distributions +that do not include `glibc` by default. +{{< /note >}} + Install `kubectl` by following the instructions on [Install Tools page](/docs/tasks/tools/#kubectl). Enable and start `kubelet`: @@ -388,12 +324,12 @@ systemctl enable --now kubelet {{< note >}} The Flatcar Container Linux distribution mounts the `/usr` directory as a read-only filesystem. Before bootstrapping your cluster, you need to take additional steps to configure a writable directory. -See the [Kubeadm Troubleshooting guide](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#usr-mounted-read-only/) to learn how to set up a writable directory. +See the [Kubeadm Troubleshooting guide](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/#usr-mounted-read-only/) +to learn how to set up a writable directory. {{< /note >}} {{% /tab %}} {{< /tabs >}} - The kubelet is now restarting every few seconds, as it waits in a crashloop for kubeadm to tell it what to do. @@ -411,7 +347,8 @@ See [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configu ## Troubleshooting -If you are running into difficulties with kubeadm, please consult our [troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). +If you are running into difficulties with kubeadm, please consult our +[troubleshooting docs](/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/). 
## {{% heading "whatsnext" %}}

diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md
index d8c1499ae97c9..3e91058180b13 100644
--- a/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md
+++ b/content/en/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md
@@ -162,12 +162,10 @@ Kubeadm deletes the `/etc/kubernetes/bootstrap-kubelet.conf` file after completi
 Note that the kubeadm CLI command never touches this drop-in file.
 
 This configuration file installed by the `kubeadm`
-[DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubeadm/10-kubeadm.conf) or
-[RPM package](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubeadm/10-kubeadm.conf) is written to
+[package](https://github.com/kubernetes/release/blob/cd53840/cmd/krel/templates/latest/kubeadm/10-kubeadm.conf) is written to
 `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` and is used by systemd.
 It augments the basic
-[`kubelet.service` for RPM](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/rpm/kubelet/kubelet.service) or
-[`kubelet.service` for DEB](https://github.com/kubernetes/release/blob/master/cmd/kubepkg/templates/latest/deb/kubelet/lib/systemd/system/kubelet.service):
+[`kubelet.service`](https://github.com/kubernetes/release/blob/cd53840/cmd/krel/templates/latest/kubelet/kubelet.service):
 
 {{< note >}}
 The contents below are just an example. If you don't want to use a package manager
diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
index fe3f08e57856f..abd3f3e0e4968 100644
--- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
+++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md
@@ -73,7 +73,8 @@ If you see the following warnings while running `kubeadm init`
 [preflight] WARNING: ethtool not found in system path
 ```
 
-Then you may be missing `ebtables`, `ethtool` or a similar executable on your node. You can install them with the following commands:
+Then you may be missing `ebtables`, `ethtool` or a similar executable on your node.
+You can install them with the following commands:
 
 - For Ubuntu/Debian users, run `apt install ebtables ethtool`.
 - For CentOS/Fedora users, run `yum install ebtables ethtool`.
@@ -90,9 +91,9 @@ This may be caused by a number of problems. The most common are:
 
 - network connection problems. Check that your machine has full network connectivity before continuing.
 - the cgroup driver of the container runtime differs from that of the kubelet. To understand how to
-  configure it properly see [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/).
+  configure it properly, see [Configuring a cgroup driver](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/).
 - control plane containers are crashlooping or hanging. You can check this by running `docker ps`
-  and investigating each container by running `docker logs`. For other container runtime see
+  and investigating each container by running `docker logs`. For other container runtimes, see
   [Debugging Kubernetes nodes with crictl](/docs/tasks/debug/debug-cluster/crictl/).
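If your cluster uses a container runtime other than Docker, the same inspection can be done with `crictl` — a sketch, assuming containerd listening on its default socket path:

```shell
# List all containers, including exited ones, to spot crashloops
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock ps -a

# Fetch the logs of a suspect container by the ID shown in the listing
# (<container-id> is a placeholder)
sudo crictl --runtime-endpoint unix:///run/containerd/containerd.sock logs <container-id>
```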
## kubeadm blocks when removing managed containers

@@ -144,10 +145,12 @@ provider. Please contact the author of the Pod Network add-on to find out whethe
 
 Calico, Canal, and Flannel CNI providers are verified to support HostPort.
 
-For more information, see the [CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md).
+For more information, see the
+[CNI portmap documentation](https://github.com/containernetworking/plugins/blob/master/plugins/meta/portmap/README.md).
 
-If your network provider does not support the portmap CNI plugin, you may need to use the [NodePort feature of
-services](/docs/concepts/services-networking/service/#type-nodeport) or use `HostNetwork=true`.
+If your network provider does not support the portmap CNI plugin, you may need to use the
+[NodePort feature of services](/docs/concepts/services-networking/service/#type-nodeport)
+or use `HostNetwork=true`.
 
 ## Pods are not accessible via their Service IP
 
@@ -157,9 +160,10 @@ services](/docs/concepts/services-networking/service/#type-nodeport) or use `Hos
   add-on provider to get the latest status of their support for hairpin mode.
 
 - If you are using VirtualBox (directly or via Vagrant), you will need to
-  ensure that `hostname -i` returns a routable IP address. By default the first
+  ensure that `hostname -i` returns a routable IP address. By default, the first
   interface is connected to a non-routable host-only network. A workaround
-  is to modify `/etc/hosts`, see this [Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11)
+  is to modify `/etc/hosts`; see this
+  [Vagrantfile](https://github.com/errordeveloper/k8s-playground/blob/22dd39dfc06111235620e6c4404a96ae146f26fd/Vagrantfile#L11)
   for an example.
 
 ## TLS certificate errors
 
@@ -175,6 +179,7 @@ Unable to connect to the server: x509: certificate signed by unknown authority (
   regenerate a certificate if necessary. The certificates in a kubeconfig file
   are base64 encoded. The `base64 --decode` command can be used to decode the certificate
   and `openssl x509 -text -noout` can be used for viewing the certificate information.
+
 - Unset the `KUBECONFIG` environment variable using:
 
   ```sh
@@ -190,7 +195,7 @@ Unable to connect to the server: x509: certificate signed by unknown authority (
 - Another workaround is to overwrite the existing `kubeconfig` for the "admin" user:
 
   ```sh
-  mv $HOME/.kube $HOME/.kube.bak
+  mv $HOME/.kube $HOME/.kube.bak
   mkdir $HOME/.kube
   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
   sudo chown $(id -u):$(id -g) $HOME/.kube/config
@@ -198,7 +203,8 @@ Unable to connect to the server: x509: certificate signed by unknown authority (
 
 ## Kubelet client certificate rotation fails {#kubelet-client-cert}
 
-By default, kubeadm configures a kubelet with automatic rotation of client certificates by using the `/var/lib/kubelet/pki/kubelet-client-current.pem` symlink specified in `/etc/kubernetes/kubelet.conf`.
+By default, kubeadm configures a kubelet with automatic rotation of client certificates by using the
+`/var/lib/kubelet/pki/kubelet-client-current.pem` symlink specified in `/etc/kubernetes/kubelet.conf`.
 If this rotation process fails, you might see errors such as `x509: certificate has expired or is not yet valid`
 in kube-apiserver logs.
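A quick way to confirm this failure mode is to check the validity window of the certificate the symlink currently points to — a sketch using `openssl`, assuming the default kubeadm paths mentioned above:

```shell
# Print the notBefore/notAfter dates of the kubelet's current client certificate
sudo openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -dates
```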
To fix the issue, you must follow these steps:

@@ -231,24 +237,34 @@ The following error might indicate that something was wrong in the pod network:
 Error from server (NotFound): the server could not find the requested resource
 ```
 
-- If you're using flannel as the pod network inside Vagrant, then you will have to specify the default interface name for flannel.
+- If you're using flannel as the pod network inside Vagrant, then you will have to
+  specify the default interface name for flannel.
 
-  Vagrant typically assigns two interfaces to all VMs. The first, for which all hosts are assigned the IP address `10.0.2.15`, is for external traffic that gets NATed.
+  Vagrant typically assigns two interfaces to all VMs. The first, for which all hosts
+  are assigned the IP address `10.0.2.15`, is for external traffic that gets NATed.
 
-  This may lead to problems with flannel, which defaults to the first interface on a host. This leads to all hosts thinking they have the same public IP address. To prevent this, pass the `--iface eth1` flag to flannel so that the second interface is chosen.
+  This may lead to problems with flannel, which defaults to the first interface on a host.
+  This leads to all hosts thinking they have the same public IP address. To prevent this,
+  pass the `--iface eth1` flag to flannel so that the second interface is chosen.
 
 ## Non-public IP used for containers
 
-In some situations `kubectl logs` and `kubectl run` commands may return with the following errors in an otherwise functional cluster:
+In some situations `kubectl logs` and `kubectl run` commands may return with the
+following errors in an otherwise functional cluster:
 
 ```console
 Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc65b868-glc5m/mysql: dial tcp 10.19.0.41:10250: getsockopt: no route to host
 ```
 
-- This may be due to Kubernetes using an IP that can not communicate with other IPs on the seemingly same subnet, possibly by policy of the machine provider.
-- DigitalOcean assigns a public IP to `eth0` as well as a private one to be used internally as anchor for their floating IP feature, yet `kubelet` will pick the latter as the node's `InternalIP` instead of the public one.
+- This may be due to Kubernetes using an IP that cannot communicate with other IPs on
+  the seemingly same subnet, possibly by policy of the machine provider.
+- DigitalOcean assigns a public IP to `eth0` as well as a private one to be used internally
+  as anchor for their floating IP feature, yet `kubelet` will pick the latter as the node's
+  `InternalIP` instead of the public one.
 
-  Use `ip addr show` to check for this scenario instead of `ifconfig` because `ifconfig` will not display the offending alias IP address. Alternatively an API endpoint specific to DigitalOcean allows to query for the anchor IP from the droplet:
+  Use `ip addr show` to check for this scenario instead of `ifconfig` because `ifconfig` will
+  not display the offending alias IP address. Alternatively, an API endpoint specific to
+  DigitalOcean allows you to query for the anchor IP from the droplet:
 
   ```sh
   curl http://169.254.169.254/metadata/v1/interfaces/public/0/anchor_ipv4/address
@@ -270,12 +286,13 @@ Error from server: Get https://10.19.0.41:10250/containerLogs/default/mysql-ddc6
 
 ## `coredns` pods have `CrashLoopBackOff` or `Error` state
 
-If you have nodes that are running SELinux with an older version of Docker you might experience a scenario
-where the `coredns` pods are not starting.
To solve that you can try one of the following options: +If you have nodes that are running SELinux with an older version of Docker, you might experience a scenario +where the `coredns` pods are not starting. To solve that, you can try one of the following options: - Upgrade to a [newer version of Docker](/docs/setup/production-environment/container-runtimes/#docker). - [Disable SELinux](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security-enhanced_linux/sect-security-enhanced_linux-enabling_and_disabling_selinux-disabling_selinux). + - Modify the `coredns` deployment to set `allowPrivilegeEscalation` to `true`: ```bash @@ -284,7 +301,8 @@ kubectl -n kube-system get deployment coredns -o yaml | \ kubectl apply -f - ``` -Another cause for CoreDNS to have `CrashLoopBackOff` is when a CoreDNS Pod deployed in Kubernetes detects a loop. [A number of workarounds](https://github.com/coredns/coredns/tree/master/plugin/loop#troubleshooting-loops-in-kubernetes-clusters) +Another cause for CoreDNS to have `CrashLoopBackOff` is when a CoreDNS Pod deployed in Kubernetes detects a loop. +[A number of workarounds](https://github.com/coredns/coredns/tree/master/plugin/loop#troubleshooting-loops-in-kubernetes-clusters) are available to avoid Kubernetes trying to restart the CoreDNS Pod every time CoreDNS detects the loop and exits. {{< warning >}} @@ -300,7 +318,7 @@ If you encounter the following error: rpc error: code = 2 desc = oci runtime error: exec failed: container_linux.go:247: starting container process caused "process_linux.go:110: decoding init error from pipe caused \"read parent: connection reset by peer\"" ``` -this issue appears if you run CentOS 7 with Docker 1.13.1.84. +This issue appears if you run CentOS 7 with Docker 1.13.1.84. This version of Docker can prevent the kubelet from executing into the etcd container. To work around the issue, choose one of these options: @@ -344,6 +362,7 @@ to pick up the node's IP address properly and has knock-on effects to the proxy load balancers. The following error can be seen in kube-proxy Pods: + ``` server.go:610] Failed to retrieve node IP: host IP unknown; known addresses: [] proxier.go:340] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP @@ -352,8 +371,26 @@ proxier.go:340] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP A known solution is to patch the kube-proxy DaemonSet to allow scheduling it on control-plane nodes regardless of their conditions, keeping it off of other nodes until their initial guarding conditions abate: + ``` -kubectl -n kube-system patch ds kube-proxy -p='{ "spec": { "template": { "spec": { "tolerations": [ { "key": "CriticalAddonsOnly", "operator": "Exists" }, { "effect": "NoSchedule", "key": "node-role.kubernetes.io/control-plane" } ] } } } }' +kubectl -n kube-system patch ds kube-proxy -p='{ + "spec": { + "template": { + "spec": { + "tolerations": [ + { + "key": "CriticalAddonsOnly", + "operator": "Exists" + }, + { + "effect": "NoSchedule", + "key": "node-role.kubernetes.io/control-plane" + } + ] + } + } + } +}' ``` The tracking issue for this problem is [here](https://github.com/kubernetes/kubeadm/issues/1027). @@ -365,12 +402,15 @@ For [flex-volume support](https://github.com/kubernetes/community/blob/ab55d85/c Kubernetes components like the kubelet and kube-controller-manager use the default path of `/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`, yet the flex-volume directory _must be writeable_ for the feature to work. 
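On a read-only distribution you can first verify the problem and prepare a writable alternative before applying the configuration shown below — a sketch; the `/opt/libexec/...` location is a common choice, not one mandated by kubeadm:

```shell
# On Flatcar Container Linux this directory lives under the read-only /usr
ls -ld /usr/libexec/kubernetes/kubelet-plugins/volume/exec/

# Create a writable directory to point the kubelet and
# kube-controller-manager at instead
sudo mkdir -p /opt/libexec/kubernetes/kubelet-plugins/volume/exec/
```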
-(**Note**: FlexVolume was deprecated in the Kubernetes v1.23 release)
-To workaround this issue you can configure the flex-volume directory using the kubeadm
+{{< note >}}
+FlexVolume was deprecated in the Kubernetes v1.23 release.
+{{< /note >}}
+
+To work around this issue, you can configure the flex-volume directory using the kubeadm
 [configuration file](/docs/reference/config-api/kubeadm-config.v1beta3/).
 
-On the primary control-plane Node (created using `kubeadm init`) pass the following
+On the primary control-plane Node (created using `kubeadm init`), pass the following
 file using `--config`:
 
 ```yaml
@@ -402,7 +442,10 @@ be advised that this is modifying a design principle of the Linux distribution.
 
 ## `kubeadm upgrade plan` prints out `context deadline exceeded` error message
 
-This error message is shown when upgrading a Kubernetes cluster with `kubeadm` in the case of running an external etcd. This is not a critical bug and happens because older versions of kubeadm perform a version check on the external etcd cluster. You can proceed with `kubeadm upgrade apply ...`.
+This error message is shown when upgrading a Kubernetes cluster with `kubeadm` in
+the case of running an external etcd. This is not a critical bug and happens because
+older versions of kubeadm perform a version check on the external etcd cluster.
+You can proceed with `kubeadm upgrade apply ...`.
 
 This issue is fixed as of version 1.19.
 
@@ -422,6 +465,7 @@ can be used insecurely by passing the `--kubelet-insecure-tls` to it. This is no
 If you want to use TLS between the metrics-server and the kubelet there is a problem,
 since kubeadm deploys a self-signed serving certificate for the kubelet. This can cause the following errors
 on the side of the metrics-server:
+
 ```
 x509: certificate signed by unknown authority
 x509: certificate is valid for IP-foo not IP-bar
@@ -431,3 +475,87 @@ See [Enabling signed kubelet serving certificates](/docs/tasks/administer-cluste
 to understand how to configure the kubelets in a kubeadm cluster to have properly signed serving certificates.
 
 Also see [How to run the metrics-server securely](https://github.com/kubernetes-sigs/metrics-server/blob/master/FAQ.md#how-to-run-metrics-server-securely).
+
+## Upgrade fails due to etcd hash not changing
+
+Only applicable to upgrading a control plane node with a kubeadm binary v1.28.3 or later,
+where the node is currently managed by kubeadm versions v1.28.0, v1.28.1 or v1.28.2.
+
+Here is the error message you may encounter:
+
+```
+[upgrade/etcd] Failed to upgrade etcd: couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced: static Pod hash for component etcd on Node kinder-upgrade-control-plane-1 did not change after 5m0s: timed out waiting for the condition
+[upgrade/etcd] Waiting for previous etcd to become available
+I0907 10:10:09.109104 3704 etcd.go:588] [etcd] attempting to see if all cluster endpoints ([https://172.17.0.6:2379/ https://172.17.0.4:2379/ https://172.17.0.3:2379/]) are available 1/10
+[upgrade/etcd] Etcd was rolled back and is now available
+static Pod hash for component etcd on Node kinder-upgrade-control-plane-1 did not change after 5m0s: timed out waiting for the condition
+couldn't upgrade control plane. kubeadm has tried to recover everything into the earlier state. Errors faced
+k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade.rollbackOldManifests
+	cmd/kubeadm/app/phases/upgrade/staticpods.go:525
+k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade.upgradeComponent
+	cmd/kubeadm/app/phases/upgrade/staticpods.go:254
+k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade.performEtcdStaticPodUpgrade
+	cmd/kubeadm/app/phases/upgrade/staticpods.go:338
+...
+```
+
+The reason for this failure is that the affected versions generate an etcd manifest file with
+unwanted defaults in the PodSpec. This will result in a diff from the manifest comparison,
+and kubeadm will expect a change in the Pod hash, but the kubelet will never update the hash.
+
+There are two ways to work around this issue if you see it in your cluster:
+
+- The etcd upgrade can be skipped between the affected versions and v1.28.3 (or later) by using:
+
+  ```shell
+  kubeadm upgrade {apply|node} [version] --etcd-upgrade=false
+  ```
+
+  This is not recommended in case a new etcd version was introduced by a later v1.28 patch version.
+
+- Before upgrade, patch the manifest for the etcd static pod, to remove the problematic defaulted attributes:
+
+  ```patch
+  diff --git a/etc/kubernetes/manifests/etcd_defaults.yaml b/etc/kubernetes/manifests/etcd_origin.yaml
+  index d807ccbe0aa..46b35f00e15 100644
+  --- a/etc/kubernetes/manifests/etcd_defaults.yaml
+  +++ b/etc/kubernetes/manifests/etcd_origin.yaml
+  @@ -43,7 +43,6 @@ spec:
+           scheme: HTTP
+         initialDelaySeconds: 10
+         periodSeconds: 10
+  -      successThreshold: 1
+         timeoutSeconds: 15
+       name: etcd
+       resources:
+  @@ -59,26 +58,18 @@ spec:
+           scheme: HTTP
+         initialDelaySeconds: 10
+         periodSeconds: 10
+  -      successThreshold: 1
+         timeoutSeconds: 15
+  -    terminationMessagePath: /dev/termination-log
+  -    terminationMessagePolicy: File
+       volumeMounts:
+       - mountPath: /var/lib/etcd
+         name: etcd-data
+       - mountPath: /etc/kubernetes/pki/etcd
+         name: etcd-certs
+  -  dnsPolicy: ClusterFirst
+  -  enableServiceLinks: true
+     hostNetwork: true
+     priority: 2000001000
+     priorityClassName: system-node-critical
+  -  restartPolicy: Always
+  -  schedulerName: default-scheduler
+     securityContext:
+       seccompProfile:
+         type: RuntimeDefault
+  -  terminationGracePeriodSeconds: 30
+     volumes:
+     - hostPath:
+         path: /etc/kubernetes/pki/etcd
+  ```
+
+More information can be found in the
+[tracking issue](https://github.com/kubernetes/kubeadm/issues/2927) for this bug.
diff --git a/content/en/docs/setup/production-environment/tools/kubespray.md b/content/en/docs/setup/production-environment/tools/kubespray.md
deleted file mode 100644
index 94698659aecd5..0000000000000
--- a/content/en/docs/setup/production-environment/tools/kubespray.md
+++ /dev/null
@@ -1,152 +0,0 @@
----
-title: Installing Kubernetes with Kubespray
-content_type: concept
-weight: 30
----
-
-<!-- overview -->
-
-This quickstart helps to install a Kubernetes cluster hosted on GCE, Azure, OpenStack,
-AWS, vSphere, Equinix Metal (formerly Packet), Oracle Cloud Infrastructure (Experimental)
-or Baremetal with [Kubespray](https://github.com/kubernetes-sigs/kubespray).
-
-Kubespray is a composition of [Ansible](https://docs.ansible.com/) playbooks,
-[inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md#inventory),
-provisioning tools, and domain knowledge for generic OS/Kubernetes clusters configuration management tasks.
-
-Kubespray provides:
-
-* Highly available cluster.
-* Composable (Choice of the network plugin for instance).
-* Supports most popular Linux distributions: - - Flatcar Container Linux by Kinvolk - - Debian Bullseye, Buster, Jessie, Stretch - - Ubuntu 16.04, 18.04, 20.04, 22.04 - - CentOS/RHEL 7, 8, 9 - - Fedora 35, 36 - - Fedora CoreOS - - openSUSE Leap 15.x/Tumbleweed - - Oracle Linux 7, 8, 9 - - Alma Linux 8, 9 - - Rocky Linux 8, 9 - - Kylin Linux Advanced Server V10 - - Amazon Linux 2 -* Continuous integration tests. - -To choose a tool which best fits your use case, read -[this comparison](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md) to -[kubeadm](/docs/reference/setup-tools/kubeadm/) and [kops](/docs/setup/production-environment/tools/kops/). - - - -## Creating a cluster - -### (1/5) Meet the underlay requirements - -Provision servers with the following [requirements](https://github.com/kubernetes-sigs/kubespray#requirements): - -* **Minimum required version of Kubernetes is v1.22** -* **Ansible v2.11+, Jinja 2.11+ and python-netaddr is installed on the machine that will run Ansible commands** -* The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required See ([Offline Environment](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/offline-environment.md)) -* The target servers are configured to allow **IPv4 forwarding**. -* If using IPv6 for pods and services, the target servers are configured to allow **IPv6 forwarding**. -* The **firewalls are not managed**, you'll need to implement your own rules the way you used to. - in order to avoid any issue during deployment you should disable your firewall. -* If kubespray is run from non-root user account, correct privilege escalation method - should be configured in the target servers. Then the `ansible_become` flag or command - parameters `--become` or `-b` should be specified. - -Kubespray provides the following utilities to help provision your environment: - -* [Terraform](https://www.terraform.io/) scripts for the following cloud providers: - * [AWS](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform/aws) - * [OpenStack](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform/openstack) - * [Equinix Metal](https://github.com/kubernetes-sigs/kubespray/tree/master/contrib/terraform/equinix) - -### (2/5) Compose an inventory file - -After you provision your servers, create an -[inventory file for Ansible](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html). -You can do this manually or via a dynamic inventory script. For more information, -see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". - -### (3/5) Plan your cluster deployment - -Kubespray provides the ability to customize many aspects of the deployment: - -* Choice deployment mode: kubeadm or non-kubeadm -* CNI (networking) plugins -* DNS configuration -* Choice of control plane: native/binary or containerized -* Component versions -* Calico route reflectors -* Component runtime options - * {{< glossary_tooltip term_id="docker" >}} - * {{< glossary_tooltip term_id="containerd" >}} - * {{< glossary_tooltip term_id="cri-o" >}} -* Certificate generation methods - -Kubespray customizations can be made to a -[variable file](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html). 
-If you are getting started with Kubespray, consider using the Kubespray -defaults to deploy your cluster and explore Kubernetes. - -### (4/5) Deploy a Cluster - -Next, deploy your cluster: - -Cluster deployment using -[ansible-playbook](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#starting-custom-deployment). - -```shell -ansible-playbook -i your/inventory/inventory.ini cluster.yml -b -v \ - --private-key=~/.ssh/private_key -``` - -Large deployments (100+ nodes) may require -[specific adjustments](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/large-deployments.md) -for best results. - -### (5/5) Verify the deployment - -Kubespray provides a way to verify inter-pod connectivity and DNS resolve with -[Netchecker](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/netcheck.md). -Netchecker ensures the netchecker-agents pods can resolve DNS requests and ping each -over within the default namespace. Those pods mimic similar behavior as the rest -of the workloads and serve as cluster health indicators. - -## Cluster operations - -Kubespray provides additional playbooks to manage your cluster: _scale_ and _upgrade_. - -### Scale your cluster - -You can add worker nodes from your cluster by running the scale playbook. For more information, -see "[Adding nodes](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#adding-nodes)". -You can remove worker nodes from your cluster by running the remove-node playbook. For more information, -see "[Remove nodes](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#remove-nodes)". - -### Upgrade your cluster - -You can upgrade your cluster by running the upgrade-cluster playbook. For more information, -see "[Upgrades](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/upgrades.md)". - -## Cleanup - -You can reset your nodes and wipe out all components installed with Kubespray -via the [reset playbook](https://github.com/kubernetes-sigs/kubespray/blob/master/reset.yml). - -{{< caution >}} -When running the reset playbook, be sure not to accidentally target your production cluster! -{{< /caution >}} - -## Feedback - -* Slack Channel: [#kubespray](https://kubernetes.slack.com/messages/kubespray/) - (You can get your invite [here](https://slack.k8s.io/)). -* [GitHub Issues](https://github.com/kubernetes-sigs/kubespray/issues). - -## {{% heading "whatsnext" %}} - -* Check out planned work on Kubespray's [roadmap](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/roadmap.md). -* Learn more about [Kubespray](https://github.com/kubernetes-sigs/kubespray). diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster-services.md b/content/en/docs/tasks/access-application-cluster/access-cluster-services.md index 8d2d47e349190..7994b620feac6 100644 --- a/content/en/docs/tasks/access-application-cluster/access-cluster-services.md +++ b/content/en/docs/tasks/access-application-cluster/access-cluster-services.md @@ -7,13 +7,10 @@ weight: 140 This page shows how to connect to services running on the Kubernetes cluster. - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - ## Accessing services running on the cluster @@ -28,30 +25,30 @@ such as your desktop machine. You have several options for connecting to nodes, pods and services from outside the cluster: - - Access services through public IPs. 
-  - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside
-    the cluster. See the [services](/docs/concepts/services-networking/service/) and
-    [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) documentation.
-  - Depending on your cluster environment, this may only expose the service to your corporate network,
-    or it may expose it to the internet. Think about whether the service being exposed is secure.
-    Does it do its own authentication?
-  - Place pods behind services. To access one specific pod from a set of replicas, such as for debugging,
-    place a unique label on the pod and create a new service which selects this label.
-  - In most cases, it should not be necessary for application developer to directly access
-    nodes via their nodeIPs.
-  - Access services, nodes, or pods using the Proxy Verb.
-    - Does apiserver authentication and authorization prior to accessing the remote service.
-      Use this if the services are not secure enough to expose to the internet, or to gain
-      access to ports on the node IP, or for debugging.
-    - Proxies may cause problems for some web applications.
-    - Only works for HTTP/HTTPS.
-    - Described [here](#manually-constructing-apiserver-proxy-urls).
+- Access services through public IPs.
+  - Use a service with type `NodePort` or `LoadBalancer` to make the service reachable outside
+    the cluster. See the [services](/docs/concepts/services-networking/service/) and
+    [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) documentation.
+  - Depending on your cluster environment, this may only expose the service to your corporate network,
+    or it may expose it to the internet. Think about whether the service being exposed is secure.
+    Does it do its own authentication?
+  - Place pods behind services. To access one specific pod from a set of replicas, such as for debugging,
+    place a unique label on the pod and create a new service which selects this label.
+  - In most cases, it should not be necessary for an application developer to directly access
+    nodes via their nodeIPs.
+- Access services, nodes, or pods using the Proxy Verb.
+  - Does apiserver authentication and authorization prior to accessing the remote service.
+    Use this if the services are not secure enough to expose to the internet, or to gain
+    access to ports on the node IP, or for debugging.
+  - Proxies may cause problems for some web applications.
+  - Only works for HTTP/HTTPS.
+  - Described [here](#manually-constructing-apiserver-proxy-urls).
 - Access from a node or pod in the cluster.
-  - Run a pod, and then connect to a shell in it using [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec).
-    Connect to other nodes, pods, and services from that shell.
-  - Some clusters may allow you to ssh to a node in the cluster. From there you may be able to
-    access cluster services. This is a non-standard method, and will work on some clusters but
-    not others. Browsers and other tools may or may not be installed. Cluster DNS may not work.
+  - Run a pod, and then connect to a shell in it using [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec).
+    Connect to other nodes, pods, and services from that shell (see the sketch after this list).
+  - Some clusters may allow you to ssh to a node in the cluster. From there you may be able to
+    access cluster services. This is a non-standard method, and will work on some clusters but
+    not others. Browsers and other tools may or may not be installed. Cluster DNS may not work.
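As one concrete illustration of the pod-shell approach above, you can start a throwaway pod and probe a service through cluster DNS — a minimal sketch; the `busybox` image and the `my-service` name and port are placeholders:

```shell
# Start an interactive, automatically-cleaned-up pod inside the cluster
kubectl run tmp-shell --rm -it --image=busybox -- /bin/sh

# From the pod's shell, resolve and query a service via cluster DNS
# (my-service, namespace default, and port 80 are hypothetical)
wget -qO- http://my-service.default.svc.cluster.local:80/
```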
### Discovering builtin services @@ -75,19 +72,23 @@ heapster is running at https://192.0.2.1/api/v1/namespaces/kube-system/services/ This shows the proxy-verb URL for accessing each service. For example, this cluster has cluster-level logging enabled (using Elasticsearch), which can be reached -at `https://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/` if suitable credentials are passed, or through a kubectl proxy at, for example: +at `https://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/` +if suitable credentials are passed, or through a kubectl proxy at, for example: `http://localhost:8080/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/`. {{< note >}} -See [Access Clusters Using the Kubernetes API](/docs/tasks/administer-cluster/access-cluster-api/#accessing-the-cluster-api) for how to pass credentials or use kubectl proxy. +See [Access Clusters Using the Kubernetes API](/docs/tasks/administer-cluster/access-cluster-api/#accessing-the-cluster-api) +for how to pass credentials or use kubectl proxy. {{< /note >}} #### Manually constructing apiserver proxy URLs -As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you append to the service's proxy URL: +As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create +proxy URLs that include service endpoints, suffixes, and parameters, you append to the service's proxy URL: `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`[https:]service_name[:port_name]`*`/proxy` -If you haven't specified a name for your port, you don't have to specify *port_name* in the URL. You can also use the port number in place of the *port_name* for both named and unnamed ports. +If you haven't specified a name for your port, you don't have to specify *port_name* in the URL. You can also +use the port number in place of the *port_name* for both named and unnamed ports. By default, the API server proxies to your service using HTTP. 
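For example, reached through a local `kubectl proxy` on its default port 8001, such a URL might look like this — a sketch; `my-service` and its port name `http` are placeholders:

```shell
# Proxy the apiserver to localhost, then request a service through the
# apiserver proxy path (service name and port name are hypothetical)
kubectl proxy --port=8001 &
curl http://localhost:8001/api/v1/namespaces/default/services/my-service:http/proxy/
```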
To use HTTPS, prefix the service name with `https:`:
`http://<kubernetes_master_address>/api/v1/namespaces/<namespace_name>/services/<service_name>/proxy`

@@ -99,53 +100,49 @@ The supported formats for the `<service_name>` segment of the URL are:
 
 * `https:<service_name>:` - proxies to the default or unnamed port using https (note the trailing colon)
 * `https:<service_name>:<port_name>` - proxies to the specified port name or port number using https
 
-
 ##### Examples
 
 * To access the Elasticsearch service endpoint `_search?q=user:kimchy`, you would use:
 
-  ```
-  http://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_search?q=user:kimchy
-  ```
+  ```
+  http://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_search?q=user:kimchy
+  ```
 
 * To access the Elasticsearch cluster health information `_cluster/health?pretty=true`, you would use:
 
-  ```
-  https://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_cluster/health?pretty=true
-  ```
-
-  The health information is similar to this:
-
-  ```json
-  {
-    "cluster_name" : "kubernetes_logging",
-    "status" : "yellow",
-    "timed_out" : false,
-    "number_of_nodes" : 1,
-    "number_of_data_nodes" : 1,
-    "active_primary_shards" : 5,
-    "active_shards" : 5,
-    "relocating_shards" : 0,
-    "initializing_shards" : 0,
-    "unassigned_shards" : 5
-  }
-  ```
+  ```
+  https://192.0.2.1/api/v1/namespaces/kube-system/services/elasticsearch-logging/proxy/_cluster/health?pretty=true
+  ```
+
+  The health information is similar to this:
+
+  ```json
+  {
+    "cluster_name" : "kubernetes_logging",
+    "status" : "yellow",
+    "timed_out" : false,
+    "number_of_nodes" : 1,
+    "number_of_data_nodes" : 1,
+    "active_primary_shards" : 5,
+    "active_shards" : 5,
+    "relocating_shards" : 0,
+    "initializing_shards" : 0,
+    "unassigned_shards" : 5
+  }
+  ```
 
 * To access the *https* Elasticsearch service health information `_cluster/health?pretty=true`, you would use:
 
-  ```
-  https://192.0.2.1/api/v1/namespaces/kube-system/services/https:elasticsearch-logging:/proxy/_cluster/health?pretty=true
-  ```
+  ```
+  https://192.0.2.1/api/v1/namespaces/kube-system/services/https:elasticsearch-logging:/proxy/_cluster/health?pretty=true
+  ```
 
 #### Using web browsers to access services running on the cluster
 
 You may be able to put an apiserver proxy URL into the address bar of a browser. However:
 
-  - Web browsers cannot usually pass tokens, so you may need to use basic (password) auth. Apiserver can be configured to accept basic auth,
-    but your cluster may not be configured to accept basic auth.
-  - Some web apps may not work, particularly those with client side javascript that construct URLs in a
-    way that is unaware of the proxy path prefix.
-
-
-
-
+- Web browsers cannot usually pass tokens, so you may need to use basic (password) auth.
+  Apiserver can be configured to accept basic auth,
+  but your cluster may not be configured to accept basic auth.
+- Some web apps may not work, particularly those with client side javascript that construct URLs in a
+  way that is unaware of the proxy path prefix.
diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster.md b/content/en/docs/tasks/access-application-cluster/access-cluster.md
index 87c3fdcd34769..09122dbc65609 100644
--- a/content/en/docs/tasks/access-application-cluster/access-cluster.md
+++ b/content/en/docs/tasks/access-application-cluster/access-cluster.md
@@ -16,7 +16,7 @@
 When accessing the Kubernetes API for the first time, we suggest using the
 Kubernetes CLI, `kubectl`.
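For example, a quick way to verify that `kubectl` can locate and authenticate to your cluster — a sketch; the output depends entirely on your environment:

```shell
# Show the control plane endpoint kubectl is configured to use
kubectl cluster-info

# Confirm that authentication works by listing the cluster's nodes
kubectl get nodes
```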
To access a cluster, you need to know the location of the cluster and have credentials
-to access it. Typically, this is automatically set-up when you work through
+to access it. Typically, this is automatically set up when you work through
 a [Getting started guide](/docs/setup/),
 or someone else set up the cluster and provided you with credentials and a location.
 
@@ -36,20 +36,20 @@ Kubectl handles locating and authenticating to the apiserver.
 If you want to directly access the REST API with an http client like
 curl or wget, or a browser, there are several ways to locate and authenticate:
 
-  - Run kubectl in proxy mode.
-    - Recommended approach.
-    - Uses stored apiserver location.
-    - Verifies identity of apiserver using self-signed cert. No MITM possible.
-    - Authenticates to apiserver.
-    - In future, may do intelligent client-side load-balancing and failover.
-  - Provide the location and credentials directly to the http client.
-    - Alternate approach.
-    - Works with some types of client code that are confused by using a proxy.
-    - Need to import a root cert into your browser to protect against MITM.
+- Run kubectl in proxy mode.
+  - Recommended approach.
+  - Uses stored apiserver location.
+  - Verifies identity of apiserver using self-signed cert. No MITM possible.
+  - Authenticates to apiserver.
+  - In future, may do intelligent client-side load-balancing and failover.
+- Provide the location and credentials directly to the http client.
+  - Alternate approach.
+  - Works with some types of client code that are confused by using a proxy.
+  - Need to import a root cert into your browser to protect against MITM.
 
 ### Using kubectl proxy
 
-The following command runs kubectl in a mode where it acts as a reverse proxy. It handles
+The following command runs kubectl in a mode where it acts as a reverse proxy. It handles
 locating the apiserver and authenticating.
 
 Run it like this:
@@ -83,7 +83,6 @@ The output is similar to this:
 }
 ```
 
-
 ### Without kubectl proxy
 
 Use `kubectl apply` and `kubectl describe secret...` to create a token for the default service account with grep/cut:
@@ -163,16 +162,16 @@ The output is similar to this:
 }
 ```
 
-The above examples use the `--insecure` flag. This leaves it subject to MITM
-attacks. When kubectl accesses the cluster it uses a stored root certificate
-and client certificates to access the server. (These are installed in the
-`~/.kube` directory). Since cluster certificates are typically self-signed, it
+The above examples use the `--insecure` flag. This leaves it subject to MITM
+attacks. When kubectl accesses the cluster it uses a stored root certificate
+and client certificates to access the server. (These are installed in the
+`~/.kube` directory). Since cluster certificates are typically self-signed, it
 may take special configuration to get your http client to use the root certificate.
 
 On some clusters, the apiserver does not require authentication; it may serve
-on localhost, or be protected by a firewall. There is not a standard
-for this. [Controlling Access to the API](/docs/concepts/security/controlling-access)
+on localhost, or be protected by a firewall. There is not a standard
+for this. [Controlling Access to the API](/docs/concepts/security/controlling-access)
 describes how a cluster admin can configure this.
 
 ## Programmatic access to the API
 
@@ -182,20 +181,30 @@ client libraries.
### Go client

-* To get the library, run the following command: `go get k8s.io/client-go@kubernetes-<kubernetes-version-number>`, see [INSTALL.md](https://github.com/kubernetes/client-go/blob/master/INSTALL.md#for-the-casual-user) for detailed installation instructions. See [https://github.com/kubernetes/client-go](https://github.com/kubernetes/client-go#compatibility-matrix) to see which versions are supported.
-* Write an application atop of the client-go clients. Note that client-go defines its own API objects, so if needed, please import API definitions from client-go rather than from the main repository, e.g., `import "k8s.io/client-go/kubernetes"` is correct.
+* To get the library, run the following command: `go get k8s.io/client-go@kubernetes-<kubernetes-version-number>`,
+  see [INSTALL.md](https://github.com/kubernetes/client-go/blob/master/INSTALL.md#for-the-casual-user)
+  for detailed installation instructions. See
+  [https://github.com/kubernetes/client-go](https://github.com/kubernetes/client-go#compatibility-matrix)
+  to see which versions are supported.
+* Write an application atop the client-go clients. Note that client-go defines its own API objects,
+  so if needed, please import API definitions from client-go rather than from the main repository,
+  e.g., `import "k8s.io/client-go/kubernetes"` is correct.
 
 The Go client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/)
-as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://git.k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go).
+as the kubectl CLI does to locate and authenticate to the apiserver. See this
+[example](https://git.k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go).
 
 If the application is deployed as a Pod in the cluster, please refer to the [next section](#accessing-the-api-from-a-pod).
 
 ### Python client
 
-To use [Python client](https://github.com/kubernetes-client/python), run the following command: `pip install kubernetes`. See [Python Client Library page](https://github.com/kubernetes-client/python) for more installation options.
+To use [Python client](https://github.com/kubernetes-client/python), run the following command:
+`pip install kubernetes`. See [Python Client Library page](https://github.com/kubernetes-client/python)
+for more installation options.
 
 The Python client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/)
-as the kubectl CLI does to locate and authenticate to the apiserver. See this [example](https://github.com/kubernetes-client/python/tree/master/examples).
+as the kubectl CLI does to locate and authenticate to the apiserver. See this
+[example](https://github.com/kubernetes-client/python/tree/master/examples).
 
 ### Other languages
 
@@ -218,52 +227,51 @@ For information about connecting to other services running on a Kubernetes clust
 
 ## Requesting redirects
 
-The redirect capabilities have been deprecated and removed. Please use a proxy (see below) instead.
+The redirect capabilities have been deprecated and removed. Please use a proxy (see below) instead.
 
-## So Many Proxies
+## So many proxies
 
 There are several different proxies you may encounter when using Kubernetes:
 
-1.
The [kubectl proxy](#directly-accessing-the-rest-api): - - runs on a user's desktop or in a pod - - proxies from a localhost address to the Kubernetes apiserver - - client to proxy uses HTTP - - proxy to apiserver uses HTTPS - - locates apiserver - - adds authentication headers + - runs on a user's desktop or in a pod + - proxies from a localhost address to the Kubernetes apiserver + - client to proxy uses HTTP + - proxy to apiserver uses HTTPS + - locates apiserver + - adds authentication headers -1. The [apiserver proxy](/docs/tasks/access-application-cluster/access-cluster-services/#discovering-builtin-services): +1. The [apiserver proxy](/docs/tasks/access-application-cluster/access-cluster-services/#discovering-builtin-services): - - is a bastion built into the apiserver - - connects a user outside of the cluster to cluster IPs which otherwise might not be reachable - - runs in the apiserver processes - - client to proxy uses HTTPS (or http if apiserver so configured) - - proxy to target may use HTTP or HTTPS as chosen by proxy using available information - - can be used to reach a Node, Pod, or Service - - does load balancing when used to reach a Service + - is a bastion built into the apiserver + - connects a user outside of the cluster to cluster IPs which otherwise might not be reachable + - runs in the apiserver processes + - client to proxy uses HTTPS (or http if apiserver so configured) + - proxy to target may use HTTP or HTTPS as chosen by proxy using available information + - can be used to reach a Node, Pod, or Service + - does load balancing when used to reach a Service -1. The [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): +1. The [kube proxy](/docs/concepts/services-networking/service/#ips-and-vips): - - runs on each node - - proxies UDP and TCP - - does not understand HTTP - - provides load balancing - - is only used to reach services + - runs on each node + - proxies UDP and TCP + - does not understand HTTP + - provides load balancing + - is only used to reach services -1. A Proxy/Load-balancer in front of apiserver(s): +1. A Proxy/Load-balancer in front of apiserver(s): - - existence and implementation varies from cluster to cluster (e.g. nginx) - - sits between all clients and one or more apiservers - - acts as load balancer if there are several apiservers. + - existence and implementation varies from cluster to cluster (e.g. nginx) + - sits between all clients and one or more apiservers + - acts as load balancer if there are several apiservers. -1. Cloud Load Balancers on external services: +1. Cloud Load Balancers on external services: - - are provided by some cloud providers (e.g. AWS ELB, Google Cloud Load Balancer) - - are created automatically when the Kubernetes service has type `LoadBalancer` - - use UDP/TCP only - - implementation varies by cloud provider. + - are provided by some cloud providers (e.g. AWS ELB, Google Cloud Load Balancer) + - are created automatically when the Kubernetes service has type `LoadBalancer` + - use UDP/TCP only + - implementation varies by cloud provider. -Kubernetes users will typically not need to worry about anything other than the first two types. The cluster admin +Kubernetes users will typically not need to worry about anything other than the first two types. The cluster admin will typically ensure that the latter types are set up correctly. 
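Of these, the kubectl proxy is the one you typically start yourself; a brief sketch of using it to reach the apiserver (the local port is an arbitrary choice):

```shell
# Run a local authenticated reverse proxy to the apiserver in the background
kubectl proxy --port=8080 &

# Plain HTTP requests to localhost are now forwarded over HTTPS
curl http://localhost:8080/api/
```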
- diff --git a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md index d0ac36881a915..6c0c7584676d9 100644 --- a/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md +++ b/content/en/docs/tasks/access-application-cluster/create-external-load-balancer.md @@ -164,7 +164,7 @@ With each target weighted equally in terms of sending traffic to Nodes, external traffic is not equally load balanced across different Pods. The external load balancer is unaware of the number of Pods on each node that are used as a target. -Where `NumServicePods << _NumNodes` or `NumServicePods >> NumNodes`, a fairly close-to-equal +Where `NumServicePods << NumNodes` or `NumServicePods >> NumNodes`, a fairly close-to-equal distribution will be seen, even without weights. Internal pod to pod traffic should behave similar to ClusterIP services, with equal probability across all pods. diff --git a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md index 1efc5e9b69d90..c250347d423dc 100644 --- a/content/en/docs/tasks/access-application-cluster/ingress-minikube.md +++ b/content/en/docs/tasks/access-application-cluster/ingress-minikube.md @@ -108,6 +108,10 @@ If you haven't already set up a cluster locally, run `minikube start` to create http://172.17.0.15:31637 ``` + ```shell + curl http://172.17.0.15:31637 + ``` + The output is similar to: ```none diff --git a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md index 0ae940296241b..5a5f41008e913 100644 --- a/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md +++ b/content/en/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -23,7 +23,7 @@ of Containers for each. - Fetch all Pods in all namespaces using `kubectl get pods --all-namespaces` - Format the output to include only the list of Container image names - using `-o jsonpath={.items[*].spec.containers[*].image}`. This will recursively parse out the + using `-o jsonpath={.items[*].spec['initContainers', 'containers'][*].image}`. This will recursively parse out the `image` field from the returned json. - See the [jsonpath reference](/docs/reference/kubectl/jsonpath/) for further information on how to use jsonpath. @@ -33,7 +33,7 @@ of Containers for each. - Use `uniq` to aggregate image counts ```shell -kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" |\ +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec['initContainers', 'containers'][*].image}" |\ tr -s '[[:space:]]' '\n' |\ sort |\ uniq -c @@ -42,7 +42,7 @@ The jsonpath is interpreted as follows: - `.items[*]`: for each returned value - `.spec`: get the spec -- `.containers[*]`: for each container +- `['initContainers', 'containers'][*]`: for each container - `.image`: get the image {{< note >}} diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-api.md b/content/en/docs/tasks/administer-cluster/access-cluster-api.md index ed707fb278ad6..ac21dc00c38e6 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-api.md @@ -39,10 +39,14 @@ kubectl. 
Complete documentation is found in the [kubectl manual](/docs/reference
 
 kubectl handles locating and authenticating to the API server.
 If you want to directly access the REST API with an http client like `curl` or `wget`,
 or a browser, there are multiple ways you can locate and authenticate against the API server:
 
-  1. Run kubectl in proxy mode (recommended). This method is recommended, since it uses the stored apiserver location and verifies the identity of the API server using a self-signed cert. No man-in-the-middle (MITM) attack is possible using this method.
-  1. Alternatively, you can provide the location and credentials directly to the http client. This works with client code that is confused by proxies. To protect against man in the middle attacks, you'll need to import a root cert into your browser.
+1. Run kubectl in proxy mode (recommended). This method is recommended, since it uses
+   the stored API server location and verifies the identity of the API server using a
+   self-signed certificate. No man-in-the-middle (MITM) attack is possible using this method.
+1. Alternatively, you can provide the location and credentials directly to the http client.
+   This works with client code that is confused by proxies. To protect against man in the
+   middle attacks, you'll need to import a root cert into your browser.
 
-  Using the Go or Python client libraries provides accessing kubectl in proxy mode.
+Using the Go or Python client libraries provides access to kubectl in proxy mode.
 
 #### Using kubectl proxy
 
@@ -151,16 +155,23 @@ describes how you can configure this as a cluster administrator.
 
 ### Programmatic access to the API
 
-Kubernetes officially supports client libraries for [Go](#go-client), [Python](#python-client), [Java](#java-client), [dotnet](#dotnet-client), [JavaScript](#javascript-client), and [Haskell](#haskell-client). There are other client libraries that are provided and maintained by their authors, not the Kubernetes team. See [client libraries](/docs/reference/using-api/client-libraries/) for accessing the API from other languages and how they authenticate.
+Kubernetes officially supports client libraries for [Go](#go-client), [Python](#python-client),
+[Java](#java-client), [dotnet](#dotnet-client), [JavaScript](#javascript-client), and
+[Haskell](#haskell-client). There are other client libraries that are provided and maintained by
+their authors, not the Kubernetes team. See [client libraries](/docs/reference/using-api/client-libraries/)
+for accessing the API from other languages and how they authenticate.
 
 #### Go client
 
-* To get the library, run the following command: `go get k8s.io/client-go@kubernetes-<kubernetes-version-number>` See [https://github.com/kubernetes/client-go/releases](https://github.com/kubernetes/client-go/releases) to see which versions are supported.
+* To get the library, run the following command: `go get k8s.io/client-go@kubernetes-<kubernetes-version-number>`
+  See [https://github.com/kubernetes/client-go/releases](https://github.com/kubernetes/client-go/releases)
+  to see which versions are supported.
 * Write an application atop the client-go clients.
 
 {{< note >}}
-client-go defines its own API objects, so if needed, import API definitions from client-go rather than from the main repository. For example, `import "k8s.io/client-go/kubernetes"` is correct.
+`client-go` defines its own API objects, so if needed, import API definitions from client-go rather than
+from the main repository. For example, `import "k8s.io/client-go/kubernetes"` is correct.
{{< /note >}} @@ -190,14 +201,18 @@ func main() { } ``` -If the application is deployed as a Pod in the cluster, see [Accessing the API from within a Pod](/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod). +If the application is deployed as a Pod in the cluster, see +[Accessing the API from within a Pod](/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod). #### Python client -To use [Python client](https://github.com/kubernetes-client/python), run the following command: `pip install kubernetes`. See [Python Client Library page](https://github.com/kubernetes-client/python) for more installation options. +To use [Python client](https://github.com/kubernetes-client/python), run the following command: +`pip install kubernetes`. See [Python Client Library page](https://github.com/kubernetes-client/python) +for more installation options. The Python client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) -as the kubectl CLI does to locate and authenticate to the API server. See this [example](https://github.com/kubernetes-client/python/blob/master/examples/out_of_cluster_config.py): +as the kubectl CLI does to locate and authenticate to the API server. See this +[example](https://github.com/kubernetes-client/python/blob/master/examples/out_of_cluster_config.py): ```python from kubernetes import client, config @@ -224,10 +239,12 @@ cd java mvn install ``` -See [https://github.com/kubernetes-client/java/releases](https://github.com/kubernetes-client/java/releases) to see which versions are supported. +See [https://github.com/kubernetes-client/java/releases](https://github.com/kubernetes-client/java/releases) +to see which versions are supported. The Java client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) -as the kubectl CLI does to locate and authenticate to the API server. See this [example](https://github.com/kubernetes-client/java/blob/master/examples/examples-release-15/src/main/java/io/kubernetes/client/examples/KubeConfigFileClientExample.java): +as the kubectl CLI does to locate and authenticate to the API server. See this +[example](https://github.com/kubernetes-client/java/blob/master/examples/examples-release-15/src/main/java/io/kubernetes/client/examples/KubeConfigFileClientExample.java): ```java package io.kubernetes.client.examples; @@ -278,10 +295,16 @@ public class KubeConfigFileClientExample { #### dotnet client -To use [dotnet client](https://github.com/kubernetes-client/csharp), run the following command: `dotnet add package KubernetesClient --version 1.6.1` See [dotnet Client Library page](https://github.com/kubernetes-client/csharp) for more installation options. See [https://github.com/kubernetes-client/csharp/releases](https://github.com/kubernetes-client/csharp/releases) to see which versions are supported. +To use [dotnet client](https://github.com/kubernetes-client/csharp), +run the following command: `dotnet add package KubernetesClient --version 1.6.1`. +See [dotnet Client Library page](https://github.com/kubernetes-client/csharp) +for more installation options. See +[https://github.com/kubernetes-client/csharp/releases](https://github.com/kubernetes-client/csharp/releases) +to see which versions are supported. The dotnet client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) -as the kubectl CLI does to locate and authenticate to the API server. 
See this [example](https://github.com/kubernetes-client/csharp/blob/master/examples/simple/PodList.cs): +as the kubectl CLI does to locate and authenticate to the API server. See this +[example](https://github.com/kubernetes-client/csharp/blob/master/examples/simple/PodList.cs): ```csharp using System; @@ -313,10 +336,14 @@ namespace simple #### JavaScript client -To install [JavaScript client](https://github.com/kubernetes-client/javascript), run the following command: `npm install @kubernetes/client-node`. See [https://github.com/kubernetes-client/javascript/releases](https://github.com/kubernetes-client/javascript/releases) to see which versions are supported. +To install [JavaScript client](https://github.com/kubernetes-client/javascript), +run the following command: `npm install @kubernetes/client-node`. See +[https://github.com/kubernetes-client/javascript/releases](https://github.com/kubernetes-client/javascript/releases) +to see which versions are supported. The JavaScript client can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) -as the kubectl CLI does to locate and authenticate to the API server. See this [example](https://github.com/kubernetes-client/javascript/blob/master/examples/example.js): +as the kubectl CLI does to locate and authenticate to the API server. See this +[example](https://github.com/kubernetes-client/javascript/blob/master/examples/example.js): ```javascript const k8s = require('@kubernetes/client-node'); @@ -333,10 +360,13 @@ k8sApi.listNamespacedPod('default').then((res) => { #### Haskell client -See [https://github.com/kubernetes-client/haskell/releases](https://github.com/kubernetes-client/haskell/releases) to see which versions are supported. +See [https://github.com/kubernetes-client/haskell/releases](https://github.com/kubernetes-client/haskell/releases) +to see which versions are supported. -The [Haskell client](https://github.com/kubernetes-client/haskell) can use the same [kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) -as the kubectl CLI does to locate and authenticate to the API server. See this [example](https://github.com/kubernetes-client/haskell/blob/master/kubernetes-client/example/App.hs): +The [Haskell client](https://github.com/kubernetes-client/haskell) can use the same +[kubeconfig file](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) +as the kubectl CLI does to locate and authenticate to the API server. See this +[example](https://github.com/kubernetes-client/haskell/blob/master/kubernetes-client/example/App.hs): ```haskell exampleWithKubeConfig :: IO () diff --git a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md index c3194b71805b2..26f0cadc6da6c 100644 --- a/content/en/docs/tasks/administer-cluster/change-default-storage-class.md +++ b/content/en/docs/tasks/administer-cluster/change-default-storage-class.md @@ -8,15 +8,10 @@ weight: 90 This page shows how to change the default Storage Class that is used to provision volumes for PersistentVolumeClaims that have no special requirements. - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - ## Why change the default storage class? @@ -39,67 +34,63 @@ for details about addon manager and how to disable individual addons. ## Changing the default StorageClass -1. List the StorageClasses in your cluster: +1. 
List the StorageClasses in your cluster: - ```bash - kubectl get storageclass - ``` + ```bash + kubectl get storageclass + ``` - The output is similar to this: + The output is similar to this: - ```bash - NAME PROVISIONER AGE - standard (default) kubernetes.io/gce-pd 1d - gold kubernetes.io/gce-pd 1d - ``` + ```bash + NAME PROVISIONER AGE + standard (default) kubernetes.io/gce-pd 1d + gold kubernetes.io/gce-pd 1d + ``` - The default StorageClass is marked by `(default)`. + The default StorageClass is marked by `(default)`. 1. Mark the default StorageClass as non-default: - The default StorageClass has an annotation - `storageclass.kubernetes.io/is-default-class` set to `true`. Any other value - or absence of the annotation is interpreted as `false`. + The default StorageClass has an annotation + `storageclass.kubernetes.io/is-default-class` set to `true`. Any other value + or absence of the annotation is interpreted as `false`. - To mark a StorageClass as non-default, you need to change its value to `false`: + To mark a StorageClass as non-default, you need to change its value to `false`: - ```bash - kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' - ``` + ```bash + kubectl patch storageclass standard -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' + ``` - where `standard` is the name of your chosen StorageClass. + where `standard` is the name of your chosen StorageClass. 1. Mark a StorageClass as default: - Similar to the previous step, you need to add/set the annotation - `storageclass.kubernetes.io/is-default-class=true`. + Similar to the previous step, you need to add/set the annotation + `storageclass.kubernetes.io/is-default-class=true`. - ```bash - kubectl patch storageclass gold -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' - ``` + ```bash + kubectl patch storageclass gold -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' + ``` - Please note that at most one StorageClass can be marked as default. If two - or more of them are marked as default, a `PersistentVolumeClaim` without `storageClassName` explicitly specified cannot be created. + Please note that at most one StorageClass can be marked as default. If two + or more of them are marked as default, a `PersistentVolumeClaim` without + `storageClassName` explicitly specified cannot be created. 1. Verify that your chosen StorageClass is default: - ```bash - kubectl get storageclass - ``` - - The output is similar to this: - - ```bash - NAME PROVISIONER AGE - standard kubernetes.io/gce-pd 1d - gold (default) kubernetes.io/gce-pd 1d - ``` + ```bash + kubectl get storageclass + ``` + The output is similar to this: + ```bash + NAME PROVISIONER AGE + standard kubernetes.io/gce-pd 1d + gold (default) kubernetes.io/gce-pd 1d + ``` ## {{% heading "whatsnext" %}} * Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/). 
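
If you prefer to double-check the annotation itself rather than relying on the
`(default)` marker, you can print the annotations of a StorageClass directly.
This is a small sketch; `gold` is the example StorageClass name used above:

```shell
# Print all annotations on the StorageClass, including the default-class marker
kubectl get storageclass gold -o jsonpath='{.metadata.annotations}'
```

The output should show `storageclass.kubernetes.io/is-default-class` set to `"true"`
for your chosen default class.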
-
-
-
diff --git a/content/en/docs/tasks/administer-cluster/change-pv-access-mode-readwriteoncepod.md b/content/en/docs/tasks/administer-cluster/change-pv-access-mode-readwriteoncepod.md
new file mode 100644
index 0000000000000..a2dffdc702c22
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/change-pv-access-mode-readwriteoncepod.md
@@ -0,0 +1,187 @@
+---
+title: Change the Access Mode of a PersistentVolume to ReadWriteOncePod
+content_type: task
+weight: 90
+min-kubernetes-server-version: v1.22
+---
+
+
+
+This page shows how to change the access mode on an existing PersistentVolume to
+use `ReadWriteOncePod`.
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+{{< note >}}
+The `ReadWriteOncePod` access mode graduated to stable in the Kubernetes v1.29
+release. If you are running a version of Kubernetes older than v1.29, you might
+need to enable a feature gate. Check the documentation for your version of
+Kubernetes.
+{{< /note >}}
+
+{{< note >}}
+The `ReadWriteOncePod` access mode is only supported for
+{{< glossary_tooltip text="CSI" term_id="csi" >}} volumes.
+To use this volume access mode you will need to update the following
+[CSI sidecars](https://kubernetes-csi.github.io/docs/sidecar-containers.html)
+to these versions or greater:
+
+* [csi-provisioner:v3.0.0+](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v3.0.0)
+* [csi-attacher:v3.3.0+](https://github.com/kubernetes-csi/external-attacher/releases/tag/v3.3.0)
+* [csi-resizer:v1.3.0+](https://github.com/kubernetes-csi/external-resizer/releases/tag/v1.3.0)
+{{< /note >}}
+
+## Why should I use `ReadWriteOncePod`?
+
+Prior to Kubernetes v1.22, the `ReadWriteOnce` access mode was commonly used to
+restrict PersistentVolume access for workloads that required single-writer
+access to storage. However, this access mode had a limitation: it restricted
+volume access to a single *node*, allowing multiple pods on the same node to
+read from and write to the same volume simultaneously. This could pose a risk
+for applications that demand strict single-writer access for data safety.
+
+If ensuring single-writer access is critical for your workloads, consider
+migrating your volumes to `ReadWriteOncePod`.
+
+
+
+## Migrating existing PersistentVolumes
+
+If you have existing PersistentVolumes, they can be migrated to use
+`ReadWriteOncePod`. Only migrations from `ReadWriteOnce` to `ReadWriteOncePod`
+are supported.
+
+In this example, there is already a `ReadWriteOnce` "cat-pictures-pvc"
+PersistentVolumeClaim that is bound to a "cat-pictures-pv" PersistentVolume,
+and a "cat-pictures-writer" Deployment that uses this PersistentVolumeClaim.
+
+{{< note >}}
+If your storage plugin supports
+[Dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/),
+the "cat-pictures-pv" will be created for you, but its name may differ. To get
+your PersistentVolume's name run:
+
+```shell
+kubectl get pvc cat-pictures-pvc -o jsonpath='{.spec.volumeName}'
+```
+{{< /note >}}
+
+And you can view the PVC before you make changes. Either view the manifest
+locally, or run `kubectl get pvc -o yaml`.
+The output is similar to:
+
+```yaml
+# cat-pictures-pvc.yaml
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: cat-pictures-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+```
+
+Here's an example Deployment that relies on that PersistentVolumeClaim:
+
+```yaml
+# cat-pictures-writer-deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cat-pictures-writer
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: cat-pictures-writer
+  template:
+    metadata:
+      labels:
+        app: cat-pictures-writer
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.14.2
+        ports:
+        - containerPort: 80
+        volumeMounts:
+        - name: cat-pictures
+          mountPath: /mnt
+      volumes:
+      - name: cat-pictures
+        persistentVolumeClaim:
+          claimName: cat-pictures-pvc
+          readOnly: false
+```
+
+As a first step, you need to edit your PersistentVolume's
+`spec.persistentVolumeReclaimPolicy` and set it to `Retain`. This ensures your
+PersistentVolume will not be deleted when you delete the corresponding
+PersistentVolumeClaim:
+
+```shell
+kubectl patch pv cat-pictures-pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+```
+
+Next you need to stop any workloads that are using the PersistentVolumeClaim
+bound to the PersistentVolume you want to migrate, and then delete the
+PersistentVolumeClaim. Avoid making any other changes to the
+PersistentVolumeClaim, such as volume resizes, until after the migration is
+complete.
+
+Once that is done, you need to clear your PersistentVolume's `spec.claimRef.uid`
+to ensure PersistentVolumeClaims can bind to it upon recreation:
+
+```shell
+kubectl scale --replicas=0 deployment cat-pictures-writer
+kubectl delete pvc cat-pictures-pvc
+kubectl patch pv cat-pictures-pv -p '{"spec":{"claimRef":{"uid":""}}}'
+```
+
+After that, replace the PersistentVolume's list of valid access modes to be
+(only) `ReadWriteOncePod`:
+
+```shell
+kubectl patch pv cat-pictures-pv -p '{"spec":{"accessModes":["ReadWriteOncePod"]}}'
+```
+
+{{< note >}}
+The `ReadWriteOncePod` access mode cannot be combined with other access modes.
+Make sure `ReadWriteOncePod` is the only access mode on the PersistentVolume
+when updating, otherwise the request will fail.
+{{< /note >}}
+
+Next you need to modify your PersistentVolumeClaim to set `ReadWriteOncePod` as
+the only access mode. You should also set the PersistentVolumeClaim's
+`spec.volumeName` to the name of your PersistentVolume to ensure it binds to
+this specific PersistentVolume.
+
+Once this is done, you can recreate your PersistentVolumeClaim and start up your
+workloads:
+
+```shell
+# IMPORTANT: Make sure to edit your PVC in cat-pictures-pvc.yaml before applying. You need to:
+# - Set ReadWriteOncePod as the only access mode
+# - Set spec.volumeName to "cat-pictures-pv"
+
+kubectl apply -f cat-pictures-pvc.yaml
+kubectl apply -f cat-pictures-writer-deployment.yaml
+```
+
+Lastly, you may edit your PersistentVolume's `spec.persistentVolumeReclaimPolicy`
+and set it back to `Delete` if you previously changed it.
+
+```shell
+kubectl patch pv cat-pictures-pv -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'
+```
+
+## {{% heading "whatsnext" %}}
+
+* Learn more about [PersistentVolumes](/docs/concepts/storage/persistent-volumes/).
+* Learn more about [PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims).
+* Learn more about [Configuring a Pod to Use a PersistentVolume for Storage](/docs/tasks/configure-pod-container/configure-persistent-volume-storage/)
diff --git a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
index 3b4f90770bf84..ac69021c0ba26 100644
--- a/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
+++ b/content/en/docs/tasks/administer-cluster/configure-upgrade-etcd.md
@@ -14,7 +14,11 @@ weight: 270
 ## {{% heading "prerequisites" %}}
 
-{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+You need to have a Kubernetes cluster, and the kubectl command-line tool must
+be configured to communicate with your cluster. It is recommended to run this
+task on a cluster with at least two nodes that are not acting as control plane
+nodes. If you do not already have a cluster, you can create one by using
+[minikube](https://minikube.sigs.k8s.io/docs/tutorials/multi_node/).
@@ -271,16 +275,16 @@ that is not currently used by an etcd process. Taking the snapshot will
not affect the performance of the member.

Below is an example for taking a snapshot of the keyspace served by
-`$ENDPOINT` to the file `snapshotdb`:
+`$ENDPOINT` to the file `snapshot.db`:

```shell
-ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshotdb
+ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshot.db
```

Verify the snapshot:

```shell
-ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb
+ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshot.db
```

```console
@@ -339,19 +343,25 @@ employed to recover the data of a failed cluster.

Before starting the restore operation, a snapshot file must be present. It can
either be a snapshot file from a previous backup operation, or from a remaining
[data directory](https://etcd.io/docs/current/op-guide/configuration/#--data-dir).
+
Here is an example:

```shell
-ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshotdb
+ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshot.db
```
+
-Another example for restoring using etcdctl options:
+Another example for restoring using `etcdctl` options:
+
```shell
-ETCDCTL_API=3 etcdctl snapshot restore --data-dir <data-dir-location> snapshotdb
+ETCDCTL_API=3 etcdctl --data-dir <data-dir-location> snapshot restore snapshot.db
```
+
+where `<data-dir-location>` is a directory that will be created during the restore process.
+
-Yet another example would be to first export the environment variable
+Yet another example would be to first export the `ETCDCTL_API` environment variable:
+
```shell
export ETCDCTL_API=3
-etcdctl snapshot restore --data-dir <data-dir-location> snapshotdb
+etcdctl --data-dir <data-dir-location> snapshot restore snapshot.db
```

For more information and examples on restoring a cluster from a snapshot file, see
@@ -406,4 +416,8 @@ Defragmentation is an expensive operation, so it should be executed as infrequently
as possible. On the other hand, it's also necessary to make sure any etcd member will
not run out of the storage quota. The Kubernetes project recommends that when you perform
defragmentation, you use a tool such as [etcd-defrag](https://github.com/ahrtr/etcd-defrag).
+
+You can also run the defragmentation tool as a Kubernetes CronJob, to make sure that
+defragmentation happens regularly. See [`etcd-defrag-cronjob.yaml`](https://github.com/ahrtr/etcd-defrag/blob/main/doc/etcd-defrag-cronjob.yaml)
+for details.
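+
+As a quick manual check before and after a defragmentation run, you can inspect
+each member's on-disk database size. This is a sketch that reuses the `$ENDPOINT`
+convention from the examples above; the endpoint and any TLS flags depend on
+your deployment:
+
+```shell
+# Show member status, including the current database size (the DB SIZE column)
+ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT endpoint status --write-out=table
+```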
{{< /note >}} diff --git a/content/en/docs/tasks/administer-cluster/encrypt-data.md b/content/en/docs/tasks/administer-cluster/encrypt-data.md index 5a86d2df14c81..1ceed92ac3d94 100644 --- a/content/en/docs/tasks/administer-cluster/encrypt-data.md +++ b/content/en/docs/tasks/administer-cluster/encrypt-data.md @@ -248,7 +248,7 @@ The following table describes each available provider. - kms v2 (beta) + kms v2 Uses envelope encryption scheme with DEK per API server. Strongest Fast @@ -259,14 +259,10 @@ The following table describes each available provider. Data is encrypted by data encryption keys (DEKs) using AES-GCM; DEKs are encrypted by key encryption keys (KEKs) according to configuration in Key Management Service (KMS). - Kubernetes defaults to generating a new DEK at API server startup, which is then - reused for object encryption. - If you enable the KMSv2KDF - feature gate, - Kubernetes instead generates a new DEK per encryption from a secret seed. - Whichever approach you configure, the DEK or seed is also rotated whenever the KEK is rotated.
    + Kubernetes generates a new DEK per encryption from a secret seed. + The seed is rotated whenever the KEK is rotated.
    A good choice if using a third party tool for key management. - Available in beta from Kubernetes v1.27. + Available as stable from Kubernetes v1.29.
    Read how to configure the KMS V2 provider. @@ -538,4 +534,3 @@ To allow automatic reloading, configure the API server to run with: * Read about [decrypting data that are already stored at rest](/docs/tasks/administer-cluster/decrypt-data/) * Learn more about the [EncryptionConfiguration configuration API (v1)](/docs/reference/config-api/apiserver-encryption.v1/). - diff --git a/content/en/docs/tasks/administer-cluster/kms-provider.md b/content/en/docs/tasks/administer-cluster/kms-provider.md index 921e13d29fe81..1ea101c161cab 100644 --- a/content/en/docs/tasks/administer-cluster/kms-provider.md +++ b/content/en/docs/tasks/administer-cluster/kms-provider.md @@ -9,9 +9,17 @@ weight: 370 This page shows how to configure a Key Management Service (KMS) provider and plugin to enable secret data encryption. In Kubernetes {{< skew currentVersion >}} there are two versions of KMS at-rest encryption. -You should use KMS v2 if feasible because KMS v1 is deprecated (since Kubernetes v1.28). -However, you should also read and observe the **Caution** notices in this page that highlight specific -cases when you must not use KMS v2. KMS v2 offers significantly better performance characteristics than KMS v1. +You should use KMS v2 if feasible because KMS v1 is deprecated (since Kubernetes v1.28) and disabled by default (since Kubernetes v1.29). +KMS v2 offers significantly better performance characteristics than KMS v1. + +{{< caution >}} +This documentation is for the generally available implementation of KMS v2 (and for the +deprecated version 1 implementation). +If you are using any control plane components older than Kubernetes v1.29, please check +the equivalent page in the documentation for the version of Kubernetes that your cluster +is running. Earlier releases of Kubernetes had different behavior that may be relevant +for information security. +{{< /caution >}} ## {{% heading "prerequisites" %}} @@ -24,7 +32,7 @@ you have selected. Kubernetes recommends using KMS v2. (if you are running a different version of Kubernetes that also supports the v2 KMS API, switch to the documentation for that version of Kubernetes). - If you selected KMS API v1 to support clusters prior to version v1.27 - or if you have a legacy KMS plugin that only supports KMS v1, + or if you have a legacy KMS plugin that only supports KMS v1, any supported Kubernetes version will work. This API is deprecated as of Kubernetes v1.28. Kubernetes does not recommend the use of this API. @@ -35,80 +43,36 @@ you have selected. Kubernetes recommends using KMS v2. * Kubernetes version 1.10.0 or later is required -* Your cluster must use etcd v3 or later +* For version 1.29 and later, the v1 implementation of KMS is disabled by default. + To enable the feature, set `--feature-gates=KMSv1=true` to configure a KMS v1 provider. -### KMS v2 -{{< feature-state for_k8s_version="v1.27" state="beta" >}} - -* For version 1.25 and 1.26, enabling the feature via kube-apiserver feature gate is required. -Set `--feature-gates=KMSv2=true` to configure a KMS v2 provider. - For environments where all API servers are running version 1.28 or later, and you do not require the ability - to downgrade to Kubernetes v1.27, you can enable the `KMSv2KDF` feature gate (a beta feature) for more - robust data encryption key generation. The Kubernetes project recommends enabling KMS v2 KDF if those - preconditions are met. 
- * Your cluster must use etcd v3 or later -{{< caution >}} -The KMS v2 API and implementation changed in incompatible ways in-between the alpha release in v1.25 -and the beta release in v1.27. Attempting to upgrade from old versions with the alpha feature -enabled will result in data loss. - ---- +### KMS v2 +{{< feature-state for_k8s_version="v1.29" state="stable" >}} -Running mixed API server versions with some servers at v1.27, and others at v1.28 _with the -`KMSv2KDF` feature gate enabled_ is **not supported** - and is likely to result in data loss. -{{< /caution >}} +* Your cluster must use etcd v3 or later +## KMS encryption and per-object encryption keys + The KMS encryption provider uses an envelope encryption scheme to encrypt data in etcd. The data is encrypted using a data encryption key (DEK). The DEKs are encrypted with a key encryption key (KEK) that is stored and managed in a remote KMS. -With KMS v1, a new DEK is generated for each encryption. +If you use the (deprecated) v1 implementation of KMS, a new DEK is generated for each encryption. -With KMS v2, there are two ways for the API server to generate a DEK. -Kubernetes defaults to generating a new DEK at API server startup, which is then reused -for resource encryption. However, if you use KMS v2 _and_ enable the `KMSv2KDF` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/), then -Kubernetes instead generates a new DEK **per encryption**: the API server uses a +With KMS v2, a new DEK is generated **per encryption**: the API server uses a _key derivation function_ to generate single use data encryption keys from a secret seed combined with some random data. -Whichever approach you configure, the DEK or seed is also rotated whenever the KEK is rotated -(see `Understanding key_id and Key Rotation` section below for more details). +The seed is rotated whenever the KEK is rotated +(see the _Understanding key_id and Key Rotation_ section below for more details). The KMS provider uses gRPC to communicate with a specific KMS plugin over a UNIX domain socket. The KMS plugin, which is implemented as a gRPC server and deployed on the same host(s) as the Kubernetes control plane, is responsible for all communication with the remote KMS. -{{< caution >}} - -If you are running virtual machine (VM) based nodes that leverage VM state store with this feature, -using KMS v2 is **insecure** and an information security risk unless you also explicitly enable -the `KMSv2KDF` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/). - -With KMS v2, the API server uses AES-GCM with a 12 byte nonce (8 byte atomic counter and 4 bytes random data) for encryption. -The following issues could occur if the VM is saved and restored: - -1. The counter value may be lost or corrupted if the VM is saved in an inconsistent state or restored improperly. - This can lead to a situation where the same counter value is used twice, resulting in the same nonce being used - for two different messages. -2. If the VM is restored to a previous state, the counter value may be set back to its previous value, -resulting in the same nonce being used again. - -Although both of these cases are partially mitigated by the 4 byte random nonce, this can compromise -the security of the encryption. - -If you have enabled the `KMSv2KDF` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) _and_ are using KMS v2 -(not KMS v1), the API server generates single use data encryption keys from a secret seed. 
-This eliminates the need for a counter based nonce while avoiding nonce collision concerns. -It also removes any specific concerns with using KMS v2 and VM state store. - -{{< /caution >}} - ## Configuring the KMS provider To configure a KMS provider on the API server, include a provider of type `kms` in the @@ -197,10 +161,14 @@ Then use the functions and data structures in the stub file to develop the serve ##### KMS v2 {#developing-a-kms-plugin-gRPC-server-notes-kms-v2} -* KMS plugin version: `v2beta1` +* KMS plugin version: `v2` - In response to procedure call `Status`, a compatible KMS plugin should return `v2beta1` as `StatusResponse.version`, + In response to the `Status` remote procedure call, a compatible KMS plugin should return its KMS compatibility + version as `StatusResponse.version`. That status response should also include "ok" as `StatusResponse.healthz` and a `key_id` (remote KMS KEK ID) as `StatusResponse.key_id`. + The Kubernetes project recommends you make your plugin + compatible with the stable `v2` KMS API. Kubernetes {{< skew currentVersion >}} also supports the + `v2beta1` API for KMS; future Kubernetes releases are likely to continue supporting that beta version. The API server polls the `Status` procedure call approximately every minute when everything is healthy, and every 10 seconds when the plugin is not healthy. Plugins must take care to optimize this call as it will be @@ -258,20 +226,20 @@ Then use the functions and data structures in the stub file to develop the serve API server restart is required to perform KEK rotation. {{< caution >}} - Because you don't control the number of writes performed with the DEK, + Because you don't control the number of writes performed with the DEK, the Kubernetes project recommends rotating the KEK at least every 90 days. {{< /caution >}} * protocol: UNIX domain socket (`unix`) - The plugin is implemented as a gRPC server that listens at UNIX domain socket. - The plugin deployment should create a file on the file system to run the gRPC unix domain socket connection. - The API server (gRPC client) is configured with the KMS provider (gRPC server) unix - domain socket endpoint in order to communicate with it. - An abstract Linux socket may be used by starting the endpoint with `/@`, i.e. `unix:///@foo`. - Care must be taken when using this type of socket as they do not have concept of ACL - (unlike traditional file based sockets). - However, they are subject to Linux networking namespace, so will only be accessible to + The plugin is implemented as a gRPC server that listens at UNIX domain socket. + The plugin deployment should create a file on the file system to run the gRPC unix domain socket connection. + The API server (gRPC client) is configured with the KMS provider (gRPC server) unix + domain socket endpoint in order to communicate with it. + An abstract Linux socket may be used by starting the endpoint with `/@`, i.e. `unix:///@foo`. + Care must be taken when using this type of socket as they do not have concept of ACL + (unlike traditional file based sockets). + However, they are subject to Linux networking namespace, so will only be accessible to containers within the same pod unless host networking is used. ### Integrating a KMS plugin with the remote KMS @@ -363,10 +331,6 @@ The following table summarizes the health check endpoints for each KMS version: These healthcheck endpoint paths are hard coded and generated/controlled by the server. 
The indices for individual healthchecks correspond to the order in which the KMS encryption config is processed.

-At a high level, restarting an API server when a KMS plugin is unhealthy is unlikely to make the situation better.
-It can make the situation significantly worse by throwing away the API server's DEK cache. Thus the general
-recommendation is to ignore the API server KMS healthz checks for liveness purposes, i.e. `/livez?exclude=kms-providers`.
-
Until the steps defined in [Ensuring all secrets are encrypted](#ensuring-all-secrets-are-encrypted) are performed, the `providers` list should end with the `identity: {}` provider to allow unencrypted data to be read. Once all resources are encrypted, the `identity` provider should be removed to prevent the API server from honoring unencrypted data.

For details about the `EncryptionConfiguration` format, please check the
@@ -447,30 +411,10 @@ To switch from a local encryption provider to the `kms` provider and re-encrypt
 kubectl get secrets --all-namespaces -o json | kubectl replace -f -
 ```

-## Disabling encryption at rest
-
-To disable encryption at rest:
+## {{% heading "whatsnext" %}}
-
-1. Place the `identity` provider as the first entry in the configuration file:
+
+
-
-   ```yaml
-   apiVersion: apiserver.config.k8s.io/v1
-   kind: EncryptionConfiguration
-   resources:
-     - resources:
-       - secrets
-       providers:
-       - identity: {}
-       - kms:
-           apiVersion: v2
-           name : myKmsPlugin
-           endpoint: unix:///tmp/socketfile.sock
-   ```
-
-1. Restart all `kube-apiserver` processes.
-
-1. Run the following command to force all secrets to be decrypted.
-
-   ```shell
-   kubectl get secrets --all-namespaces -o json | kubectl replace -f -
-   ```
+
+If you no longer want to use encryption for data persisted in the Kubernetes API, read
+[decrypt data that are already stored at rest](/docs/tasks/administer-cluster/decrypt-data/).
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/change-package-repository.md b/content/en/docs/tasks/administer-cluster/kubeadm/change-package-repository.md
index d39f2a4891e6e..d3b4454e48004 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/change-package-repository.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/change-package-repository.md
@@ -6,21 +6,42 @@ weight: 120

-This page explains how to switch from one Kubernetes package repository to another
-when upgrading Kubernetes minor releases. Unlike deprecated Google-hosted
-repositories, the Kubernetes package repositories are structured in a way that
-there's a dedicated package repository for each Kubernetes minor version.
+This page explains how to enable a package repository for the desired
+Kubernetes minor release upon upgrading a cluster. This is only needed
+for users of the community-owned package repositories hosted at `pkgs.k8s.io`.
+Unlike the legacy package repositories, the community-owned package
+repositories are structured in a way that there's a dedicated package
+repository for each Kubernetes minor version.
+
+{{< note >}}
+This guide only covers a part of the Kubernetes upgrade process. Please see the
+[upgrade guide](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) for
+more information about upgrading Kubernetes clusters.
+{{< /note >}}
+
+{{< note >}}
+This step is only needed upon upgrading a cluster to another **minor** release.
+If you're upgrading to another patch release within the same minor release (e.g.
+v{{< skew currentVersion >}}.5 to v{{< skew currentVersion >}}.7), you don't
+need to follow this guide.
+However, if you're still using the legacy package
+repositories, you'll need to migrate to the new community-owned package
+repositories before upgrading (see the next section for more details on how to
+do this).
+{{< /note >}}

## {{% heading "prerequisites" %}}

-This document assumes that you're already using the Kubernetes community-owned
-package repositories. If that's not the case, it's strongly recommended to migrate
-to the Kubernetes package repositories.
+This document assumes that you're already using the community-owned
+package repositories (`pkgs.k8s.io`). If that's not the case, it's strongly
+recommended to migrate to the community-owned package repositories as described
+in the [official announcement](/blog/2023/08/15/pkgs-k8s-io-introduction/).
+
+{{% legacy-repos-deprecation %}}

### Verifying if the Kubernetes package repositories are used

-If you're unsure whether you're using the Kubernetes package repositories or the
-Google-hosted repository, take the following steps to verify:
+If you're unsure whether you're using the community-owned package repositories or the
+legacy package repositories, take the following steps to verify:

{{< tabs name="k8s_install_versions" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}
@@ -39,7 +60,8 @@ deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io
 ```

**You're using the Kubernetes package repositories and this guide applies to you.**
-Otherwise, it's strongly recommended to migrate to the Kubernetes package repositories.
+Otherwise, it's strongly recommended to migrate to the Kubernetes package repositories
+as described in the [official announcement](/blog/2023/08/15/pkgs-k8s-io-introduction/).

{{% /tab %}}
{{% tab name="CentOS, RHEL or Fedora" %}}
@@ -64,7 +86,35 @@ exclude=kubelet kubeadm kubectl
 ```

**You're using the Kubernetes package repositories and this guide applies to you.**
-Otherwise, it's strongly recommended to migrate to the Kubernetes package repositories.
+Otherwise, it's strongly recommended to migrate to the Kubernetes package repositories
+as described in the [official announcement](/blog/2023/08/15/pkgs-k8s-io-introduction/).
+
+{{% /tab %}}
+
+{{% tab name="openSUSE or SLES" %}}
+
+Print the contents of the file that defines the Kubernetes `zypper` repository:
+
+```shell
+# On your system, this configuration file could have a different name
+cat /etc/zypp/repos.d/kubernetes.repo
+```
+
+If you see a `baseurl` similar to the `baseurl` in the output below:
+
+```
+[kubernetes]
+name=Kubernetes
+baseurl=https://pkgs.k8s.io/core:/stable:/v{{< skew currentVersionAddMinor -1 "." >}}/rpm/
+enabled=1
+gpgcheck=1
+gpgkey=https://pkgs.k8s.io/core:/stable:/v{{< skew currentVersionAddMinor -1 "." >}}/rpm/repodata/repomd.xml.key
+exclude=kubelet kubeadm kubectl
+```
+
+**You're using the Kubernetes package repositories and this guide applies to you.**
+Otherwise, it's strongly recommended to migrate to the Kubernetes package repositories
+as described in the [official announcement](/blog/2023/08/15/pkgs-k8s-io-introduction/).

{{% /tab %}}
{{< /tabs >}}
@@ -86,7 +136,7 @@ This step should be done upon upgrading from one to another Kubernetes minor
release in order to get access to the packages of the desired Kubernetes minor
version.

-{{< tabs name="k8s_install_versions" >}}
+{{< tabs name="k8s_upgrade_versions" >}}
{{% tab name="Ubuntu, Debian or HypriotOS" %}}

1. Open the file that defines the Kubernetes `apt` repository using a text editor of your choice:
@@ -139,10 +189,10 @@ version.
   ```
   [kubernetes]
   name=Kubernetes
-  baseurl=https://pkgs.k8s.io/core:/stable:/v{{< param "version" >}}/rpm/
+  baseurl=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/
   enabled=1
   gpgcheck=1
-  gpgkey=https://pkgs.k8s.io/core:/stable:/v{{< param "version" >}}/rpm/repodata/repomd.xml.key
+  gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key
   exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
   ```
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
index fead85f7e6af4..b745a22792c75 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md
@@ -225,28 +225,11 @@ A CSR represents a request to a CA for a signed certificate for a client.
In kubeadm terms, any certificate that would normally be signed by an on-disk CA can be produced
as a CSR instead. A CA, however, cannot be produced as a CSR.

-### Create certificate signing requests (CSR)
-
-You can create certificate signing requests with `kubeadm certs renew --csr-only`.
-
-Both the CSR and the accompanying private key are given in the output.
-You can pass in a directory with `--csr-dir` to output the CSRs to the specified location.
-If `--csr-dir` is not specified, the default certificate directory (`/etc/kubernetes/pki`) is used.
-
-Certificates can be renewed with `kubeadm certs renew --csr-only`.
-As with `kubeadm init`, an output directory can be specified with the `--csr-dir` flag.
-
-A CSR contains a certificate's name, domains, and IPs, but it does not specify usages.
-It is the responsibility of the CA to specify [the correct cert usages](/docs/setup/best-practices/certificates/#all-certificates)
-when issuing a certificate.
+### Renewal by using certificate signing requests (CSR)

-* In `openssl` this is done with the
-  [`openssl ca` command](https://superuser.com/questions/738612/openssl-ca-keyusage-extension).
-* In `cfssl` you specify
-  [usages in the config file](https://github.com/cloudflare/cfssl/blob/master/doc/cmd/cfssl.txt#L170).
-
-After a certificate is signed using your preferred method, the certificate and the private key
-must be copied to the PKI directory (by default `/etc/kubernetes/pki`).
+Renewal of certificates is possible by generating new CSRs and signing them with the external CA.
+For more details about working with CSRs generated by kubeadm see the section
+[Signing certificate signing requests (CSR) generated by kubeadm](#signing-csr).

## Certificate authority (CA) rotation {#certificate-authority-rotation}
@@ -373,3 +356,246 @@ The following example will generate a kubeconfig file with administrator credentials
 ```shell
 kubeadm kubeconfig user --config example.yaml --client-name admin --validity-period 168h
 ```
+
+## Signing certificate signing requests (CSR) generated by kubeadm {#signing-csr}
+
+You can create certificate signing requests with `kubeadm certs generate-csr`.
+Calling this command will generate `.csr` / `.key` file pairs for regular
+certificates. For certificates embedded in kubeconfig files, the command will
+generate a `.csr` / `.conf` pair where the key is already embedded in the `.conf` file.
+
+A CSR file contains all relevant information for a CA to sign a certificate.
+kubeadm uses a
+[well defined specification](/docs/setup/best-practices/certificates/#all-certificates)
+for all its certificates and CSRs.
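+
+If you want to confirm what a particular CSR actually requests (subject, SANs,
+key usage) before signing it, you can decode it with `openssl`. This is a
+sketch; `apiserver.csr` stands in for whichever CSR file you are inspecting
+under the default directory described below:
+
+```shell
+# Print the CSR in human-readable form without signing anything
+openssl req -in /etc/kubernetes/pki/apiserver.csr -noout -text
+```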
+
+The default certificate directory is `/etc/kubernetes/pki`, while the default
+directory for kubeconfig files is `/etc/kubernetes`. These defaults can be
+overridden with the flags `--cert-dir` and `--kubeconfig-dir`, respectively.
+
+To pass custom options to `kubeadm certs generate-csr` use the `--config` flag,
+which accepts a [kubeadm configuration](/docs/reference/config-api/kubeadm-config.v1beta3/)
+file, similarly to commands such as `kubeadm init`. Any specification such
+as extra SANs and custom IP addresses must be stored in the same configuration
+file and used for all relevant kubeadm commands by passing it as `--config`.
+
+{{< note >}}
+This guide will cover the usage of the `openssl` command for signing the CSRs,
+but you can use your preferred tools.
+{{< /note >}}
+
+{{< note >}}
+This guide will use the default Kubernetes directory `/etc/kubernetes`, which requires
+a super user. If you are following this guide with permissive directories
+(by passing `--cert-dir` and `--kubeconfig-dir`) you can omit the `sudo` command.
+But note that the resulting files must be copied to the `/etc/kubernetes` tree,
+so that `kubeadm init` or `kubeadm join` will find them.
+{{< /note >}}
+
+### Preparing CA and service account files
+
+On the primary control plane node, where `kubeadm init` will be executed, call the following
+commands:
+
+```shell
+sudo kubeadm init phase certs ca
+sudo kubeadm init phase certs etcd-ca
+sudo kubeadm init phase certs front-proxy-ca
+sudo kubeadm init phase certs sa
+```
+
+This will populate the folders `/etc/kubernetes/pki` and `/etc/kubernetes/pki/etcd`
+with all self-signed CA files (certificates and keys) and service account (public and
+private keys) that kubeadm needs for a control plane node.
+
+{{< note >}}
+If you are using an external CA, you must generate the same files out of band and manually
+copy them to the primary control plane node in `/etc/kubernetes`. Once all CSRs
+are signed, you can delete the root CA key (`ca.key`) as noted in the
+[External CA mode](#external-ca-mode) section.
+{{< /note >}}
+
+For secondary control plane nodes (`kubeadm join --control-plane`) there is no need to call
+the above commands. Depending on how you set up the
+[High Availability](/docs/setup/production-environment/tools/kubeadm/high-availability)
+cluster, you either have to manually copy the same files from the primary
+control plane node, or use the automated `--upload-certs` functionality of `kubeadm init`.
+
+### Generate CSRs
+
+The `kubeadm certs generate-csr` command generates CSRs for all known certificates
+managed by kubeadm. Once the command is done you must manually delete `.csr`, `.conf`
+or `.key` files that you don't need.
+
+#### Considerations for kubelet.conf {#considerations-kubelet-conf}
+
+This section applies to both control plane and worker nodes.
+
+If you have deleted the `ca.key` file from control plane nodes
+([External CA mode](#external-ca-mode)), the active kube-controller-manager in
+this cluster will not be able to sign kubelet client certificates. If no external
+method for signing these certificates exists in your setup (such as an
+[external signer](#set-up-a-signer)), you could manually sign the `kubelet.conf.csr`
+as explained in this guide.
+
+Note that this also means that the automatic
+[kubelet client certificate rotation](/docs/tasks/tls/certificate-rotation/#enabling-client-certificate-rotation)
+will be disabled.
+If so, close to certificate expiration, you must generate
+a new `kubelet.conf.csr`, sign the certificate, embed it in `kubelet.conf`
+and restart the kubelet.
+
+If this does not apply to your setup, you can skip processing the `kubelet.conf.csr`
+on secondary control plane nodes and on worker nodes (all nodes that call `kubeadm join ...`).
+That is because the active kube-controller-manager will be responsible
+for signing new kubelet client certificates.
+
+{{< note >}}
+Processing the `kubelet.conf.csr` on the primary control plane node
+(`kubeadm init`) is required, because that is considered the node that
+bootstraps the cluster and a pre-populated `kubelet.conf` is needed.
+{{< /note >}}
+
+#### Control plane nodes
+
+Execute the following command on primary (`kubeadm init`) and secondary
+(`kubeadm join --control-plane`) control plane nodes to generate all CSR files:
+
+```shell
+sudo kubeadm certs generate-csr
+```
+
+If external etcd is to be used, follow the
+[External etcd with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/#external-etcd-nodes)
+guide to understand what CSR files are needed on the kubeadm and etcd nodes. Other
+`.csr` and `.key` files under `/etc/kubernetes/pki/etcd` can be removed.
+
+Based on the explanation in
+[Considerations for kubelet.conf](#considerations-kubelet-conf), keep or delete
+the `kubelet.conf` and `kubelet.conf.csr` files.
+
+#### Worker nodes
+
+Based on the explanation in
+[Considerations for kubelet.conf](#considerations-kubelet-conf), optionally call:
+
+```shell
+sudo kubeadm certs generate-csr
+```
+
+and keep only the `kubelet.conf` and `kubelet.conf.csr` files. Alternatively skip
+the steps for worker nodes entirely.
+
+### Signing CSRs for all certificates
+
+{{< note >}}
+If you are using an external CA and already have CA serial number files (`.srl`) for
+`openssl` you can copy such files to a kubeadm node where CSRs will be processed.
+`.srl` files to copy are `/etc/kubernetes/pki/ca.srl`,
+`/etc/kubernetes/pki/front-proxy-ca.srl` and `/etc/kubernetes/pki/etcd/ca.srl`.
+The files can then be moved to a new node where CSR files will be processed.
+
+If a `.srl` file is missing for a CA on a node, the script below will generate a new SRL file
+with a random starting serial number.
+
+To read more about `.srl` files see the
+[`openssl`](https://www.openssl.org/docs/man3.0/man1/openssl-x509.html)
+documentation for the `-CAserial` flag.
+{{< /note >}}
+
+Repeat this step for all nodes that have CSR files.
+
+Write the following script in the `/etc/kubernetes` directory, navigate to the directory
+and execute the script. The script will generate certificates for all CSR files that are
+present in the `/etc/kubernetes` tree.
+
+```bash
+#!/bin/bash
+
+# Set certificate expiration time in days
+DAYS=365
+
+# Process all CSR files except those for front-proxy and etcd
+find ./ -name "*.csr" | grep -v "pki/etcd" | grep -v "front-proxy" | while read -r FILE;
+do
+  echo "* Processing ${FILE} ..."
+  FILE=${FILE%.*} # Trim the extension
+  if [ -f "./pki/ca.srl" ]; then
+    SERIAL_FLAG="-CAserial ./pki/ca.srl"
+  else
+    SERIAL_FLAG="-CAcreateserial"
+  fi
+  openssl x509 -req -days "${DAYS}" -CA ./pki/ca.crt -CAkey ./pki/ca.key ${SERIAL_FLAG} \
+    -in "${FILE}.csr" -out "${FILE}.crt"
+  sleep 2
+done
+
+# Process all etcd CSRs
+find ./pki/etcd -name "*.csr" | while read -r FILE;
+do
+  echo "* Processing ${FILE} ..."
+  FILE=${FILE%.*} # Trim the extension
+  if [ -f "./pki/etcd/ca.srl" ]; then
+    SERIAL_FLAG="-CAserial ./pki/etcd/ca.srl"
+  else
+    SERIAL_FLAG="-CAcreateserial"
+  fi
+  openssl x509 -req -days "${DAYS}" -CA ./pki/etcd/ca.crt -CAkey ./pki/etcd/ca.key ${SERIAL_FLAG} \
+    -in "${FILE}.csr" -out "${FILE}.crt"
+done
+
+# Process front-proxy CSRs
+echo "* Processing ./pki/front-proxy-client.csr ..."
+openssl x509 -req -days "${DAYS}" -CA ./pki/front-proxy-ca.crt -CAkey ./pki/front-proxy-ca.key -CAcreateserial \
+  -in ./pki/front-proxy-client.csr -out ./pki/front-proxy-client.crt
+```
+
+### Embedding certificates in kubeconfig files
+
+Repeat this step for all nodes that have CSR files.
+
+Write the following script in the `/etc/kubernetes` directory, navigate to the directory
+and execute the script. The script will take the `.crt` files that were signed for
+kubeconfig files from CSRs in the previous step and will embed them in the kubeconfig files.
+
+```bash
+#!/bin/bash
+
+CLUSTER=kubernetes
+find ./ -name "*.conf" | while read -r FILE;
+do
+  echo "* Processing ${FILE} ..."
+  KUBECONFIG="${FILE}" kubectl config set-cluster "${CLUSTER}" --certificate-authority ./pki/ca.crt --embed-certs
+  USER=$(KUBECONFIG="${FILE}" kubectl config view -o jsonpath='{.users[0].name}')
+  KUBECONFIG="${FILE}" kubectl config set-credentials "${USER}" --client-certificate "${FILE}.crt" --embed-certs
+done
+```
+
+### Performing cleanup {#post-csr-cleanup}
+
+Perform this step on all nodes that have CSR files.
+
+Write the following script in the `/etc/kubernetes` directory, navigate to the directory
+and execute the script.
+
+```bash
+#!/bin/bash
+
+# Cleanup CSR files
+rm -f ./*.csr ./pki/*.csr ./pki/etcd/*.csr # Clean all CSR files
+
+# Cleanup CRT files that were already embedded in kubeconfig files
+rm -f ./*.crt
+```
+
+Optionally, move `.srl` files to the next node to be processed.
+
+Optionally, if using an external CA, remove the `/etc/kubernetes/pki/ca.key` file,
+as explained in the [External CA mode](#external-ca-mode) section.
+
+### kubeadm node initialization
+
+Once CSR files have been signed and required certificates are in place on the hosts
+you want to use as nodes, you can use the commands `kubeadm init` and `kubeadm join`
+to create a Kubernetes cluster from these nodes. During `init` and `join`, kubeadm
+uses existing certificates, encryption keys and kubeconfig files that it finds in the
+`/etc/kubernetes` tree on the host's local filesystem.
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-reconfigure.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-reconfigure.md
index ec372fe231b24..d2d6a83d3565a 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-reconfigure.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-reconfigure.md
@@ -99,11 +99,14 @@ kubeadm init phase certs <component-name> --config <config-file>

To write new manifest files in `/etc/kubernetes/manifests` you can use:

```shell
+# For Kubernetes control plane components
kubeadm init phase control-plane <component-name> --config <config-file>
+# For local etcd
+kubeadm init phase etcd local --config <config-file>
```

The `<config-file>` contents must match the updated `ClusterConfiguration`.
-The `<component-name>` value must be the name of the component.
+The `<component-name>` value must be a name of a Kubernetes control plane component (`apiserver`, `controller-manager` or `scheduler`).

{{< note >}}
Updating a file in `/etc/kubernetes/manifests` will tell the kubelet to restart
the static Pod for the corresponding component.
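
For example, assuming you keep your updated `ClusterConfiguration` in a file
named `kubeadm-config.yaml` (an illustrative name, not something kubeadm
requires), regenerating the manifests could look like this sketch:

```shell
# Regenerate the kube-apiserver static Pod manifest
sudo kubeadm init phase control-plane apiserver --config kubeadm-config.yaml
# Regenerate the static Pod manifest for local etcd
sudo kubeadm init phase etcd local --config kubeadm-config.yaml
```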
@@ -131,7 +134,7 @@ The configuration is located under the `data.kubelet` key.

To reflect the change on kubeadm nodes you must do the following:
- Log in to a kubeadm node
- Run `kubeadm upgrade node phase kubelet-config` to download the latest `kubelet-config`
-ConfigMap contents into the local file `/var/lib/kubelet/config.conf`
+ConfigMap contents into the local file `/var/lib/kubelet/config.yaml`
- Edit the file `/var/lib/kubelet/kubeadm-flags.env` to apply additional configuration with
flags
- Restart the kubelet service with `systemctl restart kubelet`
@@ -142,10 +145,10 @@ Do these changes one node at a time to allow workloads to be rescheduled properly.

{{< note >}}
During `kubeadm upgrade`, kubeadm downloads the `KubeletConfiguration` from the
-`kubelet-config` ConfigMap and overwrite the contents of `/var/lib/kubelet/config.conf`.
+`kubelet-config` ConfigMap and overwrites the contents of `/var/lib/kubelet/config.yaml`.
This means that node local configuration must be applied either by flags in
`/var/lib/kubelet/kubeadm-flags.env` or by manually updating the contents of
-`/var/lib/kubelet/config.conf` after `kubeadm upgrade`, and then restarting the kubelet.
+`/var/lib/kubelet/config.yaml` after `kubeadm upgrade`, and then restarting the kubelet.
{{< /note >}}

### Applying kube-proxy configuration changes
@@ -264,14 +267,14 @@ the set of node specific patches must be updated accordingly.

#### Persisting kubelet reconfiguration

-Any changes to the `KubeletConfiguration` stored in `/var/lib/kubelet/config.conf` will be overwritten on
+Any changes to the `KubeletConfiguration` stored in `/var/lib/kubelet/config.yaml` will be overwritten on
`kubeadm upgrade` by downloading the contents of the cluster wide `kubelet-config` ConfigMap.
-To persist kubelet node specific configuration either the file `/var/lib/kubelet/config.conf`
+To persist kubelet node specific configuration either the file `/var/lib/kubelet/config.yaml`
has to be updated manually post-upgrade or the file `/var/lib/kubelet/kubeadm-flags.env` can include flags.
The kubelet flags override the associated `KubeletConfiguration` options, but note that
some of the flags are deprecated.

-A kubelet restart will be required after changing `/var/lib/kubelet/config.conf` or
+A kubelet restart will be required after changing `/var/lib/kubelet/config.yaml` or
`/var/lib/kubelet/kubeadm-flags.env`.

## {{% heading "whatsnext" %}}
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
index 075b610b688b5..12e7298a30ad9 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md
@@ -54,11 +54,13 @@ The upgrade workflow at high level is the following:

## Changing the package repository

-If you're using the Kubernetes community-owned repositories, you need to change
-the package repository to one that contains packages for your desired Kubernetes
-minor version. This is explained in [Changing the Kubernetes package repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/)
+If you're using the community-owned package repositories (`pkgs.k8s.io`), you need to
+enable the package repository for the desired Kubernetes minor release. This is explained in
+[Changing the Kubernetes package repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/)
document.
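+
+As a concrete sketch for Debian/Ubuntu nodes (the file path and the version
+numbers here are illustrative; use the repository definition file and target
+minor release that apply to your systems), switching the repository from
+v1.29 to v1.30 could look like:
+
+```shell
+# Point the apt repository at the new minor release ...
+sudo sed -i 's|/core:/stable:/v1.29/|/core:/stable:/v1.30/|' /etc/apt/sources.list.d/kubernetes.list
+# ... and refresh the package index
+sudo apt-get update
+```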
+{{% legacy-repos-deprecation %}}
+
## Determine which version to upgrade to

Find the latest patch release for Kubernetes {{< skew currentVersion >}} using the OS package manager:
diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md
index cff79362570c2..e61c6f3d2b134 100644
--- a/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md
+++ b/content/en/docs/tasks/administer-cluster/kubeadm/upgrading-linux-nodes.md
@@ -19,11 +19,13 @@ upgrade the control plane nodes before upgrading your Linux Worker nodes.

## Changing the package repository

-If you're using the Kubernetes community-owned repositories, you need to change
-the package repository to one that contains packages for your desired Kubernetes
-minor version. This is explained in [Changing the Kubernetes package repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/)
+If you're using the community-owned package repositories (`pkgs.k8s.io`), you need to
+enable the package repository for the desired Kubernetes minor release. This is explained in
+[Changing the Kubernetes package repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/)
document.

+{{% legacy-repos-deprecation %}}
+
## Upgrading worker nodes

### Upgrade kubeadm
@@ -60,6 +62,7 @@ sudo kubeadm upgrade node
Prepare the node for maintenance by marking it unschedulable and evicting the workloads:

```shell
+# execute this command on a control plane node
# replace <node-to-drain> with the name of your node you are draining
kubectl drain <node-to-drain> --ignore-daemonsets
```
@@ -97,6 +100,7 @@ kubectl drain <node-to-drain> --ignore-daemonsets
Bring the node back online by marking it schedulable:

```shell
+# execute this command on a control plane node
# replace <node-to-uncordon> with the name of your node
kubectl uncordon <node-to-uncordon>
```
diff --git a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
index 506dd2723e00b..815f9f70c3aec 100644
--- a/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
+++ b/content/en/docs/tasks/administer-cluster/kubelet-config-file.md
@@ -35,14 +35,22 @@ address: "192.168.0.8"
port: 20250
serializeImagePulls: false
evictionHard:
-  memory.available: "200Mi"
+  memory.available: "100Mi"
+  nodefs.available: "10%"
+  nodefs.inodesFree: "5%"
+  imagefs.available: "15%"
```

-In the example, the kubelet is configured to serve on IP address 192.168.0.8 and port 20250, pull images in parallel,
-and evict Pods when available memory drops below 200Mi. Since only one of the four evictionHard thresholds is configured,
-other evictionHard thresholds are reset to 0 from their built-in defaults.
-All other kubelet configuration values are left at their built-in defaults, unless overridden
-by flags. Command line flags which target the same value as a config file will override that value.
+In this example, the kubelet is configured with the following settings:
+
+1. `address`: The kubelet will serve on IP address `192.168.0.8`.
+2. `port`: The kubelet will serve on port `20250`.
+3. `serializeImagePulls`: Image pulls will be done in parallel.
+4. `evictionHard`: The kubelet will evict Pods under one of the following conditions:
+   - When the node's available memory drops below 100MiB.
+   - When the node's main filesystem's available space is less than 10%.
+   - When the image filesystem's available space is less than 15%.
+ - When more than 95% of the node's main filesystem's inodes are in use. {{< note >}} In the example, by changing the default value of only one parameter for @@ -51,6 +59,9 @@ will be set to zero. In order to provide custom values, you should provide all the threshold values respectively. {{< /note >}} +The `imagefs` is an optional filesystem that container runtimes use to store container +images and container writable layers. + ## Start a kubelet process configured via the config file {{< note >}} diff --git a/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md b/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md index b46b90c9d612e..9e91f1dc31206 100644 --- a/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md +++ b/content/en/docs/tasks/administer-cluster/kubelet-credential-provider.md @@ -3,7 +3,6 @@ title: Configure a kubelet image credential provider reviewers: - liggitt - cheftako -description: Configure the kubelet's image credential provider plugin content_type: task min-kubernetes-server-version: v1.26 weight: 120 diff --git a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd.md b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd.md index 9bbba039e0d9c..a4adb12234622 100644 --- a/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd.md +++ b/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/migrate-dockershim-dockerd.md @@ -76,6 +76,8 @@ instructions for that tool. 1. Open `/var/lib/kubelet/kubeadm-flags.env` on each affected node. 1. Modify the `--container-runtime-endpoint` flag to `unix:///var/run/cri-dockerd.sock`. +1. Modify the `--container-runtime` flag to `remote` + (unavailable in Kubernetes v1.27 and later). The kubeadm tool stores the node's socket as an annotation on the `Node` object in the control plane. To modify this socket for each affected node: @@ -118,4 +120,4 @@ kubectl uncordon ## {{% heading "whatsnext" %}} * Read the [dockershim removal FAQ](/dockershim/). -* [Learn how to migrate from Docker Engine with dockershim to containerd](/docs/tasks/administer-cluster/migrating-from-dockershim/change-runtime-containerd/). \ No newline at end of file +* [Learn how to migrate from Docker Engine with dockershim to containerd](/docs/tasks/administer-cluster/migrating-from-dockershim/change-runtime-containerd/). diff --git a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md index 66c0fc8f2c655..fedc88f2b2757 100644 --- a/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md +++ b/content/en/docs/tasks/administer-cluster/reserve-compute-resources.md @@ -96,7 +96,7 @@ system daemon should ideally run within its own child control group. Refer to for more details on recommended control group hierarchy. Note that Kubelet **does not** create `--kube-reserved-cgroup` if it doesn't -exist. Kubelet will fail if an invalid cgroup is specified. With `systemd` +exist. The kubelet will fail to start if an invalid cgroup is specified. With `systemd` cgroup driver, you should follow a specific pattern for the name of the cgroup you define: the name should be the value you set for `--kube-reserved-cgroup`, with `.slice` appended. 
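
For example, following the pattern just described, if you start the kubelet with
`--kube-reserved-cgroup=kube`, the matching systemd unit would be named
`kube.slice`. This is a minimal sketch of pre-creating that slice; the names are
illustrative and should be adapted to your setup:

```shell
# Define a systemd slice for Kubernetes system daemons ...
sudo tee /etc/systemd/system/kube.slice >/dev/null <<'EOF'
[Unit]
Description=Slice for Kubernetes system daemons
EOF
# ... then load and start it before starting the kubelet
sudo systemctl daemon-reload
sudo systemctl start kube.slice
```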
diff --git a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md index df7e7e9a2facf..dc6c0240fe747 100644 --- a/content/en/docs/tasks/administer-cluster/running-cloud-controller.md +++ b/content/en/docs/tasks/administer-cluster/running-cloud-controller.md @@ -46,17 +46,15 @@ integration, it should not be too different from the requirements when running Successfully running cloud-controller-manager requires some changes to your cluster configuration. -* `kube-apiserver` and `kube-controller-manager` MUST NOT specify the `--cloud-provider` - flag. This ensures that it does not run any cloud specific loops that would be run by - cloud controller manager. In the future, this flag will be deprecated and removed. -* `kubelet` must run with `--cloud-provider=external`. This is to ensure that the - kubelet is aware that it must be initialized by the cloud controller manager - before it is scheduled any work. +* `kubelet`, `kube-apiserver`, and `kube-controller-manager` must be configured according to + whether you use an external CCM. If you run an external CCM (rather than the internal cloud + controller loops in the Kubernetes Controller Manager), then `--cloud-provider=external` + must be specified. Otherwise, it must not be specified. Keep in mind that setting up your cluster to use cloud controller manager will change your cluster behaviour in a few ways: -* kubelets specifying `--cloud-provider=external` will add a taint +* Components that specify `--cloud-provider=external` will add a taint `node.cloudprovider.kubernetes.io/uninitialized` with an effect `NoSchedule` during initialization. This marks the node as needing a second initialization from an external controller before it can be scheduled work. Note that in the diff --git a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md index 9347dc5c3ab90..cccf4b8350a41 100644 --- a/content/en/docs/tasks/administer-cluster/sysctl-cluster.md +++ b/content/en/docs/tasks/administer-cluster/sysctl-cluster.md @@ -76,6 +76,7 @@ The following sysctls are supported in the _safe_ set: - `net.ipv4.tcp_syncookies`, - `net.ipv4.ping_group_range` (since Kubernetes 1.18), - `net.ipv4.ip_unprivileged_port_start` (since Kubernetes 1.22). +- `net.ipv4.ip_local_reserved_ports` (since Kubernetes 1.27). {{< note >}} There are some exceptions to the set of safe sysctls: diff --git a/content/en/docs/tasks/administer-cluster/verify-signed-artifacts.md b/content/en/docs/tasks/administer-cluster/verify-signed-artifacts.md index 660b4e903bc96..f1f8a232a1b17 100644 --- a/content/en/docs/tasks/administer-cluster/verify-signed-artifacts.md +++ b/content/en/docs/tasks/administer-cluster/verify-signed-artifacts.md @@ -15,7 +15,7 @@ You will need to have the following tools installed: - `cosign` ([install guide](https://docs.sigstore.dev/cosign/installation/)) - `curl` (often provided by your operating system) -- `jq` ([download jq](https://stedolan.github.io/jq/download/)) +- `jq` ([download jq](https://jqlang.github.io/jq/download/)) ## Verifying binary signatures @@ -51,7 +51,7 @@ cosign verify-blob "$BINARY" \ {{< note >}} Cosign 2.0 requires the `--certificate-identity` and `--certificate-oidc-issuer` options. -To learn more about keyless signing, please refer to [Keyless Signatures](https://docs.sigstore.dev/cosign/keyless).
+To learn more about keyless signing, please refer to [Keyless Signatures](https://docs.sigstore.dev/signing/overview/). Previous versions of Cosign required that you set `COSIGN_EXPERIMENTAL=1`. diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md index 7245624bf8349..2f3dd8dc0ae92 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -109,6 +109,10 @@ stringData: password: ``` +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} + When you retrieve the Secret data, the command returns the encoded values, and not the plaintext values you provided in `stringData`. @@ -152,6 +156,10 @@ stringData: username: administrator ``` +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} + The `Secret` object is created as follows: ```yaml diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index 51f66d44be347..36ce3b5875c4c 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -40,6 +40,10 @@ You must use single quotes `''` to escape special characters such as `$`, `\`, `*`, `=`, and `!` in your strings. If you don't, your shell will interpret these characters. +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} + ### Use source files 1. Store the credentials in files: diff --git a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md index bc14e4cecb2dd..364b461614381 100644 --- a/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md +++ b/content/en/docs/tasks/configmap-secret/managing-secret-using-kustomize.md @@ -24,6 +24,10 @@ You can generate a Secret by defining a `secretGenerator` in a literal values. For example, the following instructions create a Kustomization file for the username `admin` and the password `1f2d1e2e67df`. +{{< note >}} +The `stringData` field for a Secret does not work well with server-side apply. +{{< /note >}} + ### Create the Kustomization file {{< tabs name="Secret data" >}} @@ -35,7 +39,7 @@ secretGenerator: - password=1f2d1e2e67df {{< /tab >}} {{% tab name="Files" %}} -1. Store the credentials in files with the values encoded in base64: +1. Store the credentials in files. 
The filenames are the keys of the secret: ```shell echo -n 'admin' > ./username.txt @@ -146,4 +150,4 @@ kubectl delete secret db-user-pass - Read more about the [Secret concept](/docs/concepts/configuration/secret/) - Learn how to [manage Secrets using kubectl](/docs/tasks/configmap-secret/managing-secret-using-kubectl/) -- Learn how to [manage Secrets using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) \ No newline at end of file +- Learn how to [manage Secrets using config file](/docs/tasks/configmap-secret/managing-secret-using-config-file/) diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md index c08fc1250448f..4d0b744d7d206 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md @@ -207,7 +207,7 @@ can't it is considered a failure. As you can see, configuration for a TCP check is quite similar to an HTTP check. This example uses both readiness and liveness probes. The kubelet will send the -first readiness probe 5 seconds after the container starts. This will attempt to +first readiness probe 15 seconds after the container starts. This will attempt to connect to the `goproxy` container on port 8080. If the probe succeeds, the Pod will be marked as ready. The kubelet will continue to run this check every 10 seconds. @@ -358,8 +358,8 @@ Readiness probes runs on the container during its whole lifecycle. {{< /note >}} {{< caution >}} -Liveness probes *do not* wait for readiness probes to succeed. -If you want to wait before executing a liveness probe you should use +The readiness and liveness probes do not depend on each other to succeed. +If you want to wait before executing a readiness probe, you should use `initialDelaySeconds` or a `startupProbe`. {{< /caution >}} @@ -393,8 +393,9 @@ liveness and readiness checks: * `initialDelaySeconds`: Number of seconds after the container has started before startup, liveness or readiness probes are initiated. If a startup probe is defined, liveness and - readiness probe delays do not begin until the startup probe has succeeded. - Defaults to 0 seconds. Minimum value is 0. + readiness probe delays do not begin until the startup probe has succeeded. If the value of + `periodSeconds` is greater than `initialDelaySeconds`, then `initialDelaySeconds` will be + ignored. Defaults to 0 seconds. Minimum value is 0. * `periodSeconds`: How often (in seconds) to perform the probe. Default to 10 seconds. The minimum value is 1. * `timeoutSeconds`: Number of seconds after which the probe times out. @@ -486,6 +487,26 @@ startupProbe: value: "" ``` +{{< note >}} +When the kubelet probes a Pod using HTTP, it only follows redirects if the redirect +is to the same host.
If the kubelet receives 11 or more redirects during probing, the probe is considered successful +and a related Event is created: + +```none +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 29m default-scheduler Successfully assigned default/httpbin-7b8bc9cb85-bjzwn to daocloud + Normal Pulling 29m kubelet Pulling image "docker.io/kennethreitz/httpbin" + Normal Pulled 24m kubelet Successfully pulled image "docker.io/kennethreitz/httpbin" in 5m12.402735213s + Normal Created 24m kubelet Created container httpbin + Normal Started 24m kubelet Started container httpbin + Warning ProbeWarning 4m11s (x1197 over 24m) kubelet Readiness probe warning: Probe terminated redirects +``` + +If the kubelet receives a redirect where the hostname is different from the request, the outcome of the probe is treated as successful and kubelet creates an event to report the redirect failure. +{{< /note >}} + ### TCP probes For a TCP probe, the kubelet makes the probe connection at the node, not in the Pod, which diff --git a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md index dc1a01abb00ad..0d6c9859dc97f 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md +++ b/content/en/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md @@ -98,6 +98,12 @@ read-write by a single Node. It defines the [StorageClass name](/docs/concepts/s `manual` for the PersistentVolume, which will be used to bind PersistentVolumeClaim requests to this PersistentVolume. +{{< note >}} +This example uses the `ReadWriteOnce` access mode, for simplicity. For +production use, the Kubernetes project recommends using the `ReadWriteOncePod` +access mode instead. 
+{{< /note >}} + Create the PersistentVolume: ```shell diff --git a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md index 9a49ad3063054..b7d942b30e589 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/en/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -526,7 +526,7 @@ Before proceeding, clean up some of the ConfigMaps you made: ```bash kubectl delete configmap special-config kubectl delete configmap env-config -kubectl delete configmap -l 'game-config in (config-4,config-5)’ +kubectl delete configmap -l 'game-config in (config-4,config-5)' ``` Now that you have learned to define ConfigMaps, you can move on to the next @@ -910,7 +910,7 @@ kubectl delete pod dapi-test-pod --now # You might already have removed the next set kubectl delete configmaps/special-config configmaps/env-config -kubectl delete configmap -l 'game-config in (config-4,config-5)’ +kubectl delete configmap -l 'game-config in (config-4,config-5)' ``` If you created a directory `configure-pod-container` and no longer need it, you should remove that too, diff --git a/content/en/docs/tasks/configure-pod-container/configure-service-account.md b/content/en/docs/tasks/configure-pod-container/configure-service-account.md index 8bea409cf213d..002fc3708e965 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/en/docs/tasks/configure-pod-container/configure-service-account.md @@ -1,6 +1,6 @@ --- reviewers: -- bprashanth +- enj - liggitt - thockin title: Configure Service Accounts for Pods @@ -184,6 +184,16 @@ ServiceAccount. You can request a specific token duration using the `--duration` command line argument to `kubectl create token` (the actual duration of the issued token might be shorter, or could even be longer). +When the `ServiceAccountTokenNodeBinding` and `ServiceAccountTokenNodeBindingValidation` +features are enabled and the `KUBECTL_NODE_BOUND_TOKENS` environment variable is set to `true`, +it is possible to create a service account token that is directly bound to a `Node`: + +```shell +KUBECTL_NODE_BOUND_TOKENS=true kubectl create token build-robot --bound-object-kind Node --bound-object-name node-001 --bound-object-uid 123...456 +``` + +The token will be valid until it expires or either the associated `Node` or service account is deleted. + {{< note >}} Versions of Kubernetes before v1.22 automatically created long term credentials for accessing the Kubernetes API. This older mechanism was based on creating token Secrets @@ -262,6 +272,16 @@ Secret somewhere that your terminal / computer screen could be seen by an onlook When you delete a ServiceAccount that has an associated Secret, the Kubernetes control plane automatically cleans up the long-lived token from that Secret. +{{< note >}} +If you view the ServiceAccount using: + +`kubectl get serviceaccount build-robot -o yaml` + +you can't see the `build-robot-secret` Secret in the ServiceAccount API object's +[`.secrets`](/docs/reference/kubernetes-api/authentication-resources/service-account-v1/) field +because that field is only populated with auto-generated Secrets. +{{< /note >}} + ## Add ImagePullSecrets to a service account First, [create an imagePullSecret](/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod).
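+As a sketch of a typical next step (the Secret name `myregistrykey` is an assumption
+for illustration), you can then attach the pull secret to a service account so that
+Pods using that service account get it automatically:
+
+```shell
+# Patch the default service account in the current namespace to reference the Secret
+kubectl patch serviceaccount default -p '{"imagePullSecrets": [{"name": "myregistrykey"}]}'
+```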
@@ -398,6 +418,39 @@ You can configure this behavior for the `spec` of a Pod using a [projected volume](/docs/concepts/storage/volumes/#projected) type called `ServiceAccountToken`. +The token from this projected volume is a JSON Web Token (JWT). +The JSON payload of this token follows a well-defined schema. Here is an example payload for a pod-bound token: + +```yaml +{ + "aud": [ # matches the requested audiences, or the API server's default audiences when none are explicitly requested + "https://kubernetes.default.svc" + ], + "exp": 1731613413, + "iat": 1700077413, + "iss": "https://kubernetes.default.svc", # matches the first value passed to the --service-account-issuer flag + "jti": "ea28ed49-2e11-4280-9ec5-bc3d1d84661a", # ServiceAccountTokenJTI feature must be enabled for the claim to be present + "kubernetes.io": { + "namespace": "kube-system", + "node": { # ServiceAccountTokenPodNodeInfo feature must be enabled for the API server to add this node reference claim + "name": "127.0.0.1", + "uid": "58456cb0-dd00-45ed-b797-5578fdceaced" + }, + "pod": { + "name": "coredns-69cbfb9798-jv9gn", + "uid": "778a530c-b3f4-47c0-9cd5-ab018fb64f33" + }, + "serviceaccount": { + "name": "coredns", + "uid": "a087d5a0-e1dd-43ec-93ac-f13d89cd13af" + }, + "warnafter": 1700081020 + }, + "nbf": 1700077413, + "sub": "system:serviceaccount:kube-system:coredns" +} +``` + ### Launch a Pod using service account token projection To provide a Pod with a token with an audience of `vault` and a validity duration diff --git a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md index 5f11dba1a1451..207d589d3535c 100644 --- a/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md +++ b/content/en/docs/tasks/configure-pod-container/enforce-standards-admission-controller.md @@ -33,12 +33,12 @@ For v1.22, use [v1alpha1](https://v1-22.docs.kubernetes.io/docs/tasks/configure- {{< /note >}} ```yaml -apiVersion: apiserver.config.k8s.io/v1 # see compatibility note +apiVersion: apiserver.config.k8s.io/v1 kind: AdmissionConfiguration plugins: - name: PodSecurity configuration: - apiVersion: pod-security.admission.config.k8s.io/v1 + apiVersion: pod-security.admission.config.k8s.io/v1 # see compatibility note kind: PodSecurityConfiguration # Defaults applied when a mode label is not set. # diff --git a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md index 24d913beca89d..fe973d912f67c 100644 --- a/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/en/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -38,7 +38,8 @@ docker login When prompted, enter your Docker ID, and then the credential you want to use (access token, or the password for your Docker ID). -The login process creates or updates a `config.json` file that holds an authorization token. Review [how Kubernetes interprets this file](/docs/concepts/containers/images#config-json). +The login process creates or updates a `config.json` file that holds an authorization token. +Review [how Kubernetes interprets this file](/docs/concepts/containers/images#config-json).
View the `config.json` file: @@ -60,7 +61,8 @@ The output contains a section similar to this: {{< note >}} If you use a Docker credentials store, you won't see that `auth` entry but a `credsStore` entry with the name of the store as value. -In that case, you can create a secret directly. See [Create a Secret by providing credentials on the command line](#create-a-secret-by-providing-credentials-on-the-command-line). +In that case, you can create a secret directly. +See [Create a Secret by providing credentials on the command line](#create-a-secret-by-providing-credentials-on-the-command-line). {{< /note >}} ## Create a Secret based on existing credentials {#registry-secret-existing-credentials} @@ -211,7 +213,14 @@ kubectl get pod private-reg ``` {{< note >}} -In case the Pod fails to start with the status `ImagePullBackOff`, view the Pod events: +To use image pull secrets for a Pod (or a Deployment, or other object that +has a pod template that you are using), you need to make sure that the appropriate +Secret does exist in the right namespace. The namespace to use is the same +namespace where you defined the Pod. +{{< /note >}} + +Also, in case the Pod fails to start with the status `ImagePullBackOff`, view the Pod events: + ```shell kubectl describe pod private-reg ``` @@ -229,12 +238,6 @@ Events: ... FailedToRetrieveImagePullSecret ... Unable to retrieve some image pull secrets (); attempting to pull the image may not succeed. ``` - -{{< /note >}} - - - - ## {{% heading "whatsnext" %}} * Learn more about [Secrets](/docs/concepts/configuration/secret/) diff --git a/content/en/docs/tasks/configure-pod-container/static-pod.md b/content/en/docs/tasks/configure-pod-container/static-pod.md index ed692dec17de0..d4da549d5f68c 100644 --- a/content/en/docs/tasks/configure-pod-container/static-pod.md +++ b/content/en/docs/tasks/configure-pod-container/static-pod.md @@ -97,17 +97,16 @@ For example, this is how to start a simple web server as a static Pod: EOF ``` -1. Configure your kubelet on the node to use this directory by running it with +1. Configure the kubelet on that node to set a `staticPodPath` value in the + [kubelet configuration file](/docs/reference/config-api/kubelet-config.v1beta1/). + See [Set Kubelet Parameters Via A Configuration File](/docs/tasks/administer-cluster/kubelet-config-file/) + for more information. + + An alternative and deprecated method is to configure the kubelet on that node + to look for static Pod manifests locally, using a command line argument. + To use the deprecated approach, start the kubelet with the `--pod-manifest-path=/etc/kubernetes/manifests/` argument. - On Fedora, edit `/etc/kubernetes/kubelet` to include this line: - - ``` - KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubernetes/manifests/" - ``` - - or add the `staticPodPath: ` field in the - [kubelet configuration file](/docs/reference/config-api/kubelet-config.v1beta1/). - + 1. Restart the kubelet. On Fedora, you would run: ```shell diff --git a/content/en/docs/tasks/debug/debug-application/debug-pods.md b/content/en/docs/tasks/debug/debug-application/debug-pods.md index f1aa831f3f718..7723a5c79097f 100644 --- a/content/en/docs/tasks/debug/debug-application/debug-pods.md +++ b/content/en/docs/tasks/debug/debug-application/debug-pods.md @@ -69,6 +69,34 @@ There are three things to check: * Try to manually pull the image to see if the image can be pulled. For example, if you use Docker on your PC, run `docker pull `. 
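+As an additional sketch (the Pod name `mypod` is illustrative), you can filter the
+events recorded for one Pod to spot failed pulls directly:
+
+```shell
+# List only the events whose involved object is this Pod; failed image
+# pulls typically appear with reasons such as Failed or BackOff.
+kubectl get events --field-selector involvedObject.name=mypod
+```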
+ +#### My pod stays terminating + +If a Pod is stuck in the `Terminating` state, it means that a deletion has been +issued for the Pod, but the control plane is unable to delete the Pod object. + +This typically happens if the Pod has a [finalizer](/docs/concepts/overview/working-with-objects/finalizers/) +and there is an [admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/) +installed in the cluster that prevents the control plane from removing the +finalizer. + +To identify this scenario, check if your cluster has any +ValidatingWebhookConfiguration or MutatingWebhookConfiguration that target +`UPDATE` operations for `pods` resources. + +If the webhook is provided by a third party: +- Make sure you are using the latest version. +- Disable the webhook for `UPDATE` operations. +- Report an issue with the corresponding provider. + +If you are the author of the webhook: +- For a mutating webhook, make sure it never changes immutable fields on + `UPDATE` operations. For example, changes to containers are usually not allowed. +- For a validating webhook, make sure that your validation policies only apply + to new changes. In other words, you should allow Pods with existing violations + to pass validation. This allows Pods that were created before the validating + webhook was installed to continue running. + #### My pod is crashing or otherwise unhealthy Once your pod has been scheduled, the methods described in diff --git a/content/en/docs/tasks/debug/debug-cluster/_index.md b/content/en/docs/tasks/debug/debug-cluster/_index.md index 3278fdfa7d4ce..a10b0bdcff7d9 100644 --- a/content/en/docs/tasks/debug/debug-cluster/_index.md +++ b/content/en/docs/tasks/debug/debug-cluster/_index.md @@ -14,6 +14,9 @@ problem you are experiencing. See the [application troubleshooting guide](/docs/tasks/debug/debug-application/) for tips on application debugging. You may also visit the [troubleshooting overview document](/docs/tasks/debug/) for more information. +For troubleshooting `kubectl`, refer to +[Troubleshooting kubectl](/docs/tasks/debug/debug-cluster/troubleshoot-kubectl/). + ## Listing your cluster @@ -249,14 +252,14 @@ This is an incomplete list of things that could go wrong, and how to adjust your - Network partition within cluster, or between cluster and users - Crashes in Kubernetes software - Data loss or unavailability of persistent storage (e.g.
GCE PD or AWS EBS volume) -- Operator error, for example misconfigured Kubernetes software or application software +- Operator error, for example, misconfigured Kubernetes software or application software ### Specific scenarios - API server VM shutdown or apiserver crashing - Results - unable to stop, update, or start new pods, services, replication controller - - existing pods and services should continue to work normally, unless they depend on the Kubernetes API + - existing pods and services should continue to work normally unless they depend on the Kubernetes API - API server backing storage lost - Results - the kube-apiserver component fails to start successfully and become healthy @@ -288,7 +291,7 @@ This is an incomplete list of things that could go wrong, and how to adjust your ### Mitigations -- Action: Use IaaS provider's automatic VM restarting feature for IaaS VMs +- Action: Use the IaaS provider's automatic VM restarting feature for IaaS VMs - Mitigates: Apiserver VM shutdown or apiserver crashing - Mitigates: Supporting services VM shutdown or crashes diff --git a/content/en/docs/tasks/debug/debug-cluster/crictl.md b/content/en/docs/tasks/debug/debug-cluster/crictl.md index e0c6932546a37..d3c10cb83b881 100644 --- a/content/en/docs/tasks/debug/debug-cluster/crictl.md +++ b/content/en/docs/tasks/debug/debug-cluster/crictl.md @@ -274,7 +274,7 @@ deleted by the Kubelet. ### Create a container Using `crictl` to create a container is useful for debugging container runtimes. -On a running Kubernetes cluster, the sandbox will eventually be stopped and +On a running Kubernetes cluster, the container will eventually be stopped and deleted by the Kubelet. 1. Pull a busybox image diff --git a/content/en/docs/tasks/debug/debug-cluster/local-debugging.md b/content/en/docs/tasks/debug/debug-cluster/local-debugging.md index 6e7d73841f244..2bbf59a22e362 100644 --- a/content/en/docs/tasks/debug/debug-cluster/local-debugging.md +++ b/content/en/docs/tasks/debug/debug-cluster/local-debugging.md @@ -26,7 +26,7 @@ running on a remote cluster locally. * Kubernetes cluster is installed * `kubectl` is configured to communicate with the cluster -* [Telepresence](https://www.telepresence.io/docs/latest/install/) is installed +* [Telepresence](https://www.telepresence.io/docs/latest/quick-start/) is installed diff --git a/content/en/docs/tasks/debug/debug-cluster/troubleshoot-kubectl.md b/content/en/docs/tasks/debug/debug-cluster/troubleshoot-kubectl.md new file mode 100644 index 0000000000000..2166d204b3776 --- /dev/null +++ b/content/en/docs/tasks/debug/debug-cluster/troubleshoot-kubectl.md @@ -0,0 +1,158 @@ +--- +title: "Troubleshooting kubectl" +content_type: task +weight: 10 +--- + + + +This documentation is about investigating and diagnosing +{{}} related issues. +If you encounter issues accessing `kubectl` or connecting to your cluster, this +document outlines various common scenarios and potential solutions to help +identify and address the likely cause. + + + +## {{% heading "prerequisites" %}} + +* You need to have a Kubernetes cluster. +* You also need to have `kubectl` installed - see [install tools](/docs/tasks/tools/#kubectl) + +## Verify kubectl setup + +Make sure you have installed and configured `kubectl` correctly on your local machine. +Check the `kubectl` version to ensure it is up-to-date and compatible with your cluster. 
+ +Check kubectl version: + +```shell +kubectl version +``` + +You'll see output similar to this: + +```console +Client Version: version.Info{Major:"1", Minor:"27", GitVersion:"v1.27.4",GitCommit:"fa3d7990104d7c1f16943a67f11b154b71f6a132", GitTreeState:"clean",BuildDate:"2023-07-19T12:20:54Z", GoVersion:"go1.20.6", Compiler:"gc", Platform:"linux/amd64"} +Kustomize Version: v5.0.1 +Server Version: version.Info{Major:"1", Minor:"27", GitVersion:"v1.27.3",GitCommit:"25b4e43193bcda6c7328a6d147b1fb73a33f1598", GitTreeState:"clean",BuildDate:"2023-06-14T09:47:40Z", GoVersion:"go1.20.5", Compiler:"gc", Platform:"linux/amd64"} + +``` + +If you see `Unable to connect to the server: dial tcp :8443: i/o timeout` +instead of `Server Version`, you need to troubleshoot kubectl connectivity with your cluster. + +Make sure you have installed kubectl by following the +[official documentation for installing kubectl](/docs/tasks/tools/#kubectl), and that you have +properly configured the `$PATH` environment variable. + +## Check kubeconfig + +The `kubectl` tool requires a `kubeconfig` file to connect to a Kubernetes cluster. The +`kubeconfig` file is usually located at `~/.kube/config`. Make sure +that you have a valid `kubeconfig` file. If you don't have a `kubeconfig` file, you can +obtain it from your Kubernetes administrator, or you can copy it from +`/etc/kubernetes/admin.conf` on your Kubernetes control plane node. If you have deployed your +Kubernetes cluster on a cloud platform and lost your `kubeconfig` file, you can +re-generate it using your cloud provider's tools. Refer to the cloud provider's +documentation for re-generating a `kubeconfig` file. + +Check if the `$KUBECONFIG` environment variable is configured correctly. You can set +the `$KUBECONFIG` environment variable or use the `--kubeconfig` parameter with kubectl +to specify the path of a `kubeconfig` file. + +## Check VPN connectivity + +If you are using a Virtual Private Network (VPN) to access your Kubernetes cluster, +make sure that your VPN connection is active and stable. Sometimes, VPN disconnections +can lead to connection issues with the cluster. Reconnect to the VPN and try accessing +the cluster again. + +## Authentication and authorization + +If you are using token-based authentication and kubectl returns an error +regarding the authentication token or the authentication server address, validate that the +Kubernetes authentication token and the authentication server address are configured +properly. + +If kubectl returns an authorization error, make sure that you are +using valid user credentials and that you have permission to access the resource +that you requested. + +## Verify contexts + +Kubernetes supports [multiple clusters and contexts](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). +Ensure that you are using the correct context to interact with your cluster. + +List available contexts: + +```shell +kubectl config get-contexts +``` + +Switch to the appropriate context: + +```shell +kubectl config use-context +``` + +## API server and load balancer + +The API server is the +central component of a Kubernetes cluster. If the API server or the load balancer that +runs in front of your API servers is not reachable or not responding, you won't be able +to interact with the cluster. + +Check if the API server's host is reachable by using the `ping` command. Check the cluster's +network connectivity and firewall.
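+For example, a quick reachability sketch (the URL in the comment is hypothetical;
+yours comes from your own kubeconfig):
+
+```shell
+# Print the API server URL from the active kubeconfig context,
+# for example: https://k8s-api.example.com:6443
+kubectl config view --minify --output 'jsonpath={.clusters[0].cluster.server}'
+
+# Then test basic reachability of that host:
+ping k8s-api.example.com
+```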
If you are using a cloud provider to deploy +the cluster, check your cloud provider's health check status for the cluster's +API server. + +Verify the status of the load balancer (if used) to ensure it is healthy and forwarding +traffic to the API server. + +## TLS problems + +The Kubernetes API server only serves HTTPS requests by default. In that case, TLS problems +may occur for various reasons, such as certificate expiry or chain-of-trust validity. + +You can find the TLS certificates in the kubeconfig file, usually located at +`~/.kube/config`. The `certificate-authority` attribute contains the CA certificate and the +`client-certificate` attribute contains the client certificate. + +Verify the expiry of these certificates: + +```shell +openssl x509 -noout -dates -in $(kubectl config view --minify --output 'jsonpath={.clusters[0].cluster.certificate-authority}') +``` + +output: +```console +notBefore=Sep 2 08:34:12 2023 GMT +notAfter=Aug 31 08:34:12 2033 GMT +``` + +```shell +openssl x509 -noout -dates -in $(kubectl config view --minify --output 'jsonpath={.users[0].user.client-certificate}') +``` + +output: +```console +notBefore=Sep 2 08:34:12 2023 GMT +notAfter=Sep 2 08:34:12 2026 GMT +``` + +## Verify kubectl helpers + +Some kubectl authentication helpers provide easy access to Kubernetes clusters. If you +have used such helpers and are facing connectivity issues, ensure that the necessary +configurations are still present. + +Check kubectl configuration for authentication details: + +```shell +kubectl config view +``` + +If you previously used a helper tool (for example, `kubectl-oidc-login`), ensure that it is still +installed and configured correctly. \ No newline at end of file diff --git a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md index 4967ff8f27cc2..c8f10829db234 100644 --- a/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/extend-kubernetes/configure-multiple-schedulers.md @@ -52,11 +52,13 @@ Save the file as `Dockerfile`, build the image and push it to a registry. This e pushes the image to [Google Container Registry (GCR)](https://cloud.google.com/container-registry/). For more details, please read the GCR -[documentation](https://cloud.google.com/container-registry/docs/). +[documentation](https://cloud.google.com/container-registry/docs/). Alternatively, +you can also use [Docker Hub](https://hub.docker.com/search?q=). For more details, +refer to the Docker Hub [documentation](https://docs.docker.com/docker-hub/repos/create/#create-a-repository). ```shell -docker build -t gcr.io/my-gcp-project/my-kube-scheduler:1.0 . -gcloud docker -- push gcr.io/my-gcp-project/my-kube-scheduler:1.0 +docker build -t gcr.io/my-gcp-project/my-kube-scheduler:1.0 . # The image name and the repository +gcloud docker -- push gcr.io/my-gcp-project/my-kube-scheduler:1.0 # used here are just examples ``` ## Define a Kubernetes Deployment for the scheduler @@ -76,7 +78,7 @@ to customize the behavior of your scheduler implementation. This configuration h the `kube-scheduler` during initialization with the `--config` option. The `my-scheduler-config` ConfigMap stores the configuration file. The Pod of the `my-scheduler` Deployment mounts the `my-scheduler-config` ConfigMap as a volume.
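+As a minimal sketch (not the complete ConfigMap shown on this page), the mounted
+configuration file could look like the following; `my-scheduler` is the scheduler
+name assumed throughout this example:
+
+```yaml
+apiVersion: kubescheduler.config.k8s.io/v1
+kind: KubeSchedulerConfiguration
+profiles:
+  - schedulerName: my-scheduler
+leaderElection:
+  leaderElect: false
+```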
In the aforementioned Scheduler Configuration, your scheduler implementation is represented via -a [KubeSchedulerProfile](/docs/reference/config-api/kube-scheduler-config.v1beta3/#kubescheduler-config-k8s-io-v1beta3-KubeSchedulerProfile). +a [KubeSchedulerProfile](/docs/reference/config-api/kube-scheduler-config.v1/#kubescheduler-config-k8s-io-v1-KubeSchedulerProfile). {{< note >}} To determine if a scheduler is responsible for scheduling a specific Pod, the `spec.schedulerName` field in a PodTemplate or Pod manifest must match the `schedulerName` field of the `KubeSchedulerProfile`. @@ -89,7 +91,7 @@ Also, note that you create a dedicated service account `my-scheduler` and bind t Please see the [kube-scheduler documentation](/docs/reference/command-line-tools-reference/kube-scheduler/) for detailed description of other command line arguments and -[Scheduler Configuration reference](/docs/reference/config-api/kube-scheduler-config.v1beta3/) for +[Scheduler Configuration reference](/docs/reference/config-api/kube-scheduler-config.v1/) for detailed description of other customizable `kube-scheduler` configurations. ## Run the second scheduler in the cluster diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md index 60536cf858347..6e665427cb1bb 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions.md @@ -749,8 +749,12 @@ validations are not supported by ratcheting under the implementation in Kubernet - `not` - any validations in a descendent of one of these fields - `x-kubernetes-validations` - For Kubernetes {{< skew currentVersion >}}, CRD validation rules](#validation-rules) are ignored by - ratcheting. This may change in later Kubernetes releases. + For Kubernetes 1.28, [CRD validation rules](#validation-rules) are ignored by + ratcheting. Starting with Alpha 2 in Kubernetes 1.29, `x-kubernetes-validations` + are ratcheted. + + Transition Rules are never ratcheted: only errors raised by rules that do not + use `oldSelf` will be automatically ratcheted if their values are unchanged. - `x-kubernetes-list-type` Errors arising from changing the list type of a subschema will not be ratcheted. For example adding `set` onto a list with duplicates will always @@ -767,19 +771,13 @@ validations are not supported by ratcheting under the implementation in Kubernet - `additionalProperties` To remove a previously specified `additionalProperties` validation will not be ratcheted. +- `metadata` + Errors arising from changes to fields within an object's `metadata` are not + ratcheted. +### Validation rules -## Validation rules - -{{< feature-state state="beta" for_k8s_version="v1.25" >}} - - -Validation rules are in beta since 1.25 and the `CustomResourceValidationExpressions` -[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled by default to -validate custom resource based on _validation rules_. You can disable this feature by explicitly -setting the `CustomResourceValidationExpressions` feature gate to `false`, for the -[kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) component. This -feature is only available if the schema is a [structural schema](#specifying-a-structural-schema).
+{{< feature-state state="stable" for_k8s_version="v1.29" >}} Validation rules use the [Common Expression Language (CEL)](https://github.com/google/cel-spec) to validate custom resource values. Validation rules are included in @@ -1177,6 +1175,34 @@ The `fieldPath` field does not support indexing arrays numerically. Setting `fieldPath` is optional. +#### The `optionalOldSelf` field {#field-optional-oldself} + +{{< feature-state state="alpha" for_k8s_version="v1.29" >}} + +The feature [CRDValidationRatcheting](#validation-ratcheting) must be enabled in order to +make use of this field. + +The `optionalOldSelf` field is a boolean field that alters the behavior of [Transition Rules](#transition-rules) described +below. Normally, a transition rule will not evaluate if `oldSelf` cannot be determined: +during object creation or when a new value is introduced in an update. + +If `optionalOldSelf` is set to true, then transition rules will always be +evaluated and the type of `oldSelf` be changed to a CEL [`Optional`](https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes) type. + +`optionalOldSelf` is useful in cases where schema authors would like a more +control tool [than provided by the default equality based behavior of](#validation-ratcheting) +to introduce newer, usually stricter constraints on new values, while still +allowing old values to be "grandfathered" or ratcheted using the older validation. + +Example Usage: + +| CEL | Description | +|-----------------------------------------|-------------| +| `self.foo == "foo" || (oldSelf.hasValue() && oldSelf.value().foo != "foo")` | Ratcheted rule. Once a value is set to "foo", it must stay foo. But if it existed before the "foo" constraint was introduced, it may use any value | +| [oldSelf.orValue(""), self].all(x, ["OldCase1", "OldCase2"].exists(case, x == case)) || ["NewCase1", "NewCase2"].exists(case, self == case) || ["NewCase"].has(self)` | "Ratcheted validation for removed enum cases if oldSelf used them" | +| oldSelf.optMap(o, o.size()).orValue(0) < 4 || self.size() >= 4 | Ratcheted validation of newly increased minimum map or list size | + + #### Validation functions {#available-validation-functions} Functions available include: diff --git a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md index b052a04826933..3f94e249eb2dc 100644 --- a/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -1,6 +1,5 @@ --- title: Coarse Parallel Processing Using a Work Queue -min-kubernetes-server-version: v1.8 content_type: task weight: 20 --- @@ -8,7 +7,7 @@ weight: 20 -In this example, we will run a Kubernetes Job with multiple parallel +In this example, you will run a Kubernetes Job with multiple parallel worker processes. In this example, as each pod is created, it picks up one unit of work @@ -16,7 +15,7 @@ from a task queue, completes it, deletes it from the queue, and exits. Here is an overview of the steps in this example: -1. **Start a message queue service.** In this example, we use RabbitMQ, but you could use another +1. **Start a message queue service.** In this example, you use RabbitMQ, but you could use another one. In practice you would set up a message queue service once and reuse it for many jobs. 1. **Create a queue, and fill it with messages.** Each message represents one task to be done. 
In this example, a message is an integer that we will do a lengthy computation on. @@ -26,11 +25,16 @@ Here is an overview of the steps in this example: ## {{% heading "prerequisites" %}} -Be familiar with the basic, +You should already be familiar with the basic, non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). {{< include "task-tutorial-prereqs.md" >}} +You will need a container image registry where you can upload images to run in your cluster. + +This task example also assumes that you have Docker installed locally. + + ## Starting a message queue service @@ -43,21 +47,20 @@ cluster and reuse it for many jobs, as well as for long-running services. Start RabbitMQ as follows: ```shell -kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.3/examples/celery-rabbitmq/rabbitmq-service.yaml +# make a Service for the StatefulSet to use +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-service.yaml ``` ``` service "rabbitmq-service" created ``` ```shell -kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.3/examples/celery-rabbitmq/rabbitmq-controller.yaml +kubectl create -f https://kubernetes.io/examples/application/job/rabbitmq-statefulset.yaml ``` ``` -replicationcontroller "rabbitmq-controller" created +statefulset "rabbitmq" created ``` -We will only use the rabbitmq part from the [celery-rabbitmq example](https://github.com/kubernetes/kubernetes/tree/release-1.3/examples/celery-rabbitmq). - ## Testing the message queue service Now, we can experiment with accessing the message queue. We will @@ -68,7 +71,7 @@ First create a temporary interactive Pod. ```shell # Create a temporary interactive container -kubectl run -i --tty temp --image ubuntu:18.04 +kubectl run -i --tty temp --image ubuntu:22.04 ``` ``` Waiting for pod default/temp-loe07 to be running, status is Pending, pod ready: false @@ -77,76 +80,82 @@ Waiting for pod default/temp-loe07 to be running, status is Pending, pod ready: Note that your pod name and command prompt will be different. -Next install the `amqp-tools` so we can work with message queues. +Next install the `amqp-tools` so you can work with message queues. +The next commands show what you need to run inside the interactive shell in that Pod: ```shell -# Install some tools -root@temp-loe07:/# apt-get update -.... [ lots of output ] .... -root@temp-loe07:/# apt-get install -y curl ca-certificates amqp-tools python dnsutils -.... [ lots of output ] .... +apt-get update && apt-get install -y curl ca-certificates amqp-tools python dnsutils ``` -Later, we will make a docker image that includes these packages. +Later, you will make a container image that includes these packages. -Next, we will check that we can discover the rabbitmq service: +Next, you will check that you can discover the Service for RabbitMQ: ``` +# Run these commands inside the Pod # Note the rabbitmq-service has a DNS name, provided by Kubernetes: - -root@temp-loe07:/# nslookup rabbitmq-service +nslookup rabbitmq-service +``` +``` Server: 10.0.0.10 Address: 10.0.0.10#53 Name: rabbitmq-service.default.svc.cluster.local Address: 10.0.147.152 - -# Your address will vary. ``` +(the IP addresses will vary) -If Kube-DNS is not set up correctly, the previous step may not work for you. -You can also find the service IP in an env var: +If the kube-dns addon is not set up correctly, the previous step may not work for you. 
+You can also find the IP address for that Service in an environment variable: +```shell +# run this check inside the Pod +env | grep RABBITMQ_SERVICE | grep HOST +``` ``` -# env | grep RABBIT | grep HOST RABBITMQ_SERVICE_SERVICE_HOST=10.0.147.152 -# Your address will vary. ``` +(the IP address will vary) -Next we will verify we can create a queue, and publish and consume messages. +Next, you will verify that you can create a queue, and publish and consume messages. ```shell +# Run these commands inside the Pod # In the next line, rabbitmq-service is the hostname where the rabbitmq-service # can be reached. 5672 is the standard port for rabbitmq. - -root@temp-loe07:/# export BROKER_URL=amqp://guest:guest@rabbitmq-service:5672 +export BROKER_URL=amqp://guest:guest@rabbitmq-service:5672 # If you could not resolve "rabbitmq-service" in the previous step, # then use this command instead: -# root@temp-loe07:/# BROKER_URL=amqp://guest:guest@$RABBITMQ_SERVICE_SERVICE_HOST:5672 +BROKER_URL=amqp://guest:guest@$RABBITMQ_SERVICE_SERVICE_HOST:5672 # Now create a queue: -root@temp-loe07:/# /usr/bin/amqp-declare-queue --url=$BROKER_URL -q foo -d +/usr/bin/amqp-declare-queue --url=$BROKER_URL -q foo -d +``` +``` foo +``` - -# Publish one message to it: - -root@temp-loe07:/# /usr/bin/amqp-publish --url=$BROKER_URL -r foo -p -b Hello +Publish one message to the queue: +```shell +/usr/bin/amqp-publish --url=$BROKER_URL -r foo -p -b Hello # And get it back. -root@temp-loe07:/# /usr/bin/amqp-consume --url=$BROKER_URL -q foo -c 1 cat && echo +/usr/bin/amqp-consume --url=$BROKER_URL -q foo -c 1 cat && echo 1>&2 +``` +``` Hello -root@temp-loe07:/# ``` -In the last command, the `amqp-consume` tool takes one message (`-c 1`) -from the queue, and passes that message to the standard input of an arbitrary command. In this case, the program `cat` prints out the characters read from standard input, and the echo adds a carriage -return so the example is readable. +In the last command, the `amqp-consume` tool took one message (`-c 1`) +from the queue, and passed that message to the standard input of an arbitrary command. +In this case, the program `cat` printed out the characters read from standard input, and +the `echo` added a trailing newline so the example is readable. -## Filling the Queue with tasks +## Fill the queue with tasks -Now let's fill the queue with some "tasks". In our example, our tasks are strings to be +Now, fill the queue with some simulated tasks. In this example, the tasks are strings to be printed. In a practice, the content of the messages might be: @@ -157,18 +166,22 @@ In a practice, the content of the messages might be: - configuration parameters to a simulation - frame numbers of a scene to be rendered -In practice, if there is large data that is needed in a read-only mode by all pods -of the Job, you will typically put that in a shared file system like NFS and mount -that readonly on all the pods, or the program in the pod will natively read data from -a cluster file system like HDFS. +If there is large data that is needed in a read-only mode by all pods +of the Job, you typically put that in a shared file system like NFS and mount +that readonly on all the pods, or write the program in the pod so that it can natively read +data from a cluster file system (for example: HDFS). -For our example, we will create the queue and fill it using the amqp command line tools. -In practice, you might write a program to fill the queue using an amqp client library.
+For this example, you will create the queue and fill it using the AMQP command line tools. +In practice, you might write a program to fill the queue using an AMQP client library. ```shell +# Run this on your computer, not in the Pod /usr/bin/amqp-declare-queue --url=$BROKER_URL -q job1 -d +``` +``` job1 ``` +Add items to the queue: ```shell for f in apple banana cherry date fig grape lemon melon do @@ -176,14 +189,14 @@ do done ``` -So, we filled the queue with 8 messages. +You added 8 messages to the queue. -## Create an Image +## Create a container image -Now we are ready to create an image that we will run as a job. +Now you are ready to create an image that you will run as a Job. -We will use the `amqp-consume` utility to read the message -from the queue and run our actual program. Here is a very simple +The job will use the `amqp-consume` utility to read the message +from the queue and run the actual work. Here is a very simple example program: {{% code_sample language="python" file="application/job/rabbitmq/worker.py" %}} @@ -194,9 +207,7 @@ Give the script execution permission: chmod +x worker.py ``` -Now, build an image. If you are working in the source -tree, then change directory to `examples/job/work-queue-1`. -Otherwise, make a temporary directory, change to it, +Now, build an image. Make a temporary directory, change to it, download the [Dockerfile](/examples/application/job/rabbitmq/Dockerfile), and [worker.py](/examples/application/job/rabbitmq/worker.py). In either case, build the image with this command: @@ -214,33 +225,27 @@ docker tag job-wq-1 /job-wq-1 docker push /job-wq-1 ``` -If you are using [Google Container -Registry](https://cloud.google.com/tools/container-registry/), tag -your app image with your project ID, and push to GCR. Replace -`` with your project ID. - -```shell -docker tag job-wq-1 gcr.io//job-wq-1 -gcloud docker -- push gcr.io//job-wq-1 -``` +If you are using an alternative container image registry, tag the +image and push it there instead. ## Defining a Job -Here is a job definition. You'll need to make a copy of the Job and edit the -image to match the name you used, and call it `./job.yaml`. - +Here is a manifest for a Job. You'll need to make a copy of the Job manifest +(call it `./job.yaml`), +and edit the name of the container image to match the name you used. {{% code_sample file="application/job/rabbitmq/job.yaml" %}} In this example, each pod works on one item from the queue and then exits. So, the completion count of the Job corresponds to the number of work items -done. So we set, `.spec.completions: 8` for the example, since we put 8 items in the queue. +done. That is why the example manifest has `.spec.completions` set to `8`. ## Running the Job -So, now run the Job: +Now, run the Job: ```shell +# this assumes you downloaded and then edited the manifest already kubectl apply -f ./job.yaml ``` @@ -264,14 +269,14 @@ Labels: controller-uid=41d75705-92df-11e7-b85e-fa163ee3c11f Annotations: Parallelism: 2 Completions: 8 -Start Time: Wed, 06 Sep 2017 16:42:02 +0800 +Start Time: Wed, 06 Sep 2022 16:42:02 +0000 Pods Statuses: 0 Running / 8 Succeeded / 0 Failed Pod Template: Labels: controller-uid=41d75705-92df-11e7-b85e-fa163ee3c11f job-name=job-wq-1 Containers: c: - Image: gcr.io/causal-jigsaw-637/job-wq-1 + Image: container-registry.example/causal-jigsaw-637/job-wq-1 Port: Environment: BROKER_URL: amqp://guest:guest@rabbitmq-service:5672 @@ -293,30 +298,31 @@ Events: -All the pods for that Job succeeded. Yay. 
- +All the pods for that Job succeeded! You're done. ## Alternatives -This approach has the advantage that you -do not need to modify your "worker" program to be aware that there is a work queue. +This approach has the advantage that you do not need to modify your "worker" program to be +aware that there is a work queue. You can include the worker program unmodified in your container +image. -It does require that you run a message queue service. +Using this approach does require that you run a message queue service. If running a queue service is inconvenient, you may want to consider one of the other [job patterns](/docs/concepts/workloads/controllers/job/#job-patterns). This approach creates a pod for every work item. If your work items only take a few seconds, though, creating a Pod for every work item may add a lot of overhead. Consider another -[example](/docs/tasks/job/fine-parallel-processing-work-queue/), that executes multiple work items per Pod. +design, such as in the [fine parallel work queue example](/docs/tasks/job/fine-parallel-processing-work-queue/), +that executes multiple work items per Pod. -In this example, we use the `amqp-consume` utility to read the message -from the queue and run our actual program. This has the advantage that you +In this example, you used the `amqp-consume` utility to read the message +from the queue and run the actual program. This has the advantage that you do not need to modify your program to be aware of the queue. -A [different example](/docs/tasks/job/fine-parallel-processing-work-queue/), shows how to -communicate with the work queue using a client library. +The [fine parallel work queue example](/docs/tasks/job/fine-parallel-processing-work-queue/) +shows how to communicate with the work queue using a client library. ## Caveats @@ -327,11 +333,11 @@ If the number of completions is set to more than the number of items in the queu then the Job will not appear to be completed, even though all items in the queue have been processed. It will start additional pods which will block waiting for a message. +You would need to make your own mechanism to spot when there is work +to do and measure the size of the queue, setting the number of completions to match. There is an unlikely race with this pattern. If the container is killed in between the time -that the message is acknowledged by the amqp-consume command and the time that the container +that the message is acknowledged by the `amqp-consume` command and the time that the container exits with success, or if the node crashes before the kubelet is able to post the success of the pod -back to the api-server, then the Job will not appear to be complete, even though all items +back to the API server, then the Job will not appear to be complete, even though all items in the queue have been processed. - - diff --git a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md index 258e90ecf04b9..b865f49fd742d 100644 --- a/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/en/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -1,23 +1,23 @@ --- title: Fine Parallel Processing Using a Work Queue content_type: task -min-kubernetes-server-version: v1.8 weight: 30 --- -In this example, we will run a Kubernetes Job with multiple parallel -worker processes in a given pod. 
+In this example, you will run a Kubernetes Job that runs multiple parallel +tasks as worker processes, each running as a separate Pod. In this example, as each pod is created, it picks up one unit of work from a task queue, processes it, and repeats until the end of the queue is reached. Here is an overview of the steps in this example: -1. **Start a storage service to hold the work queue.** In this example, we use Redis to store - our work items. In the previous example, we used RabbitMQ. In this example, we use Redis and - a custom work-queue client library because AMQP does not provide a good way for clients to +1. **Start a storage service to hold the work queue.** In this example, you will use Redis to store + work items. In the [previous example](/docs/tasks/job/coarse-parallel-processing-work-queue), + you used RabbitMQ. In this example, you will use Redis and a custom work-queue client library; + this is because AMQP does not provide a good way for clients to detect when a finite-length work queue is empty. In practice you would set up a store such as Redis once and reuse it for the work queues of many jobs, and other things. 1. **Create a queue, and fill it with messages.** Each message represents one task to be done. In @@ -30,6 +30,13 @@ Here is an overview of the steps in this example: {{< include "task-tutorial-prereqs.md" >}} +You will need a container image registry where you can upload images to run in your cluster. +The example uses [Docker Hub](https://hub.docker.com/), but you could adapt it to a different +container image registry. + +This task example also assumes that you have Docker installed locally. You use Docker to +build container images. + Be familiar with the basic, @@ -39,7 +46,7 @@ non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). ## Starting Redis -For this example, for simplicity, we will start a single instance of Redis. +For this example, for simplicity, you will start a single instance of Redis. See the [Redis Example](https://github.com/kubernetes/examples/tree/master/guestbook) for an example of deploying Redis scalably and redundantly. @@ -53,23 +60,27 @@ You could also download the following files directly: - [`worker.py`](/examples/application/job/redis/worker.py) -## Filling the Queue with tasks +## Filling the queue with tasks -Now let's fill the queue with some "tasks". In our example, our tasks are strings to be +Now let's fill the queue with some "tasks". In this example, the tasks are strings to be printed. Start a temporary interactive pod for running the Redis CLI. ```shell kubectl run -i --tty temp --image redis --command "/bin/sh" +``` +``` Waiting for pod default/redis2-c7h78 to be running, status is Pending, pod ready: false Hit enter for command prompt ``` -Now hit enter, start the redis CLI, and create a list with some work items in it. +Now hit enter, start the Redis CLI, and create a list with some work items in it. +```shell +redis-cli -h redis ``` -# redis-cli -h redis +```console redis:6379> rpush job2 "apple" (integer) 1 redis:6379> rpush job2 "banana" @@ -100,21 +111,21 @@ redis:6379> lrange job2 0 -1 9) "orange" ``` -So, the list with key `job2` will be our work queue. +So, the list with key `job2` will be the work queue. Note: if you do not have Kube DNS setup correctly, you may need to change the first step of the above block to `redis-cli -h $REDIS_SERVICE_HOST`. -## Create an Image +## Create a container image {#create-an-image} -Now we are ready to create an image that we will run. 
+Now you are ready to create an image that will process the work in that queue. -We will use a python worker program with a redis client to read +You're going to use a Python worker program with a Redis client to read the messages from the message queue. A simple Redis work queue client library is provided, -called rediswq.py ([Download](/examples/application/job/redis/rediswq.py)). +called `rediswq.py` ([Download](/examples/application/job/redis/rediswq.py)). The "worker" program in each Pod of the Job uses the work queue client library to get work. Here it is: @@ -124,7 +135,7 @@ client library to get work. Here it is: You could also download [`worker.py`](/examples/application/job/redis/worker.py), [`rediswq.py`](/examples/application/job/redis/rediswq.py), and [`Dockerfile`](/examples/application/job/redis/Dockerfile) files, then build -the image: +the container image. Here's an example using Docker to do the image build: ```shell docker build -t job-wq-2 . @@ -144,46 +155,40 @@ docker push /job-wq-2 You need to push to a public repository or [configure your cluster to be able to access your private repository](/docs/concepts/containers/images/). -If you are using [Google Container -Registry](https://cloud.google.com/tools/container-registry/), tag -your app image with your project ID, and push to GCR. Replace -`` with your project ID. - -```shell -docker tag job-wq-2 gcr.io//job-wq-2 -gcloud docker -- push gcr.io//job-wq-2 -``` - ## Defining a Job -Here is the job definition: +Here is a manifest for the Job you will create: {{% code_sample file="application/job/redis/job.yaml" %}} -Be sure to edit the job template to +{{< note >}} +Be sure to edit the manifest to change `gcr.io/myproject` to your own path. +{{< /note >}} In this example, each pod works on several items from the queue and then exits when there are no more items. Since the workers themselves detect when the workqueue is empty, and the Job controller does not know about the workqueue, it relies on the workers to signal when they are done working. -The workers signal that the queue is empty by exiting with success. So, as soon as any worker -exits with success, the controller knows the work is done, and the Pods will exit soon. -So, we set the completion count of the Job to 1. The job controller will wait for the other pods to complete -too. - +The workers signal that the queue is empty by exiting with success. So, as soon as **any** worker +exits with success, the controller knows the work is done, and that the Pods will exit soon. +So, you need to set the completion count of the Job to 1. The job controller will wait for +the other pods to complete too. ## Running the Job So, now run the Job: ```shell +# this assumes you downloaded and then edited the manifest already kubectl apply -f ./job.yaml ``` -Now wait a bit, then check on the job. 
+Now wait a bit, then check on the Job:

```shell
kubectl describe jobs/job-wq-2
+```
+```
Name:             job-wq-2
Namespace:        default
Selector:         controller-uid=b1c7e4e3-92e1-11e7-b85e-fa163ee3c11f
@@ -192,14 +197,14 @@ Labels:           controller-uid=b1c7e4e3-92e1-11e7-b85e-fa163ee3c11f
 Annotations:      
 Parallelism:      2
 Completions:      
-Start Time:       Mon, 11 Jan 2016 17:07:59 -0800
+Start Time:       Mon, 11 Jan 2022 17:07:59 +0000
 Pods Statuses:    1 Running / 0 Succeeded / 0 Failed
 Pod Template:
   Labels:       controller-uid=b1c7e4e3-92e1-11e7-b85e-fa163ee3c11f
                 job-name=job-wq-2
   Containers:
    c:
-    Image:              gcr.io/exampleproject/job-wq-2
+    Image:              container-registry.example/exampleproject/job-wq-2
     Port:
     Environment:
     Mounts:
@@ -227,7 +232,7 @@ Working on date
 Working on lemon
 ```
 
-As you can see, one of our pods worked on several work units.
+As you can see, one of the pods for this Job worked on several work units.
 
@@ -238,8 +243,7 @@ want to consider one of the other
 [job patterns](/docs/concepts/workloads/controllers/job/#job-patterns).
 
 If you have a continuous stream of background processing work to run, then
-consider running your background workers with a `ReplicaSet` instead,
+consider running your background workers with a ReplicaSet instead,
 and consider running a background processing library such as
 [https://github.com/resque/resque](https://github.com/resque/resque).
-
diff --git a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md
index dd784463810ce..2bae4e6c9bff5 100644
--- a/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md
+++ b/content/en/docs/tasks/manage-hugepages/scheduling-hugepages.md
@@ -14,15 +14,45 @@ by applications in a Pod. This page describes how users can consume huge pages.
 
 ## {{% heading "prerequisites" %}}
 
+Kubernetes nodes must
+[pre-allocate huge pages](https://www.kernel.org/doc/html/latest/admin-guide/mm/hugetlbpage.html)
+in order for the node to report its huge page capacity.
 
-1. Kubernetes nodes must pre-allocate huge pages in order for the node to report
-   its huge page capacity. A node can pre-allocate huge pages for multiple
-   sizes.
+A node can pre-allocate huge pages for multiple sizes. For instance,
+the following line in `/etc/default/grub` allocates `2*1GiB` of 1 GiB pages
+and `512*2MiB` of 2 MiB pages:
+
+```
+GRUB_CMDLINE_LINUX="hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512"
+```
 
 The nodes will automatically discover and report all huge page resources
 as schedulable resources.
 
+When you describe the Node, you should see something similar to the following
+in the `Capacity` and `Allocatable` sections:
+
+```
+Capacity:
+  cpu: ...
+  ephemeral-storage: ...
+  hugepages-1Gi: 2Gi
+  hugepages-2Mi: 1Gi
+  memory: ...
+  pods: ...
+Allocatable:
+  cpu: ...
+  ephemeral-storage: ...
+  hugepages-1Gi: 2Gi
+  hugepages-2Mi: 1Gi
+  memory: ...
+  pods: ...
+```
+{{< note >}}
+For dynamically allocated pages (after boot), the kubelet needs to be restarted
+for the new allocations to be reflected.
+{{< /note >}}
diff --git a/content/en/docs/tasks/network/extend-service-ip-ranges.md b/content/en/docs/tasks/network/extend-service-ip-ranges.md
new file mode 100644
index 0000000000000..fdce843c68c41
--- /dev/null
+++ b/content/en/docs/tasks/network/extend-service-ip-ranges.md
@@ -0,0 +1,184 @@
+---
+reviewers:
+- thockin
+- dwinship
+min-kubernetes-server-version: v1.29
+title: Extend Service IP Ranges
+content_type: task
+---
+
+
+{{< feature-state state="alpha" for_k8s_version="v1.29" >}}
+
+This document shares how to extend the existing Service IP range assigned to a cluster.
+
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}}
+
+{{< version-check >}}
+
+
+
+## API
+
+Kubernetes clusters with kube-apiservers that have enabled the `MultiCIDRServiceAllocator`
+[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) and the `networking.k8s.io/v1alpha1` API
+will create a new ServiceCIDR object that takes the well-known name `kubernetes`, and that uses an IP address range
+based on the value of the `--service-cluster-ip-range` command line argument to kube-apiserver.
+
+```sh
+kubectl get servicecidr
+```
+```
+NAME         CIDRS          AGE
+kubernetes   10.96.0.0/28   17d
+```
+
+The well-known `kubernetes` Service, which exposes the kube-apiserver endpoint to the Pods, calculates
+the first IP address from the default ServiceCIDR range and uses that IP address as its
+cluster IP address.
+
+```sh
+kubectl get service kubernetes
+```
+```
+NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
+kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17d
+```
+
+The default Service, in this case, uses the ClusterIP 10.96.0.1, which has a corresponding IPAddress object.
+
+```sh
+kubectl get ipaddress 10.96.0.1
+```
+```
+NAME        PARENTREF
+10.96.0.1   services/default/kubernetes
+```
+
+The ServiceCIDRs are protected with {{< glossary_tooltip text="finalizers" term_id="finalizer" >}}, to avoid leaving Service ClusterIPs orphaned;
+the finalizer is only removed if there is another subnet that contains the existing IPAddresses or
+there are no IPAddresses belonging to the subnet.
+
+## Extend the number of available IPs for Services
+
+There are cases where users will need to increase the number of addresses available to Services. Previously, increasing the Service range was a disruptive operation that could also cause data loss. With this new feature, users only need to add a new ServiceCIDR to increase the number of available addresses.
+
+### Adding a new ServiceCIDR
+
+On a cluster with a 10.96.0.0/28 range for Services, there are only 2^(32-28) - 2 = 14 IP addresses available. The `kubernetes.default` Service is always created; for this example, that leaves you with only 13 possible Services.
+
+```sh
+for i in $(seq 1 13); do kubectl create service clusterip "test-$i" --tcp 80 -o json | jq -r .spec.clusterIP; done
+```
+```
+10.96.0.11
+10.96.0.5
+10.96.0.12
+10.96.0.13
+10.96.0.14
+10.96.0.2
+10.96.0.3
+10.96.0.4
+10.96.0.6
+10.96.0.7
+10.96.0.8
+10.96.0.9
+error: failed to create ClusterIP service: Internal error occurred: failed to allocate a serviceIP: range is full
+```
+
+You can increase the number of IP addresses available for Services by creating a new ServiceCIDR
+that extends or adds new IP address ranges.
+
+```sh
+cat 
@@ -267,7 +267,7 @@ kubectl get csr my-svc.my-namespace -o json | \
 ```
 
 {{< note >}}
-This uses the command line tool [`jq`](https://stedolan.github.io/jq/) to populate the base64-encoded
+This uses the command line tool [`jq`](https://jqlang.github.io/jq/) to populate the base64-encoded
 content in the `.status.certificate` field.
 If you do not have `jq`, you can also save the JSON output to a file, populate this field manually, and
 upload the resulting file.
diff --git a/content/en/docs/tasks/tools/included/verify-kubectl.md b/content/en/docs/tasks/tools/included/verify-kubectl.md
index 78246912657e6..b4eb0fe08d2a3 100644
--- a/content/en/docs/tasks/tools/included/verify-kubectl.md
+++ b/content/en/docs/tasks/tools/included/verify-kubectl.md
@@ -23,16 +23,30 @@ kubectl cluster-info
 
 If you see a URL response, kubectl is correctly configured to access your cluster.
 
-If you see a message similar to the following, kubectl is not configured correctly or is not able to connect to a Kubernetes cluster.
+If you see a message similar to the following, kubectl is not configured correctly
+or is not able to connect to a Kubernetes cluster.
 
 ```
 The connection to the server was refused - did you specify the right host or port?
 ```
 
-For example, if you are intending to run a Kubernetes cluster on your laptop (locally), you will need a tool like Minikube to be installed first and then re-run the commands stated above.
+For example, if you are intending to run a Kubernetes cluster on your laptop (locally),
+you will need a tool like Minikube to be installed first and then re-run the commands stated above.
 
-If kubectl cluster-info returns the url response but you can't access your cluster, to check whether it is configured properly, use:
+If `kubectl cluster-info` returns the URL response but you can't access your cluster,
+to check whether it is configured properly, use:
 
 ```shell
 kubectl cluster-info dump
-```
\ No newline at end of file
+```
+
+### Troubleshooting the 'No Auth Provider Found' error message {#no-auth-provider-found}
+
+In Kubernetes 1.26, kubectl removed the built-in authentication for the following cloud
+providers' managed Kubernetes offerings. These providers have released kubectl plugins
+to provide the cloud-specific authentication. For instructions, refer to the following provider documentation:
+
+* Azure AKS: [kubelogin plugin](https://azure.github.io/kubelogin/)
+* Google Kubernetes Engine: [gke-gcloud-auth-plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin)
+
+(There could also be other reasons to see the same error message, unrelated to that change.)
diff --git a/content/en/docs/tasks/tools/install-kubectl-linux.md b/content/en/docs/tasks/tools/install-kubectl-linux.md
index 684f904b14bda..ef7a87889a935 100644
--- a/content/en/docs/tasks/tools/install-kubectl-linux.md
+++ b/content/en/docs/tasks/tools/install-kubectl-linux.md
@@ -192,6 +192,38 @@ To upgrade kubectl to another minor release, you'll need to bump the version in
    sudo yum install -y kubectl
    ```
 
+{{% /tab %}}
+
+{{% tab name="SUSE-based distributions" %}}
+
+1. Add the Kubernetes `zypper` repository. If you want to use a Kubernetes version
+   different from {{< param "version" >}}, replace {{< param "version" >}} with
+   the desired minor version in the command below.
+
+   ```bash
+   # This overwrites any existing configuration in /etc/zypp/repos.d/kubernetes.repo
+   cat <<EOF | sudo tee /etc/zypp/repos.d/kubernetes.repo
+   [kubernetes]
+   name=Kubernetes
+   baseurl=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/
+   enabled=1
+   gpgcheck=1
+   gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key
+   EOF
+   ```
+
+{{< note >}}
+To upgrade kubectl to another minor release, you'll need to bump the version in `/etc/zypp/repos.d/kubernetes.repo`
+before running `zypper update`. This procedure is described in more detail in
+[Changing The Kubernetes Package Repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/).
+{{< /note >}}
+
+2. Install kubectl using `zypper`:
+
+   ```bash
+   sudo zypper install -y kubectl
+   ```
+
 {{% /tab %}}
 {{< /tabs >}}
diff --git a/content/en/docs/tasks/tools/install-kubectl-macos.md b/content/en/docs/tasks/tools/install-kubectl-macos.md
index 14a22fee587f4..229cff151fcf2 100644
--- a/content/en/docs/tasks/tools/install-kubectl-macos.md
+++ b/content/en/docs/tasks/tools/install-kubectl-macos.md
@@ -123,10 +123,10 @@ The following methods exist for installing kubectl on macOS:
       kubectl version --client --output=yaml
    ```
 
-1. After installing the plugin, clean up the installation files:
+1. After installing and validating kubectl, delete the checksum file:
 
    ```bash
-   rm kubectl kubectl.sha256
+   rm kubectl.sha256
    ```
 
 ### Install with Homebrew on macOS
diff --git a/content/en/docs/tutorials/_index.md b/content/en/docs/tutorials/_index.md
index 75d1ac2fc865c..97a3bacbdf6a5 100644
--- a/content/en/docs/tutorials/_index.md
+++ b/content/en/docs/tutorials/_index.md
@@ -20,31 +20,24 @@ Before walking through each tutorial, you may want to bookmark the
 ## Basics
 
 * [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) is an in-depth interactive tutorial that helps you understand the Kubernetes system and try out some basic Kubernetes features.
- * [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) - * [Hello Minikube](/docs/tutorials/hello-minikube/) ## Configuration * [Example: Configuring a Java Microservice](/docs/tutorials/configuration/configure-java-microservice/) - * [Configuring Redis Using a ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/) ## Stateless Applications * [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/) - * [Example: Deploying PHP Guestbook application with Redis](/docs/tutorials/stateless-application/guestbook/) ## Stateful Applications * [StatefulSet Basics](/docs/tutorials/stateful-application/basic-stateful-set/) - * [Example: WordPress and MySQL with Persistent Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) - * [Example: Deploying Cassandra with Stateful Sets](/docs/tutorials/stateful-application/cassandra/) - * [Running ZooKeeper, A CP Distributed System](/docs/tutorials/stateful-application/zookeeper/) ## Services diff --git a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md index c408555958c89..024dc04676ebd 100644 --- a/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/en/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -61,7 +61,7 @@ kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/main/conte Examine the contents of the Redis pod manifest and note the following: * A volume named `config` is created by `spec.volumes[1]` -* The `key` and `path` under `spec.volumes[1].items[0]` exposes the `redis-config` key from the +* The `key` and `path` under `spec.volumes[1].configMap.items[0]` exposes the `redis-config` key from the `example-redis-config` ConfigMap as a file named `redis.conf` on the `config` volume. * The `config` volume is then mounted at `/redis-master` by `spec.containers[0].volumeMounts[1]`. diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index d12775eb2352f..1e47b76d8aa4a 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -60,7 +60,7 @@ Now, switch back to the terminal where you ran `minikube start`. The `dashboard` command enables the dashboard add-on and opens the proxy in the default web browser. You can create Kubernetes resources on the dashboard such as Deployment and Service. -If you are running in an environment as root, see [Open Dashboard with URL](#open-dashboard-with-url). +To find out how to avoid directly invoking the browser from the terminal and get a URL for the web dashboard, see the "URL copy and paste" tab. By default, the dashboard is only accessible from within the internal Kubernetes virtual network. The `dashboard` command creates a temporary proxy to make the dashboard accessible from outside the Kubernetes virtual network. @@ -73,7 +73,7 @@ You can run the `dashboard` command again to create another proxy to access the {{% /tab %}} {{% tab name="URL copy and paste" %}} -If you don't want minikube to open a web browser for you, run the dashboard command with the +If you don't want minikube to open a web browser for you, run the `dashboard` subcommand with the `--url` flag. `minikube` outputs a URL that you can open in the browser you prefer. 
Open a **new** terminal, and run: @@ -82,7 +82,7 @@ Open a **new** terminal, and run: minikube dashboard --url ``` -Now, switch back to the terminal where you ran `minikube start`. +Now, you can use this URL and switch back to the terminal where you ran `minikube start`. {{% /tab %}} {{< /tabs >}} @@ -238,7 +238,7 @@ The minikube tool includes a set of built-in {{< glossary_tooltip text="addons" storage-provisioner-gluster: disabled ``` -2. Enable an addon, for example, `metrics-server`: +1. Enable an addon, for example, `metrics-server`: ```shell minikube addons enable metrics-server @@ -250,7 +250,7 @@ The minikube tool includes a set of built-in {{< glossary_tooltip text="addons" The 'metrics-server' addon is enabled ``` -3. View the Pod and Service you created by installing that addon: +1. View the Pod and Service you created by installing that addon: ```shell kubectl get pod,svc -n kube-system @@ -279,7 +279,26 @@ The minikube tool includes a set of built-in {{< glossary_tooltip text="addons" service/monitoring-influxdb ClusterIP 10.111.169.94 8083/TCP,8086/TCP 26s ``` -4. Disable `metrics-server`: +1. Check the output from `metrics-server`: + + ```shell + kubectl top pods + ``` + + The output is similar to: + + ``` + NAME CPU(cores) MEMORY(bytes) + hello-node-ccf4b9788-4jn97 1m 6Mi + ``` + + If you see the following message, wait, and try again: + + ``` + error: Metrics API not available + ``` + +1. Disable `metrics-server`: ```shell minikube addons disable metrics-server diff --git a/content/en/docs/tutorials/kubernetes-basics/_index.html b/content/en/docs/tutorials/kubernetes-basics/_index.html index 551f638f9a404..f748fda0f3ff0 100644 --- a/content/en/docs/tutorials/kubernetes-basics/_index.html +++ b/content/en/docs/tutorials/kubernetes-basics/_index.html @@ -24,8 +24,8 @@

    Kubernetes Basics

    -

    This tutorial provides a walkthrough of the basics of the Kubernetes cluster orchestration system. Each module contains some background information on major Kubernetes features and concepts, and includes an interactive online tutorial. These interactive tutorials let you manage a simple cluster and its containerized applications for yourself.

    -

    Using the interactive tutorials, you can learn to:

    +

    This tutorial provides a walkthrough of the basics of the Kubernetes cluster orchestration system. Each module contains some background information on major Kubernetes features and concepts, and a tutorial for you to follow along.

    +

    Using the tutorials, you can learn to:

    • Deploy a containerized application on a cluster.
    • Scale the deployment.
    • diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 764c785d7acaa..7339066dc7f21 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -130,10 +130,11 @@

      Deploy an app

      View the app

      -

      Pods that are running inside Kubernetes are running on a private, isolated network. +

      Pods that are running inside Kubernetes are running on a private, isolated network. By default they are visible from other pods and services within the same Kubernetes cluster, but not outside that network. When we use kubectl, we're interacting through an API endpoint to communicate with our application.
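As a rough sketch of that isolation (not part of the lesson; the Pod name here is a placeholder), you can print the cluster-internal IP address that only peers inside the cluster can reach directly:

```shell
# Print the Pod's cluster-internal IP address ("my-pod" is a placeholder name)
kubectl get pod my-pod -o jsonpath='{.status.podIP}'
```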

      -

      We will cover other options on how to expose your application outside the Kubernetes cluster later, in Module 4.

      +

We will cover other options on how to expose your application outside the Kubernetes cluster later, in Module 4. + Also, since this is a basic tutorial, we don't explain what Pods are in any detail here; they are covered in later topics.

The kubectl proxy command can create a proxy that will forward communications into the cluster-wide, private network. The proxy can be terminated by pressing control-C and won't show any output while it's running.
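Once the proxy is running (you start it just below), requests to its local port are forwarded into the cluster. A small sketch of querying through it, assuming kubectl proxy's default port 8001 and a placeholder Pod name:

```shell
# Query the API server through the local proxy
curl http://localhost:8001/version

# Reach a specific Pod through the proxy; POD_NAME is a placeholder
export POD_NAME=my-pod
curl http://localhost:8001/api/v1/namespaces/default/pods/$POD_NAME/
```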

      You need to open a second terminal window to run the proxy.

      kubectl proxy diff --git a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 7cee0d503e65f..5595ed089714e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -94,7 +94,7 @@

      Services and Labels

      -

      Create a new Service

      +

      Step 1: Creating a new Service

      Let’s verify that our application is running. We’ll use the kubectl get command and look for existing Pods:

      kubectl get pods

      If no Pods are running then it means the objects from the previous tutorials were cleaned up. In this case, go back and recreate the deployment from the Using kubectl to create a Deployment tutorial. @@ -114,13 +114,13 @@

      Create a new Service

      echo "NODE_PORT=$NODE_PORT"

      Now we can test that the app is exposed outside of the cluster using curl, the IP address of the Node and the externally exposed port:

      curl http://"$(minikube ip):$NODE_PORT"

      - {{< note >}}

      If Docker is the container runtime, a minikube tunnel is needed. This is because Docker Desktop does not support ports.
      + {{< note >}}

      If you're running minikube with Docker Desktop as the container driver, a minikube tunnel is needed. This is because containers inside Docker Desktop are isolated from your host computer.

      In a separate terminal window, execute:
      - $ minikube service kubernetes-bootcamp --url

      + minikube service kubernetes-bootcamp --url

      The output looks like this:

      http://127.0.0.1:51082
      ! Because you are using a Docker driver on darwin, the terminal needs to be open to run it.

      Then use the given URL to access the app:
      - $ curl 127.0.0.1:51082

      + curl 127.0.0.1:51082

      {{< /note >}}

      And we get a response from the server. The Service is exposed.

      @@ -151,7 +151,7 @@

      Step 2: Using labels

      -

      Deleting a service

      +

      Step 3: Deleting a service

To delete Services, you can use the delete service subcommand. Labels can also be used here:

      kubectl delete service -l app=kubernetes-bootcamp

      Confirm that the Service is gone:
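A minimal sketch of such a check (assuming the Service name used earlier in this module):

```shell
kubectl get services
```

The `kubernetes-bootcamp` Service should no longer appear in the list.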

      diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling1.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling1.svg index 45458cf88ba44..14513aee8bea4 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling1.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling1.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} .st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} .st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} @@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} .st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} 
.st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling2.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling2.svg index 53971c5a21c37..86be02afb272e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling2.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_05_scaling2.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} .st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} .st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} @@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} 
.st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} .st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates1.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates1.svg index 53971c5a21c37..86be02afb272e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates1.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates1.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} .st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} .st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} 
@@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} .st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} .st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates2.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates2.svg index 9773502d209cf..9544b8b56898b 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates2.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates2.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} .st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} 
.st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} @@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} .st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} .st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates3.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates3.svg index 15bd6e6033ebe..55a392dc4c9d2 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates3.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates3.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} .st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + 
.st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} .st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} @@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} .st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} .st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates4.svg b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates4.svg index a326317a50a1d..39437d7e7dc2b 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates4.svg +++ b/content/en/docs/tutorials/kubernetes-basics/public/images/module_06_rollingupdates4.svg @@ -24,13 +24,13 @@ .st18{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;} .st19{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3749,1.5832;} .st20{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4006,1.6004;} - .st21{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st22{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st23{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} - .st24{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} - .st25{fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} - .st26{opacity:0.1;fill:#EEF406;} - .st27{opacity:2.000000e-02;fill:#EEF406;} + .st21{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st22{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st23{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3975,1.5984;} + .st24{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.395,1.5966;} + .st25{fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.3963,1.5976;} + .st26{opacity:0.1;fill:#F29202;} + .st27{opacity:2.000000e-02;fill:#F29202;} .st28{opacity:0.1;fill:#06F7C9;} .st29{fill:none;stroke:#006DE9;stroke-width:0.8;stroke-miterlimit:10;} .st30{opacity:0.1;fill:url(#SVGID_3_);} @@ -43,12 +43,12 @@ .st37{opacity:0.1;fill:url(#SVGID_9_);} .st38{opacity:0.1;fill:url(#SVGID_10_);} 
.st39{fill:none;stroke:#326DE6;stroke-width:2;stroke-miterlimit:10;} - .st40{opacity:0.4;fill:none;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} - .st41{fill:none;stroke:#EEF406;stroke-width:2.4596;stroke-miterlimit:10;} + .st40{opacity:0.4;fill:none;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} + .st41{fill:none;stroke:#F29202;stroke-width:2.4596;stroke-miterlimit:10;} .st42{fill:#011F38;} .st43{opacity:0.4;} .st44{opacity:0.1;} - .st45{fill:#326DE6;stroke:#EEF406;stroke-width:2;stroke-miterlimit:10;} + .st45{fill:#326DE6;stroke:#F29202;stroke-width:2;stroke-miterlimit:10;} .st46{fill:none;stroke:#FFFFFF;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} .st47{fill:#06F7C9;stroke:#FFFFFF;stroke-width:0.3;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st48{fill:none;stroke:#011F38;stroke-width:1.2;stroke-linecap:round;stroke-linejoin:round;} @@ -57,10 +57,10 @@ .st51{fill:#8115FF;stroke:#011F38;stroke-width:0.8;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;} .st52{opacity:0.3;} .st53{opacity:0.2;fill:#6D6E71;} - .st54{fill:#EEF406;} + .st54{fill:#F29202;} .st55{fill:#06F7C9;} .st56{fill:#FFFFFF;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} - .st57{fill:#FFFFFF;stroke:#EEF406;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} + .st57{fill:#FFFFFF;stroke:#F29202;stroke-width:1.6;stroke-miterlimit:10;stroke-dasharray:2.4,1.6;} .st58{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.4938,1.6626;} .st59{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.0084,1.3389;} .st60{fill:none;stroke:#06F7C9;stroke-width:2;stroke-miterlimit:10;stroke-dasharray:2.724,1.816;} diff --git a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html index b70c087e5831a..d714433962c6e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -110,13 +110,13 @@

      Scaling overview

      Scaling a Deployment

      -

      To list your Deployments use the get deployments subcommand: - kubectl get deployments

      +

      To list your Deployments, use the get deployments subcommand:

      +

      kubectl get deployments

      The output should be similar to:

      -
      -               NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
      -               kubernetes-bootcamp   1/1     1            1           11m
      -               
      +
      +NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
      +kubernetes-bootcamp   1/1     1            1           11m
      +

      We should have 1 Pod. If not, run the command again. This shows:

      • NAME lists the names of the Deployments in the cluster.
      • @@ -125,8 +125,8 @@

        Scaling a Deployment

      • AVAILABLE displays how many replicas of the application are available to your users.
      • AGE displays the amount of time that the application has been running.
      -

      To see the ReplicaSet created by the Deployment, run - kubectl get rs

      +

      To see the ReplicaSet created by the Deployment, run:

      +

      kubectl get rs

Notice that the name of the ReplicaSet is always formatted as [DEPLOYMENT-NAME]-[RANDOM-STRING]. That string is randomly generated, using the pod-template-hash as a seed.
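Illustrative output for that command (the hash suffix below is made up, not taken from the tutorial):

```
NAME                             DESIRED   CURRENT   READY   AGE
kubernetes-bootcamp-765bf4c7b4   1         1         1       11m
```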

      Two important columns of this output are:

        @@ -156,6 +156,14 @@

        Load Balancing

        Next, we’ll do a curl to the exposed IP address and port. Execute the command multiple times:

        curl http://"$(minikube ip):$NODE_PORT"

        We hit a different Pod with every request. This demonstrates that the load-balancing is working.
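As a sketch, you can repeat the request in a loop and watch the responses rotate between Pods (this assumes the `NODE_PORT` variable set earlier in the module):

```shell
# Send several requests; different Pods should answer
for i in 1 2 3 4; do curl -s http://"$(minikube ip):$NODE_PORT"; done
```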

        + {{< note >}}

        If you're running minikube with Docker Desktop as the container driver, a minikube tunnel is needed. This is because containers inside Docker Desktop are isolated from your host computer.
        +

        In a separate terminal window, execute:
        + minikube service kubernetes-bootcamp --url

        +

        The output looks like this: +

        http://127.0.0.1:51082
        ! Because you are using a Docker driver on darwin, the terminal needs to be open to run it.

        +

        Then use the given URL to access the app:
        + curl 127.0.0.1:51082

        + {{< /note >}}
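For reference, the scaling step this page builds on looks like the following sketch (assuming the `kubernetes-bootcamp` Deployment from this module):

```shell
# Scale the Deployment to 4 replicas, then check the result
kubectl scale deployments/kubernetes-bootcamp --replicas=4
kubectl get deployments
```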
      diff --git a/content/en/docs/tutorials/security/cluster-level-pss.md b/content/en/docs/tutorials/security/cluster-level-pss.md index 316fe47798a8b..bc6f8fcc95a52 100644 --- a/content/en/docs/tutorials/security/cluster-level-pss.md +++ b/content/en/docs/tutorials/security/cluster-level-pss.md @@ -25,7 +25,7 @@ check the documentation for that version. Install the following on your workstation: -- [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) - [kubectl](/docs/tasks/tools/) This tutorial demonstrates what you can configure for a Kubernetes cluster that you fully @@ -252,7 +252,7 @@ following: ``` {{}} - If you use Docker Desktop with KinD on macOS, you can + If you use Docker Desktop with *kind* on macOS, you can add `/tmp` as a Shared Directory under the menu item **Preferences > Resources > File Sharing**. {{}} @@ -294,6 +294,8 @@ following: 1. Create a Pod in the default namespace: + {{% code_sample file="security/example-baseline-pod.yaml" %}} + ```shell kubectl apply -f https://k8s.io/examples/security/example-baseline-pod.yaml ``` diff --git a/content/en/docs/tutorials/security/ns-level-pss.md b/content/en/docs/tutorials/security/ns-level-pss.md index 31404c7d7f534..bcfd0ec28078b 100644 --- a/content/en/docs/tutorials/security/ns-level-pss.md +++ b/content/en/docs/tutorials/security/ns-level-pss.md @@ -22,12 +22,12 @@ level. For instructions, refer to Install the following on your workstation: -- [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) - [kubectl](/docs/tasks/tools/) ## Create cluster -1. Create a `KinD` cluster as follows: +1. Create a `kind` cluster as follows: ```shell kind create cluster --name psa-ns-level @@ -150,7 +150,7 @@ kind delete cluster --name psa-ns-level [shell script](/examples/security/kind-with-namespace-level-baseline-pod-security.sh) to perform all the preceding steps all at once. - 1. Create KinD cluster + 1. Create kind cluster 2. Create new namespace 3. Apply `baseline` Pod Security Standard in `enforce` mode while applying `restricted` Pod Security Standard also in `warn` and `audit` mode. 
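For reference, namespace-level Pod Security Standards like the ones this tutorial applies are plain labels on the Namespace; a sketch (the namespace name `example-ns` is a placeholder):

```shell
kubectl label --overwrite ns example-ns \
  pod-security.kubernetes.io/enforce=baseline \
  pod-security.kubernetes.io/warn=restricted \
  pod-security.kubernetes.io/audit=restricted
```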
diff --git a/content/en/docs/tutorials/security/seccomp.md b/content/en/docs/tutorials/security/seccomp.md index 2d77cf52d9f66..08e6b73d30c3e 100644 --- a/content/en/docs/tutorials/security/seccomp.md +++ b/content/en/docs/tutorials/security/seccomp.md @@ -482,7 +482,7 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane - image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac + image: kindest/node:v1.28.0@sha256:9f3ff58f19dcf1a0611d11e8ac989fdb30a28f40f236f59f0bea31fb956ccf5c kubeadmConfigPatches: - | kind: JoinConfiguration @@ -490,7 +490,7 @@ nodes: kubeletExtraArgs: seccomp-default: "true" - role: worker - image: kindest/node:v1.23.0@sha256:49824ab1727c04e56a21a5d8372a402fcd32ea51ac96a2706a12af38934f81ac + image: kindest/node:v1.28.0@sha256:9f3ff58f19dcf1a0611d11e8ac989fdb30a28f40f236f59f0bea31fb956ccf5c kubeadmConfigPatches: - | kind: JoinConfiguration diff --git a/content/en/docs/tutorials/services/connect-applications-service.md b/content/en/docs/tutorials/services/connect-applications-service.md index eadab8bd4612f..771149566b422 100644 --- a/content/en/docs/tutorials/services/connect-applications-service.md +++ b/content/en/docs/tutorials/services/connect-applications-service.md @@ -59,7 +59,7 @@ to make queries against both IPs. Note that the containers are *not* using port the node, nor are there any special NAT rules to route traffic to the pod. This means you can run multiple nginx pods on the same node all using the same `containerPort`, and access them from any other pod or node in your cluster using the assigned IP -address for the Service. If you want to arrange for a specific port on the host +address for the pod. If you want to arrange for a specific port on the host Node to be forwarded to backing Pods, you can - but the networking model should mean that you do not need to do so. @@ -71,7 +71,7 @@ if you're curious. So we have pods running nginx in a flat, cluster wide, address space. In theory, you could talk to these pods directly, but what happens when a node dies? The pods -die with it, and the Deployment will create new ones, with different IPs. This is +die with it, and the ReplicaSet inside the Deployment will create new ones, with different IPs. This is the problem a Service solves. A Kubernetes Service is an abstraction which defines a logical set of Pods running @@ -189,7 +189,7 @@ Note there's no mention of your Service. This is because you created the replica before the Service. Another disadvantage of doing this is that the scheduler might put both Pods on the same machine, which will take your entire Service down if it dies. We can do this the right way by killing the 2 Pods and waiting for the -Deployment to recreate them. This time around the Service exists *before* the +Deployment to recreate them. This time the Service exists *before* the replicas. This will give you scheduler-level Service spreading of your Pods (provided all your nodes have equal capacity), as well as the right environment variables: @@ -292,6 +292,10 @@ And also the configmap: ```shell kubectl create configmap nginxconfigmap --from-file=default.conf ``` + +You can find an example for `default.conf` in +[the Kubernetes examples project repo](https://github.com/kubernetes/examples/tree/bc9ca4ca32bb28762ef216386934bef20f1f9930/staging/https-nginx/). 
+ ``` configmap/nginxconfigmap created ``` @@ -302,6 +306,49 @@ kubectl get configmaps NAME DATA AGE nginxconfigmap 1 114s ``` + +You can view the details of the `nginxconfigmap` ConfigMap using the following command: + +```shell +kubectl describe configmap nginxconfigmap +``` + +The output is similar to: + +```console +Name: nginxconfigmap +Namespace: default +Labels: +Annotations: + +Data +==== +default.conf: +---- +server { + listen 80 default_server; + listen [::]:80 default_server ipv6only=on; + + listen 443 ssl; + + root /usr/share/nginx/html; + index index.html; + + server_name localhost; + ssl_certificate /etc/nginx/ssl/tls.crt; + ssl_certificate_key /etc/nginx/ssl/tls.key; + + location / { + try_files $uri $uri/ =404; + } +} + +BinaryData +==== + +Events: +``` + Following are the manual steps to follow in case you run into problems running make (on windows for example): ```shell @@ -311,7 +358,7 @@ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /d/tmp/nginx.key -ou cat /d/tmp/nginx.crt | base64 cat /d/tmp/nginx.key | base64 ``` - + Use the output from the previous commands to create a yaml file as follows. The base64 encoded value should all be on a single line. @@ -476,5 +523,3 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el * Learn more about [Using a Service to Access an Application in a Cluster](/docs/tasks/access-application-cluster/service-access-application-cluster/) * Learn more about [Connecting a Front End to a Back End Using a Service](/docs/tasks/access-application-cluster/connecting-frontend-backend/) * Learn more about [Creating an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/) - - diff --git a/content/en/docs/tutorials/services/source-ip.md b/content/en/docs/tutorials/services/source-ip.md index c6aeea4ac424a..907856543596e 100644 --- a/content/en/docs/tutorials/services/source-ip.md +++ b/content/en/docs/tutorials/services/source-ip.md @@ -27,19 +27,19 @@ the target localization. {{< /comment >}} [NAT](https://en.wikipedia.org/wiki/Network_address_translation) -: network address translation +: Network address translation [Source NAT](https://en.wikipedia.org/wiki/Network_address_translation#SNAT) -: replacing the source IP on a packet; in this page, that usually means replacing with the IP address of a node. +: Replacing the source IP on a packet; in this page, that usually means replacing with the IP address of a node. 
[Destination NAT](https://en.wikipedia.org/wiki/Network_address_translation#DNAT) -: replacing the destination IP on a packet; in this page, that usually means replacing with the IP address of a {{< glossary_tooltip term_id="pod" >}} +: Replacing the destination IP on a packet; in this page, that usually means replacing with the IP address of a {{< glossary_tooltip term_id="pod" >}} [VIP](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) -: a virtual IP address, such as the one assigned to every {{< glossary_tooltip text="Service" term_id="service" >}} in Kubernetes +: A virtual IP address, such as the one assigned to every {{< glossary_tooltip text="Service" term_id="service" >}} in Kubernetes [kube-proxy](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) -: a network daemon that orchestrates Service VIP management on every node +: A network daemon that orchestrates Service VIP management on every node ### Prerequisites diff --git a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md index 70c3e9e3ac40d..1a9875929ff71 100644 --- a/content/en/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/en/docs/tutorials/stateful-application/basic-stateful-set.md @@ -27,14 +27,25 @@ following Kubernetes concepts: * [Headless Services](/docs/concepts/services-networking/service/#headless-services) * [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) * [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/master/staging/persistent-volume-provisioning/) -* [StatefulSets](/docs/concepts/workloads/controllers/statefulset/) * The [kubectl](/docs/reference/kubectl/kubectl/) command line tool +{{% include "task-tutorial-prereqs.md" %}} +You should configure `kubectl` to use a context that uses the `default` +namespace. +If you are using an existing cluster, make sure that it's OK to use that +cluster's default namespace to practice. Ideally, practice in a cluster +that doesn't run any real workloads. + +It's also useful to read the concept page about [StatefulSets](/docs/concepts/workloads/controllers/statefulset/). + {{< note >}} This tutorial assumes that your cluster is configured to dynamically provision -PersistentVolumes. If your cluster is not configured to do so, you +PersistentVolumes. You'll also need to have a [default StorageClass](/docs/concepts/storage/storage-classes/#default-storageclass). +If your cluster is not configured to provision storage dynamically, you will have to manually provision two 1 GiB volumes prior to starting this -tutorial. +tutorial and +set up your cluster so that those PersistentVolumes map to the +PersistentVolumeClaim templates that the StatefulSet defines. {{< /note >}} ## {{% heading "objectives" %}} @@ -65,22 +76,22 @@ It creates a [headless Service](/docs/concepts/services-networking/service/#head {{% code_sample file="application/web/web.yaml" %}} -Download the example above, and save it to a file named `web.yaml` - -You will need to use two terminal windows. In the first terminal, use +You will need to use at least two terminal windows. In the first terminal, use [`kubectl get`](/docs/reference/generated/kubectl/kubectl-commands/#get) to watch the creation of the StatefulSet's Pods. 
```shell -kubectl get pods -w -l app=nginx +# use this terminal to run commands that specify --watch +# end this watch when you are asked to start a new watch +kubectl get pods --watch -l app=nginx ``` In the second terminal, use [`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands/#apply) to create the -headless Service and StatefulSet defined in `web.yaml`. +headless Service and StatefulSet: ```shell -kubectl apply -f web.yaml +kubectl apply -f https://k8s.io/examples/application/web/web.yaml ``` ``` service/nginx created @@ -105,7 +116,7 @@ NAME DESIRED CURRENT AGE web 2 1 20s ``` -### Ordered Pod Creation +### Ordered Pod creation For a StatefulSet with _n_ replicas, when Pods are being deployed, they are created sequentially, ordered from _{0..n-1}_. Examine the output of the @@ -113,7 +124,9 @@ created sequentially, ordered from _{0..n-1}_. Examine the output of the look like the example below. ```shell -kubectl get pods -w -l app=nginx +# Do not start a new watch; +# this should already be running +kubectl get pods --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -212,7 +225,9 @@ contain the Pods' IP addresses. In one terminal, watch the StatefulSet's Pods: ```shell -kubectl get pod -w -l app=nginx +# Start a new watch +# End this watch when you've seen that the delete is finished +kubectl get pod --watch -l app=nginx ``` In a second terminal, use [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) to delete all @@ -230,7 +245,8 @@ Wait for the StatefulSet to restart them, and for both Pods to transition to Running and Ready: ```shell -kubectl get pod -w -l app=nginx +# This should already be running +kubectl get pod --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -355,7 +371,9 @@ before retrying the `curl` command above. In one terminal, watch the StatefulSet's Pods: ```shell -kubectl get pod -w -l app=nginx +# End this watch when you've reached the end of the section. +# At the start of "Scaling a StatefulSet" you'll start a new watch. +kubectl get pod --watch -l app=nginx ``` In a second terminal, delete all of the StatefulSet's Pods: @@ -371,7 +389,8 @@ Examine the output of the `kubectl get` command in the first terminal, and wait for all of the Pods to transition to Running and Ready. ```shell -kubectl get pod -w -l app=nginx +# This should already be running +kubectl get pod --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -412,7 +431,10 @@ This is accomplished by updating the `replicas` field. You can use either In one terminal window, watch the Pods in the StatefulSet: ```shell -kubectl get pods -w -l app=nginx +# If you already have a watch running, you can continue using that. +# Otherwise, start one. +# End this watch when there are 5 healthy Pods for the StatefulSet +kubectl get pods --watch -l app=nginx ``` In another terminal window, use `kubectl scale` to scale the number of replicas @@ -429,7 +451,8 @@ Examine the output of the `kubectl get` command in the first terminal, and wait for the three additional Pods to transition to Running and Ready. ```shell -kubectl get pods -w -l app=nginx +# This should already be running +kubectl get pod --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -456,12 +479,13 @@ created each Pod sequentially with respect to its ordinal index, and it waited for each Pod's predecessor to be Running and Ready before launching the subsequent Pod. 
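As an optional check, a short sketch that reuses the `hostname` loop shown earlier in this tutorial (assuming all five Pods are Running and Ready) confirms that each new Pod's hostname matches its ordinal index:

```shell
# Each Pod in the web StatefulSet should report a hostname equal to its ordinal name (web-0 ... web-4)
for i in 0 1 2 3 4; do kubectl exec "web-$i" -- sh -c 'hostname'; done
```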
-### Scaling Down +### Scaling down In one terminal, watch the StatefulSet's Pods: ```shell -kubectl get pods -w -l app=nginx +# End this watch when there are only 3 Pods for the StatefulSet +kubectl get pod --watch -l app=nginx ``` In another terminal, use `kubectl patch` to scale the StatefulSet back down to @@ -477,7 +501,8 @@ statefulset.apps/web patched Wait for `web-4` and `web-3` to transition to Terminating. ```shell -kubectl get pods -w -l app=nginx +# This should already be running +kubectl get pods --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -556,7 +581,10 @@ statefulset.apps/web patched In another terminal, watch the Pods in the StatefulSet: ```shell -kubectl get pod -l app=nginx -w +# End this watch when the rollout is complete +# +# If you're not sure, leave it running one more minute +kubectl get pod -l app=nginx --watch ``` The output is similar to: ``` @@ -662,7 +690,8 @@ pod "web-2" deleted Wait for the Pod to be Running and Ready. ```shell -kubectl get pod -l app=nginx -w +# End the watch when you see that web-2 is healthy +kubectl get pod -l app=nginx --watch ``` ``` NAME READY STATUS RESTARTS AGE @@ -694,6 +723,8 @@ you specified [above](#staging-an-update). Patch the StatefulSet to decrement the partition: ```shell +# The value of "partition" should match the highest existing ordinal for +# the StatefulSet kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":2}}}}' ``` ``` @@ -703,7 +734,8 @@ statefulset.apps/web patched Wait for `web-2` to be Running and Ready. ```shell -kubectl get pod -l app=nginx -w +# This should already be running +kubectl get pod -l app=nginx --watch ``` ``` NAME READY STATUS RESTARTS AGE @@ -739,7 +771,8 @@ pod "web-1" deleted Wait for the `web-1` Pod to be Running and Ready. ```shell -kubectl get pod -l app=nginx -w +# This should already be running +kubectl get pod -l app=nginx --watch ``` The output is similar to: ``` @@ -792,7 +825,8 @@ statefulset.apps/web patched Wait for all of the Pods in the StatefulSet to become Running and Ready. ```shell -kubectl get pod -l app=nginx -w +# This should already be running +kubectl get pod -l app=nginx --watch ``` The output is similar to: ``` @@ -847,7 +881,8 @@ deleted. In one terminal window, watch the Pods in the StatefulSet. ``` -kubectl get pods -w -l app=nginx +# End this watch when there are no Pods for the StatefulSet +kubectl get pods --watch -l app=nginx ``` Use [`kubectl delete`](/docs/reference/generated/kubectl/kubectl-commands/#delete) to delete the @@ -900,7 +935,8 @@ As the `web` StatefulSet has been deleted, `web-0` has not been relaunched. In one terminal, watch the StatefulSet's Pods. ```shell -kubectl get pods -w -l app=nginx +# Leave this watch running until the next time you start a watch +kubectl get pods --watch -l app=nginx ``` In a second terminal, recreate the StatefulSet. Note that, unless @@ -908,7 +944,7 @@ you deleted the `nginx` Service (which you should not have), you will see an error indicating that the Service already exists. ```shell -kubectl apply -f web.yaml +kubectl apply -f https://k8s.io/examples/application/web/web.yaml ``` ``` statefulset.apps/web created @@ -921,7 +957,8 @@ headless Service even though that Service already exists. Examine the output of the `kubectl get` command running in the first terminal. 
```shell -kubectl get pods -w -l app=nginx +# This should already be running +kubectl get pods --watch -l app=nginx ``` ``` NAME READY STATUS RESTARTS AGE @@ -968,7 +1005,8 @@ PersistentVolume was remounted. In one terminal window, watch the Pods in the StatefulSet. ```shell -kubectl get pods -w -l app=nginx +# Leave this running until the next page section +kubectl get pods --watch -l app=nginx ``` In another terminal, delete the StatefulSet again. This time, omit the @@ -986,7 +1024,8 @@ Examine the output of the `kubectl get` command running in the first terminal, and wait for all of the Pods to transition to Terminating. ```shell -kubectl get pods -w -l app=nginx +# This should already be running +kubectl get pods --watch -l app=nginx ``` ``` @@ -1027,7 +1066,7 @@ service "nginx" deleted Recreate the StatefulSet and headless Service one more time: ```shell -kubectl apply -f web.yaml +kubectl apply -f https://k8s.io/examples/application/web/web.yaml ``` ``` @@ -1093,21 +1132,20 @@ Pod. This option only affects the behavior for scaling operations. Updates are n {{% code_sample file="application/web/web-parallel.yaml" %}} -Download the example above, and save it to a file named `web-parallel.yaml` - This manifest is identical to the one you applied above, except that the `.spec.podManagementPolicy` of the `web` StatefulSet is set to `Parallel`. In one terminal, watch the Pods in the StatefulSet. ```shell -kubectl get pod -l app=nginx -w +# Leave this watch running until the end of the section +kubectl get pod -l app=nginx --watch ``` In another terminal, create the StatefulSet and Service in the manifest: ```shell -kubectl apply -f web-parallel.yaml +kubectl apply -f https://k8s.io/examples/application/web/web-parallel.yaml ``` ``` service/nginx created @@ -1117,7 +1155,8 @@ statefulset.apps/web created Examine the output of the `kubectl get` command that you executed in the first terminal. ```shell -kubectl get pod -l app=nginx -w +# This should already be running +kubectl get pod -l app=nginx --watch ``` ``` NAME READY STATUS RESTARTS AGE @@ -1170,7 +1209,8 @@ kubectl delete sts web You can watch `kubectl get` to see those Pods being deleted. 
```shell -kubectl get pod -l app=nginx -w +# end the watch when you've seen what you need to +kubectl get pod -l app=nginx --watch ``` ``` web-3 1/1 Terminating 0 9m diff --git a/content/en/docs/tutorials/stateful-application/zookeeper.md b/content/en/docs/tutorials/stateful-application/zookeeper.md index 3d74667f12928..ae780a3feffbb 100644 --- a/content/en/docs/tutorials/stateful-application/zookeeper.md +++ b/content/en/docs/tutorials/stateful-application/zookeeper.md @@ -26,7 +26,7 @@ Kubernetes concepts: - [Pods](/docs/concepts/workloads/pods/) - [Cluster DNS](/docs/concepts/services-networking/dns-pod-service/) - [Headless Services](/docs/concepts/services-networking/service/#headless-services) -- [PersistentVolumes](/docs/concepts/storage/volumes/) +- [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) - [PersistentVolume Provisioning](https://github.com/kubernetes/examples/tree/master/staging/persistent-volume-provisioning/) - [StatefulSets](/docs/concepts/workloads/controllers/statefulset/) - [PodDisruptionBudgets](/docs/concepts/workloads/pods/disruptions/#pod-disruption-budget) diff --git a/content/en/examples/admin/resource/memory-available-cgroupv2.sh b/content/en/examples/admin/resource/memory-available-cgroupv2.sh new file mode 100644 index 0000000000000..47b9f6802bdfd --- /dev/null +++ b/content/en/examples/admin/resource/memory-available-cgroupv2.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +# This script reproduces what the kubelet does +# to calculate memory.available relative to kubepods cgroup. + +# current memory usage +memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}') +memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024)) +memory_usage_in_bytes=$(cat /sys/fs/cgroup/kubepods.slice/memory.current) +memory_total_inactive_file=$(cat /sys/fs/cgroup/kubepods.slice/memory.stat | grep inactive_file | awk '{print $2}') + +memory_working_set=${memory_usage_in_bytes} +if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; +then + memory_working_set=0 +else + memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file)) +fi + +memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set)) +memory_available_in_kb=$((memory_available_in_bytes / 1024)) +memory_available_in_mb=$((memory_available_in_kb / 1024)) + +echo "memory.capacity_in_bytes $memory_capacity_in_bytes" +echo "memory.usage_in_bytes $memory_usage_in_bytes" +echo "memory.total_inactive_file $memory_total_inactive_file" +echo "memory.working_set $memory_working_set" +echo "memory.available_in_bytes $memory_available_in_bytes" +echo "memory.available_in_kb $memory_available_in_kb" +echo "memory.available_in_mb $memory_available_in_mb" diff --git a/content/en/examples/application/hpa/php-apache.yaml b/content/en/examples/application/hpa/php-apache.yaml index f3f1ef5d4f912..1c49aca6a1ff5 100644 --- a/content/en/examples/application/hpa/php-apache.yaml +++ b/content/en/examples/application/hpa/php-apache.yaml @@ -1,4 +1,4 @@ -apiVersion: autoscaling/v1 +apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: php-apache @@ -9,4 +9,10 @@ spec: name: php-apache minReplicas: 1 maxReplicas: 10 - targetCPUUtilizationPercentage: 50 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/content/en/examples/application/job/rabbitmq/rabbitmq-service.yaml b/content/en/examples/application/job/rabbitmq/rabbitmq-service.yaml new file mode 100644 index 0000000000000..2f7fb06dcfed6 --- 
/dev/null +++ b/content/en/examples/application/job/rabbitmq/rabbitmq-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + component: rabbitmq + name: rabbitmq-service +spec: + ports: + - port: 5672 + selector: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq diff --git a/content/en/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml b/content/en/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml new file mode 100644 index 0000000000000..502598ddf947e --- /dev/null +++ b/content/en/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + component: rabbitmq + name: rabbitmq +spec: + replicas: 1 + serviceName: rabbitmq-service + selector: + matchLabels: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq + template: + metadata: + labels: + app.kubernetes.io/name: task-queue + app.kubernetes.io/component: rabbitmq + spec: + containers: + - image: rabbitmq + name: rabbitmq + ports: + - containerPort: 5672 + resources: + requests: + memory: 16M + limits: + cpu: 250m + memory: 512M + volumeMounts: + - mountPath: /var/lib/rabbitmq + name: rabbitmq-data + volumes: + - name: rabbitmq-data + emptyDir: {} diff --git a/content/en/examples/controllers/daemonset.yaml b/content/en/examples/controllers/daemonset.yaml index aa540e9697902..1650ecce4ac31 100644 --- a/content/en/examples/controllers/daemonset.yaml +++ b/content/en/examples/controllers/daemonset.yaml @@ -35,6 +35,9 @@ spec: volumeMounts: - name: varlog mountPath: /var/log + # it may be desirable to set a high priority class to ensure that a DaemonSet Pod + # preempts running Pods + # priorityClassName: important terminationGracePeriodSeconds: 30 volumes: - name: varlog diff --git a/content/en/examples/pods/probe/tcp-liveness-readiness.yaml b/content/en/examples/pods/probe/tcp-liveness-readiness.yaml index ef8a2f9500b00..d10aae980e961 100644 --- a/content/en/examples/pods/probe/tcp-liveness-readiness.yaml +++ b/content/en/examples/pods/probe/tcp-liveness-readiness.yaml @@ -13,10 +13,10 @@ spec: readinessProbe: tcpSocket: port: 8080 - initialDelaySeconds: 5 + initialDelaySeconds: 15 periodSeconds: 10 livenessProbe: tcpSocket: port: 8080 initialDelaySeconds: 15 - periodSeconds: 20 + periodSeconds: 10 diff --git a/content/en/examples/pods/security/seccomp/ga/audit-pod.yaml b/content/en/examples/pods/security/seccomp/ga/audit-pod.yaml index 409d4b923c45a..34aacd7d95343 100644 --- a/content/en/examples/pods/security/seccomp/ga/audit-pod.yaml +++ b/content/en/examples/pods/security/seccomp/ga/audit-pod.yaml @@ -11,7 +11,7 @@ spec: localhostProfile: profiles/audit.json containers: - name: test-container - image: hashicorp/http-echo:0.2.3 + image: hashicorp/http-echo:1.0 args: - "-text=just made some syscalls!" securityContext: diff --git a/content/en/examples/pods/security/seccomp/ga/default-pod.yaml b/content/en/examples/pods/security/seccomp/ga/default-pod.yaml index b884ec5924221..153031fc9df19 100644 --- a/content/en/examples/pods/security/seccomp/ga/default-pod.yaml +++ b/content/en/examples/pods/security/seccomp/ga/default-pod.yaml @@ -10,7 +10,7 @@ spec: type: RuntimeDefault containers: - name: test-container - image: hashicorp/http-echo:0.2.3 + image: hashicorp/http-echo:1.0 args: - "-text=just made some more syscalls!" 
securityContext: diff --git a/content/en/examples/pods/security/seccomp/ga/fine-pod.yaml b/content/en/examples/pods/security/seccomp/ga/fine-pod.yaml index 692b8281516ca..dd7622fe159ec 100644 --- a/content/en/examples/pods/security/seccomp/ga/fine-pod.yaml +++ b/content/en/examples/pods/security/seccomp/ga/fine-pod.yaml @@ -11,7 +11,7 @@ spec: localhostProfile: profiles/fine-grained.json containers: - name: test-container - image: hashicorp/http-echo:0.2.3 + image: hashicorp/http-echo:1.0 args: - "-text=just made some syscalls!" securityContext: diff --git a/content/en/examples/pods/security/seccomp/ga/violation-pod.yaml b/content/en/examples/pods/security/seccomp/ga/violation-pod.yaml index 70deadf4b22b3..c4844df37c962 100644 --- a/content/en/examples/pods/security/seccomp/ga/violation-pod.yaml +++ b/content/en/examples/pods/security/seccomp/ga/violation-pod.yaml @@ -11,7 +11,7 @@ spec: localhostProfile: profiles/violation.json containers: - name: test-container - image: hashicorp/http-echo:0.2.3 + image: hashicorp/http-echo:1.0 args: - "-text=just made some syscalls!" securityContext: diff --git a/content/en/examples/pods/storage/projected-clustertrustbundle.yaml b/content/en/examples/pods/storage/projected-clustertrustbundle.yaml new file mode 100644 index 0000000000000..452384a44514e --- /dev/null +++ b/content/en/examples/pods/storage/projected-clustertrustbundle.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Pod +metadata: + name: sa-ctb-name-test +spec: + containers: + - name: container-test + image: busybox + command: ["sleep", "3600"] + volumeMounts: + - name: token-vol + mountPath: "/root-certificates" + readOnly: true + serviceAccountName: default + volumes: + - name: root-certificates-vol + projected: + sources: + - clusterTrustBundle: + name: example + path: example-roots.pem + - clusterTrustBundle: + signerName: "example.com/mysigner" + labelSelector: + matchLabels: + version: live + path: mysigner-roots.pem + optional: true diff --git a/content/en/examples/priority-and-fairness/health-for-strangers.yaml b/content/en/examples/priority-and-fairness/health-for-strangers.yaml index 86b92619e7fb2..312f80751ffe8 100644 --- a/content/en/examples/priority-and-fairness/health-for-strangers.yaml +++ b/content/en/examples/priority-and-fairness/health-for-strangers.yaml @@ -1,4 +1,4 @@ -apiVersion: flowcontrol.apiserver.k8s.io/v1beta3 +apiVersion: flowcontrol.apiserver.k8s.io/v1 kind: FlowSchema metadata: name: health-for-strangers diff --git a/content/en/examples/priority-and-fairness/list-events-default-service-account.yaml b/content/en/examples/priority-and-fairness/list-events-default-service-account.yaml index 94e73ae948802..e9e1beab9984d 100644 --- a/content/en/examples/priority-and-fairness/list-events-default-service-account.yaml +++ b/content/en/examples/priority-and-fairness/list-events-default-service-account.yaml @@ -1,4 +1,4 @@ -apiVersion: flowcontrol.apiserver.k8s.io/v1beta3 +apiVersion: flowcontrol.apiserver.k8s.io/v1 kind: FlowSchema metadata: name: list-events-default-service-account diff --git a/content/en/examples/secret/basicauth-secret.yaml b/content/en/examples/secret/basicauth-secret.yaml new file mode 100644 index 0000000000000..a854b267a01a5 --- /dev/null +++ b/content/en/examples/secret/basicauth-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-basic-auth +type: kubernetes.io/basic-auth +stringData: + username: admin # required field for kubernetes.io/basic-auth + password: t0p-Secret # required field for 
kubernetes.io/basic-auth \ No newline at end of file diff --git a/content/en/examples/secret/bootstrap-token-secret-base64.yaml b/content/en/examples/secret/bootstrap-token-secret-base64.yaml new file mode 100644 index 0000000000000..98233758e2e7c --- /dev/null +++ b/content/en/examples/secret/bootstrap-token-secret-base64.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + name: bootstrap-token-5emitj + namespace: kube-system +type: bootstrap.kubernetes.io/token +data: + auth-extra-groups: c3lzdGVtOmJvb3RzdHJhcHBlcnM6a3ViZWFkbTpkZWZhdWx0LW5vZGUtdG9rZW4= + expiration: MjAyMC0wOS0xM1QwNDozOToxMFo= + token-id: NWVtaXRq + token-secret: a3E0Z2lodnN6emduMXAwcg== + usage-bootstrap-authentication: dHJ1ZQ== + usage-bootstrap-signing: dHJ1ZQ== \ No newline at end of file diff --git a/content/en/examples/secret/bootstrap-token-secret-literal.yaml b/content/en/examples/secret/bootstrap-token-secret-literal.yaml new file mode 100644 index 0000000000000..6aec11ce870fc --- /dev/null +++ b/content/en/examples/secret/bootstrap-token-secret-literal.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Secret +metadata: + # Note how the Secret is named + name: bootstrap-token-5emitj + # A bootstrap token Secret usually resides in the kube-system namespace + namespace: kube-system +type: bootstrap.kubernetes.io/token +stringData: + auth-extra-groups: "system:bootstrappers:kubeadm:default-node-token" + expiration: "2020-09-13T04:39:10Z" + # This token ID is used in the name + token-id: "5emitj" + token-secret: "kq4gihvszzgn1p0r" + # This token can be used for authentication + usage-bootstrap-authentication: "true" + # and it can be used for signing + usage-bootstrap-signing: "true" \ No newline at end of file diff --git a/content/en/examples/secret/dockercfg-secret.yaml b/content/en/examples/secret/dockercfg-secret.yaml new file mode 100644 index 0000000000000..ccf73bc306f24 --- /dev/null +++ b/content/en/examples/secret/dockercfg-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-dockercfg +type: kubernetes.io/dockercfg +data: + .dockercfg: | + eyJhdXRocyI6eyJodHRwczovL2V4YW1wbGUvdjEvIjp7ImF1dGgiOiJvcGVuc2VzYW1lIn19fQo= \ No newline at end of file diff --git a/content/en/examples/secret/dotfile-secret.yaml b/content/en/examples/secret/dotfile-secret.yaml new file mode 100644 index 0000000000000..5c7900ad97479 --- /dev/null +++ b/content/en/examples/secret/dotfile-secret.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Secret +metadata: + name: dotfile-secret +data: + .secret-file: dmFsdWUtMg0KDQo= +--- +apiVersion: v1 +kind: Pod +metadata: + name: secret-dotfiles-pod +spec: + volumes: + - name: secret-volume + secret: + secretName: dotfile-secret + containers: + - name: dotfile-test-container + image: registry.k8s.io/busybox + command: + - ls + - "-l" + - "/etc/secret-volume" + volumeMounts: + - name: secret-volume + readOnly: true + mountPath: "/etc/secret-volume" \ No newline at end of file diff --git a/content/en/examples/secret/optional-secret.yaml b/content/en/examples/secret/optional-secret.yaml new file mode 100644 index 0000000000000..cc510b9078130 --- /dev/null +++ b/content/en/examples/secret/optional-secret.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: mypod +spec: + containers: + - name: mypod + image: redis + volumeMounts: + - name: foo + mountPath: "/etc/foo" + readOnly: true + volumes: + - name: foo + secret: + secretName: mysecret + optional: true \ No newline at end of file diff --git 
a/content/en/examples/secret/serviceaccount-token-secret.yaml b/content/en/examples/secret/serviceaccount-token-secret.yaml new file mode 100644 index 0000000000000..8ec8fb577d547 --- /dev/null +++ b/content/en/examples/secret/serviceaccount-token-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-sa-sample + annotations: + kubernetes.io/service-account.name: "sa-name" +type: kubernetes.io/service-account-token +data: + extra: YmFyCg== \ No newline at end of file diff --git a/content/en/examples/secret/ssh-auth-secret.yaml b/content/en/examples/secret/ssh-auth-secret.yaml new file mode 100644 index 0000000000000..9f79cbfb065fd --- /dev/null +++ b/content/en/examples/secret/ssh-auth-secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-ssh-auth +type: kubernetes.io/ssh-auth +data: + # the data is abbreviated in this example + ssh-privatekey: | + UG91cmluZzYlRW1vdGljb24lU2N1YmE= \ No newline at end of file diff --git a/content/en/examples/secret/tls-auth-secret.yaml b/content/en/examples/secret/tls-auth-secret.yaml new file mode 100644 index 0000000000000..1e14b8e00ac47 --- /dev/null +++ b/content/en/examples/secret/tls-auth-secret.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Secret +metadata: + name: secret-tls +type: kubernetes.io/tls +data: + # values are base64 encoded, which obscures them but does NOT provide + # any useful level of confidentiality + tls.crt: | + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNVakNDQWJzQ0FnMytNQTBHQ1NxR1NJYjNE + UUVCQlFVQU1JR2JNUXN3Q1FZRFZRUUdFd0pLVURFT01Bd0cKQTFVRUNCTUZWRzlyZVc4eEVEQU9C + Z05WQkFjVEIwTm9kVzh0YTNVeEVUQVBCZ05WQkFvVENFWnlZVzVyTkVSRQpNUmd3RmdZRFZRUUxF + dzlYWldKRFpYSjBJRk4xY0hCdmNuUXhHREFXQmdOVkJBTVREMFp5WVc1ck5FUkVJRmRsCllpQkRR + VEVqTUNFR0NTcUdTSWIzRFFFSkFSWVVjM1Z3Y0c5eWRFQm1jbUZ1YXpSa1pDNWpiMjB3SGhjTk1U + TXcKTVRFeE1EUTFNVE01V2hjTk1UZ3dNVEV3TURRMU1UTTVXakJMTVFzd0NRWURWUVFHREFKS1VE + RVBNQTBHQTFVRQpDQXdHWEZSdmEzbHZNUkV3RHdZRFZRUUtEQWhHY21GdWF6UkVSREVZTUJZR0Ex + VUVBd3dQZDNkM0xtVjRZVzF3CmJHVXVZMjl0TUlHYU1BMEdDU3FHU0liM0RRRUJBUVVBQTRHSUFE + Q0JoQUo5WThFaUhmeHhNL25PbjJTbkkxWHgKRHdPdEJEVDFKRjBReTliMVlKanV2YjdjaTEwZjVN + Vm1UQllqMUZTVWZNOU1vejJDVVFZdW4yRFljV29IcFA4ZQpqSG1BUFVrNVd5cDJRN1ArMjh1bklI + QkphVGZlQ09PekZSUFY2MEdTWWUzNmFScG04L3dVVm16eGFLOGtCOWVaCmhPN3F1TjdtSWQxL2pW + cTNKODhDQXdFQUFUQU5CZ2txaGtpRzl3MEJBUVVGQUFPQmdRQU1meTQzeE15OHh3QTUKVjF2T2NS + OEtyNWNaSXdtbFhCUU8xeFEzazlxSGtyNFlUY1JxTVQ5WjVKTm1rWHYxK2VSaGcwTi9WMW5NUTRZ + RgpnWXcxbnlESnBnOTduZUV4VzQyeXVlMFlHSDYyV1hYUUhyOVNVREgrRlowVnQvRGZsdklVTWRj + UUFEZjM4aU9zCjlQbG1kb3YrcE0vNCs5a1h5aDhSUEkzZXZ6OS9NQT09Ci0tLS0tRU5EIENFUlRJ + RklDQVRFLS0tLS0K + # In this example, the key data is not a real PEM-encoded private key + tls.key: | + RXhhbXBsZSBkYXRhIGZvciB0aGUgVExTIGNydCBmaWVsZA== \ No newline at end of file diff --git a/content/en/examples/storage/storageclass-low-latency.yaml b/content/en/examples/storage/storageclass-low-latency.yaml new file mode 100644 index 0000000000000..6a427a6717ba9 --- /dev/null +++ b/content/en/examples/storage/storageclass-low-latency.yaml @@ -0,0 +1,14 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: low-latency + annotations: + storageclass.kubernetes.io/is-default-class: "false" +provisioner: csi-driver.example-vendor.example +reclaimPolicy: Retain # default value is Delete +allowVolumeExpansion: true +mountOptions: + - discard # this might enable UNMAP / TRIM at the block storage layer +volumeBindingMode: WaitForFirstConsumer +parameters: + guaranteedReadWriteLatency: "true" 
# provider-specific diff --git a/content/en/examples/validatingadmissionpolicy/replicalimit-param.yaml b/content/en/examples/validatingadmissionpolicy/replicalimit-param.yaml index 813bc7b334528..9d8ceee220190 100644 --- a/content/en/examples/validatingadmissionpolicy/replicalimit-param.yaml +++ b/content/en/examples/validatingadmissionpolicy/replicalimit-param.yaml @@ -2,5 +2,5 @@ apiVersion: rules.example.com/v1 kind: ReplicaLimit metadata: name: "replica-limit-test.example.com" - namesapce: "default" -maxReplicas: 3 \ No newline at end of file + namespace: "default" +maxReplicas: 3 diff --git a/content/en/releases/_index.md b/content/en/releases/_index.md index 97d9004313158..3748f9231b1a3 100644 --- a/content/en/releases/_index.md +++ b/content/en/releases/_index.md @@ -6,13 +6,17 @@ layout: release-info notoc: true --- - -The Kubernetes project maintains release branches for the most recent three minor releases ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). Kubernetes 1.19 and newer receive [approximately 1 year of patch support](/releases/patch-releases/#support-period). Kubernetes 1.18 and older received approximately 9 months of patch support. +The Kubernetes project maintains release branches for the most recent three minor releases +({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). +Kubernetes 1.19 and newer receive +[approximately 1 year of patch support](/releases/patch-releases/#support-period). +Kubernetes 1.18 and older received approximately 9 months of patch support. Kubernetes versions are expressed as **x.y.z**, -where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning](https://semver.org/) terminology. +where **x** is the major version, **y** is the minor version, and **z** is the patch version, +following [Semantic Versioning](https://semver.org/) terminology. More information in the [version skew policy](/releases/version-skew-policy/) document. @@ -24,6 +28,7 @@ More information in the [version skew policy](/releases/version-skew-policy/) do ## Upcoming Release -Check out the [schedule](https://github.com/kubernetes/sig-release/tree/master/releases/release-{{< skew nextMinorVersion >}}) for the upcoming **{{< skew nextMinorVersion >}}** Kubernetes release! +Check out the [schedule](https://github.com/kubernetes/sig-release/tree/master/releases/release-{{< skew nextMinorVersion >}}) +for the upcoming **{{< skew nextMinorVersion >}}** Kubernetes release! ## Helpful Resources diff --git a/content/en/releases/download.md b/content/en/releases/download.md index 0cee6e3556afb..e83a7d9ad4ee3 100644 --- a/content/en/releases/download.md +++ b/content/en/releases/download.md @@ -10,31 +10,58 @@ cluster. Those components are also shipped in container images as part of the official release process. All binaries as well as container images are available for multiple operating systems as well as hardware architectures. -## Container Images +### kubectl -All Kubernetes container images are deployed to the -`registry.k8s.io` container image registry. + + +The Kubernetes command-line tool, [kubectl](/docs/reference/kubectl/kubectl/), allows +you to run commands against Kubernetes clusters. -{{< feature-state for_k8s_version="v1.24" state="alpha" >}} +You can use kubectl to deploy applications, inspect and manage cluster resources, +and view logs. 
For more information, including a complete list of kubectl operations, see the +[`kubectl` reference documentation](/docs/reference/kubectl/). -For Kubernetes {{< param "version" >}}, the following -container images are signed using [cosign](https://github.com/sigstore/cosign) -signatures: +kubectl is installable on a variety of Linux platforms, macOS, and Windows. +Find your preferred operating system below. + +- [Install kubectl on Linux](/docs/tasks/tools/install-kubectl-linux) +- [Install kubectl on macOS](/docs/tasks/tools/install-kubectl-macos) +- [Install kubectl on Windows](/docs/tasks/tools/install-kubectl-windows) + +## Container images -| Container Image | Supported Architectures | -| ------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +All Kubernetes container images are deployed to the +`registry.k8s.io` container image registry. + +| Container Image | Supported Architectures | +| ------------------------------------------------------------------------- | --------------------------------- | | registry.k8s.io/kube-apiserver:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | | registry.k8s.io/kube-controller-manager:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | | registry.k8s.io/kube-proxy:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | | registry.k8s.io/kube-scheduler:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | | registry.k8s.io/conformance:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | +### Container image architectures + All container images are available for multiple architectures, and the container runtime should choose the correct one based on the underlying platform. It is also possible to pull a dedicated architecture by suffixing the container image name, for example -`registry.k8s.io/kube-apiserver-arm64:v{{< skew currentPatchVersion >}}`. All -those derivations are signed in the same way as the multi-architecture manifest lists. +`registry.k8s.io/kube-apiserver-arm64:v{{< skew currentPatchVersion >}}`. + +### Container image signatures + +{{< feature-state for_k8s_version="v1.26" state="beta" >}} + +For Kubernetes {{< param "version" >}}, +container images are signed using [sigstore](https://sigstore.dev) +signatures: + +{{< note >}} +Container image sigstore signatures do not currently match between different geographical locations. +More information about this problem is available in the corresponding +[GitHub issue](https://github.com/kubernetes/registry.k8s.io/issues/187). +{{< /note >}} The Kubernetes project publishes a list of signed Kubernetes container images in [SPDX 2.3](https://spdx.dev/specifications/) format. @@ -43,35 +70,13 @@ You can fetch that list using: ```shell curl -Ls "https://sbom.k8s.io/$(curl -Ls https://dl.k8s.io/release/stable.txt)/release" | grep "SPDXID: SPDXRef-Package-registry.k8s.io" | grep -v sha256 | cut -d- -f3- | sed 's/-/\//' | sed 's/-v1/:v1/' ``` -For Kubernetes v{{< skew currentVersion >}}, the only kind of code artifact that -you can verify integrity for is a container image, using the experimental -signing support. To manually verify signed container images of Kubernetes core components, refer to [Verify Signed Container Images](/docs/tasks/administer-cluster/verify-signed-artifacts). 
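As a rough sketch of what that manual verification looks like with the cosign v2 CLI (the certificate identity below is a placeholder, not the real value, and the Google-accounts OIDC issuer is an assumption here; the authoritative identity, issuer, and full procedure are on the linked task page):

```shell
# Sketch only: verify a release image signature with cosign v2.
# <release-signing-identity> is a placeholder; substitute the identity
# documented in "Verify Signed Container Images".
cosign verify registry.k8s.io/kube-apiserver:v{{< skew currentPatchVersion >}} \
  --certificate-identity "<release-signing-identity>" \
  --certificate-oidc-issuer https://accounts.google.com
```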
- +If you pull a container image for a specific architecture, the single-architecture image +is signed in the same way as for the multi-architecture manifest lists. ## Binaries -Find links to download Kubernetes components (and their checksums) in the [CHANGELOG](https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG) files. - -Alternately, use [downloadkubernetes.com](https://www.downloadkubernetes.com/) to filter by version and architecture. - -### kubectl - - - -The Kubernetes command-line tool, [kubectl](/docs/reference/kubectl/kubectl/), allows -you to run commands against Kubernetes clusters. - -You can use kubectl to deploy applications, inspect and manage cluster resources, -and view logs. For more information including a complete list of kubectl operations, see the -[`kubectl` reference documentation](/docs/reference/kubectl/). - -kubectl is installable on a variety of Linux platforms, macOS and Windows. -Find your preferred operating system below. - -- [Install kubectl on Linux](/docs/tasks/tools/install-kubectl-linux) -- [Install kubectl on macOS](/docs/tasks/tools/install-kubectl-macos) -- [Install kubectl on Windows](/docs/tasks/tools/install-kubectl-windows) +{{< release-binaries >}} diff --git a/content/en/releases/notes.md b/content/en/releases/notes.md index 1bb60c810627c..bcda7d0a04437 100644 --- a/content/en/releases/notes.md +++ b/content/en/releases/notes.md @@ -8,6 +8,10 @@ sitemap: priority: 0.5 --- -Release notes can be found by reading the [Changelog](https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG) that matches your Kubernetes version. View the changelog for {{< skew currentVersionAddMinor 0 >}} on [GitHub](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-{{< skew currentVersionAddMinor 0 >}}.md). +Release notes can be found by reading the [Changelog](https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG) +that matches your Kubernetes version. View the changelog for {{< skew currentVersionAddMinor 0 >}} on +[GitHub](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-{{< skew currentVersionAddMinor 0 >}}.md). -Alternately, release notes can be searched and filtered online at: [relnotes.k8s.io](https://relnotes.k8s.io). View filtered release notes for {{< skew currentVersionAddMinor 0 >}} on [relnotes.k8s.io](https://relnotes.k8s.io/?releaseVersions={{< skew currentVersionAddMinor 0 >}}.0). +Alternately, release notes can be searched and filtered online at: [relnotes.k8s.io](https://relnotes.k8s.io). +View filtered release notes for {{< skew currentVersionAddMinor 0 >}} on +[relnotes.k8s.io](https://relnotes.k8s.io/?releaseVersions={{< skew currentVersionAddMinor 0 >}}.0). diff --git a/content/en/releases/patch-releases.md b/content/en/releases/patch-releases.md index 08481d28b0e86..315d90e7b23af 100644 --- a/content/en/releases/patch-releases.md +++ b/content/en/releases/patch-releases.md @@ -78,13 +78,9 @@ releases may also occur in between these. | Monthly Patch Release | Cherry Pick Deadline | Target date | | --------------------- | -------------------- | ----------- | -| October 2023 | 2023-10-13 | 2023-10-18 | -| November 2023 | N/A | N/A | -| December 2023 | 2023-12-01 | 2023-12-06 | - -**Note:** Due to overlap with KubeCon NA 2023 and the resulting lack of -availability of Release Managers, it has been decided to skip patch releases -in November. Instead, we'll have patch releases early in December. 
+| January 2024 | 2024-01-12 | 2024-01-17 | +| February 2024 | 2024-02-09 | 2024-02-14 | +| March 2024 | 2024-03-08 | 2024-03-13 | ## Detailed Release History for Active Branches @@ -99,4 +95,4 @@ These releases are no longer supported. [cherry-picks]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-release/cherry-picks.md [release-managers]: /releases/release-managers [release process description]: /releases/release -[yearly-support]: https://git.k8s.io/enhancements/keps/sig-release/1498-kubernetes-yearly-support-period/README.md \ No newline at end of file +[yearly-support]: https://git.k8s.io/enhancements/keps/sig-release/1498-kubernetes-yearly-support-period/README.md diff --git a/content/en/releases/release-managers.md b/content/en/releases/release-managers.md index 34fab5552f9b5..4fb1ab78c4f88 100644 --- a/content/en/releases/release-managers.md +++ b/content/en/releases/release-managers.md @@ -16,7 +16,6 @@ The responsibilities of each role are described below. - [Becoming a Release Manager](#becoming-a-release-manager) - [Release Manager Associates](#release-manager-associates) - [Becoming a Release Manager Associate](#becoming-a-release-manager-associate) -- [Build Admins](#build-admins) - [SIG Release Leads](#sig-release-leads) - [Chairs](#chairs) - [Technical Leads](#technical-leads) @@ -25,13 +24,16 @@ The responsibilities of each role are described below. | Mailing List | Slack | Visibility | Usage | Membership | | --- | --- | --- | --- | --- | -| [release-managers@kubernetes.io](mailto:release-managers@kubernetes.io) | [#release-management](https://kubernetes.slack.com/messages/CJH2GBF7Y) (channel) / @release-managers (user group) | Public | Public discussion for Release Managers | All Release Managers (including Associates, Build Admins, and SIG Chairs) | +| [release-managers@kubernetes.io](mailto:release-managers@kubernetes.io) | [#release-management](https://kubernetes.slack.com/messages/CJH2GBF7Y) (channel) / @release-managers (user group) | Public | Public discussion for Release Managers | All Release Managers (including Associates, and SIG Chairs) | | [release-managers-private@kubernetes.io](mailto:release-managers-private@kubernetes.io) | N/A | Private | Private discussion for privileged Release Managers | Release Managers, SIG Release leadership | | [security-release-team@kubernetes.io](mailto:security-release-team@kubernetes.io) | [#security-release-team](https://kubernetes.slack.com/archives/G0162T1RYHG) (channel) / @security-rel-team (user group) | Private | Security release coordination with the Security Response Committee | [security-discuss-private@kubernetes.io](mailto:security-discuss-private@kubernetes.io), [release-managers-private@kubernetes.io](mailto:release-managers-private@kubernetes.io) | ### Security Embargo Policy -Some information about releases is subject to embargo and we have defined policy about how those embargoes are set. Please refer to the [Security Embargo Policy](https://github.com/kubernetes/committee-security-response/blob/main/private-distributors-list.md#embargo-policy) for more information. +Some information about releases is subject to embargo and we have defined policy about +how those embargoes are set. Please refer to the +[Security Embargo Policy](https://github.com/kubernetes/committee-security-response/blob/main/private-distributors-list.md#embargo-policy) +for more information. 
## Handbooks @@ -39,7 +41,6 @@ Some information about releases is subject to embargo and we have defined policy - [Patch Release Team][handbook-patch-release] - [Branch Managers][handbook-branch-mgmt] -- [Build Admins][handbook-packaging] ## Release Managers @@ -155,22 +156,6 @@ Contributors can become Associates by demonstrating the following: - these efforts require interacting and pairing with Release Managers and Associates -## Build Admins - -Build Admins are (currently) Google employees with the requisite access to -Google build systems/tooling to publish deb/rpm packages on behalf of the -Kubernetes project. They are responsible for: - -- Building, signing, and publishing the deb/rpm packages -- Being the interlock with Release Managers (and Associates) on the final steps -of each minor (1.Y) and patch (1.Y.Z) release - -GitHub team: [@kubernetes/build-admins](https://github.com/orgs/kubernetes/teams/build-admins) - -- Aaron Crickenberger ([@spiffxp](https://github.com/spiffxp)) -- Ben Kazemi ([@BenjaminKazemi](https://github.com/BenjaminKazemi)) -- Grant McCloskey ([@MushuEE](https://github.com/MushuEE)) - ## SIG Release Leads SIG Release Chairs and Technical Leads are responsible for: @@ -208,7 +193,6 @@ Example: [1.15 Release Team](https://git.k8s.io/sig-release/releases/release-1.1 [community-membership]: https://git.k8s.io/community/community-membership.md#member [handbook-branch-mgmt]: https://git.k8s.io/sig-release/release-engineering/role-handbooks/branch-manager.md -[handbook-packaging]: https://git.k8s.io/release/hack/rapture/README.md [handbook-patch-release]: https://git.k8s.io/sig-release/release-engineering/role-handbooks/patch-release-team.md [k-sig-release-releases]: https://git.k8s.io/sig-release/releases [patches]: /releases/patch-releases/ diff --git a/content/en/releases/release.md b/content/en/releases/release.md index e0d21c0df16c7..ee1bf72b789d3 100644 --- a/content/en/releases/release.md +++ b/content/en/releases/release.md @@ -4,7 +4,7 @@ type: docs auto_generated: true --- - + {{< warning >}} This content is auto-generated and links may not function. The source of the document is located [here](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-release/release.md). diff --git a/content/en/releases/version-skew-policy.md b/content/en/releases/version-skew-policy.md index 7a9f1c753a1b2..7031402e5c356 100644 --- a/content/en/releases/version-skew-policy.md +++ b/content/en/releases/version-skew-policy.md @@ -20,13 +20,19 @@ Specific cluster deployment tools may place additional restrictions on version s ## Supported versions -Kubernetes versions are expressed as **x.y.z**, where **x** is the major version, **y** is the minor version, and **z** is the patch version, following [Semantic Versioning](https://semver.org/) terminology. -For more information, see [Kubernetes Release Versioning](https://git.k8s.io/sig-release/release-engineering/versioning.md#kubernetes-release-versioning). +Kubernetes versions are expressed as **x.y.z**, where **x** is the major version, +**y** is the minor version, and **z** is the patch version, following +[Semantic Versioning](https://semver.org/) terminology. For more information, see +[Kubernetes Release Versioning](https://git.k8s.io/sig-release/release-engineering/versioning.md#kubernetes-release-versioning). -The Kubernetes project maintains release branches for the most recent three minor releases ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). 
Kubernetes 1.19 and newer receive [approximately 1 year of patch support](/releases/patch-releases/#support-period). Kubernetes 1.18 and older received approximately 9 months of patch support. +The Kubernetes project maintains release branches for the most recent three minor releases +({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). +Kubernetes 1.19 and newer receive [approximately 1 year of patch support](/releases/patch-releases/#support-period). +Kubernetes 1.18 and older received approximately 9 months of patch support. -Applicable fixes, including security fixes, may be backported to those three release branches, depending on severity and feasibility. -Patch releases are cut from those branches at a [regular cadence](/releases/patch-releases/#cadence), plus additional urgent releases, when required. +Applicable fixes, including security fixes, may be backported to those three release branches, +depending on severity and feasibility. Patch releases are cut from those branches at a +[regular cadence](/releases/patch-releases/#cadence), plus additional urgent releases, when required. The [Release Managers](/releases/release-managers/) group owns this decision. @@ -36,7 +42,8 @@ For more information, see the Kubernetes [patch releases](/releases/patch-releas ### kube-apiserver -In [highly-available (HA) clusters](/docs/setup/production-environment/tools/kubeadm/high-availability/), the newest and oldest `kube-apiserver` instances must be within one minor version. +In [highly-available (HA) clusters](/docs/setup/production-environment/tools/kubeadm/high-availability/), +the newest and oldest `kube-apiserver` instances must be within one minor version. Example: @@ -51,7 +58,8 @@ Example: Example: * `kube-apiserver` is at **{{< skew currentVersion >}}** -* `kubelet` is supported at **{{< skew currentVersion >}}**, **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** +* `kubelet` is supported at **{{< skew currentVersion >}}**, **{{< skew currentVersionAddMinor -1 >}}**, + **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** {{< note >}} If version skew exists between `kube-apiserver` instances in an HA cluster, this narrows the allowed `kubelet` versions. @@ -60,18 +68,24 @@ If version skew exists between `kube-apiserver` instances in an HA cluster, this Example: * `kube-apiserver` instances are at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** -* `kubelet` is supported at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** (**{{< skew currentVersion >}}** is not supported because that would be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) +* `kubelet` is supported at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, + and **{{< skew currentVersionAddMinor -3 >}}** (**{{< skew currentVersion >}}** is not supported because that + would be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) ### kube-proxy * `kube-proxy` must not be newer than `kube-apiserver`. -* `kube-proxy` may be up to three minor versions older than `kube-apiserver` (`kube-proxy` < 1.25 may only be up to two minor versions older than `kube-apiserver`). 
-* `kube-proxy` may be up to three minor versions older or newer than the `kubelet` instance it runs alongside (`kube-proxy` < 1.25 may only be up to two minor versions older or newer than the `kubelet` instance it runs alongside). +* `kube-proxy` may be up to three minor versions older than `kube-apiserver` + (`kube-proxy` < 1.25 may only be up to two minor versions older than `kube-apiserver`). +* `kube-proxy` may be up to three minor versions older or newer than the `kubelet` instance + it runs alongside (`kube-proxy` < 1.25 may only be up to two minor versions older or newer + than the `kubelet` instance it runs alongside). Example: * `kube-apiserver` is at **{{< skew currentVersion >}}** -* `kube-proxy` is supported at **{{< skew currentVersion >}}**, **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** +* `kube-proxy` is supported at **{{< skew currentVersion >}}**, **{{< skew currentVersionAddMinor -1 >}}**, + **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** {{< note >}} If version skew exists between `kube-apiserver` instances in an HA cluster, this narrows the allowed `kube-proxy` versions. @@ -80,26 +94,36 @@ If version skew exists between `kube-apiserver` instances in an HA cluster, this Example: * `kube-apiserver` instances are at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** -* `kube-proxy` is supported at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, and **{{< skew currentVersionAddMinor -3 >}}** (**{{< skew currentVersion >}}** is not supported because that would be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) +* `kube-proxy` is supported at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, + and **{{< skew currentVersionAddMinor -3 >}}** (**{{< skew currentVersion >}}** is not supported because that would + be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) ### kube-controller-manager, kube-scheduler, and cloud-controller-manager -`kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` must not be newer than the `kube-apiserver` instances they communicate with. They are expected to match the `kube-apiserver` minor version, but may be up to one minor version older (to allow live upgrades). +`kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` must not be newer than the +`kube-apiserver` instances they communicate with. They are expected to match the `kube-apiserver` minor version, +but may be up to one minor version older (to allow live upgrades). Example: * `kube-apiserver` is at **{{< skew currentVersion >}}** -* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported + at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** {{< note >}} -If version skew exists between `kube-apiserver` instances in an HA cluster, and these components can communicate with any `kube-apiserver` instance in the cluster (for example, via a load balancer), this narrows the allowed versions of these components. 
+If version skew exists between `kube-apiserver` instances in an HA cluster, and these components +can communicate with any `kube-apiserver` instance in the cluster (for example, via a load balancer), +this narrows the allowed versions of these components. {{< /note >}} Example: * `kube-apiserver` instances are at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** -* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` communicate with a load balancer that can route to any `kube-apiserver` instance -* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported at **{{< skew currentVersionAddMinor -1 >}}** (**{{< skew currentVersion >}}** is not supported because that would be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` communicate with a load balancer + that can route to any `kube-apiserver` instance +* `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` are supported at + **{{< skew currentVersionAddMinor -1 >}}** (**{{< skew currentVersion >}}** is not supported + because that would be newer than the `kube-apiserver` instance at version **{{< skew currentVersionAddMinor -1 >}}**) ### kubectl @@ -108,7 +132,8 @@ Example: Example: * `kube-apiserver` is at **{{< skew currentVersion >}}** -* `kubectl` is supported at **{{< skew currentVersionAddMinor 1 >}}**, **{{< skew currentVersion >}}**, and **{{< skew currentVersionAddMinor -1 >}}** +* `kubectl` is supported at **{{< skew currentVersionAddMinor 1 >}}**, **{{< skew currentVersion >}}**, + and **{{< skew currentVersionAddMinor -1 >}}** {{< note >}} If version skew exists between `kube-apiserver` instances in an HA cluster, this narrows the supported `kubectl` versions. @@ -117,21 +142,24 @@ If version skew exists between `kube-apiserver` instances in an HA cluster, this Example: * `kube-apiserver` instances are at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** -* `kubectl` is supported at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** (other versions would be more than one minor version skewed from one of the `kube-apiserver` components) +* `kubectl` is supported at **{{< skew currentVersion >}}** and **{{< skew currentVersionAddMinor -1 >}}** + (other versions would be more than one minor version skewed from one of the `kube-apiserver` components) ## Supported component upgrade order -The supported version skew between components has implications on the order in which components must be upgraded. -This section describes the order in which components must be upgraded to transition an existing cluster from version **{{< skew currentVersionAddMinor -1 >}}** to version **{{< skew currentVersion >}}**. +The supported version skew between components has implications on the order +in which components must be upgraded. This section describes the order in +which components must be upgraded to transition an existing cluster from version +**{{< skew currentVersionAddMinor -1 >}}** to version **{{< skew currentVersion >}}**. Optionally, when preparing to upgrade, the Kubernetes project recommends that you do the following to benefit from as many regression and bug fixes as -possible during your upgrade: +possible during your upgrade: -* Ensure that components are on the most recent patch version of your current - minor version. 
-* Upgrade components to the most recent patch version of the target minor - version. +* Ensure that components are on the most recent patch version of your current + minor version. +* Upgrade components to the most recent patch version of the target minor + version. For example, if you're running version {{}}, ensure that you're on the most recent patch version. Then, upgrade to the most @@ -142,12 +170,19 @@ recent patch version of {{}}. Pre-requisites: * In a single-instance cluster, the existing `kube-apiserver` instance is **{{< skew currentVersionAddMinor -1 >}}** -* In an HA cluster, all `kube-apiserver` instances are at **{{< skew currentVersionAddMinor -1 >}}** or **{{< skew currentVersion >}}** (this ensures maximum skew of 1 minor version between the oldest and newest `kube-apiserver` instance) -* The `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` instances that communicate with this server are at version **{{< skew currentVersionAddMinor -1 >}}** (this ensures they are not newer than the existing API server version, and are within 1 minor version of the new API server version) -* `kubelet` instances on all nodes are at version **{{< skew currentVersionAddMinor -1 >}}** or **{{< skew currentVersionAddMinor -2 >}}** (this ensures they are not newer than the existing API server version, and are within 2 minor versions of the new API server version) +* In an HA cluster, all `kube-apiserver` instances are at **{{< skew currentVersionAddMinor -1 >}}** or + **{{< skew currentVersion >}}** (this ensures maximum skew of 1 minor version between the oldest and newest `kube-apiserver` instance) +* The `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` instances that + communicate with this server are at version **{{< skew currentVersionAddMinor -1 >}}** + (this ensures they are not newer than the existing API server version, and are within 1 minor version of the new API server version) +* `kubelet` instances on all nodes are at version **{{< skew currentVersionAddMinor -1 >}}** or **{{< skew currentVersionAddMinor -2 >}}** + (this ensures they are not newer than the existing API server version, and are within 2 minor versions of the new API server version) * Registered admission webhooks are able to handle the data the new `kube-apiserver` instance will send them: - * `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` objects are updated to include any new versions of REST resources added in **{{< skew currentVersion >}}** (or use the [`matchPolicy: Equivalent` option](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) available in v1.15+) - * The webhooks are able to handle any new versions of REST resources that will be sent to them, and any new fields added to existing versions in **{{< skew currentVersion >}}** + * `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration` objects are updated to include + any new versions of REST resources added in **{{< skew currentVersion >}}** + (or use the [`matchPolicy: Equivalent` option](/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy) available in v1.15+) + * The webhooks are able to handle any new versions of REST resources that will be sent to them, + and any new fields added to existing versions in **{{< skew currentVersion >}}** Upgrade `kube-apiserver` to **{{< skew currentVersion >}}** @@ -161,7 +196,9 @@ require `kube-apiserver` to not skip minor versions when 
 upgrading, even in single-instance clusters.
 
 Pre-requisites:
 
-* The `kube-apiserver` instances these components communicate with are at **{{< skew currentVersion >}}** (in HA clusters in which these control plane components can communicate with any `kube-apiserver` instance in the cluster, all `kube-apiserver` instances must be upgraded before upgrading these components)
+* The `kube-apiserver` instances these components communicate with are at **{{< skew currentVersion >}}**
+  (in HA clusters in which these control plane components can communicate with any `kube-apiserver`
+  instance in the cluster, all `kube-apiserver` instances must be upgraded before upgrading these components)
 
 Upgrade `kube-controller-manager`, `kube-scheduler`, and `cloud-controller-manager` to **{{< skew currentVersion >}}**. There is no
@@ -175,7 +212,8 @@
 Pre-requisites:
 
 * The `kube-apiserver` instances the `kubelet` communicates with are at **{{< skew currentVersion >}}**
 
-Optionally upgrade `kubelet` instances to **{{< skew currentVersion >}}** (or they can be left at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, or **{{< skew currentVersionAddMinor -3 >}}**)
+Optionally upgrade `kubelet` instances to **{{< skew currentVersion >}}** (or they can be left at
+**{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, or **{{< skew currentVersionAddMinor -3 >}}**)
 
 {{< note >}}
 Before performing a minor version `kubelet` upgrade, [drain](/docs/tasks/administer-cluster/safely-drain-node/) pods from that node.
@@ -183,7 +221,8 @@
 In-place minor version `kubelet` upgrades are not supported.
 {{< /note >}}
 
 {{< warning >}}
-Running a cluster with `kubelet` instances that are persistently three minor versions behind `kube-apiserver` means they must be upgraded before the control plane can be upgraded.
+Running a cluster with `kubelet` instances that are persistently three minor versions behind
+`kube-apiserver` means they must be upgraded before the control plane can be upgraded.
 {{< /warning >}}
 
 ### kube-proxy
@@ -192,8 +231,11 @@
 Pre-requisites:
 
 * The `kube-apiserver` instances `kube-proxy` communicates with are at **{{< skew currentVersion >}}**
 
-Optionally upgrade `kube-proxy` instances to **{{< skew currentVersion >}}** (or they can be left at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**, or **{{< skew currentVersionAddMinor -3 >}}**)
+Optionally upgrade `kube-proxy` instances to **{{< skew currentVersion >}}**
+(or they can be left at **{{< skew currentVersionAddMinor -1 >}}**, **{{< skew currentVersionAddMinor -2 >}}**,
+or **{{< skew currentVersionAddMinor -3 >}}**)
 
 {{< warning >}}
-Running a cluster with `kube-proxy` instances that are persistently three minor versions behind `kube-apiserver` means they must be upgraded before the control plane can be upgraded.
+Running a cluster with `kube-proxy` instances that are persistently three minor versions behind
+`kube-apiserver` means they must be upgraded before the control plane can be upgraded.
 {{< /warning >}}

diff --git a/content/en/training/_index.html b/content/en/training/_index.html
index 74880486a6bf2..28fe46cfc0c43 100644
--- a/content/en/training/_index.html
+++ b/content/en/training/_index.html
@@ -14,17 +14,22 @@

      Build your cloud native career

      Kubernetes is at the core of the cloud native movement. Training and certifications from the Linux Foundation and our training partners let you invest in your career, learn Kubernetes, and make your cloud native projects successful.

      @@ -93,6 +98,16 @@

      Go to Certification
      + Kubernetes and Cloud Native Security Associate (KCSA)

      The KCSA is a pre-professional certification designed for candidates interested in advancing to the professional level through a demonstrated understanding of foundational knowledge and skills of security technologies in the cloud native ecosystem.


      A certified KCSA will confirm an understanding of the baseline security configuration of Kubernetes clusters to meet compliance objectives.

      + Go to Certification
      Certified Kubernetes Application Developer (CKAD)
@@ -106,6 +121,7 @@
      Certified Kubernetes Administrator (CKA)

      The Certified Kubernetes Administrator (CKA) program provides assurance that CKAs have the skills, knowledge, and competency to perform the responsibilities of Kubernetes administrators.

      A certified Kubernetes administrator has demonstrated the ability to do basic installation as well as configuring and managing production-grade Kubernetes clusters.


      @@ -115,6 +131,7 @@
      Certified Kubernetes Security Specialist (CKS)

      The Certified Kubernetes Security Specialist program provides assurance that the holder is comfortable and competent with a broad range of best practices. CKS certification covers skills for securing container-based applications and Kubernetes platforms during build, deployment and runtime.

      Candidates for CKS must hold a current Certified Kubernetes Administrator (CKA) certification to demonstrate they possess sufficient Kubernetes expertise before sitting for the CKS.


      diff --git a/content/es/_index.html b/content/es/_index.html index 1a5d8d06cdf5c..24acdd9cc9dbb 100644 --- a/content/es/_index.html +++ b/content/es/_index.html @@ -4,6 +4,8 @@ cid: home --- +{{< site-searchbar >}} + {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} ### Kubernetes (K8s) es una plataforma de código abierto para automatizar la implementación, el escalado y la administración de aplicaciones en contenedores. @@ -41,12 +43,12 @@

      El desafío de migrar más de 150 microservicios a Kubernetes



      - Asista a la KubeCon en Norte América del 24 al 28 de Octubre 2022
      + Asiste a la KubeCon en Europa del 19 al 22 de Marzo 2024



      - Asista a la KubeCon en Europa del 17 al 21 de Abril 2023
      + Asiste a la KubeCon en Norteamérica del 12 al 15 de Noviembre 2024
      diff --git a/content/es/docs/concepts/storage/ephemeral-volumes.md b/content/es/docs/concepts/storage/ephemeral-volumes.md new file mode 100644 index 0000000000000..743d773ba8e54 --- /dev/null +++ b/content/es/docs/concepts/storage/ephemeral-volumes.md @@ -0,0 +1,189 @@ +--- +reviewers: + - ramrodo + - krol3 + - electrocucaracha +title: Volúmenes efímeros +content_type: concept +weight: 30 +--- + + + +Este documento describe _volúmenes efímeros_ en Kubernetes. Se sugiere tener conocimiento previo sobre [volúmenes](/docs/concepts/storage/volumes/), en particular PersistentVolumeClaim y PersistentVolume. + + + +Algunas aplicaciones requieren almacenamiento adicional, pero no les preocupa si esos datos se almacenan de manera persistente entre reinicios. Por ejemplo, los servicios de caché a menudo tienen limitaciones de tamaño de memoria y pueden trasladar datos poco utilizados a un almacenamiento más lento que la memoria, con un impacto mínimo en el rendimiento general. + +Otras aplicaciones esperan que algunos datos de entrada de solo lectura estén presentes en archivos, como datos de configuración o claves secretas. + +Los _volúmenes efímeros_ están diseñados para estos casos de uso. Debido a que los volúmenes siguen el ciclo de vida del Pod y se crean y eliminan junto con el Pod, los Pods pueden detenerse y reiniciarse sin estar limitados a la disponibilidad de algún volumen persistente. + +Los volúmenes efímeros se especifican _en línea_ en la especificación del Pod, lo que simplifica la implementación y gestión de aplicaciones. + +### Tipos de volúmenes efímeros + +Kubernetes admite varios tipos diferentes de volúmenes efímeros para diversos propósitos: + +- [emptyDir](/docs/concepts/storage/volumes/#emptydir): vacíos al inicio del Pod, con el almacenamiento proveniente localmente del directorio base de kubelet (generalmente el disco raíz) o la RAM. +- [configMap](/docs/concepts/storage/volumes/#configmap), + [downwardAPI](/docs/concepts/storage/volumes/#downwardapi), + [secret](/docs/concepts/storage/volumes/#secret): inyectar diferentes tipos de datos de Kubernetes en un Pod. + +- [CSI volúmenes efímeros](#csi-ephemeral-volumes): + Similar a los tipos de volumen anteriores, pero proporcionados por controladores especiales {{< glossary_tooltip text="CSI" term_id="csi" >}} que [soportan específicamente esta característica](https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html) +- [volúmenes efímeros genéricos](#generic-ephemeral-volumes), que pueden proporcionar todos los controladores de almacenamiento que también admiten volúmenes persistentes + +`emptyDir`, `configMap`, `downwardAPI`, `secret` se proporcionan como [almacenamiento efímero local](/docs/concepts/configuration/manage-resources-containers/#local-ephemeral-storage). +Ellos son administrados por kubelet en cada nodo. + +Los volúmenes efímeros CSI _deben_ ser proporcionados por controladores de almacenamiento CSI de terceros. + +Los volúmenes efímeros genéricos _pueden_ ser proporcionados por controladores de almacenamiento CSI de terceros, pero también por cualquier otro controlador de almacenamiento que admita la provisión dinámica. Algunos controladores CSI están escritos específicamente para volúmenes efímeros CSI y no admiten la provisión dinámica; por lo tanto, no se pueden utilizar para volúmenes efímeros genéricos. 
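A modo de referencia rápida, antes de pasar a los controladores de terceros, aquí va un boceto mínimo (con nombres hipotéticos) de la variante más simple gestionada por kubelet, un volumen `emptyDir`: el directorio nace vacío con el Pod y se elimina junto con él.

```bash
# Boceto mínimo (nombres hipotéticos): un Pod con un volumen emptyDir.
# El almacenamiento proviene del nodo y sigue el ciclo de vida del Pod.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: cache-demo
spec:
  containers:
  - name: app
    image: busybox:1.28
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - mountPath: /cache
      name: cache-vol
  volumes:
  - name: cache-vol
    emptyDir:
      sizeLimit: 500Mi
EOF
```

Al eliminar el Pod con `kubectl delete pod cache-demo`, el contenido de `/cache` se pierde; esa es exactamente la semántica efímera que describe esta página.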
+ +La ventaja de utilizar controladores de terceros es que pueden ofrecer funcionalidades que Kubernetes en sí mismo no admite, como el almacenamiento con características de rendimiento diferentes al disco gestionado por kubelet o la inyección de datos diversos. + +### Volúmenes efímeros de CSI + +{{< feature-state for_k8s_version="v1.25" state="stable" >}} + +{{< note >}} +Los volúmenes efímeros CSI solo son compatibles con un subconjunto de controladores CSI. +La [lista de controladores](https://kubernetes-csi.github.io/docs/drivers.html) CSI de Kubernetes muestra cuáles controladores admiten volúmenes efímeros. +{{< /note >}} +Conceptualmente, los volúmenes efímeros CSI son similares a los tipos de volumen `configMap`, +`downwardAPI` y `secret`: el almacenamiento se gestiona localmente en cada nodo y se crea junto con otros recursos locales después de que un Pod ha sido programado en un nodo. Kubernetes ya no tiene ningún concepto de reprogramación de Pods en esta etapa. La creación de volúmenes debe ser poco propensa a fallos, +de lo contrario, el inicio del Pod queda atascado. En particular, [la programación de Pods con conciencia de la capacidad de almacenamiento](/docs/concepts/storage/storage-capacity/) _no_ está admitida para estos volúmenes. Actualmente, tampoco están cubiertos por los límites de uso de recursos de almacenamiento de un Pod, porque eso es algo que kubelet solo puede aplicar para el almacenamiento que administra él mismo. + +Aquí tienes un ejemplo de manifiesto para un Pod que utiliza almacenamiento efímero CSI: + +```yaml +kind: Pod +apiVersion: v1 +metadata: + name: my-csi-app +spec: + containers: + - name: my-frontend + image: busybox:1.28 + volumeMounts: + - mountPath: "/data" + name: my-csi-inline-vol + command: ["sleep", "1000000"] + volumes: + - name: my-csi-inline-vol + csi: + driver: inline.storage.kubernetes.io + volumeAttributes: + foo: bar +``` + +Los `volumeAttributes` determinan qué volumen es preparado por el controlador. Estos atributos son específicos de cada controlador y no están estandarizados. Consulta la documentación de cada controlador CSI para obtener instrucciones adicionales. + +### Restricciones del conductor CSI + +Los volúmenes efímeros CSI permiten a los usuarios proporcionar `volumeAttributes` directamente al controlador CSI como parte de la especificación del Pod. Un controlador CSI que permite `volumeAttributes` que normalmente están restringidos a administradores NO es adecuado para su uso en un volumen efímero en línea. Por ejemplo, los parámetros que normalmente se definen en la clase de almacenamiento no deben estar expuestos a los usuarios a través del uso de volúmenes efímeros en línea. + +Los administradores del clúster que necesiten restringir los controladores CSI que se pueden utilizar como volúmenes en línea dentro de una especificación de Pod pueden hacerlo mediante: + +- Eliminar `Ephemeral` de `volumeLifecycleModes` en la especificación de CSIDriver, lo que evita que los controladores CSI admitan volúmenes efímeros en línea. + +- Usando un [webhook de admisión](/docs/reference/access-authn-authz/extensible-admission-controllers/) + para restringir el uso de este controlador. + +### Volúmenes efímeros genéricos + +{{< feature-state for_k8s_version="v1.23" state="stable" >}} + +Los volúmenes efímeros genéricos son similares a los volúmenes `emptyDir` en el sentido de que proporcionan un directorio por Pod para datos temporales que generalmente está vacío después de la provisión. 
Pero también pueden tener características adicionales: + +- El almacenamiento puede ser local o conectado a la red. +- Los volúmenes pueden tener un tamaño fijo que los Pods no pueden exceder. +- Los volúmenes pueden tener algunos datos iniciales, dependiendo del controlador y los parámetros. +- Se admiten operaciones típicas en los volúmenes, siempre que el controlador las soporte, incluyendo + [instantáneas](/docs/concepts/storage/volume-snapshots/), + [clonación](/docs/concepts/storage/volume-pvc-datasource/), + [cambiar el tamaño](/docs/concepts/storage/persistent-volumes/#expanding-persistent-volumes-claims), + y [seguimiento de la capacidad de almacenamiento](/docs/concepts/storage/storage-capacity/). + +Ejemplo: + +```yaml +kind: Pod +apiVersion: v1 +metadata: + name: my-app +spec: + containers: + - name: my-frontend + image: busybox:1.28 + volumeMounts: + - mountPath: "/scratch" + name: scratch-volume + command: ["sleep", "1000000"] + volumes: + - name: scratch-volume + ephemeral: + volumeClaimTemplate: + metadata: + labels: + type: my-frontend-volume + spec: + accessModes: ["ReadWriteOnce"] + storageClassName: "scratch-storage-class" + resources: + requests: + storage: 1Gi +``` + +### Ciclo de vida y reclamo de volumen persistente + +La idea clave de diseño es que los [parámetros para una solicitud de volumen](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#ephemeralvolumesource-v1-core) +se permiten dentro de una fuente de volumen del Pod. Se admiten etiquetas, anotaciones y +todo el conjunto de campos para una PersistentVolumeClaim. Cuando se crea un Pod de este tipo, el controlador de volúmenes efímeros crea entonces un objeto PersistentVolumeClaim real en el mismo espacio de nombres que el Pod y asegura que la PersistentVolumeClaim +se elimine cuando se elimina el Pod. + +Eso desencadena la vinculación y/o aprovisionamiento de volúmenes, ya sea de inmediato si el {{< glossary_tooltip text="StorageClass" term_id="storage-class" >}} utiliza la vinculación inmediata de volúmenes o cuando el Pod está programado provisionalmente en un nodo (modo de vinculación de volumen `WaitForFirstConsumer`). Este último se recomienda para volúmenes efímeros genéricos, ya que permite al planificador elegir libremente un nodo adecuado para el Pod. Con la vinculación inmediata, el planificador está obligado a seleccionar un nodo que tenga acceso al volumen una vez que esté disponible. + +En términos de [propiedad de recursos](/docs/concepts/architecture/garbage-collection/#owners-dependents), +un Pod que tiene almacenamiento efímero genérico es el propietario de la PersistentVolumeClaim(s) que proporciona ese almacenamiento efímero. Cuando se elimina el Pod, el recolector de basura de Kubernetes elimina la PVC, lo que suele desencadenar la eliminación del volumen, ya que la política de recuperación predeterminada de las clases de almacenamiento es eliminar los volúmenes. +Puedes crear almacenamiento local cuasi-efímero utilizando una StorageClass con una política de recuperación de `retain`: el almacenamiento sobrevive al Pod y, en este caso, debes asegurarte de que la limpieza del volumen se realice por separado. + +Mientras estas PVC existen, pueden usarse como cualquier otra PVC. En particular, pueden ser referenciadas como fuente de datos en la clonación o creación de instantáneas de volúmenes. El objeto PVC también contiene el estado actual del volumen. + +### Nomenclatura de PersistentVolumeClaim. 
+ +La nomenclatura de las PVC creadas automáticamente es determinista: el nombre es una combinación del nombre del Pod y el nombre del volumen, con un guion medio (`-`) en el medio. En el ejemplo anterior, el nombre de la PVC será `my-app-scratch-volume`. Esta nomenclatura determinista facilita la interacción con la PVC, ya que no es necesario buscarla una vez que se conocen el nombre del Pod y el nombre del volumen. + +La nomenclatura determinista también introduce un posible conflicto entre diferentes Pods (un Pod "pod-a" con el volumen "scratch" y otro Pod con nombre "pod" y volumen "a-scratch" terminan teniendo el mismo nombre de PVC "pod-a-scratch") y entre Pods y PVCs creadas manualmente. + +Estos conflictos se detectan: una PVC solo se utiliza para un volumen efímero si se creó para el Pod. Esta comprobación se basa en la relación de propiedad. Una PVC existente no se sobrescribe ni se modifica. Pero esto no resuelve el conflicto, ya que sin la PVC adecuada, el Pod no puede iniciarse. + +{{< caution >}} +Ten cuidado al nombrar Pods y volúmenes dentro del mismo espacio de nombres para evitar que se produzcan estos conflictos. +{{< /caution >}} + +### Seguridad + +El uso de volúmenes efímeros genéricos permite a los usuarios crear PVC de forma indirecta si pueden crear Pods, incluso si no tienen permiso para crear PVC directamente. Los administradores del clúster deben ser conscientes de esto. Si esto no encaja en su modelo de seguridad, deberían utilizar un [webhook de admisión](/docs/reference/access-authn-authz/extensible-admission-controllers/) que rechace objetos como Pods que tienen un volumen efímero genérico. + +La cuota normal del [espacio de nombres para PVC](/docs/concepts/policy/resource-quotas/#storage-resource-quota) sigue aplicándose, por lo que incluso si a los usuarios se les permite utilizar este nuevo mecanismo, no pueden utilizarlo para eludir otras políticas. + +## {{% heading "whatsnext" %}} + +### Volúmenes efímeros gestionados por kubelet + +Ver [almacenamiento efímero local](/docs/concepts/configuration/manage-resources-containers/#local-ephemeral-storage). + +### Volúmenes efímeros de CSI + +- Para obtener más información sobre el diseño, consulta el + [KEP de Volúmenes efímeros en línea de CSI](https://github.com/kubernetes/enhancements/blob/ad6021b3d61a49040a3f835e12c8bb5424db2bbb/keps/sig-storage/20190122-csi-inline-volumes.md). +- Para obtener más información sobre el desarrollo futuro de esta función, consulte el + [problema de seguimiento de mejoras #596](https://github.com/kubernetes/enhancements/issues/596). + +### Volúmenes efímeros genéricos + +- Para obtener más información sobre el diseño, consulta el + [KEP de Volúmenes efímeros genéricos en línea](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/1698-generic-ephemeral-volumes/README.md). diff --git a/content/es/docs/concepts/storage/projected-volumes.md b/content/es/docs/concepts/storage/projected-volumes.md new file mode 100644 index 0000000000000..1540d5df97b54 --- /dev/null +++ b/content/es/docs/concepts/storage/projected-volumes.md @@ -0,0 +1,119 @@ +--- +reviewers: + - ramrodo + - krol3 + - electrocucaracha +title: Volúmenes proyectados +content_type: concept +weight: 21 # just after persistent volumes +--- + + + +Este documento describe los _volúmenes proyectados_ en Kubernetes. Necesita estar familiarizado con [volúmenes](/es/docs/concepts/storage/volumes/). 
+ + + +## Introducción + +Un volumen `proyectado` asigna varias fuentes de volúmenes existentes al mismo directorio. + +Actualmente se pueden proyectar los siguientes tipos de fuentes de volumen: + +- [`secret`](/es/docs/concepts/storage/volumes/#secret) +- [`downwardAPI`](/es/docs/concepts/storage/volumes/#downwardapi) +- [`configMap`](/es/docs/concepts/storage/volumes/#configmap) +- [`serviceAccountToken`](#serviceaccounttoken) + +Se requiere que todas las fuentes estén en el mismo espacio de nombres que el Pod. Para más detalles, +vea el documento de diseño [all-in-one volume](https://git.k8s.io/design-proposals-archive/node/all-in-one-volume.md). + +### Configuración de ejemplo con un secreto, una downwardAPI y una configMap {#example-configuration-secret-downwardapi-configmap} + +{{% code_sample file="pods/storage/projected-secret-downwardapi-configmap.yaml" %}} + +### Configuración de ejemplo: secretos con un modo de permiso no predeterminado establecido {#example-configuration-secrets-nondefault-permission-mode} + +{{% code_sample file="pods/storage/projected-secrets-nondefault-permission-mode.yaml" %}} + +Cada fuente de volumen proyectada aparece en la especificación bajo `sources`. Los parámetros son casi los mismos con dos excepciones: + +- Para los secretos, el campo `secretName` se ha cambiado a `name` para que sea coherente con el nombre de ConfigMap. +- El `defaultMode` solo se puede especificar en el nivel proyectado y no para cada fuente de volumen. Sin embargo, como se ilustra arriba, puede configurar explícitamente el `mode` para cada proyección individual. + +## Volúmenes proyectados de serviceAccountToken {#serviceaccounttoken} + +Puede inyectar el token para la [service account](/docs/reference/access-authn-authz/authentication/#service-account-tokens) actual +en un Pod en una ruta especificada. Por ejemplo: + +{{% code_sample file="pods/storage/projected-service-account-token.yaml" %}} + +El Pod de ejemplo tiene un volumen proyectado que contiene el token de cuenta de servicio inyectado. +Los contenedores en este Pod pueden usar ese token para acceder al servidor API de Kubernetes, autenticándose con la identidad de [the pod's ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/). + +El campo `audience` contiene la audiencia prevista del +token. Un destinatario del token debe identificarse con un identificador especificado en la audiencia del token y, de lo contrario, debe rechazar el token. Este campo es opcional y de forma predeterminada es el identificador del servidor API. + +The `expirationSeconds` es la duración esperada de validez del token de la cuenta de servicio. El valor predeterminado es 1 hora y debe durar al menos 10 minutos (600 segundos). +Un administrador +también puede limitar su valor máximo especificando la opción `--service-account-max-token-expiration` +para el servidor API. El campo `path` especifica una ruta relativa al punto de montaje del volumen proyectado. + +{{< note >}} +Un contenedor que utiliza una fuente de volumen proyectada como montaje de volumen [`subPath`](/docs/concepts/storage/volumes/#using-subpath) +no recibirá actualizaciones para esas fuentes de volumen. 
+{{< /note >}} + +## Interacciones SecurityContext + +La [propuesta](https://git.k8s.io/enhancements/keps/sig-storage/2451-service-account-token-volumes#proposal) para el manejo de permisos de archivos en la mejora del volumen de cuentas de servicio proyectadas introdujo los archivos proyectados que tienen los permisos de propietario correctos establecidos. + +### Linux + +En los pods de Linux que tienen un volumen proyectado y `RunAsUser` configurado en el Pod +[`SecurityContext`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context), +los archivos proyectados tienen la conjunto de propiedad correcto, incluida la propiedad del usuario del contenedor. + +Cuando todos los contenedores en un pod tienen el mismo `runAsUser` configurado en su +[`PodSecurityContext`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context) +o el contenedor +[`SecurityContext`](/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1), +entonces el kubelet garantiza que el contenido del volumen `serviceAccountToken` sea propiedad de ese usuario y que el archivo token tenga su modo de permiso establecido en `0600`. + +{{< note >}} +{{< glossary_tooltip text="Ephemeral containers" term_id="ephemeral-container" >}} +agregado a un pod después de su creación _no_ cambia los permisos de volumen que se establecieron cuando se creó el pod. + +Si los permisos de volumen `serviceAccountToken` de un Pod se establecieron en `0600` porque todos los demás contenedores en el Pod tienen el mismo `runAsUser`, los contenedores efímeros deben usar el mismo `runAsUser` para poder leer el token. +{{< /note >}} + +### Windows + +En los pods de Windows que tienen un volumen proyectado y `RunAsUsername` configurado en el pod `SecurityContext`, la propiedad no se aplica debido a la forma en que se administran las cuentas de usuario en Windows. +Windows almacena y administra cuentas de grupos y usuarios locales en un archivo de base de datos llamado Administrador de cuentas de seguridad (SAM). +Cada contenedor mantiene su propia instancia de la base de datos SAM, de la cual el host no tiene visibilidad mientras el contenedor se está ejecutando. +Los contenedores de Windows están diseñados para ejecutar la parte del modo de usuario del sistema operativo de forma aislada del host, de ahí el mantenimiento de una base de datos SAM virtual. +Como resultado, el kubelet que se ejecuta en el host no tiene la capacidad de configurar dinámicamente la propiedad de los archivos del host para cuentas de contenedores virtualizados. Se recomienda que, si los archivos de la máquina host se van a compartir con el contenedor, se coloquen en su propio montaje de volumen fuera de `C:\`. 
+ +De forma predeterminada, los archivos proyectados tendrán la siguiente propiedad, como se muestra en un archivo de volumen proyectado de ejemplo: + +```powershell +PS C:\> Get-Acl C:\var\run\secrets\kubernetes.io\serviceaccount\..2021_08_31_22_22_18.318230061\ca.crt | Format-List + +Path : Microsoft.PowerShell.Core\FileSystem::C:\var\run\secrets\kubernetes.io\serviceaccount\..2021_08_31_22_22_18.318230061\ca.crt +Owner : BUILTIN\Administrators +Group : NT AUTHORITY\SYSTEM +Access : NT AUTHORITY\SYSTEM Allow FullControl + BUILTIN\Administrators Allow FullControl + BUILTIN\Users Allow ReadAndExecute, Synchronize +Audit : +Sddl : O:BAG:SYD:AI(A;ID;FA;;;SY)(A;ID;FA;;;BA)(A;ID;0x1200a9;;;BU) +``` + +Esto implica que todos los usuarios administradores como `ContainerAdministrator` tendrán acceso de lectura, escritura y ejecución, mientras que los usuarios que no sean administradores tendrán acceso de lectura y ejecución. + +{{< note >}} +En general, se desaconseja otorgar acceso al contenedor al host, ya que puede abrir la puerta a posibles vulnerabilidades de seguridad. + +Crear un Pod de Windows con `RunAsUser` en su `SecurityContext` dará como resultado que el Pod quede atascado en `ContainerCreating` para siempre. Por lo tanto, se recomienda no utilizar la opción `RunAsUser` exclusiva de Linux con Windows Pods. +{{< /note >}} diff --git a/content/es/docs/concepts/storage/volumes.md b/content/es/docs/concepts/storage/volumes.md index ac462bec7b130..e12e6bf25c996 100644 --- a/content/es/docs/concepts/storage/volumes.md +++ b/content/es/docs/concepts/storage/volumes.md @@ -203,7 +203,8 @@ metadata: spec: containers: - name: test - image: busybox + image: busybox:1.28 + command: ['sh', '-c', 'echo "The app is running!" && tail -f /dev/null'] volumeMounts: - name: config-vol mountPath: /etc/config diff --git a/content/es/docs/concepts/storage/windows-storage.md b/content/es/docs/concepts/storage/windows-storage.md new file mode 100644 index 0000000000000..6bc77b2982fa4 --- /dev/null +++ b/content/es/docs/concepts/storage/windows-storage.md @@ -0,0 +1,60 @@ +--- +reviewers: + - ramrodo + - krol3 + - electrocucaracha +title: Almacenamiento en Windows +content_type: concept +weight: 110 +--- + + + +Esta página proporciona una descripción general del almacenamiento específico para el sistema operativo Windows. + + + +## Almacenamiento persistente {#storage} + +Windows tiene un controlador de sistema de archivos en capas para montar las capas del contenedor y crear un sistema de archivos de copia basado en NTFS. Todas las rutas de archivos en el contenedor se resuelven únicamente dentro del contexto de ese contenedor. + +- Con Docker, los montajes de volumen solo pueden apuntar a un directorio en el contenedor y no a un archivo individual. Esta limitación no se aplica a containerd. + +- Los montajes de volumen no pueden proyectar archivos o directorios de vuelta al sistema de archivos del host. + +- No se admiten sistemas de archivos de solo lectura debido a que siempre se requiere acceso de escritura para el registro de Windows y la base de datos SAM. Sin embargo, se admiten volúmenes de solo lectura. + +- Las máscaras y permisos de usuario en los volúmenes no están disponibles. Debido a que la base de datos SAM no se comparte entre el host y el contenedor, no hay un mapeo entre ellos. Todos los permisos se resuelven dentro del contexto del contenedor. 
+ +Como resultado, las siguientes funcionalidades de almacenamiento no son compatibles en nodos de Windows: + +- Montajes de subruta de volumen: solo es posible montar el volumen completo en un contenedor de Windows +- Montaje de subruta de volumen para secretos +- Proyección de montaje en el host +- Sistema de archivos raíz de solo lectura (los volúmenes mapeados todavía admiten `readOnly`) +- Mapeo de dispositivos de bloque +- Memoria como medio de almacenamiento (por ejemplo, `emptyDir.medium` configurado como `Memory`) +- Características del sistema de archivos como uid/gid; permisos de sistema de archivos de Linux por usuario +- Configuración de [permisos de secretos con DefaultMode](/docs/tasks/inject-data-application/distribute-credentials-secure/#set-posix-permissions-for-secret-keys) (debido a la dependencia de UID/GID) +- Soporte de almacenamiento/volumen basado en NFS +- Ampliación del volumen montado (resizefs) + +Los {{< glossary_tooltip text="volúmenes" term_id="volume" >}} de Kubernetes habilitan la implementación de aplicaciones complejas, con requisitos de persistencia de datos y uso compartido de volúmenes de Pod, en Kubernetes. +La gestión de volúmenes persistentes asociados a un backend o protocolo de almacenamiento específico incluye acciones como la provisión/desprovisión/redimensión de volúmenes, la conexión/desconexión de un volumen de/para un nodo de Kubernetes, y el montaje/desmontaje de un volumen de/para contenedores individuales en un Pod que necesita persistir datos. + +Los componentes de gestión de volúmenes se envían como [plugin](/docs/concepts/storage/volumes/#volume-types) de volumen de Kubernetes. +Las siguiente variedad de clases de plugins de volumen de Kubernetes son compatibles en Windows: + +- [`FlexVolume plugins`](/docs/concepts/storage/volumes/#flexvolume) + + - Ten en cuenta que los FlexVolumes han sido descontinuados a partir de la versión 1.23. + +- [`CSI Plugins`](/docs/concepts/storage/volumes/#csi) + +##### Plugins de volumen incorporados + +Los siguientes plugins incorporados admiten almacenamiento persistente en nodos de Windows: + +- [`azureFile`](/docs/concepts/storage/volumes/#azurefile) +- [`gcePersistentDisk`](/docs/concepts/storage/volumes/#gcepersistentdisk) +- [`vsphereVolume`](/docs/concepts/storage/volumes/#vspherevolume) diff --git a/content/es/docs/concepts/workloads/controllers/statefulset.md b/content/es/docs/concepts/workloads/controllers/statefulset.md index 95e86a7a3f674..7ed24d2b7edaf 100644 --- a/content/es/docs/concepts/workloads/controllers/statefulset.md +++ b/content/es/docs/concepts/workloads/controllers/statefulset.md @@ -153,7 +153,7 @@ El valor de Cluster Domain se pondrá a `cluster.local` a menos que Kubernetes crea un [PersistentVolume](/docs/concepts/storage/persistent-volumes/) para cada VolumeClaimTemplate. En el ejemplo de nginx de arriba, cada Pod recibirá un único PersistentVolume -con una StorageClass igual a `my-storage-class` y 1 Gib de almacenamiento provisionado. Si no se indica ninguna StorageClass, +con una StorageClass igual a `my-storage-class` y 1 GiB de almacenamiento provisionado. Si no se indica ninguna StorageClass, entonces se usa la StorageClass por defecto. Cuando un Pod se (re)programa en un nodo, sus `volumeMounts` montan los PersistentVolumes asociados con sus PersistentVolume Claims. 
Nótese que los PersistentVolumes asociados con los diff --git a/content/es/docs/contribute/start.md b/content/es/docs/contribute/start.md index 1f16b2818cabf..644df273d3706 100644 --- a/content/es/docs/contribute/start.md +++ b/content/es/docs/contribute/start.md @@ -70,7 +70,7 @@ Cualquier persona con una cuenta de GitHub puede reportar una incidencia en la d - **En una página existente** - Si ves un problema en una página existente en la [documentación de Kuberenetes](/docs/) ve al final de la página y haz clic en el botón **Abrir un Issue**. Si no estas autenticado en GitHub, te pedirá que te identifiques y posteriormente un formulario de nueva incidencia aparecerá con contenido pre-cargado. + Si ves un problema en una página existente en la [documentación de Kubernetes](/docs/) ve al final de la página y haz clic en el botón **Abrir un Issue**. Si no estas autenticado en GitHub, te pedirá que te identifiques y posteriormente un formulario de nueva incidencia aparecerá con contenido pre-cargado. Utilizando formato Markdown completa todos los detalles que sea posible. En los lugares en que haya corchetes (`[ ]`) pon una `x` en medio de los corchetes para representar la elección de una opción. Si tienes una posible solución al problema añádela. diff --git a/content/es/docs/home/_index.md b/content/es/docs/home/_index.md index 56bb4ca94e121..5f845e74fbb1c 100644 --- a/content/es/docs/home/_index.md +++ b/content/es/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Home" +linkTitle: "Documentación" main_menu: true weight: 10 hide_feedback: true diff --git a/content/es/docs/tasks/run-application/delete-stateful-set.md b/content/es/docs/tasks/run-application/delete-stateful-set.md new file mode 100644 index 0000000000000..16cc02e689766 --- /dev/null +++ b/content/es/docs/tasks/run-application/delete-stateful-set.md @@ -0,0 +1,91 @@ +--- +title: Borrar un StatefulSet +content_type: task +weight: 60 +--- + + + +Esta página muestra cómo se debe eliminar un {{< glossary_tooltip term_id="StatefulSet" >}}. + +## {{% heading "prerequisites" %}} + +- Se asume que se tiene una aplicación del tipo StatefulSet corriendo en tu clúster. + + + +## Borrando un StatefulSet + +Se puede eliminar un StatefulSet de la misma manera que se eliminan el resto de los recursos en Kubernetes: +Usa el comando `kubectl delete` y especifica el StatefulSet, usando su nombre o el archivo con el que fue creado. + +```shell +kubectl delete -f +``` + +```shell +kubectl delete statefulsets +``` + +Puede suceder que necesites eliminar los servicios headless asociados después de eliminar el StatefulSet. + +```shell +kubectl delete service +``` + +Cuando se elimina un StatefulSet utilizando `kubectl`, el StatefulSet escala a 0. +Todos los Pods que eran parte de esta carga de trabajo son eliminados. Si tú quieres eliminar +solo el StatefulSet y no los Pods utiliza `--cascade=orphan`. Por ejemplo: + +```shell +kubectl delete -f --cascade=orphan +``` + +Agregando `--cascade=orphan` al comando `kubectl delete`, los Pods administrados por el StatefulSet +dejan de pertenecer al StatefulSet cuando es eliminado. Si los pods tienen una +etiqueta `app.kubernetes.io/name=MyApp` se los puede eliminar de la siguiente manera: + +```shell +kubectl delete pods -l app.kubernetes.io/name=MyApp +``` + +### Volúmenes Persistentes + +Eliminar los Pods de un StatefulSet no va a eliminar los volúmenes asociados. 
+Esto es para asegurar que se tiene una oportunidad de copiar los datos fuera del volumen +antes de eliminarlo. Borrar el PVC después de que los pods estén terminados puede disparar +la eliminación del Volumen Persistente que hay detrás dependiendo de la clase de almacenamiento +y la política de reclamo. Nunca debes asumir la capacidad de acceder a un volumen +después de la eliminación del claim. + +{{< note >}} +Ten cuidado al borrar un PVC ya que puede llevar una pérdida de datos. +{{< /note >}} + +### Eliminación completa de un StatefulSet + +Para eliminar todo en un StatefulSet, incluyendo los pods asociados, +se puede correr una serie de comandos similares a los siguientes: + +```shell +grace=$(kubectl get pods --template '{{.spec.terminationGracePeriodSeconds}}') +kubectl delete statefulset -l app.kubernetes.io/name=MyApp +sleep $grace +kubectl delete pvc -l app.kubernetes.io/name=MyApp + +``` + +En este ejemplo, los Pods tienen la etiqueta `app.kubernetes.io/name=MyApp`, +sustituye la misma por tu propia etiqueta. + +### Forzar la eliminación de los Pods de un StatefulSet + +Si encuentras algunos pods bloqueados en tu StatefulSet en el estado 'Terminating' +o 'Unknown' por un largo período de tiempo, puede ser que necesites intervenir +manualmente para forzar la eliminación de los pods del apiserver. +Ésta es una tarea potencialmente riesgosa. Visita [Forzar eliminación de Pods en StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/) +para más detalles. + +## {{% heading "whatsnext" %}} + +Aprende más sobre [Forzar eliminación de Pods en StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/). diff --git a/content/es/docs/tasks/run-application/scale-stateful-set.md b/content/es/docs/tasks/run-application/scale-stateful-set.md new file mode 100644 index 0000000000000..b28adca3dc183 --- /dev/null +++ b/content/es/docs/tasks/run-application/scale-stateful-set.md @@ -0,0 +1,92 @@ +--- +title: Escalar un StatefulSet +content_type: task +weight: 50 +--- + + + + +Esta página muestra cómo escalar un StatefulSet. Escalar un StatefulSet es +incrementar o decrementar el número de réplicas. + +## {{% heading "prerequisites" %}} + +- Los StatefulSets están solamente disponibles en Kubernetes 1.5 o posterior. + Para verificar su versión de Kubernetes puede ejecutar `kubectl version`. + +- No todas las aplicaciones que manejan estados escalan correctamente. Si no está seguro sobre si + puede escalar sus StatefulSets, visite los [conceptos de StatefulSet](/docs/es/concepts/workloads/controllers/statefulset/) + o el [tutorial sobre StatefulSet](/docs/tutorials/stateful-application/basic-stateful-set/) para más información. + +- Solamente se debe realizar un escalamiento cuando esté lo suficientemente seguro del buen funcionamiento + de su clúster y de las aplicaciones que manejan estados. + + + +## Escalando StatefulSets + +### Uso de kubectl para escalar StatefulSets + +Como primer paso, identifica el StatefulSet que deseas escalar. + +```shell +kubectl get statefulsets +``` + +Cambia el número de réplicas de tu StatefulSet: + +```shell +kubectl scale statefulsets --replicas= +``` + +### Hacer actualizaciones "in-place" en los StatefulSets + +De manera alternativa, se pueden hacer [actualizaciones in-place](/docs/concepts/cluster-administration/manage-deployment/#in-place-updates-of-resources) +en tus StatefulSets. 
+Si el StatefulSet fue creado inicialmente con `kubectl apply`,
+puedes actualizar `.spec.replicas` en el manifiesto previamente definido y luego hacer `kubectl apply`:
+
+```shell
+kubectl apply -f <archivo-del-statefulset-actualizado>
+```
+
+De otra manera, edita esa línea con `kubectl edit`:
+
+```shell
+kubectl edit statefulsets <nombre-del-statefulset>
+```
+
+También puedes usar `kubectl patch`:
+
+```shell
+kubectl patch statefulsets <nombre-del-statefulset> -p '{"spec":{"replicas":<número-de-réplicas>}}'
+```
+
+## Solución de Problemas
+
+### El escalamiento hacia abajo no funciona correctamente
+
+No se puede escalar hacia abajo un StatefulSet cuando alguno de los Pods que administra está
+dañado. El desescalado solo tiene lugar una vez que esos Pods vuelven a estar disponibles.
+
+Si `.spec.replicas` > 1, Kubernetes no puede determinar la razón por la que un Pod está dañado.
+Puede ser el resultado de una falla permanente o de una falla transitoria. Una falla
+transitoria puede ser causada por un reinicio necesario para una actualización o mantenimiento.
+
+Si el Pod está dañado por una falla permanente, escalar
+sin corregir la falla puede llevar a un estado donde el StatefulSet cae en
+una cantidad de miembros inferior a la cantidad de réplicas necesarias para funcionar
+correctamente. Esto puede causar que el StatefulSet no esté disponible.
+
+Si el Pod está dañado por una falla transitoria y puede volver a estar disponible,
+el error transitorio puede interferir con la operación de escalado. Algunas bases de datos
+distribuidas tienen problemas cuando varios nodos se unen y abandonan el clúster al mismo tiempo.
+En esos casos es mejor razonar sobre la operación de escalado a nivel de la aplicación y
+escalar solamente cuando estés seguro de que el clúster de la aplicación está
+funcionando y en buen estado.
+
+## {{% heading "whatsnext" %}}
+
+- Aprende más acerca de [borrar un StatefulSet](/docs/tasks/run-application/delete-stateful-set/).
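Como ilustración de la sección de escalado anterior, una sesión típica podría verse así; el nombre `web` y la etiqueta `app=nginx` son hipotéticos:

```bash
# Escalar el StatefulSet "web" (nombre hipotético) a 5 réplicas
kubectl scale statefulsets web --replicas=5

# Observar cómo los Pods nuevos se crean de uno en uno y en orden (web-3 y luego web-4)
kubectl get pods -l app=nginx --watch

# Confirmar el número de réplicas listas
kubectl get statefulsets web
```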
diff --git a/content/es/docs/tasks/tools/included/install-kubectl-linux.md b/content/es/docs/tasks/tools/included/install-kubectl-linux.md index 10b756f079167..5ff454b8f9928 100644 --- a/content/es/docs/tasks/tools/included/install-kubectl-linux.md +++ b/content/es/docs/tasks/tools/included/install-kubectl-linux.md @@ -45,7 +45,7 @@ Por ejemplo, para descargar la versión {{< skew currentPatchVersion >}} en Linu Descargue el archivo de comprobación de kubectl: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" ``` Valide el binario kubectl con el archivo de comprobación: @@ -199,7 +199,7 @@ A continuación, se muestran los procedimientos para configurar el autocompletad Descargue el archivo de comprobación kubectl-convert: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" ``` Valide el binario kubectl-convert con el archivo de comprobación: diff --git a/content/es/docs/tasks/tools/included/optional-kubectl-configs-fish.md b/content/es/docs/tasks/tools/included/optional-kubectl-configs-fish.md new file mode 100644 index 0000000000000..a23b3dcbc37b3 --- /dev/null +++ b/content/es/docs/tasks/tools/included/optional-kubectl-configs-fish.md @@ -0,0 +1,24 @@ +--- +title: "autocompletado con fish" +description: "Configuración opcional para habilitar el autocompletado en la shell fish." +headless: true +_build: + list: never + render: never + publishResources: false +--- + +{{< note >}} +El autocompletado para Fish necesita de kubectl versión 1.23 o superior. +{{< /note >}} + +El script de autocompletado de Fish para kubectl puede ser generado con el comando `kubectl completion fish`. Ejecutando este comando en tu shell habilitará el autocompletado de kubectl para Fish. + +Para qué funcione en sus futuras sesiones shell, debes agregar la siguiente línea al archivo `~/.config/fish/config.fish`: + +```shell +kubectl completion fish | source +``` + +Después de recargar tu shell, el autocompletado para kubectl estará funcionando automáticamente. + diff --git a/content/es/docs/tasks/tools/install-kubectl-linux.md b/content/es/docs/tasks/tools/install-kubectl-linux.md new file mode 100644 index 0000000000000..61ffdee8ac21f --- /dev/null +++ b/content/es/docs/tasks/tools/install-kubectl-linux.md @@ -0,0 +1,348 @@ +--- +title: Instalar y Configurar kubectl en Linux +content_type: task +weight: 10 +--- + +## {{% heading "prerequisites" %}} + +Se debe utilizar la versión de kubectl con una minor versión de diferencia con +tu cluster. Por ejemplo, un cliente con versión v{{< skew currentVersion >}} se puede comunicar +con las siguientes versiones de plano de control v{{< skew currentVersionAddMinor -1 >}}, +v{{< skew currentVersionAddMinor 0 >}} y v{{< skew currentVersionAddMinor 1 >}}. +Utilizar la última versión compatible de kubectl evita posibles errores. 
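Como comprobación rápida de la diferencia de versiones descrita arriba, y suponiendo que ya tienes acceso a un clúster configurado en tu kubeconfig, puedes comparar la versión del cliente con la del plano de control:

```bash
# Solo la versión del cliente kubectl (no requiere acceso al clúster)
kubectl version --client

# Versión del cliente y del plano de control en formato YAML,
# para comparar clientVersion.minor con serverVersion.minor
kubectl version --output=yaml
```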
+ +## Instalar kubectl en Linux + +Existen los siguientes métodos para instalar kubectl en Linux: + +- [Instalación del binario para Linux de kubectl con Curl](#instalación-del-binario-para-linux-de-kubectl-con-curl) +- [Instalación mediante el administrador de paquetes nativo](#instalación-mediante-el-administrador-de-paquetes-nativo) +- [Instalación usando otro administrador de paquetes](#instalación-usando-otro-administrador-de-paquetes) + +### Instalación del binario para Linux de kubectl con Curl + +1. Descargar la última versión con el siguiente comando: + + {{< tabs name="download_binary_linux" >}} + {{< tab name="x86-64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + {{< /tab >}} + {{< tab name="ARM64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl" + {{< /tab >}} + {{< /tabs >}} + + {{< note >}} + Para descargar una versión específica reemplaza la siguiente parte del comando con la + versión que desea instalar `$(curl -L -s https://dl.k8s.io/release/stable.txt)` + + Por ejemplo, para descargar la versión {{< skew currentPatchVersion >}} en linux x86-64: + + ```bash + curl -LO https://dl.k8s.io/release/v{{< skew currentPatchVersion >}}/bin/linux/amd64/kubectl + ``` + + Y para Linux ARM64: + + ```bash + curl -LO https://dl.k8s.io/release/v{{< skew currentPatchVersion >}}/bin/linux/arm64/kubectl + ``` + + {{< /note >}} + +1. Validación del binario (paso opcional) + + Descargar el archivo checksum: + + {{< tabs name="download_checksum_linux" >}} + {{< tab name="x86-64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + {{< /tab >}} + {{< tab name="ARM64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl.sha256" + {{< /tab >}} + {{< /tabs >}} + + Validar el binario de kubectl contra el archivo checksum: + + ```bash + echo "$(cat kubectl.sha256) kubectl" | sha256sum --check + ``` + + Si es válido, va a obtener la siguiente respuesta: + + ```console + kubectl: OK + ``` + + En caso de falla, `sha256` terminará con un estado diferente a cero con una salida similar a: + + ```console + kubectl: FAILED + sha256sum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + Descarga la misma versión del binario y el checksum. + {{< /note >}} + +1. Instalar kubectl + + ```bash + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + ``` + + {{< note >}} + Si no tienes acceso root en el sistema donde se busca instalar, puedes colocar + el binario kubectl en el directorio `~/.local/bin`: + + ```bash + chmod +x kubectl + mkdir -p ~/.local/bin + mv ./kubectl ~/.local/bin/kubectl + # Y después agrega el directorio ~/.local/bin a tu $PATH + ``` + + {{< /note >}} + +1. Test para asegurar que la versión instalada está actualizada: + + ```bash + kubectl version --client + ``` + + O puedes utilizar lo siguiente para una vista detallada de la versión: + + ```cmd + kubectl version --client --output=yaml + ``` + +### Instalación mediante el administrador de paquetes nativo + +{{< tabs name="kubectl_install" >}} +{{% tab name="Debian-based distributions" %}} + +1. 
Actualiza el índice del paquete `apt`, luego instala los paquetes necesarios para Kubernetes: + + ```shell + sudo apt-get update + # apt-transport-https may be a dummy package; if so, you can skip that package + sudo apt-get install -y apt-transport-https ca-certificates curl + ``` + +2. Descarga la llave pública firmada para los repositorios de Kubernetes. La misma llave firmada es usada para todos los repositorios por lo que se puede obviar la versión en la URL: + + ```shell + curl -fsSL https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + ``` + +3. Agregar el repositorio apropiado de Kubernetes. Si quieres una versión de Kubernetes diferente a {{< param "version" >}}, + reemplace {{< param "version" >}} con la versión deseada en el siguiente comando: + + ```shell + # Esto sobrescribe cualquier configuración existente en el archivo /etc/apt/sources.list.d/kubernetes.list + echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list + ``` + +{{< note >}} +Para actualizar kubectl a una minor release diferente, se debe reemplazar la versión en el archivo `/etc/apt/sources.list.d/kubernetes.list` antes de ejecutar `apt-get update` y `apt-get upgrade`. Este procedimiento se describe con más detalle en [Cambiando el Repositorio de Kubernetes](/docs/tasks/administer-cluster/kubeadm/change-package-repository/). +{{< /note >}} + +4. Actualiza el índice de `apt`, luego instala kubectl: + + ```shell + sudo apt-get update + sudo apt-get install -y kubectl + ``` + +{{< note >}} +En versiones anteriores a Debian 12 y Ubuntu 22.04 el directorio `/etc/apt/keyrings` no existe por defecto, puede ser creado usando el comando `sudo mkdir -m 755 /etc/apt/keyrings` +{{< /note >}} + +{{% /tab %}} + +{{% tab name="Red Hat-based distributions" %}} + +1. Agregar Kubernetes al repositorio `yum`. Si deseas usar una versión de Kubernetes + diferente a {{< param "version" >}}, reemplaza {{< param "version" >}} con + la versión deseada en el siguiente comando: + + ```bash + # Lo siguiente reemplaza cualquier configuración existente en /etc/yum.repos.d/kubernetes.repo + cat <}}/rpm/ + enabled=1 + gpgcheck=1 + gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key + EOF + ``` + +{{< note >}} +Para actualizar kubectl a otra versión, será necesario modificar la versión en `/etc/yum.repos.d/kubernetes.repo` +antes de ejecutar `yum update`. Este procedimiento se describe con más detalle en [Changing The Kubernetes Package Repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/). +{{< /note >}} + +2. Instalar kubectl utilizando `yum`: + + ```bash + sudo yum install -y kubectl + ``` + +{{% /tab %}} + +{{% tab name="SUSE-based distributions" %}} + +1. Agregar Kubernetes al repositorio `zypper`. 
Si deseas usar una versión de Kubernetes + diferente a {{< param "version" >}}, reemplaza {{< param "version" >}} con + la versión deseada en el siguiente comando: + + ```bash + # Lo siguiente reemplaza cualquier configuración existente en /etc/zypp/repos.d/kubernetes.repo + cat <}}/rpm/ + enabled=1 + gpgcheck=1 + gpgkey=https://pkgs.k8s.io/core:/stable:/{{< param "version" >}}/rpm/repodata/repomd.xml.key + EOF + ``` + +{{< note >}} +Para actualizar kubectl a otra versión será necesario modificar la versión en `/etc/zypp/repos.d/kubernetes.repo` +antes de ejecutar `zypper update`. Este procedimiento se describe con más detalle en [Changing The Kubernetes Package Repository](/docs/tasks/administer-cluster/kubeadm/change-package-repository/). +{{< /note >}} + + 2. Instalar kubectl usando `zypper`: + + ```bash + sudo zypper install -y kubectl + ``` + +{{% /tab %}} +{{< /tabs >}} + +### Instalación usando otro administrador de paquetes + +{{< tabs name="other_kubectl_install" >}} +{{% tab name="Snap" %}} +Si utilizas Ubuntu o alguna distribución que soporte el administrador de +páquetes [snap](https://snapcraft.io/docs/core/install), kubectl +está disponible como una aplicación de [snap](https://snapcraft.io/). + +```shell +snap install kubectl --classic +kubectl version --client +``` + +{{% /tab %}} + +{{% tab name="Homebrew" %}} +Si utilizas [Homebrew](https://docs.brew.sh/Homebrew-on-Linux) en Linux, +kubectl está disponible para su [instalación](https://docs.brew.sh/Homebrew-on-Linux#install). + +```shell +brew install kubectl +kubectl version --client +``` + +{{% /tab %}} + +{{< /tabs >}} + +## Verificar la configuración de kubectl + +{{< include "included/verify-kubectl.md" >}} + +## Configuraciones opcionales y plugins de kubectl + +### Habilitar el autocompletado en la shell + +Kubectl tiene soporte para autocompletar en Bash, Zsh, Fish y Powershell, +lo que puede agilizar el tipeo. + +A continuación están los procedimientos para configurarlo en Bash, Fish y Zsh. + +{{< tabs name="kubectl_autocompletion" >}} +{{< tab name="Bash" include="included/optional-kubectl-configs-bash-linux.md" />}} +{{< tab name="Fish" include="included/optional-kubectl-configs-fish.md" />}} +{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} +{{< /tabs >}} + +### Instalar el plugin `kubectl convert` + +{{< include "included/kubectl-convert-overview.md" >}} + +1. Descarga la última versión con el siguiente comando: + + {{< tabs name="download_convert_binary_linux" >}} + {{< tab name="x86-64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert" + {{< /tab >}} + {{< tab name="ARM64" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl-convert" + {{< /tab >}} + {{< /tabs >}} + +1. 
+### Instalar el plugin `kubectl convert`
+
+{{< include "included/kubectl-convert-overview.md" >}}
+
+1. Descarga la última versión con el siguiente comando:
+
+   {{< tabs name="download_convert_binary_linux" >}}
+   {{< tab name="x86-64" codelang="bash" >}}
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert"
+   {{< /tab >}}
+   {{< tab name="ARM64" codelang="bash" >}}
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl-convert"
+   {{< /tab >}}
+   {{< /tabs >}}
+
+1. Valida el binario (opcional)
+
+   Descarga el checksum de kubectl-convert:
+
+   {{< tabs name="download_convert_checksum_linux" >}}
+   {{< tab name="x86-64" codelang="bash" >}}
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256"
+   {{< /tab >}}
+   {{< tab name="ARM64" codelang="bash" >}}
+   curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/arm64/kubectl-convert.sha256"
+   {{< /tab >}}
+   {{< /tabs >}}
+
+   Ahora se puede validar el binario utilizando el checksum:
+
+   ```bash
+   echo "$(cat kubectl-convert.sha256) kubectl-convert" | sha256sum --check
+   ```
+
+   Si es válido, la salida será:
+
+   ```console
+   kubectl-convert: OK
+   ```
+
+   En caso de falla, `sha256sum` terminará con un estado distinto de cero y mostrará una salida similar a esta:
+
+   ```console
+   kubectl-convert: FAILED
+   sha256sum: WARNING: 1 computed checksum did NOT match
+   ```
+
+   {{< note >}}
+   Descarga la misma versión del binario y del checksum.
+   {{< /note >}}
+
+1. Instala kubectl-convert:
+
+   ```bash
+   sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert
+   ```
+
+1. Verifica que el plugin se haya instalado correctamente:
+
+   ```shell
+   kubectl convert --help
+   ```
+
+   Si no visualizas ningún error, quiere decir que el plugin fue instalado correctamente (al final de esta sección se muestra un ejemplo de uso).
+
+1. Después de instalar el plugin, elimina los archivos de instalación:
+
+   ```bash
+   rm kubectl-convert kubectl-convert.sha256
+   ```
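Como referencia de uso (esbozo orientativo; `pod.yaml` es un manifiesto hipotético de ejemplo), el plugin convierte manifiestos entre distintas versiones de la API:

```shell
# Convierte el manifiesto a la última versión de API soportada
kubectl convert -f pod.yaml

# O convierte explícitamente a una versión de API concreta
kubectl convert -f pod.yaml --output-version v1
```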
+## {{% heading "whatsnext" %}}
+
+{{< include "included/kubectl-whats-next.md" >}}
diff --git a/content/es/docs/tutorials/_index.md b/content/es/docs/tutorials/_index.md
index 27c804973ae83..86fde686f22b6 100644
--- a/content/es/docs/tutorials/_index.md
+++ b/content/es/docs/tutorials/_index.md
@@ -22,50 +22,36 @@ Antes de recorrer cada tutorial, recomendamos añadir un marcador a
## Esenciales

* [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) se trata de un tutorial interactivo en profundidad para entender Kubernetes y probar algunas funciones básicas.
-
* [Scalable Microservices with Kubernetes (Udacity)](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615)
-
* [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#)
-
* [Hello Minikube](/es/docs/tutorials/hello-minikube/)

## Configuración

* [Ejemplo: Configurando un Microservicio en Java](/docs/tutorials/configuration/configure-java-microservice/)
-
* [Configuring Redis Using a ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/)

## Aplicaciones Stateless

* [Exposing an External IP Address to Access an Application in a Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/)
-
* [Example: Deploying PHP Guestbook application with Redis](/docs/tutorials/stateless-application/guestbook/)

## Aplicaciones Stateful

* [StatefulSet Basics](/docs/tutorials/stateful-application/basic-stateful-set/)
-
* [Example: WordPress and MySQL with Persistent Volumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/)
-
* [Example: Deploying Cassandra with Stateful Sets](/docs/tutorials/stateful-application/cassandra/)
-
* [Running ZooKeeper, A CP Distributed System](/docs/tutorials/stateful-application/zookeeper/)

## Clústers

* [AppArmor](/docs/tutorials/clusters/apparmor/)
-
-* [seccomp](/docs/tutorials/clusters/seccomp/)
+* [Seccomp](/docs/tutorials/clusters/seccomp/)

## Servicios

* [Using Source IP](/docs/tutorials/services/source-ip/)
-
-
## {{% heading "whatsnext" %}}
-
Si quieres escribir un tutorial, revisa [utilizando templates](/docs/home/contribute/page-templates/) para obtener información sobre el tipo de página y la plantilla de los tutoriales.
-
-
diff --git a/content/es/examples/pods/storage/projected-secret-downwardapi-configmap.yaml b/content/es/examples/pods/storage/projected-secret-downwardapi-configmap.yaml
new file mode 100644
index 0000000000000..453dc08c0c7d9
--- /dev/null
+++ b/content/es/examples/pods/storage/projected-secret-downwardapi-configmap.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: volume-test
+spec:
+  containers:
+  - name: container-test
+    image: busybox:1.28
+    volumeMounts:
+    - name: all-in-one
+      mountPath: "/projected-volume"
+      readOnly: true
+  volumes:
+  - name: all-in-one
+    projected:
+      sources:
+      - secret:
+          name: mysecret
+          items:
+            - key: username
+              path: my-group/my-username
+      - downwardAPI:
+          items:
+            - path: "labels"
+              fieldRef:
+                fieldPath: metadata.labels
+            - path: "cpu_limit"
+              resourceFieldRef:
+                containerName: container-test
+                resource: limits.cpu
+      - configMap:
+          name: myconfigmap
+          items:
+            - key: config
+              path: my-group/my-config
diff --git a/content/es/examples/pods/storage/projected-secrets-nondefault-permission-mode.yaml b/content/es/examples/pods/storage/projected-secrets-nondefault-permission-mode.yaml
new file mode 100644
index 0000000000000..b921fd93c5833
--- /dev/null
+++ b/content/es/examples/pods/storage/projected-secrets-nondefault-permission-mode.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: volume-test
+spec:
+  containers:
+  - name: container-test
+    image: busybox:1.28
+    volumeMounts:
+    - name: all-in-one
+      mountPath: "/projected-volume"
+      readOnly: true
+  volumes:
+  - name: all-in-one
+    projected:
+      sources:
+      - secret:
+          name: mysecret
+          items:
+            - key: username
+              path: my-group/my-username
+      - secret:
+          name: mysecret2
+          items:
+            - key: password
+              path: my-group/my-password
+              mode: 511
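Cabe aclarar el campo `mode: 511` del ejemplo anterior: el valor se expresa en decimal porque la serialización JSON no admite notación octal, y 511 en decimal equivale a 0777 en octal. Puede comprobarse rápidamente (esbozo orientativo):

```shell
# Convierte 511 (decimal) a octal; imprime 777
printf '%o\n' 511
```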
diff --git a/content/es/examples/pods/storage/projected-service-account-token.yaml b/content/es/examples/pods/storage/projected-service-account-token.yaml
new file mode 100644
index 0000000000000..cc307659a78ef
--- /dev/null
+++ b/content/es/examples/pods/storage/projected-service-account-token.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sa-token-test
+spec:
+  containers:
+  - name: container-test
+    image: busybox:1.28
+    volumeMounts:
+    - name: token-vol
+      mountPath: "/service-account"
+      readOnly: true
+  serviceAccountName: default
+  volumes:
+  - name: token-vol
+    projected:
+      sources:
+      - serviceAccountToken:
+          audience: api
+          expirationSeconds: 3600
+          path: token
diff --git a/content/fr/_index.html b/content/fr/_index.html
index 87923c286881b..7a76d1ed6315b 100644
--- a/content/fr/_index.html
+++ b/content/fr/_index.html
@@ -43,12 +43,12 @@

      Les défis de la migration de plus de 150 microservices vers Kubernetes



      - Venez au KubeCon Detroit, Michigan, USA du 24 au 28 Octobre 2022 + Venez au KubeCon EU Paris, France du 19 au 22 Mars 2024



- Venez au KubeCon EU Amsterdam, Pays-Bas du 17 au 21 Avril 2023
+ Venez au KubeCon Salt Lake City, Utah, USA du 12 au 15 Novembre 2024
      diff --git a/content/fr/community/_index.html b/content/fr/community/_index.html index 61567183927ee..4514f74f4827c 100644 --- a/content/fr/community/_index.html +++ b/content/fr/community/_index.html @@ -227,7 +227,7 @@

      Actualités récentes



      diff --git a/content/fr/docs/concepts/storage/volumes.md b/content/fr/docs/concepts/storage/volumes.md index e78e7ce76b348..c2fdf583cb49a 100644 --- a/content/fr/docs/concepts/storage/volumes.md +++ b/content/fr/docs/concepts/storage/volumes.md @@ -857,7 +857,7 @@ Vous devez créer un secret dans l'API Kubernetes avant de pouvoir l'utiliser. Un conteneur utilisant un secret en tant que point de montage de volume [subPath](#using-subpath) ne recevra pas les mises à jour des secrets. {{< /note >}} -Les secrets sont décrits plus en détails [ici](/docs/user-guide/secrets). +Les secrets sont décrits plus en détails [ici](/docs/concepts/configuration/secret/). ### storageOS {#storageos} diff --git a/content/fr/docs/concepts/workloads/controllers/statefulset.md b/content/fr/docs/concepts/workloads/controllers/statefulset.md index b89e0416b8135..6f7dc76c2bbb9 100644 --- a/content/fr/docs/concepts/workloads/controllers/statefulset.md +++ b/content/fr/docs/concepts/workloads/controllers/statefulset.md @@ -165,7 +165,7 @@ Le domaine cluster sera `cluster.local` à moins qu'il soit Kubernetes crée un [PersistentVolume](/docs/concepts/storage/persistent-volumes/) pour chaque VolumeClaimTemplate. Dans l'exemple nginx ci-dessus, chaque Pod se verra affecter un unique PersistentVolume -avec un StorageClass de `my-storage-class` et 1 Gib de stockage provisionné. Si aucun StorageClass +avec un StorageClass de `my-storage-class` et 1 GiB de stockage provisionné. Si aucun StorageClass n'est spécifié, alors le StorageClass par défaut sera utilisé. Lorsqu'un Pod est (re)schedulé sur un noeud, ses `volumeMounts` montent les PersistentVolumes associés aux PersistentVolumeClaims. Notez que les PersistentVolumes associés avec les PersistentVolumeClaims des Pods diff --git a/content/fr/docs/contribute/generate-ref-docs/federation-api.md b/content/fr/docs/contribute/generate-ref-docs/federation-api.md index 30bb6c525d807..cc99bdc9a6266 100644 --- a/content/fr/docs/contribute/generate-ref-docs/federation-api.md +++ b/content/fr/docs/contribute/generate-ref-docs/federation-api.md @@ -15,7 +15,7 @@ Cette page montre comment générer automatiquement des pages de référence pou * Vous devez avoir [Git](https://git-scm.com/book/fr/v2/D%C3%A9marrage-rapide-Installation-de-Git) installé. -* Vous devez avoir [Golang](https://golang.org/doc/install) version 1.9.1 ou ultérieur installé, et votre variable d'environnement `$GOPATH` doit être définie. +* Vous devez avoir [Golang](https://go.dev/doc/install) version 1.9.1 ou ultérieur installé, et votre variable d'environnement `$GOPATH` doit être définie. * Vous devez avoir [Docker](https://docs.docker.com/engine/installation/) installé. 
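À titre d'illustration, une vérification rapide de ces prérequis pourrait ressembler à ceci (simple esquisse indicative) :

```shell
# Vérifie que les outils requis sont installés et que GOPATH est défini
git version
go version          # doit afficher go1.9.1 ou une version ultérieure
docker version
echo "${GOPATH:?GOPATH doit être défini}"
```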
diff --git a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md index b039d19c8b9c6..bf7954500aa0c 100644 --- a/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md +++ b/content/fr/docs/contribute/generate-ref-docs/kubernetes-api.md @@ -16,7 +16,7 @@ Cette page montre comment mettre à jour les documents de référence générés Vous devez avoir ces outils installés: * [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) -* [Golang](https://golang.org/doc/install) version 1.9.1 ou ultérieur +* [Golang](https://go.dev/doc/install) version 1.9.1 ou ultérieur * [Docker](https://docs.docker.com/engine/installation/) * [etcd](https://github.com/coreos/etcd/) diff --git a/content/fr/docs/home/_index.md b/content/fr/docs/home/_index.md index 10e038dfa16f9..ff2e0e9e0fea4 100644 --- a/content/fr/docs/home/_index.md +++ b/content/fr/docs/home/_index.md @@ -7,7 +7,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Accueil" +linkTitle: "Documentation" main_menu: true weight: 10 hide_feedback: true diff --git a/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md index 1973bcd1bb89b..a853ad6723e18 100644 --- a/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/fr/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -105,7 +105,7 @@ Lorsque vous avez terminé avec votre shell, entrez `exit`. Dans une fenêtre de commande ordinaire, pas votre shell, répertoriez les variables d'environnement dans le conteneur en cours d'exécution: ```shell -kubectl exec shell-demo env +kubectl exec shell-demo -- env ``` Essayez d'exécuter d'autres commandes. diff --git a/content/hi/_index.html b/content/hi/_index.html index 2009559be3bab..b1faf30bcc533 100644 --- a/content/hi/_index.html +++ b/content/hi/_index.html @@ -8,7 +8,7 @@ {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} -[कुबेरनेट्स]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}), जो K8s के रूप में भी जाना जाता है, कंटेनरीकृत एप्लीकेशन के डिप्लॉयमेंट, स्केलिंग और प्रबंधन को स्वचालित करने के लिए एक ओपन-सोर्स सिस्टम है। +[कुबेरनेट्स]({{< relref "/docs/concepts/overview/_index.md" >}}), जो K8s के रूप में भी जाना जाता है, कंटेनरीकृत एप्लीकेशन के डिप्लॉयमेंट, स्केलिंग और प्रबंधन को स्वचालित करने के लिए एक ओपन-सोर्स सिस्टम है। यह आसान प्रबंधन और खोज के लिए लॉजिकल इकाइयों में एक एप्लीकेशन बनाने वाले कंटेनरों को समूहित करता है। कुबेरनेट्स [Google में उत्पादन कार्यभार चलाने के 15 वर्षों के अनुभव](http://queue.acm.org/detail.cfm?id=2898444) पर निर्माणित है, जो समुदाय के सर्वोत्तम-नस्लीय विचारों और प्रथाओं के साथ संयुक्त है। {{% /blocks/feature %}} diff --git a/content/hi/docs/concepts/overview/what-is-kubernetes.md b/content/hi/docs/concepts/overview/_index.md similarity index 99% rename from content/hi/docs/concepts/overview/what-is-kubernetes.md rename to content/hi/docs/concepts/overview/_index.md index bb59092d2b3b2..6268b8761b780 100644 --- a/content/hi/docs/concepts/overview/what-is-kubernetes.md +++ b/content/hi/docs/concepts/overview/_index.md @@ -1,5 +1,5 @@ --- -title: कुबेरनेट्स क्या है? 
+title: अवलोकन description: > कुबेरनेट्स कंटेनरीकृत वर्कलोड और सेवाओं के प्रबंधन के लिए एक पोर्टेबल, एक्स्टेंसिबल, ओपन-सोर्स प्लेटफॉर्म है, जो घोषणात्मक कॉन्फ़िगरेशन और स्वचालन दोनों की सुविधा प्रदान करता है। इसका एक बड़ा, तेजी से बढ़ता हुआ पारिस्थितिकी तंत्र है। कुबेरनेट्स सेवाएँ, समर्थन और उपकरण व्यापक रूप से उपलब्ध हैं। content_type: concept diff --git a/content/hi/docs/home/_index.md b/content/hi/docs/home/_index.md index 1978beb04c053..090b7c752b8ba 100644 --- a/content/hi/docs/home/_index.md +++ b/content/hi/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "होम" +linkTitle: "प्रलेखन" main_menu: true weight: 10 hide_feedback: true diff --git a/content/hi/docs/setup/_index.md b/content/hi/docs/setup/_index.md index 26416b0c6806f..33034b66aec53 100644 --- a/content/hi/docs/setup/_index.md +++ b/content/hi/docs/setup/_index.md @@ -8,9 +8,9 @@ card: name: setup weight: 20 anchors: - - anchor: "#सीखने-का-वातावरण" + - anchor: "#learning-environment" title: सीखने का वातावरण - - anchor: "#प्रोडक्शन-वातावरण" + - anchor: "#production-environment" title: प्रोडक्शन वातावरण --- @@ -25,13 +25,13 @@ card: -## सीखने का वातावरण +## सीखने का वातावरण {#learning-environment} यदि आप कुबेरनेट्स सीख रहे हैं, तो कुबेरनेट्स समुदाय द्वारा समर्थित टूल का उपयोग करें, या स्थानीय मशीन पर कुबेरनेट्स क्लस्टर सेटअप करने के लिए इकोसिस्टम में उपलब्ध टूल का उपयोग करें। [इंस्टॉल टूल्स](/hi/docs/tasks/tools/) देखें। -## प्रोडक्शन वातावरण +## प्रोडक्शन वातावरण {#production-environment} [प्रोडक्शन वातावरण](/hi/docs/setup/production-environment/) के लिए समाधान का मूल्यांकन करते समय, विचार करें कि कुबेरनेट्स क्लस्टर के किन पहलुओं (या _abstractions_) का संचालन आप स्वयं प्रबंधित करना चाहते हैं और किसे आप एक प्रदाता को सौंपना पसंद करते हैं। diff --git a/content/hi/docs/tasks/tools/install-kubectl-linux.md b/content/hi/docs/tasks/tools/install-kubectl-linux.md index e099ad5ab7a44..b11abf29437c4 100644 --- a/content/hi/docs/tasks/tools/install-kubectl-linux.md +++ b/content/hi/docs/tasks/tools/install-kubectl-linux.md @@ -44,7 +44,7 @@ Linux पर kubectl संस्थापित करने के लिए kubectl चेकसम फाइल डाउनलोड करें: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" ``` चेकसम फ़ाइल से kubectl बाइनरी को मान्य करें: @@ -199,7 +199,7 @@ kubectl Bash और Zsh के लिए ऑटोकम्प्लेशन kubectl-convert चेकसम फ़ाइल डाउनलोड करें: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" ``` चेकसम फ़ाइल से kubectl-convert बाइनरी को मान्य करें: diff --git a/content/hi/docs/tutorials/_index.md b/content/hi/docs/tutorials/_index.md index 5aa154622e4b5..a0af44af67660 100644 --- a/content/hi/docs/tutorials/_index.md +++ b/content/hi/docs/tutorials/_index.md @@ -52,7 +52,7 @@ content_type: concept * [AppArmor](/docs/tutorials/clusters/apparmor/) -* [seccomp](/docs/tutorials/clusters/seccomp/) +* [Seccomp](/docs/tutorials/clusters/seccomp/) ## सर्विस diff --git a/content/id/_index.html b/content/id/_index.html index 29c6646e6aae2..637c69277561a 100644 --- a/content/id/_index.html +++ b/content/id/_index.html @@ -4,6 +4,7 @@ cid: home --- +{{< site-searchbar >}} {{< blocks/section id="oceanNodes" >}} {{% 
blocks/feature image="flower" %}} @@ -43,12 +44,12 @@

Tantangan yang Dihadapi untuk Melakukan Migrasi 150+ Microservices ke Kubernetes

      - Hadiri KubeCon North America pada 24-28 Oktober 2022 + Hadiri KubeCon + CloudNativeCon Europe pada 19-22 Maret 2024



      - Hadiri KubeCon Europe pada 17-21 April 2023 + Hadiri KubeCon + CloudNativeCon North America pada 12-15 November 2024

      diff --git a/content/id/community/_index.html b/content/id/community/_index.html index e0d3b38d9e09d..fba7cdf4fa1c7 100644 --- a/content/id/community/_index.html +++ b/content/id/community/_index.html @@ -226,7 +226,7 @@

      Berita Terkini



      diff --git a/content/id/docs/concepts/architecture/cgroups.md b/content/id/docs/concepts/architecture/cgroups.md index 4fa39a40c19d6..b0619b5a60545 100644 --- a/content/id/docs/concepts/architecture/cgroups.md +++ b/content/id/docs/concepts/architecture/cgroups.md @@ -98,7 +98,7 @@ Untuk cgroup v2, outputnya adalah `cgroup2fs`. Untuk cgroup v1, outputnya adalah `tmpfs.` -## {{% heading "Selanjutnya" %}} +## {{% heading "whatsnext" %}} - Pelajari lebih lanjut tentang [cgroups](https://man7.org/linux/man-pages/man7/cgroups.7.html) - Pelajari lebih lanjut tentang [container runtime](/docs/concepts/architecture/cri) diff --git a/content/id/docs/concepts/cluster-administration/addons.md b/content/id/docs/concepts/cluster-administration/addons.md index a39668ee2472b..79876d0efa977 100644 --- a/content/id/docs/concepts/cluster-administration/addons.md +++ b/content/id/docs/concepts/cluster-administration/addons.md @@ -25,7 +25,7 @@ Laman ini akan menjabarkan beberapa *add-ons* yang tersedia serta tautan instruk * [Canal](https://projectcalico.docs.tigera.io/getting-started/kubernetes/flannel/flannel) menggabungkan Flannel dan Calico, menyediakan jaringan serta *policy* jaringan. * [Cilium](https://github.com/cilium/cilium) merupakan *plugin* jaringan L3 dan *policy* jaringan yang dapat menjalankan *policy* HTTP/API/L7 secara transparan. Mendukung mode *routing* maupun *overlay/encapsulation*. * [CNI-Genie](https://github.com/cni-genie/CNI-Genie) memungkinkan Kubernetes agar dapat terkoneksi dengan beragam *plugin* CNI, seperti Calico, Canal, Flannel, Romana, atau Weave dengan mulus. -* [Contiv](http://contiv.github.io) menyediakan jaringan yang dapat dikonfigurasi (*native* L3 menggunakan BGP, *overlay* menggunakan vxlan, klasik L2, dan Cisco-SDN/ACI) untuk berbagai penggunaan serta *policy framework* yang kaya dan beragam. Proyek Contiv merupakan proyek [open source](http://github.com/contiv). Laman [instalasi](http://github.com/contiv/install) ini akan menjabarkan cara instalasi, baik untuk klaster dengan kubeadm maupun non-kubeadm. +* [Contiv](https://contivpp.io) menyediakan jaringan yang dapat dikonfigurasi (*native* L3 menggunakan BGP, *overlay* menggunakan vxlan, klasik L2, dan Cisco-SDN/ACI) untuk berbagai penggunaan serta *policy framework* yang kaya dan beragam. Proyek Contiv merupakan proyek [open source](https://github.com/contiv). Laman [instalasi](https://github.com/contiv/install) ini akan menjabarkan cara instalasi, baik untuk klaster dengan kubeadm maupun non-kubeadm. * [Contrail](http://www.juniper.net/us/en/products-services/sdn/contrail/contrail-networking/), yang berbasis dari [Tungsten Fabric](https://tungsten.io), merupakan sebuah proyek *open source* yang menyediakan virtualisasi jaringan *multi-cloud* serta platform manajemen *policy*. Contrail dan Tungsten Fabric terintegrasi dengan sistem orkestrasi lainnya seperti Kubernetes, OpenShift, OpenStack dan Mesos, serta menyediakan mode isolasi untuk mesin virtual (VM), kontainer/pod dan *bare metal*. * [Flannel](https://github.com/flannel-io/flannel#deploying-flannel-manually) merupakan penyedia jaringan *overlay* yang dapat digunakan pada Kubernetes. * [Knitter](https://github.com/ZTE/Knitter/) merupakan solusi jaringan yang mendukung multipel jaringan pada Kubernetes. 
diff --git a/content/id/docs/concepts/cluster-administration/logging.md b/content/id/docs/concepts/cluster-administration/logging.md index e00745d1d979f..33266635c333d 100644 --- a/content/id/docs/concepts/cluster-administration/logging.md +++ b/content/id/docs/concepts/cluster-administration/logging.md @@ -21,7 +21,7 @@ Arsitektur _logging_ pada level klaster yang akan dijelaskan berikut mengasumsik Pada bagian ini, kamu dapat melihat contoh tentang dasar _logging_ pada Kubernetes yang mengeluarkan data pada _standard output_. Demonstrasi berikut ini menggunakan sebuah [spesifikasi pod](/examples/debug/counter-pod.yaml) dengan kontainer yang akan menuliskan beberapa teks ke _standard output_ tiap detik. -{{< codenew file="debug/counter-pod.yaml" >}} +{{% codenew file="debug/counter-pod.yaml" %}} Untuk menjalankan pod ini, gunakan perintah berikut: @@ -126,13 +126,13 @@ Dengan menggunakan cara ini kamu dapat memisahkan aliran log dari bagian-bagian Sebagai contoh, sebuah pod berjalan pada satu kontainer tunggal, dan kontainer menuliskan ke dua berkas log yang berbeda, dengan dua format yang berbeda pula. Berikut ini _file_ konfigurasi untuk Pod: -{{< codenew file="admin/logging/two-files-counter-pod.yaml" >}} +{{% codenew file="admin/logging/two-files-counter-pod.yaml" %}} Hal ini akan menyulitkan untuk mengeluarkan log dalam format yang berbeda pada aliran log yang sama, meskipun kamu dapat me-_redirect_ keduanya ke `stdout` dari kontainer. Sebagai gantinya, kamu dapat menggunakan dua buah kontainer _sidecar_. Tiap kontainer _sidecar_ dapat membaca suatu berkas log tertentu dari _shared volume_ kemudian mengarahkan log ke `stdout`-nya sendiri. Berikut _file_ konfigurasi untuk pod yang memiliki dua buah kontainer _sidecard_: -{{< codenew file="admin/logging/two-files-counter-pod-streaming-sidecar.yaml" >}} +{{% codenew file="admin/logging/two-files-counter-pod-streaming-sidecar.yaml" %}} Saat kamu menjalankan pod ini, kamu dapat mengakses tiap aliran log secara terpisah dengan menjalankan perintah berikut: @@ -175,7 +175,7 @@ Menggunakan agen _logging_ di dalam kontainer _sidecar_ dapat berakibat pengguna Sebagai contoh, kamu dapat menggunakan [Stackdriver](/docs/tasks/debug-application-cluster/logging-stackdriver/), yang menggunakan fluentd sebagai agen _logging_. Berikut ini dua _file_ konfigurasi yang dapat kamu pakai untuk mengimplementasikan cara ini. _File_ yang pertama berisi sebuah [ConfigMap](/id/docs/tasks/configure-pod-container/configure-pod-configmap/) untuk mengonfigurasi fluentd. -{{< codenew file="admin/logging/fluentd-sidecar-config.yaml" >}} +{{% codenew file="admin/logging/fluentd-sidecar-config.yaml" %}} {{< note >}} Konfigurasi fluentd berada diluar cakupan artikel ini. Untuk informasi lebih lanjut tentang cara mengonfigurasi fluentd, silakan lihat [dokumentasi resmi fluentd ](http://docs.fluentd.org/). @@ -183,7 +183,7 @@ Konfigurasi fluentd berada diluar cakupan artikel ini. Untuk informasi lebih lan _File_ yang kedua mendeskripsikan sebuah pod yang memiliki kontainer _sidecar_ yang menjalankan fluentd. Pod ini melakukan _mount_ sebuah volume yang akan digunakan fluentd untuk mengambil data konfigurasinya. -{{< codenew file="admin/logging/two-files-counter-pod-agent-sidecar.yaml" >}} +{{% codenew file="admin/logging/two-files-counter-pod-agent-sidecar.yaml" %}} Setelah beberapa saat, kamu akan mendapati pesan log pada _interface_ Stackdriver. 
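Sebagai gambaran penggunaan (sketsa; nama Pod `counter` serta kontainer `count-log-1` dan `count-log-2` mengikuti berkas contoh di atas), tiap aliran log dapat dibaca secara terpisah:

```shell
# Membaca log dari masing-masing kontainer sidecar di dalam Pod counter
kubectl logs counter count-log-1
kubectl logs counter count-log-2
```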
diff --git a/content/id/docs/concepts/cluster-administration/manage-deployment.md b/content/id/docs/concepts/cluster-administration/manage-deployment.md index 4bdc7f790e6dc..12525e39e1754 100644 --- a/content/id/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/id/docs/concepts/cluster-administration/manage-deployment.md @@ -17,7 +17,7 @@ Kamu telah melakukan _deploy_ pada aplikasimu dan mengeksposnya melalui sebuah _ Banyak aplikasi memerlukan beberapa _resource_, seperti Deployment dan Service. Pengelolaan beberapa _resource_ dapat disederhanakan dengan mengelompokkannya dalam berkas yang sama (dengan pemisah `---` pada YAML). Contohnya: -{{< codenew file="application/nginx-app.yaml" >}} +{{% codenew file="application/nginx-app.yaml" %}} Beberapa _resource_ dapat dibuat seolah-olah satu _resource_: diff --git a/content/id/docs/concepts/cluster-administration/networking.md b/content/id/docs/concepts/cluster-administration/networking.md index b300c29e98bc0..d587349dcb977 100644 --- a/content/id/docs/concepts/cluster-administration/networking.md +++ b/content/id/docs/concepts/cluster-administration/networking.md @@ -105,7 +105,7 @@ Plugin ini dirancang untuk secara langsung mengkonfigurasi dan _deploy_ dalam VP ### Contiv -[Contiv](https://github.com/contiv/netplugin) menyediakan jaringan yang dapat dikonfigurasi (_native_ l3 menggunakan BGP, _overlay_ menggunakan vxlan, classic l2, atau Cisco-SDN / ACI) untuk berbagai kasus penggunaan. [Contiv](http://contiv.io) semuanya open sourced. +[Contiv](https://github.com/contiv/netplugin) menyediakan jaringan yang dapat dikonfigurasi (_native_ l3 menggunakan BGP, _overlay_ menggunakan vxlan, classic l2, atau Cisco-SDN / ACI) untuk berbagai kasus penggunaan. [Contiv](https://contivpp.io) semuanya open sourced. ### Contrail / Tungsten Fabric diff --git a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md index aa702827b9ad4..5195243acb70a 100644 --- a/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/id/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -64,7 +64,7 @@ akan mengubah informasi yang kamu berikan ke dalam format JSON ketika melakukan Berikut merupakan contoh _file_ `.yaml` yang menunjukkan _field_ dan _spec_ objek untuk _Deployment_: -{{< codenew file="application/deployment.yaml" >}} +{{% codenew file="application/deployment.yaml" %}} Salah satu cara untuk membuat _Deployment_ menggunakan _file_ `.yaml` seperti yang dijabarkan di atas adalah dengan menggunakan perintah diff --git a/content/id/docs/concepts/policy/pod-security-policy.md b/content/id/docs/concepts/policy/pod-security-policy.md index 3646246150e83..d89e6ca7398f3 100644 --- a/content/id/docs/concepts/policy/pod-security-policy.md +++ b/content/id/docs/concepts/policy/pod-security-policy.md @@ -146,7 +146,7 @@ alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n Beri definisi objek contoh PodSecurityPolicy dalam sebuah berkas. Ini adalah kebijakan yang mencegah pembuatan Pod-Pod yang _privileged_. 
-{{< codenew file="policy/example-psp.yaml" >}} +{{% codenew file="policy/example-psp.yaml" %}} Dan buatlah PodSecurityPolicy tersebut dengan `kubectl`: @@ -297,11 +297,11 @@ podsecuritypolicy "example" deleted Berikut adalah kebijakan dengan batasan paling sedikit yang dapat kamu buat, ekuivalen dengan tidak menggunakan _admission controller_ Pod Security Policy: -{{< codenew file="policy/privileged-psp.yaml" >}} +{{% codenew file="policy/privileged-psp.yaml" %}} Berikut adalah sebuah contoh kebijakan yang restriktif yang mengharuskan pengguna-pengguna untuk berjalan sebagai pengguna yang _unprivileged_, memblokir kemungkinan eskalasi menjadi _root_, dan mengharuskan penggunaan beberapa mekanisme keamanan. -{{< codenew file="policy/restricted-psp.yaml" >}} +{{% codenew file="policy/restricted-psp.yaml" %}} ## Referensi Kebijakan diff --git a/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md index 4f0f838db5204..6139b0b2d00f0 100644 --- a/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -52,7 +52,7 @@ spec: Kemudian tambahkan sebuah `nodeSelector` seperti berikut: -{{< codenew file="pods/pod-nginx.yaml" >}} +{{% codenew file="pods/pod-nginx.yaml" %}} Ketika kamu menjalankan perintah `kubectl apply -f https://k8s.io/examples/pods/pod-nginx.yaml`, pod tersebut akan dijadwalkan pada node yang memiliki label yang dirinci. Kamu dapat memastikan penambahan nodeSelector berhasil dengan menjalankan `kubectl get pods -o wide` dan melihat "NODE" tempat Pod ditugaskan. @@ -110,7 +110,7 @@ Afinitas node dinyatakan sebagai _field_ `nodeAffinity` dari _field_ `affinity` Berikut ini contoh dari pod yang menggunakan afinitas node: -{{< codenew file="pods/pod-with-node-affinity.yaml" >}} +{{% codenew file="pods/pod-with-node-affinity.yaml" %}} Aturan afinitas node tersebut menyatakan pod hanya bisa ditugaskan pada node dengan label yang memiliki kunci `kubernetes.io/e2e-az-name` dan bernilai `e2e-az1` atau `e2e-az2`. Selain itu, dari semua node yang memenuhi kriteria tersebut, mode dengan label dengan kunci `another-node-label-key` and bernilai `another-node-label-value` harus lebih diutamakan. @@ -151,7 +151,7 @@ Afinitas antar pod dinyatakan sebagai _field_ `podAffinity` dari _field_ `affini #### Contoh pod yang menggunakan pod affinity: -{{< codenew file="pods/pod-with-pod-affinity.yaml" >}} +{{% codenew file="pods/pod-with-pod-affinity.yaml" %}} Afinitas pada pod tersebut menetapkan sebuah aturan afinitas pod dan aturan anti-afinitas pod. Pada contoh ini, `podAffinity` adalah `requiredDuringSchedulingIgnoredDuringExecution` sementara `podAntiAffinity` adalah `preferredDuringSchedulingIgnoredDuringExecution`. Aturan afinitas pod menyatakan bahwa pod dapat dijadwalkan pada node hanya jika node tersebut berada pada zona yang sama dengan minimal satu pod yang sudah berjalan yang memiliki label dengan kunci "security" dan bernilai "S1". (Lebih detail, pod dapat berjalan pada node N jika node N memiliki label dengan kunci `failure-domain.beta.kubernetes.io/zone`dan nilai V sehingga ada minimal satu node dalam klaster dengan kunci `failure-domain.beta.kubernetes.io/zone` dan bernilai V yang menjalankan pod yang memiliki label dengan kunci "security" dan bernilai "S1".) 
Aturan anti-afinitas pod menyatakan bahwa pod memilih untuk tidak dijadwalkan pada sebuah node jika node tersebut sudah menjalankan pod yang memiliki label dengan kunci "security" dan bernilai "S2". (Jika `topologyKey` adalah `failure-domain.beta.kubernetes.io/zone` maka dapat diartikan bahwa pod tidak dapat dijadwalkan pada node jika node berada pada zona yang sama dengan pod yang memiliki label dengan kunci "security" dan bernilai "S2".) Lihat [design doc](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md) untuk lebih banyak contoh afinitas dan anti-afinitas pod, baik `requiredDuringSchedulingIgnoredDuringExecution` diff --git a/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md b/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md index 26a2473f460c9..bd0f5339c8013 100644 --- a/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md +++ b/content/id/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases.md @@ -68,7 +68,7 @@ Selain _boilerplate default_, kita dapat menambahkan entri pada berkas `bar.remote` pada `10.1.2.3`, kita dapat melakukannya dengan cara menambahkan HostAliases pada Pod di bawah _field_ `.spec.hostAliases`: -{{< codenew file="service/networking/hostaliases-pod.yaml" >}} +{{% codenew file="service/networking/hostaliases-pod.yaml" %}} Pod ini kemudian dapat dihidupkan dengan perintah berikut: diff --git a/content/id/docs/concepts/services-networking/connect-applications-service.md b/content/id/docs/concepts/services-networking/connect-applications-service.md index 545f5a76e129a..b4fee74d27dae 100644 --- a/content/id/docs/concepts/services-networking/connect-applications-service.md +++ b/content/id/docs/concepts/services-networking/connect-applications-service.md @@ -25,7 +25,7 @@ Panduan ini menggunakan server *nginx* sederhana untuk mendemonstrasikan konsepn Kita melakukan ini di beberapa contoh sebelumnya, tetapi mari kita lakukan sekali lagi dan berfokus pada prespektif jaringannya. Buat sebuah *nginx Pod*, dan perhatikan bahwa templat tersebut mempunyai spesifikasi *port* kontainer: -{{< codenew file="service/networking/run-my-nginx.yaml" >}} +{{% codenew file="service/networking/run-my-nginx.yaml" %}} Ini membuat aplikasi tersebut dapat diakses dari *node* manapun di dalam klaster kamu. Cek lokasi *node* dimana *Pod* tersebut berjalan: ```shell @@ -66,7 +66,7 @@ service/my-nginx exposed Perintah di atas sama dengan `kubectl apply -f` dengan *yaml* sebagai berikut: -{{< codenew file="service/networking/nginx-svc.yaml" >}} +{{% codenew file="service/networking/nginx-svc.yaml" %}} Spesifikasi ini akan membuat *Service* yang membuka *TCP port 80* di setiap *Pod* dengan label `run: my-nginx` dan mengeksposnya ke dalam *port Service* (`targetPort`: adalah port kontainer yang menerima trafik, `port` adalah *service port* yang dapat berupa *port* apapun yang digunakan *Pod* lain untuk mengakses *Service*). 
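Sebagai gambaran (sketsa; dengan asumsi Service bernama `my-nginx` seperti pada contoh di atas), kamu dapat memeriksa Service tersebut beserta endpoint yang menjadi backend-nya:

```shell
# Menampilkan Service dan daftar alamat Pod yang menjadi backend-nya
kubectl get svc my-nginx
kubectl get endpoints my-nginx
```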
@@ -253,7 +253,7 @@ nginxsecret Opaque 2 1m Sekarang modifikasi replika *nginx* untuk menjalankan server *https* menggunakan *certificate* di dalam *secret* dan *Service* untuk mengekspos semua *port* (80 dan 443): -{{< codenew file="service/networking/nginx-secure-app.yaml" >}} +{{% codenew file="service/networking/nginx-secure-app.yaml" %}} Berikut catatan penting tentang manifes *nginx-secure-app*: @@ -281,7 +281,7 @@ node $ curl -k https://10.244.3.5 Perlu dicatat bahwa kita menggunakan parameter `-k` saat menggunakan *curl*, ini karena kita tidak tau apapun tentang *Pod* yang menjalankan *nginx* saat pembuatan seritifikat, jadi kita harus memberitahu *curl* untuk mengabaikan ketidakcocokan *CName*. Dengan membuat *Service*, kita menghubungkan *CName* yang digunakan pada *certificate* dengan nama pada *DNS* yang digunakan *Pod*. Lakukan pengujian dari sebuah *Pod* (*secret* yang sama digunakan untuk agar mudah, *Pod* tersebut hanya membutuhkan *nginx.crt* untuk mengakses *Service*) -{{< codenew file="service/networking/curlpod.yaml" >}} +{{% codenew file="service/networking/curlpod.yaml" %}} ```shell kubectl apply -f ./curlpod.yaml diff --git a/content/id/docs/concepts/services-networking/dns-pod-service.md b/content/id/docs/concepts/services-networking/dns-pod-service.md index efdba8d7a13be..a7dc17f96f4cd 100644 --- a/content/id/docs/concepts/services-networking/dns-pod-service.md +++ b/content/id/docs/concepts/services-networking/dns-pod-service.md @@ -225,7 +225,7 @@ pada _field_ `dnsConfig`: Di bawah ini merupakan contoh sebuah Pod dengan pengaturan DNS kustom: -{{< codenew file="service/networking/custom-dns.yaml" >}} +{{% codenew file="service/networking/custom-dns.yaml" %}} Ketika Pod diatas dibuat, maka Container `test` memiliki isi berkas `/etc/resolv.conf` sebagai berikut: diff --git a/content/id/docs/concepts/services-networking/dual-stack.md b/content/id/docs/concepts/services-networking/dual-stack.md index 52e892f4f4704..6faed791617ff 100644 --- a/content/id/docs/concepts/services-networking/dual-stack.md +++ b/content/id/docs/concepts/services-networking/dual-stack.md @@ -96,19 +96,19 @@ Kubernetes akan mengalokasikan alamat IP (atau yang dikenal juga sebagai "_cluster IP_") dari `service-cluster-ip-range` yang dikonfigurasi pertama kali untuk Service ini. -{{< codenew file="service/networking/dual-stack-default-svc.yaml" >}} +{{% codenew file="service/networking/dual-stack-default-svc.yaml" %}} Spesifikasi Service berikut memasukkan bagian `ipFamily`. Sehingga Kubernetes akan mengalokasikan alamat IPv6 (atau yang dikenal juga sebagai "_cluster IP_") dari `service-cluster-ip-range` yang dikonfigurasi untuk Service ini. -{{< codenew file="service/networking/dual-stack-ipv6-svc.yaml" >}} +{{% codenew file="service/networking/dual-stack-ipv6-svc.yaml" %}} Sebagai perbandingan, spesifikasi Service berikut ini akan dialokasikan sebuah alamat IPv4 (atau yang dikenal juga sebagai "_cluster IP_") dari `service-cluster-ip-range` yang dikonfigurasi untuk Service ini. -{{< codenew file="service/networking/dual-stack-ipv4-svc.yaml" >}} +{{% codenew file="service/networking/dual-stack-ipv4-svc.yaml" %}} ### Tipe _LoadBalancer_ diff --git a/content/id/docs/concepts/services-networking/ingress.md b/content/id/docs/concepts/services-networking/ingress.md index 84db01b37e827..8e129ff223cf7 100644 --- a/content/id/docs/concepts/services-networking/ingress.md +++ b/content/id/docs/concepts/services-networking/ingress.md @@ -132,7 +132,7 @@ akan diarahkan pada *backend default*. 
Terdapat konsep Kubernetes yang memungkinkan kamu untuk mengekspos sebuah Service, lihat [alternatif lain](#alternatif-lain). Kamu juga bisa membuat spesifikasi Ingress dengan *backend default* yang tidak memiliki *rules*. -{{< codenew file="service/networking/ingress.yaml" >}} +{{% codenew file="service/networking/ingress.yaml" %}} Jika kamu menggunakan `kubectl apply -f` kamu dapat melihat: diff --git a/content/id/docs/concepts/workloads/controllers/daemonset.md b/content/id/docs/concepts/workloads/controllers/daemonset.md index ea21a7b268fc7..4edafc7f444af 100644 --- a/content/id/docs/concepts/workloads/controllers/daemonset.md +++ b/content/id/docs/concepts/workloads/controllers/daemonset.md @@ -37,7 +37,7 @@ Kamu bisa definisikan DaemonSet dalam berkas YAML. Contohnya, berkas `daemonset.yaml` di bawah mendefinisikan DaemonSet yang menjalankan _image_ Docker fluentd-elasticsearch: -{{< codenew file="controllers/daemonset.yaml" >}} +{{% codenew file="controllers/daemonset.yaml" %}} * Buat DaemonSet berdasarkan berkas YAML: ``` diff --git a/content/id/docs/concepts/workloads/controllers/deployment.md b/content/id/docs/concepts/workloads/controllers/deployment.md index 18f1542418e33..f6a3244174fe0 100644 --- a/content/id/docs/concepts/workloads/controllers/deployment.md +++ b/content/id/docs/concepts/workloads/controllers/deployment.md @@ -41,7 +41,7 @@ Berikut adalah penggunaan yang umum pada Deployment: Berikut adalah contoh Deployment. Dia membuat ReplicaSet untuk membangkitkan tiga Pod `nginx`: -{{< codenew file="controllers/nginx-deployment.yaml" >}} +{{% codenew file="controllers/nginx-deployment.yaml" %}} Dalam contoh ini: diff --git a/content/id/docs/concepts/workloads/controllers/garbage-collection.md b/content/id/docs/concepts/workloads/controllers/garbage-collection.md index 5eb00cf987caa..121d148b2f20b 100644 --- a/content/id/docs/concepts/workloads/controllers/garbage-collection.md +++ b/content/id/docs/concepts/workloads/controllers/garbage-collection.md @@ -22,7 +22,7 @@ Kamu juga bisa menspesifikasikan hubungan antara pemilik dan dependen dengan car Berikut adalah berkas untuk sebuah ReplicaSet yang memiliki tiga Pod: -{{< codenew file="controllers/replicaset.yaml" >}} +{{% codenew file="controllers/replicaset.yaml" %}} Jika kamu membuat ReplicaSet tersebut dan kemudian melihat metadata Pod, kamu akan melihat kolom OwnerReferences: diff --git a/content/id/docs/concepts/workloads/controllers/job.md b/content/id/docs/concepts/workloads/controllers/job.md index 4a7cce3f2a4a3..03a58ea21223b 100644 --- a/content/id/docs/concepts/workloads/controllers/job.md +++ b/content/id/docs/concepts/workloads/controllers/job.md @@ -33,7 +33,7 @@ Berikut merupakan contoh konfigurasi Job. Job ini melakukan komputasi π hingga digit ke 2000 kemudian memberikan hasilnya sebagai keluaran. Job tersebut memerlukan waktu 10 detik untuk dapat diselesaikan. 
-{{< codenew file="controllers/job.yaml" >}} +{{% codenew file="controllers/job.yaml" %}} Kamu dapat menjalankan contoh tersebut dengan menjalankan perintah berikut: diff --git a/content/id/docs/concepts/workloads/controllers/replicaset.md b/content/id/docs/concepts/workloads/controllers/replicaset.md index 57b1124208a91..e43ccc57c0ac6 100644 --- a/content/id/docs/concepts/workloads/controllers/replicaset.md +++ b/content/id/docs/concepts/workloads/controllers/replicaset.md @@ -29,7 +29,7 @@ Hal ini berarti kamu boleh jadi tidak akan membutuhkan manipulasi objek ReplicaS ## Contoh -{{< codenew file="controllers/frontend.yaml" >}} +{{% codenew file="controllers/frontend.yaml" %}} Menyimpan _manifest_ ini dalam `frontend.yaml` dan mengirimkannya ke klaster Kubernetes akan membuat ReplicaSet yang telah didefinisikan beserta dengan Pod yang dikelola. @@ -131,7 +131,7 @@ Walaupun kamu bisa membuat Pod biasa tanpa masalah, sangat direkomendasikan untu Mengambil contoh ReplicaSet _frontend_ sebelumnya, dan Pod yang ditentukan pada _manifest_ berikut: -{{< codenew file="pods/pod-rs.yaml" >}} +{{% codenew file="pods/pod-rs.yaml" %}} Karena Pod tersebut tidak memiliki Controller (atau objek lain) sebagai referensi pemilik yang sesuai dengan selektor dari ReplicaSet _frontend_, Pod tersebut akan langsung diakuisisi oleh ReplicaSet. @@ -257,7 +257,7 @@ Jumlah Pod pada ReplicaSet dapat diatur dengan mengubah nilai dari _field_ `.spe Pengaturan jumlah Pod pada ReplicaSet juga dapat dilakukan mengunakan [Horizontal Pod Autoscalers (HPA)](/docs/tasks/run-application/horizontal-pod-autoscale/). Berikut adalah contoh HPA terhadap ReplicaSet yang telah dibuat pada contoh sebelumnya. -{{< codenew file="controllers/hpa-rs.yaml" >}} +{{% codenew file="controllers/hpa-rs.yaml" %}} Menyimpan _manifest_ ini dalam `hpa-rs.yaml` dan mengirimkannya ke klaster Kubernetes akan membuat HPA tersebut yang akan mengatur jumlah Pod pada ReplicaSet yang telah didefinisikan bergantung terhadap penggunaan CPU dari Pod yang direplikasi. diff --git a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md index 48ec718a6df67..f53cac7f290c9 100644 --- a/content/id/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/id/docs/concepts/workloads/controllers/replicationcontroller.md @@ -36,7 +36,7 @@ Sebuah contoh sederhana adalah membuat sebuah objek ReplicationController untuk Contoh ReplicationController ini mengonfigurasi tiga salinan dari peladen web nginx. -{{< codenew file="controllers/replication.yaml" >}} +{{% codenew file="controllers/replication.yaml" %}} Jalankan contoh di atas dengan mengunduh berkas contoh dan menjalankan perintah ini: diff --git a/content/id/docs/concepts/workloads/controllers/statefulset.md b/content/id/docs/concepts/workloads/controllers/statefulset.md index a309e223a3693..5c091d3620939 100644 --- a/content/id/docs/concepts/workloads/controllers/statefulset.md +++ b/content/id/docs/concepts/workloads/controllers/statefulset.md @@ -154,7 +154,7 @@ Domain klaster akan diatur menjadi `cluster.local` kecuali Kubernetes membuat sebuah [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) untuk setiap VolumeClaimTemplate. Pada contoh nginx di atas, setiap Pod akan menerima sebuah PersistentVolume -dengan StorageClass `my-storage-class` dan penyimpanan senilai 1 Gib yang sudah di-_provisioning_. 
Jika tidak ada StorageClass
+dengan StorageClass `my-storage-class` dan penyimpanan senilai 1 GiB yang sudah di-_provisioning_. Jika tidak ada StorageClass
yang dispesifikasikan, maka StorageClass _default_ akan digunakan.
Ketika sebuah Pod dilakukan _(re)schedule_ pada sebuah Node, `volumeMounts` akan me-_mount_ PersistentVolumes yang terkait dengan PersistentVolume Claim-nya. Perhatikan bahwa, PersistentVolume yang terkait dengan
@@ -275,4 +275,3 @@ StatefulSet akan mulai membuat Pod dengan templat konfigurasi yang sudah di-_rev
* Ikuti contoh yang ada pada [bagaimana cara melakukan deploy Cassandra dengan StatefulSets](/docs/tutorials/stateful-application/cassandra/).
-
diff --git a/content/id/docs/concepts/workloads/pods/init-containers.md b/content/id/docs/concepts/workloads/pods/init-containers.md
index 7ccde41c9b624..380f904644a30 100644
--- a/content/id/docs/concepts/workloads/pods/init-containers.md
+++ b/content/id/docs/concepts/workloads/pods/init-containers.md
@@ -242,6 +242,33 @@ Gunakan `activeDeadlineSeconds` pada Pod dan `livenessProbe` pada Container untu
Nama setiap Container aplikasi dan Init Container pada sebuah Pod haruslah unik; Kesalahan validasi akan terjadi jika ada Container atau Init Container yang memiliki nama yang sama.
+
+### API untuk kontainer sidecar
+
+{{< feature-state for_k8s_version="v1.28" state="alpha" >}}
+
+Mulai dari Kubernetes 1.28 dalam mode alpha, terdapat fitur yang disebut `SidecarContainers` yang memungkinkan kamu menentukan `restartPolicy` untuk kontainer init secara independen dari Pod dan kontainer init lainnya. [Probe](/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe) juga dapat ditambahkan untuk mengendalikan siklus hidupnya.
+
+Jika sebuah kontainer init dibuat dengan `restartPolicy` yang diatur sebagai `Always`, maka kontainer ini akan mulai dan tetap berjalan selama seluruh masa hidup Pod, yang berguna untuk menjalankan layanan pendukung yang terpisah dari kontainer aplikasi utama.
+
+Jika sebuah `readinessProbe` ditentukan untuk kontainer init ini, hasilnya akan digunakan untuk menentukan status siap dari Pod.
+
+Karena kontainer-kontainer ini didefinisikan sebagai kontainer init, mereka mendapatkan manfaat dari urutan dan jaminan berurutan yang sama seperti kontainer init lainnya, sehingga dapat dicampur dengan kontainer init lainnya dalam alur inisialisasi Pod yang kompleks.
+
+Dibandingkan dengan kontainer init biasa, kontainer init tipe sidecar terus berjalan, dan kontainer init berikutnya dapat mulai dijalankan saat kubelet telah menetapkan status kontainer `started` menjadi benar untuk kontainer init tipe sidecar tersebut. Status tersebut menjadi benar karena ada proses yang berjalan dalam kontainer dan tidak ada probe awal yang ditentukan, atau sebagai hasil dari keberhasilan `startupProbe`.
+
+Fitur ini dapat digunakan untuk mengimplementasikan pola kontainer sidecar dengan lebih tangguh, karena kubelet akan selalu me-restart kontainer sidecar jika kontainer tersebut gagal.
+
+Berikut adalah contoh Deployment dengan dua kontainer, salah satunya adalah sidecar:
+
+{{% code_sample language="yaml" file="application/deployment-sidecar.yaml" %}}
+
+Fitur ini juga berguna untuk menjalankan Job dengan sidecar, karena kontainer sidecar tidak akan mencegah Job untuk menyelesaikan tugasnya setelah kontainer utama selesai.
+ +Berikut adalah contoh sebuah Job dengan dua kontainer, salah satunya adalah sidecar: + +{{% code_sample language="yaml" file="application/job/job-sidecar.yaml" %}} + + ### Sumber Daya Karena eksekusi Init Container yang berurutan, aturan-aturan untuk sumber daya berlaku sebagai berikut: diff --git a/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 6f27244ef6dea..188080abb5802 100644 --- a/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/id/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -114,7 +114,7 @@ node2 dan node3 (`P` merepresentasikan Pod): Jika kita ingin Pod baru akan disebar secara merata berdasarkan Pod yang telah ada pada semua zona, maka _spec_ bernilai sebagai berikut: -{{< codenew file="pods/topology-spread-constraints/one-constraint.yaml" >}} +{{% codenew file="pods/topology-spread-constraints/one-constraint.yaml" %}} `topologyKey: zone` berarti persebaran merata hanya akan digunakan pada Node dengan pasangan label "zone: ". `whenUnsatisfiable: DoNotSchedule` memberitahukan penjadwal untuk membiarkan @@ -161,7 +161,7 @@ Ini dibuat berdasarkan contoh sebelumnya. Misalkan kamu memiliki klaster dengan Kamu dapat menggunakan 2 TopologySpreadConstraint untuk mengatur persebaran Pod pada zona dan Node: -{{< codenew file="pods/topology-spread-constraints/two-constraints.yaml" >}} +{{% codenew file="pods/topology-spread-constraints/two-constraints.yaml" %}} Dalam contoh ini, untuk memenuhi batasan pertama, Pod yang baru hanya akan ditempatkan pada "zoneB", sedangkan untuk batasan kedua, Pod yang baru hanya akan ditempatkan pada "node4". Maka hasil dari @@ -224,7 +224,7 @@ sesuai dengan nilai tersebut akan dilewatkan. berkas yaml seperti di bawah, jadi "mypod" akan ditempatkan pada "zoneB", bukan "zoneC". Demikian juga `spec.nodeSelector` akan digunakan. - {{< codenew file="pods/topology-spread-constraints/one-constraint-with-nodeaffinity.yaml" >}} + {{% codenew file="pods/topology-spread-constraints/one-constraint-with-nodeaffinity.yaml" %}} ### Batasan _default_ pada tingkat klaster diff --git a/content/id/docs/home/_index.md b/content/id/docs/home/_index.md index cb0e2dc70f799..bf74b6727be4c 100644 --- a/content/id/docs/home/_index.md +++ b/content/id/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Home" +linkTitle: "Dokumentasi" main_menu: true weight: 10 hide_feedback: true diff --git a/content/id/docs/tasks/administer-cluster/dns-debugging-resolution.md b/content/id/docs/tasks/administer-cluster/dns-debugging-resolution.md index 67b451db52b34..fdc31a1147e9c 100644 --- a/content/id/docs/tasks/administer-cluster/dns-debugging-resolution.md +++ b/content/id/docs/tasks/administer-cluster/dns-debugging-resolution.md @@ -21,7 +21,7 @@ kube-dns. 
### Membuat Pod sederhana yang digunakan sebagai lingkungan pengujian -{{< codenew file="admin/dns/dnsutils.yaml" >}} +{{% codenew file="admin/dns/dnsutils.yaml" %}} Gunakan manifes berikut untuk membuat sebuah Pod: diff --git a/content/id/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md b/content/id/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md index 1aae3e38f009b..04f7076cfdb07 100644 --- a/content/id/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md +++ b/content/id/docs/tasks/administer-cluster/manage-resources/memory-constraint-namespace.md @@ -40,7 +40,7 @@ kubectl create namespace constraints-mem-example Berikut berkas konfigurasi untuk sebuah LimitRange: -{{< codenew file="admin/resource/memory-constraints.yaml" >}} +{{% codenew file="admin/resource/memory-constraints.yaml" %}} Membuat LimitRange: @@ -85,7 +85,7 @@ Berikut berkas konfigurasi Pod yang memiliki satu Container. Manifes Container menentukan permintaan memori 600 MiB dan limit memori 800 MiB. Nilai tersebut memenuhi batasan minimum dan maksimum memori yang ditentukan oleh LimitRange. -{{< codenew file="admin/resource/memory-constraints-pod.yaml" >}} +{{% codenew file="admin/resource/memory-constraints-pod.yaml" %}} Membuat Pod: @@ -127,7 +127,7 @@ kubectl delete pod constraints-mem-demo --namespace=constraints-mem-example Berikut berkas konfigurasi untuk sebuah Pod yang memiliki satu Container. Container tersebut menentukan permintaan memori 800 MiB dan batas memori 1.5 GiB. -{{< codenew file="admin/resource/memory-constraints-pod-2.yaml" >}} +{{% codenew file="admin/resource/memory-constraints-pod-2.yaml" %}} Mencoba membuat Pod: @@ -148,7 +148,7 @@ pods "constraints-mem-demo-2" is forbidden: maximum memory usage per Container i Berikut berkas konfigurasi untuk sebuah Pod yang memiliki satu Container. Container tersebut menentukan permintaan memori 100 MiB dan limit memori 800 MiB. -{{< codenew file="admin/resource/memory-constraints-pod-3.yaml" >}} +{{% codenew file="admin/resource/memory-constraints-pod-3.yaml" %}} Mencoba membuat Pod: @@ -171,7 +171,7 @@ pods "constraints-mem-demo-3" is forbidden: minimum memory usage per Container i Berikut berkas konfigurasi untuk sebuah Pod yang memiliki satu Container. Container tersebut tidak menentukan permintaan memori dan juga limit memori. -{{< codenew file="admin/resource/memory-constraints-pod-4.yaml" >}} +{{% codenew file="admin/resource/memory-constraints-pod-4.yaml" %}} Mencoba membuat Pod: @@ -202,7 +202,7 @@ dari LimitRange. Pada tahap ini, Containermu mungkin saja berjalan ataupun mungkin juga tidak berjalan. Ingat bahwa prasyarat untuk tugas ini adalah Node harus memiliki setidaknya 1 GiB memori. Jika tiap Node hanya memiliki -1 GiB memori, maka tidak akan ada cukup memori untuk dialokasikan pada setiap Node untuk memenuhi permintaan 1 Gib memori. Jika ternyata kamu menggunakan Node dengan 2 GiB memori, maka kamu mungkin memiliki cukup ruang untuk memenuhi permintaan 1 GiB tersebut. +1 GiB memori, maka tidak akan ada cukup memori untuk dialokasikan pada setiap Node untuk memenuhi permintaan 1 GiB memori. Jika ternyata kamu menggunakan Node dengan 2 GiB memori, maka kamu mungkin memiliki cukup ruang untuk memenuhi permintaan 1 GiB tersebut. 
Menghapus Pod: diff --git a/content/id/docs/tasks/configure-pod-container/assign-memory-resource.md b/content/id/docs/tasks/configure-pod-container/assign-memory-resource.md index bb092e86a58c6..4a50fb84159c2 100644 --- a/content/id/docs/tasks/configure-pod-container/assign-memory-resource.md +++ b/content/id/docs/tasks/configure-pod-container/assign-memory-resource.md @@ -69,7 +69,7 @@ Dalam latihan ini, kamu akan membuat Pod yang memiliki satu Container. Container sebesar 100 MiB dan batasan memori sebesar 200 MiB. Berikut berkas konfigurasi untuk Pod: -{{< codenew file="pods/resource/memory-request-limit.yaml" >}} +{{% codenew file="pods/resource/memory-request-limit.yaml" %}} Bagian `args` dalam berkas konfigurasi memberikan argumen untuk Container pada saat dimulai. Argumen`"--vm-bytes", "150M"` memberi tahu Container agar mencoba mengalokasikan memori sebesar 150 MiB. @@ -139,7 +139,7 @@ Dalam latihan ini, kamu membuat Pod yang mencoba mengalokasikan lebih banyak mem Berikut adalah berkas konfigurasi untuk Pod yang memiliki satu Container dengan berkas permintaan memori sebesar 50 MiB dan batasan memori sebesar 100 MiB: -{{< codenew file="pods/resource/memory-request-limit-2.yaml" >}} +{{% codenew file="pods/resource/memory-request-limit-2.yaml" %}} Dalam bagian `args` dari berkas konfigurasi, kamu dapat melihat bahwa Container tersebut akan mencoba mengalokasikan memori sebesar 250 MiB, yang jauh di atas batas yaitu 100 MiB. @@ -250,7 +250,7 @@ kapasitas dari Node mana pun dalam klaster kamu. Berikut adalah berkas konfigura Container dengan permintaan memori 1000 GiB, yang kemungkinan besar melebihi kapasitas dari setiap Node dalam klaster kamu. -{{< codenew file="pods/resource/memory-request-limit-3.yaml" >}} +{{% codenew file="pods/resource/memory-request-limit-3.yaml" %}} Buatlah Pod: diff --git a/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md b/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md index a60d862fd26c2..3d4a5d079b8d1 100644 --- a/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md +++ b/content/id/docs/tasks/configure-pod-container/assign-pods-nodes-using-node-affinity.md @@ -64,7 +64,7 @@ Afinitas Node di dalam klaster Kubernetes. Konfigurasi ini menunjukkan sebuah Pod yang memiliki afinitas node `requiredDuringSchedulingIgnoredDuringExecution`, `disktype: ssd`. Dengan kata lain, Pod hanya akan dijadwalkan hanya pada Node yang memiliki label `disktype=ssd`. -{{< codenew file="pods/pod-nginx-required-affinity.yaml" >}} +{{% codenew file="pods/pod-nginx-required-affinity.yaml" %}} 1. Terapkan konfigurasi berikut untuk membuat sebuah Pod yang akan dijadwalkan pada Node yang kamu pilih: @@ -90,7 +90,7 @@ Dengan kata lain, Pod hanya akan dijadwalkan hanya pada Node yang memiliki label Konfigurasi ini memberikan deskripsi sebuah Pod yang memiliki afinitas Node `preferredDuringSchedulingIgnoredDuringExecution`,`disktype: ssd`. Artinya Pod akan diutamakan dijalankan pada Node yang memiliki label `disktype=ssd`. -{{< codenew file="pods/pod-nginx-preferred-affinity.yaml" >}} +{{% codenew file="pods/pod-nginx-preferred-affinity.yaml" %}} 1. 
Terapkan konfigurasi berikut untuk membuat sebuah Pod yang akan dijadwalkan pada Node yang kamu pilih:
diff --git a/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
index d56f59a09a646..e03a9d97a331a 100644
--- a/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
+++ b/content/id/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
@@ -46,7 +46,7 @@ Kubernetes menyediakan _probe liveness_ untuk mendeteksi dan memperbaiki situasi
Pada latihan ini, kamu akan membuat Pod yang menjalankan Container dari image `registry.k8s.io/busybox`. Berikut ini adalah berkas konfigurasi untuk Pod tersebut:

-{{< codenew file="pods/probe/exec-liveness.yaml" >}}
+{{% codenew file="pods/probe/exec-liveness.yaml" %}}

Pada berkas konfigurasi di atas, kamu dapat melihat bahwa Pod memiliki satu `Container`.
_Field_ `periodSeconds` menentukan bahwa kubelet harus melakukan _probe liveness_ setiap 5 detik.
@@ -128,7 +128,7 @@ liveness-exec 1/1 Running 1 1m
Jenis kedua dari _probe liveness_ menggunakan sebuah permintaan GET HTTP. Berikut ini berkas konfigurasi untuk Pod yang menjalankan Container dari image `registry.k8s.io/liveness`.

-{{< codenew file="pods/probe/http-liveness.yaml" >}}
+{{% codenew file="pods/probe/http-liveness.yaml" %}}

Pada berkas konfigurasi tersebut, kamu dapat melihat Pod memiliki sebuah Container.
_Field_ `periodSeconds` menentukan bahwa kubelet harus mengerjakan _probe liveness_ setiap 3 detik.
@@ -190,7 +190,7 @@
kubelet akan mencoba untuk membuka soket pada Container kamu dengan porta tertentu.
Jika koneksi dapat terbentuk dengan sukses, maka Container dianggap dalam kondisi sehat. Namun jika tidak berhasil terbentuk, maka Container dianggap gagal.

-{{< codenew file="pods/probe/tcp-liveness-readiness.yaml" >}}
+{{% codenew file="pods/probe/tcp-liveness-readiness.yaml" %}}

Seperti yang terlihat, konfigurasi untuk pemeriksaan TCP cukup mirip dengan pemeriksaan HTTP. Contoh ini menggunakan _probe readiness_ dan _liveness_.
diff --git a/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
index 858342880e7e1..79db4e848a754 100644
--- a/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
+++ b/content/id/docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
@@ -93,7 +93,7 @@ untuk mengatur
Berikut berkas konfigurasi untuk hostPath PersistentVolume:

-{{< codenew file="pods/storage/pv-volume.yaml" >}}
+{{% codenew file="pods/storage/pv-volume.yaml" %}}

Berkas konfigurasi tersebut menentukan bahwa volume berada di `/mnt/data` pada klaster Node. Konfigurasi tersebut juga menentukan ukuran dari 10 gibibytes dan
@@ -129,7 +129,7 @@ setidaknya untuk satu Node.
Berikut berkas konfigurasi untuk PersistentVolumeClaim:

-{{< codenew file="pods/storage/pv-claim.yaml" >}}
+{{% codenew file="pods/storage/pv-claim.yaml" %}}

Membuat sebuah PersistentVolumeClaim:
@@ -169,7 +169,7 @@ Langkah selanjutnya adalah membuat sebuah Pod yang akan menggunakan PersistentVo
Berikut berkas konfigurasi untuk Pod:

-{{< codenew file="pods/storage/pv-pod.yaml" >}}
+{{% codenew file="pods/storage/pv-pod.yaml" %}}

Perhatikan bahwa berkas konfigurasi Pod menentukan sebuah PersistentVolumeClaim, tetapi tidak menentukan PersistentVolume. Dari sudut pandang Pod, _claim_ adalah volume.
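Sebagai gambaran (sketsa; nama `task-pv-volume` dan `task-pv-claim` mengasumsikan nama yang dipakai pada berkas contoh tersebut), kamu dapat memeriksa bahwa PersistentVolumeClaim sudah terikat (_bound_) ke PersistentVolume:

```shell
# Memeriksa status binding antara PersistentVolume dan PersistentVolumeClaim
kubectl get pv task-pv-volume
kubectl get pvc task-pv-claim
```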
Dari sudut pandang Pod, _claim_ adalah volume. diff --git a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md index bfdad56610635..2cf40c6106aec 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/id/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -467,7 +467,7 @@ configmap/special-config-2-c92b5mmcf2 created 2. Memberikan nilai `special.how` yang sudah terdapat pada ConfigMap pada variabel _environment_ `SPECIAL_LEVEL_KEY` di spesifikasi Pod. - {{< codenew file="pods/pod-single-configmap-env-variable.yaml" >}} + {{% codenew file="pods/pod-single-configmap-env-variable.yaml" %}} Buat Pod: @@ -481,7 +481,7 @@ configmap/special-config-2-c92b5mmcf2 created * Seperti pada contoh sebelumnya, buat ConfigMap terlebih dahulu. - {{< codenew file="configmap/configmaps.yaml" >}} + {{% codenew file="configmap/configmaps.yaml" %}} Buat ConfigMap: @@ -491,7 +491,7 @@ configmap/special-config-2-c92b5mmcf2 created * Tentukan variabel _environment_ pada spesifikasi Pod. - {{< codenew file="pods/pod-multiple-configmap-env-variable.yaml" >}} + {{% codenew file="pods/pod-multiple-configmap-env-variable.yaml" %}} Buat Pod: @@ -509,7 +509,7 @@ Fungsi ini tersedia pada Kubernetes v1.6 dan selanjutnya. * Buat ConfigMap yang berisi beberapa pasangan kunci-nilai. - {{< codenew file="configmap/configmap-multikeys.yaml" >}} + {{% codenew file="configmap/configmap-multikeys.yaml" %}} Buat ConfigMap: @@ -519,7 +519,7 @@ Fungsi ini tersedia pada Kubernetes v1.6 dan selanjutnya. * Gunakan `envFrom` untuk menentukan seluruh data pada ConfigMap sebagai variabel _environment_ kontainer. Kunci dari ConfigMap akan menjadi nama variabel _environment_ di dalam Pod. - {{< codenew file="pods/pod-configmap-envFrom.yaml" >}} + {{% codenew file="pods/pod-configmap-envFrom.yaml" %}} Buat Pod: @@ -536,7 +536,7 @@ Kamu dapat menggunakan variabel _environment_ yang ditentukan ConfigMap pada bag Sebagai contoh, spesifikasi Pod berikut -{{< codenew file="pods/pod-configmap-env-var-valueFrom.yaml" >}} +{{% codenew file="pods/pod-configmap-env-var-valueFrom.yaml" %}} dibuat dengan menjalankan @@ -545,6 +545,9 @@ kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-env-var-valu ``` menghasilkan keluaran pada kontainer `test-container` seperti berikut: +```shell +kubectl logs dapi-test-pod +``` ```shell very charm @@ -556,7 +559,7 @@ Seperti yang sudah dijelaskan pada [Membuat ConfigMap dari berkas](#membuat-conf Contoh pada bagian ini merujuk pada ConfigMap bernama `special-config`, Seperti berikut. -{{< codenew file="configmap/configmap-multikeys.yaml" >}} +{{% codenew file="configmap/configmap-multikeys.yaml" %}} Buat ConfigMap: @@ -570,7 +573,7 @@ Tambahkan nama ConfigMap di bawah bagian `volumes` pada spesifikasi Pod. Hal ini akan menambahkan data ConfigMap pada direktori yang ditentukan oleh `volumeMounts.mountPath` (pada kasus ini, `/etc/config`). Bagian `command` berisi daftar berkas pada direktori dengan nama-nama yang sesuai dengan kunci-kunci pada ConfigMap. -{{< codenew file="pods/pod-configmap-volume.yaml" >}} +{{% codenew file="pods/pod-configmap-volume.yaml" %}} Buat Pod: @@ -594,7 +597,7 @@ Jika ada beberapa berkas pada direktori `/etc/config/`, berkas-berkas tersebut a Gunakan kolom `path` untuk menentukan jalur berkas yang diinginkan untuk butir tertentu pada ConfigMap (butir ConfigMap tertentu). 
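As a rough sketch of where the `path` field sits inside a Pod's volume definition (assuming the `special-config` ConfigMap with a `SPECIAL_LEVEL` key used throughout these examples):

```yaml
# Pod spec fragment: expose only the SPECIAL_LEVEL item, under the file name "keys"
volumes:
  - name: config-volume
    configMap:
      name: special-config
      items:
        - key: SPECIAL_LEVEL
          path: keys   # combined with mountPath /etc/config, this appears as /etc/config/keys
```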
Pada kasus ini, butir `SPECIAL_LEVEL` akan dipasangkan sebagai `config-volume` pada `/etc/config/keys`. -{{< codenew file="pods/pod-configmap-volume-specific-key.yaml" >}} +{{% codenew file="pods/pod-configmap-volume-specific-key.yaml" %}} Buat Pod: diff --git a/content/id/docs/tasks/configure-pod-container/configure-service-account.md b/content/id/docs/tasks/configure-pod-container/configure-service-account.md index e53812d65a8b3..f469b257d85cd 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-service-account.md +++ b/content/id/docs/tasks/configure-pod-container/configure-service-account.md @@ -282,7 +282,7 @@ Kubelet juga dapat memproyeksikan _token_ ServiceAccount ke Pod. Kamu dapat mene Perilaku ini diatur pada PodSpec menggunakan tipe ProjectedVolume yaitu [ServiceAccountToken](/id/docs/concepts/storage/volumes/#projected). Untuk memungkinkan Pod dengan _token_ dengan pengguna bertipe _"vault"_ dan durasi validitas selama dua jam, kamu harus mengubah bagian ini pada PodSpec: -{{< codenew file="pods/pod-projected-svc-token.yaml" >}} +{{% codenew file="pods/pod-projected-svc-token.yaml" %}} Buat Pod: diff --git a/content/id/docs/tasks/configure-pod-container/configure-volume-storage.md b/content/id/docs/tasks/configure-pod-container/configure-volume-storage.md index 02d664d530457..e6b6f365a45c0 100644 --- a/content/id/docs/tasks/configure-pod-container/configure-volume-storage.md +++ b/content/id/docs/tasks/configure-pod-container/configure-volume-storage.md @@ -25,7 +25,7 @@ _Filesystem_ dari sebuah Container hanya hidup selama Container itu juga hidup. Pada latihan ini, kamu membuat sebuah Pod yang menjalankan sebuah Container. Pod ini memiliki sebuah Volume dengan tipe [emptyDir](/id/docs/concepts/storage/volumes/#emptydir) yang tetap bertahan, meski Container berakhir dan dimulai ulang. Berikut berkas konfigurasi untuk Pod: -{{< codenew file="pods/storage/redis.yaml" >}} +{{% codenew file="pods/storage/redis.yaml" %}} 1. Membuat Pod: diff --git a/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md b/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md index 50aad8de9a15a..3fe2ce8407c3b 100644 --- a/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md +++ b/content/id/docs/tasks/configure-pod-container/pull-image-private-registry.md @@ -176,7 +176,7 @@ Kamu telah berhasil menetapkan kredensial Docker kamu sebagai sebuah Secret yang Berikut ini adalah berkas konfigurasi untuk Pod yang memerlukan akses ke kredensial Docker kamu pada `regcred`: -{{< codenew file="pods/private-reg-pod.yaml" >}} +{{% codenew file="pods/private-reg-pod.yaml" %}} Unduh berkas di atas: diff --git a/content/id/docs/tasks/configure-pod-container/quality-service-pod.md b/content/id/docs/tasks/configure-pod-container/quality-service-pod.md index c5337c8854a75..5ced04b84f1fb 100644 --- a/content/id/docs/tasks/configure-pod-container/quality-service-pod.md +++ b/content/id/docs/tasks/configure-pod-container/quality-service-pod.md @@ -41,7 +41,7 @@ Agar sebuah Pod memiliki kelas QoS Guaranteed: Berikut adalah berkas konfigurasi untuk sebuah Pod dengan satu Container. Container tersebut memiliki sebuah batasan memori dan sebuah permintaan memori, keduanya sama dengan 200MiB.
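The resource stanza that produces this Guaranteed class can be sketched as follows, with requests equal to limits for every resource; the CPU values anticipate the sentence that continues below, and the literal `qos-pod.yaml` remains the authoritative example:

```yaml
# Container spec fragment: requests identical to limits on memory and CPU -> Guaranteed QoS
resources:
  limits:
    memory: "200Mi"
    cpu: "700m"
  requests:
    memory: "200Mi"
    cpu: "700m"
```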
Container itu juga mempunyai batasan CPU dan permintaan CPU yang sama sebesar 700 milliCPU: -{{< codenew file="pods/qos/qos-pod.yaml" >}} +{{% codenew file="pods/qos/qos-pod.yaml" %}} Buatlah Pod: @@ -100,7 +100,7 @@ Sebuah Pod akan mendapatkan kelas QoS Burstable apabila: Berikut adalah berkas konfigurasi untuk Pod dengan satu Container. Container yang dimaksud memiliki batasan memori sebesar 200MiB dan permintaan memori sebesar 100MiB. -{{< codenew file="pods/qos/qos-pod-2.yaml" >}} +{{% codenew file="pods/qos/qos-pod-2.yaml" %}} Buatlah Pod: @@ -147,7 +147,7 @@ Agar Pod mendapatkan kelas QoS BestEffort, Container dalam pod tidak boleh memiliki batasan atau permintaan memori atau CPU. Berikut adalah berkas konfigurasi untuk Pod dengan satu Container. Container yang dimaksud tidak memiliki batasan atau permintaan memori atau CPU apapun. -{{< codenew file="pods/qos/qos-pod-3.yaml" >}} +{{% codenew file="pods/qos/qos-pod-3.yaml" %}} Buatlah Pod: @@ -183,7 +183,7 @@ kubectl delete pod qos-demo-3 --namespace=qos-example Berikut adalah konfigurasi berkas untuk Pod yang memiliki dua Container. Satu Container menentukan permintaan memori sebesar 200MiB. Container yang lain tidak menentukan permintaan atau batasan apapun. -{{< codenew file="pods/qos/qos-pod-4.yaml" >}} +{{% codenew file="pods/qos/qos-pod-4.yaml" %}} Perhatikan bahwa Pod ini memenuhi kriteria untuk kelas QoS Burstable. Maksudnya, Container tersebut tidak memenuhi kriteria untuk kelas QoS Guaranteed, dan satu dari Container tersebut memiliki permintaan memori. diff --git a/content/id/docs/tasks/configure-pod-container/security-context.md b/content/id/docs/tasks/configure-pod-container/security-context.md index d190468399cf1..a8bd1bfdf9620 100644 --- a/content/id/docs/tasks/configure-pod-container/security-context.md +++ b/content/id/docs/tasks/configure-pod-container/security-context.md @@ -50,7 +50,7 @@ dalam spesifikasi Pod. Bagian `securityContext` adalah sebuah objek Aturan keamanan yang kamu tetapkan untuk Pod akan berlaku untuk semua Container dalam Pod tersebut. Berikut sebuah berkas konfigurasi untuk Pod yang memiliki volume `securityContext` dan `emptyDir`: -{{< codenew file="pods/security/security-context.yaml" >}} +{{% codenew file="pods/security/security-context.yaml" %}} Dalam berkas konfigurasi ini, bagian `runAsUser` menentukan bahwa dalam setiap Container pada Pod, semua proses dijalankan oleh ID pengguna 1000. Bagian `runAsGroup` menentukan grup utama dengan ID 3000 untuk @@ -191,7 +191,7 @@ ada aturan yang tumpang tindih. Aturan pada Container mempengaruhi volume pada P Berikut berkas konfigurasi untuk Pod yang hanya memiliki satu Container. Keduanya, baik Pod dan Container memiliki bagian `securityContext` sebagai berikut: -{{< codenew file="pods/security/security-context-2.yaml" >}} +{{% codenew file="pods/security/security-context-2.yaml" %}} Buatlah Pod tersebut: @@ -244,7 +244,7 @@ bagian `capabilities` pada `securityContext` di manifes Container-nya. Pertama-tama, mari melihat apa yang terjadi ketika kamu tidak menyertakan bagian `capabilities`. Berikut ini adalah berkas konfigurasi yang tidak menambah atau mengurangi kemampuan apa pun dari Container: -{{< codenew file="pods/security/security-context-3.yaml" >}} +{{% codenew file="pods/security/security-context-3.yaml" %}} Buatlah Pod tersebut: @@ -306,7 +306,7 @@ Container ini memiliki kapabilitas tambahan yang sudah ditentukan. Berikut ini adalah berkas konfigurasi untuk Pod yang hanya menjalankan satu Container. 
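A minimal sketch of the kind of fragment meant here; note that capability names in a manifest drop the `CAP_` prefix used by the kernel:

```yaml
# Container spec fragment: grant two additional Linux capabilities
securityContext:
  capabilities:
    add: ["NET_ADMIN", "SYS_TIME"]   # i.e. CAP_NET_ADMIN and CAP_SYS_TIME
```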
Konfigurasi ini menambahkan kapabilitas `CAP_NET_ADMIN` dan `CAP_SYS_TIME`: -{{< codenew file="pods/security/security-context-4.yaml" >}} +{{% codenew file="pods/security/security-context-4.yaml" %}} Buatlah Pod tersebut: diff --git a/content/id/docs/tasks/configure-pod-container/share-process-namespace.md b/content/id/docs/tasks/configure-pod-container/share-process-namespace.md index 9b32d74b3cdf6..c764bd8df3eaa 100644 --- a/content/id/docs/tasks/configure-pod-container/share-process-namespace.md +++ b/content/id/docs/tasks/configure-pod-container/share-process-namespace.md @@ -34,7 +34,7 @@ proses pemecahan masalah (_troubleshoot_) image kontainer yang tidak memiliki ut Pembagian _namespace_ proses (_Process Namespace Sharing_) diaktifkan menggunakan _field_ `shareProcessNamespace` `v1.PodSpec`. Sebagai contoh: -{{< codenew file="pods/share-process-namespace.yaml" >}} +{{% codenew file="pods/share-process-namespace.yaml" %}} 1. Buatlah sebuah Pod `nginx` di dalam klaster kamu: diff --git a/content/id/docs/tasks/debug-application-cluster/debug-application-introspection.md b/content/id/docs/tasks/debug-application-cluster/debug-application-introspection.md index 746c46f045a09..a2c7b2f318610 100644 --- a/content/id/docs/tasks/debug-application-cluster/debug-application-introspection.md +++ b/content/id/docs/tasks/debug-application-cluster/debug-application-introspection.md @@ -18,7 +18,7 @@ Pod kamu. Namun ada sejumlah cara untuk mendapatkan lebih banyak informasi tenta Dalam contoh ini, kamu menggunakan Deployment untuk membuat dua buah Pod, yang hampir sama dengan contoh sebelumnya. -{{< codenew file="application/nginx-with-request.yaml" >}} +{{% codenew file="application/nginx-with-request.yaml" %}} Buat Deployment dengan menjalankan perintah ini: diff --git a/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md b/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md index e15a8a4df6532..432898c0fbc6d 100644 --- a/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md +++ b/content/id/docs/tasks/debug-application-cluster/get-shell-running-container.md @@ -26,7 +26,7 @@ mendapatkan _shell_ untuk masuk ke dalam Container yang sedang berjalan. Dalam latihan ini, kamu perlu membuat Pod yang hanya memiliki satu Container saja. Container tersebut menjalankan _image_ nginx. Berikut ini adalah berkas konfigurasi untuk Pod tersebut: -{{< codenew file="application/shell-demo.yaml" >}} +{{% codenew file="application/shell-demo.yaml" %}} Buatlah Pod tersebut: @@ -108,7 +108,7 @@ Pada jendela (_window_) perintah biasa, bukan pada _shell_ kamu di dalam Contain lihatlah daftar variabel lingkungan (_environment variable_) pada Container yang sedang berjalan: ```shell -kubectl exec shell-demo env +kubectl exec shell-demo -- env ``` Cobalah dengan menjalankan perintah lainnya. Berikut beberapa contohnya: diff --git a/content/id/docs/tasks/inject-data-application/define-command-argument-container.md b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md index 9f2cd7a7aefc8..f2d248232e004 100644 --- a/content/id/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/id/docs/tasks/inject-data-application/define-command-argument-container.md @@ -44,7 +44,7 @@ Merujuk pada [catatan](#catatan) di bawah. Pada latihan ini, kamu akan membuat sebuah Pod baru yang menjalankan sebuah Container. 
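The exercise builds toward a manifest of roughly this shape (a hedged sketch; `pods/commands.yaml` is the canonical version):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: command-demo
spec:
  containers:
    - name: command-demo-container
      image: debian
      command: ["printenv"]                   # overrides the image's ENTRYPOINT
      args: ["HOSTNAME", "KUBERNETES_PORT"]   # the two arguments, overriding CMD
  restartPolicy: OnFailure
```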
Berkas konfigurasi untuk Pod mendefinisikan sebuah perintah dan dua argumen: -{{< codenew file="pods/commands.yaml" >}} +{{% codenew file="pods/commands.yaml" %}} 1. Buat sebuah Pod dengan berkas konfigurasi YAML: diff --git a/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md index 0f35ef27f7188..584866d4c4d12 100644 --- a/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md +++ b/content/id/docs/tasks/inject-data-application/define-environment-variable-container.md @@ -30,7 +30,7 @@ Dalam latihan ini, kamu membuat sebuah Pod yang menjalankan satu buah Container. Berkas konfigurasi untuk Pod tersebut mendefinisikan sebuah variabel lingkungan dengan nama `DEMO_GREETING` yang bernilai `"Hello from the environment"`. Berikut berkas konfigurasi untuk Pod tersebut: -{{< codenew file="pods/inject/envars.yaml" >}} +{{% codenew file="pods/inject/envars.yaml" %}} 1. Buatlah sebuah Pod berdasarkan berkas konfigurasi YAML tersebut: diff --git a/content/id/docs/tasks/inject-data-application/distribute-credentials-secure.md b/content/id/docs/tasks/inject-data-application/distribute-credentials-secure.md index c08db9484f6cc..5d4c3633fac96 100644 --- a/content/id/docs/tasks/inject-data-application/distribute-credentials-secure.md +++ b/content/id/docs/tasks/inject-data-application/distribute-credentials-secure.md @@ -37,7 +37,7 @@ Gunakan alat yang telah dipercayai oleh OS kamu untuk menghindari risiko dari pe Berikut ini adalah berkas konfigurasi yang dapat kamu gunakan untuk membuat Secret yang akan menampung nama pengguna dan kata sandi kamu: -{{< codenew file="pods/inject/secret.yaml" >}} +{{% codenew file="pods/inject/secret.yaml" %}} 1. Membuat Secret @@ -95,7 +95,7 @@ Tentu saja ini lebih mudah. Pendekatan yang mendetil setiap langkah di atas bert Berikut ini adalah berkas konfigurasi yang dapat kamu gunakan untuk membuat Pod: -{{< codenew file="pods/inject/secret-pod.yaml" >}} +{{% codenew file="pods/inject/secret-pod.yaml" %}} 1. Membuat Pod: @@ -157,7 +157,7 @@ Berikut ini adalah berkas konfigurasi yang dapat kamu gunakan untuk membuat Pod: * Tentukan nilai `backend-username` yang didefinisikan di Secret ke variabel lingkungan `SECRET_USERNAME` di dalam spesifikasi Pod. - {{< codenew file="pods/inject/pod-single-secret-env-variable.yaml" >}} + {{% codenew file="pods/inject/pod-single-secret-env-variable.yaml" %}} * Membuat Pod: @@ -187,7 +187,7 @@ Berikut ini adalah berkas konfigurasi yang dapat kamu gunakan untuk membuat Pod: * Definisikan variabel lingkungan di dalam spesifikasi Pod. - {{< codenew file="pods/inject/pod-multiple-secret-env-variable.yaml" >}} + {{% codenew file="pods/inject/pod-multiple-secret-env-variable.yaml" %}} * Membuat Pod: @@ -221,7 +221,7 @@ Fitur ini tersedia mulai dari Kubernetes v1.6 dan yang lebih baru. * Gunakan envFrom untuk mendefinisikan semua data Secret sebagai variabel lingkungan Container. _Key_ dari Secret akan menjadi nama variabel lingkungan di dalam Pod.
- {{< codenew file="pods/inject/pod-secret-envFrom.yaml" >}} + {{% codenew file="pods/inject/pod-secret-envFrom.yaml" %}} * Membuat Pod: diff --git a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md index 349a283d7a01a..6bf0f53532aa8 100644 --- a/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/id/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -34,7 +34,7 @@ Untuk informasi lanjut mengenai keterbatasan, lihat [CronJob](/id/docs/concepts/ CronJob membutuhkan sebuah berkas konfigurasi. Ini adalah contoh dari berkas konfigurasi CronJob `.spec` yang akan mencetak waktu sekarang dan pesan "hello" setiap menit: -{{< codenew file="application/job/cronjob.yaml" >}} +{{% codenew file="application/job/cronjob.yaml" %}} Jalankan contoh CronJob menggunakan perintah berikut: diff --git a/content/id/docs/tasks/manage-kubernetes-objects/declarative-config.md b/content/id/docs/tasks/manage-kubernetes-objects/declarative-config.md index 88eeaf38d3079..073937e189409 100644 --- a/content/id/docs/tasks/manage-kubernetes-objects/declarative-config.md +++ b/content/id/docs/tasks/manage-kubernetes-objects/declarative-config.md @@ -52,7 +52,7 @@ Tambahkan parameter `-R` untuk memproses seluruh direktori secara rekursif. Berikut sebuah contoh *file* konfigurasi objek: -{{< codenew file="application/simple_deployment.yaml" >}} +{{% codenew file="application/simple_deployment.yaml" %}} Jalankan perintah `kubectl diff` untuk menampilkan objek yang akan dibuat: @@ -135,7 +135,7 @@ Tambahkan argumen `-R` untuk memproses seluruh direktori secara rekursif. Berikut sebuah contoh *file* konfigurasi: -{{< codenew file="application/simple_deployment.yaml" >}} +{{% codenew file="application/simple_deployment.yaml" %}} Buat objek dengan perintah `kubectl apply`: @@ -248,7 +248,7 @@ spec: Perbarui *file* konfigurasi `simple_deployment.yaml`, ubah *image* dari `nginx:1.7.9` ke `nginx:1.11.9`, dan hapus *field* `minReadySeconds`: -{{< codenew file="application/update_deployment.yaml" >}} +{{% codenew file="application/update_deployment.yaml" %}} Terapkan perubahan yang telah dibuat di *file* konfigurasi: @@ -379,7 +379,7 @@ Perintah `kubectl apply` menulis konten dari berkas konfigurasi ke anotasi `kube Agar lebih jelas, simak contoh berikut. Misalkan, berikut adalah *file* konfigurasi untuk sebuah objek Deployment: -{{< codenew file="application/update_deployment.yaml" >}} +{{% codenew file="application/update_deployment.yaml" %}} Juga, misalkan, berikut adalah konfigurasi *live* dari objek Deployment yang sama: @@ -627,7 +627,7 @@ TODO(pwittrock): *Uncomment* ini untuk versi 1.6 Berikut adalah sebuah *file* konfigurasi untuk sebuah Deployment.
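For orientation, a Deployment of the shape this section keeps returning to might look like the following sketch; the field values are assumptions consistent with the surrounding text, and `application/simple_deployment.yaml` is the authoritative file:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  minReadySeconds: 5        # present here, removed later by update_deployment.yaml
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.7.9   # updated to nginx:1.11.9 later in this task
          ports:
            - containerPort: 80
```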
Berkas berikut tidak menspesifikasikan `strategy`: -{{< codenew file="application/simple_deployment.yaml" >}} +{{% codenew file="application/simple_deployment.yaml" %}} Buat objek dengan perintah `kubectl apply`: diff --git a/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 7a23efa6ff3c4..1c16b087b79db 100644 --- a/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/id/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -57,7 +57,7 @@ Bagian ini mendefinisikan laman index.php yang melakukan beberapa komputasi inte Pertama, kita akan memulai Deployment yang menjalankan _image_ dan mengeksposnya sebagai Service menggunakan konfigurasi berikut: -{{< codenew file="application/php-apache.yaml" >}} +{{% codenew file="application/php-apache.yaml" %}} Jalankan perintah berikut: @@ -434,7 +434,7 @@ Semua metrik di HorizontalPodAutoscaler dan metrik API ditentukan menggunakan no Daripada menggunakan perintah `kubectl autoscale` untuk membuat HorizontalPodAutoscaler secara imperatif, kita dapat menggunakan berkas berikut untuk membuatnya secara deklaratif: -{{< codenew file="application/hpa/php-apache.yaml" >}} +{{% codenew file="application/hpa/php-apache.yaml" %}} Kita akan membuat _autoscaler_ dengan menjalankan perintah berikut: diff --git a/content/id/docs/tasks/run-application/run-stateless-application-deployment.md b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md index 74e76c827be57..3e96eb1fda1e6 100644 --- a/content/id/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/id/docs/tasks/run-application/run-stateless-application-deployment.md @@ -38,7 +38,7 @@ Kamu dapat menjalankan aplikasi dengan membuat sebuah objek Deployment Kubernete dapat mendeskripsikan sebuah Deployment di dalam berkas YAML. Sebagai contohnya, berkas YAML berikut mendeskripsikan sebuah Deployment yang menjalankan _image_ Docker nginx:1.14.2: -{{< codenew file="application/deployment.yaml" >}} +{{% codenew file="application/deployment.yaml" %}} 1. Buatlah sebuah Deployment berdasarkan berkas YAML: @@ -100,7 +100,7 @@ YAML berikut mendeskripsikan sebuah Deployment yang menjalankan _image_ Docker n Kamu dapat mengubah Deployment dengan cara mengaplikasikan berkas YAML yang baru. Berkas YAML ini memberikan spesifikasi Deployment untuk menggunakan Nginx versi 1.16.1. -{{< codenew file="application/deployment-update.yaml" >}} +{{% codenew file="application/deployment-update.yaml" %}} 1. Terapkan berkas YAML yang baru: @@ -116,7 +116,7 @@ Kamu dapat meningkatkan jumlah Pod di dalam Deployment dengan menerapkan berkas YAML baru. Berkas YAML ini akan meningkatkan jumlah replika menjadi 4, yang nantinya memberikan spesifikasi agar Deployment memiliki 4 buah Pod. -{{< codenew file="application/deployment-scale.yaml" >}} +{{% codenew file="application/deployment-scale.yaml" %}} 1. 
Terapkan berkas YAML: diff --git a/content/id/docs/tutorials/_index.md b/content/id/docs/tutorials/_index.md index 041b7b62d2387..ba0deb8011adf 100644 --- a/content/id/docs/tutorials/_index.md +++ b/content/id/docs/tutorials/_index.md @@ -50,7 +50,7 @@ Sebelum melangkah lebih lanjut ke tutorial, sebaiknya tandai dulu halaman [Kamus * [AppArmor](/docs/tutorials/clusters/apparmor/) -* [seccomp](/docs/tutorials/clusters/seccomp/) +* [Seccomp](/docs/tutorials/clusters/seccomp/) ## Servis diff --git a/content/id/docs/tutorials/hello-minikube.md b/content/id/docs/tutorials/hello-minikube.md index 6790dbf47fba5..d2e4a5de76677 100644 --- a/content/id/docs/tutorials/hello-minikube.md +++ b/content/id/docs/tutorials/hello-minikube.md @@ -38,9 +38,9 @@ Kamupun bisa mengikuti tutorial ini kalau sudah instalasi minikube di lokal. Sil Tutorial ini menyediakan image Kontainer yang dibuat melalui barisan kode berikut: -{{< codenew language="js" file="minikube/server.js" >}} +{{% codenew language="js" file="minikube/server.js" %}} -{{< codenew language="conf" file="minikube/Dockerfile" >}} +{{% codenew language="conf" file="minikube/Dockerfile" %}} Untuk info lebih lanjut tentang perintah `docker build`, baca [dokumentasi Docker](https://docs.docker.com/engine/reference/commandline/build/). diff --git a/content/id/docs/tutorials/stateful-application/basic-stateful-set.md b/content/id/docs/tutorials/stateful-application/basic-stateful-set.md index b664a3bb8abf1..7ce5437d61bfc 100644 --- a/content/id/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/id/docs/tutorials/stateful-application/basic-stateful-set.md @@ -59,7 +59,7 @@ Contoh ini menciptakan sebuah [Service _headless_](/id/docs/concepts/services-networking/service/#service-headless), `nginx`, untuk mempublikasikan alamat IP Pod di dalam StatefulSet, `web`. -{{< codenew file="application/web/web.yaml" >}} +{{% codenew file="application/web/web.yaml" %}} Unduh contoh di atas, dan simpan ke dalam berkas dengan nama `web.yaml`. @@ -1075,7 +1075,7 @@ menjalankan atau mengakhiri semua Pod secara bersamaan (paralel), dan tidak menu suatu Pod menjadi Running dan Ready atau benar-benar berakhir sebelum menjalankan atau mengakhiri Pod yang lain. -{{< codenew file="application/web/web-parallel.yaml" >}} +{{% codenew file="application/web/web-parallel.yaml" %}} Unduh contoh di atas, dan simpan ke sebuah berkas dengan nama `web-parallel.yaml`. diff --git a/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md index df297f4c634b7..2152c8e0e3621 100644 --- a/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/id/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -42,7 +42,7 @@ yang mengekspos alamat IP eksternal. 1. 
Jalankan sebuah aplikasi Hello World pada klaster kamu: -{{< codenew file="service/load-balancer-example.yaml" >}} +{{% codenew file="service/load-balancer-example.yaml" %}} ```shell kubectl apply -f https://k8s.io/examples/service/load-balancer-example.yaml diff --git a/content/id/examples/application/deployment-sidecar.yaml b/content/id/examples/application/deployment-sidecar.yaml new file mode 100644 index 0000000000000..3f1b841d31ebf --- /dev/null +++ b/content/id/examples/application/deployment-sidecar.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp + labels: + app: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: myapp + image: alpine:latest + command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + volumes: + - name: data + emptyDir: {} \ No newline at end of file diff --git a/content/id/examples/application/job/job-sidecar.yaml b/content/id/examples/application/job/job-sidecar.yaml new file mode 100644 index 0000000000000..9787ad88515b2 --- /dev/null +++ b/content/id/examples/application/job/job-sidecar.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + template: + spec: + containers: + - name: myjob + image: alpine:latest + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + restartPolicy: Never + volumes: + - name: data + emptyDir: {} \ No newline at end of file diff --git a/content/it/_index.html b/content/it/_index.html index 23f7b881eb9a8..177bfa5c09f3b 100644 --- a/content/it/_index.html +++ b/content/it/_index.html @@ -4,6 +4,8 @@ cid: home --- +{{< site-searchbar >}} + {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} ### [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/what-is-kubernetes" >}}) è un software open-source per l'automazione del deployment, scalabilità, e gestione di applicativi in containers. diff --git a/content/it/community/_index.html b/content/it/community/_index.html index 0beaa496a0920..b8030abd4887b 100644 --- a/content/it/community/_index.html +++ b/content/it/community/_index.html @@ -227,7 +227,7 @@

      Notizie Recenti



      diff --git a/content/it/docs/home/_index.md b/content/it/docs/home/_index.md index 3f8e3ae0f09b4..be3a2bbe2a5f9 100644 --- a/content/it/docs/home/_index.md +++ b/content/it/docs/home/_index.md @@ -6,7 +6,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage -linkTitle: "Home" +linkTitle: "Documentazione" main_menu: true weight: 10 hide_feedback: true diff --git a/content/it/docs/tutorials/_index.md b/content/it/docs/tutorials/_index.md index 240fce078ce2e..a9a7e626f5f57 100644 --- a/content/it/docs/tutorials/_index.md +++ b/content/it/docs/tutorials/_index.md @@ -21,9 +21,7 @@ Prima di procedere con vari tutorial, raccomandiamo di aggiungere il ## Per cominciare * [Kubernetes Basics](/docs/tutorials/kubernetes-basics/) è un approfondito tutorial che aiuta a capire cosa è Kubernetes e che permette di testare in modo interattivo alcune semplici funzionalità di Kubernetes. - * [Introduction to Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) - * [Hello Minikube](/docs/tutorials/hello-minikube/) ## Configurazione @@ -33,36 +31,26 @@ Prima di procedere con vari tutorial, raccomandiamo di aggiungere il ## Stateless Applications * [Esporre un External IP Address per permettere l'accesso alle applicazioni nel Cluster](/docs/tutorials/stateless-application/expose-external-ip-address/) - * [Esempio: Rilasciare l'applicazione PHP Guestbook con Redis](/docs/tutorials/stateless-application/guestbook/) ## Stateful Applications * [StatefulSet Basics](/docs/tutorials/stateful-application/basic-stateful-set/) - * [Esempio: WordPress e MySQL con i PersistentVolumes](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) - * [Esempio: Rilasciare Cassandra con i StatefulSets](/docs/tutorials/stateful-application/cassandra/) - * [Eseguire ZooKeeper, un sistema distribuito CP](/docs/tutorials/stateful-application/zookeeper/) ## Clusters * [AppArmor](/docs/tutorials/clusters/apparmor/) - -* [seccomp](/docs/tutorials/clusters/seccomp/) +* [Seccomp](/docs/tutorials/clusters/seccomp/) ## Servizi * [Utilizzare Source IP](/docs/tutorials/services/source-ip/) - - ## {{% heading "whatsnext" %}} - Se sei interessato a scrivere un tutorial, vedi [Utilizzare i Page Templates](/docs/home/contribute/page-templates/) per informazioni su come creare una tutorial page e sul tutorial template. - - diff --git a/content/ja/_index.html b/content/ja/_index.html index 875370db123d1..5971f5ee6c117 100644 --- a/content/ja/_index.html +++ b/content/ja/_index.html @@ -4,6 +4,8 @@ cid: home --- +{{< site-searchbar >}} + {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} ### [Kubernetes (K8s)]({{< relref "/docs/concepts/overview/" >}})は、デプロイやスケーリングを自動化したり、コンテナ化されたアプリケーションを管理したりするための、オープンソースのシステムです。 diff --git a/content/ja/community/_index.html b/content/ja/community/_index.html index 6a03c167c85e3..8785cd452fffa 100644 --- a/content/ja/community/_index.html +++ b/content/ja/community/_index.html @@ -226,7 +226,7 @@

      最新ニュース



      diff --git a/content/ja/docs/concepts/security/overview.md b/content/ja/docs/concepts/security/overview.md index 01cafbbd0c437..b95d113a8e709 100644 --- a/content/ja/docs/concepts/security/overview.md +++ b/content/ja/docs/concepts/security/overview.md @@ -117,7 +117,7 @@ TLS経由のアクセスのみ | コードがTCP通信を必要とする場合 通信ポートの範囲制限 | この推奨事項は一目瞭然かもしれませんが、可能なかぎり、通信とメトリクス収集に必要不可欠なサービスのポートのみを公開します。 | サードパティに依存するセキュリティ | 既知の脆弱性についてアプリケーションのサードパーティ製ライブラリーを定期的にスキャンすることを推奨します。それぞれの言語は自動でこのチェックを実行するツールを持っています。 | 静的コード解析 | ほとんどの言語ではコードのスニペットを解析して、安全でない可能性のあるコーディングを分析する方法が提供しています。可能な限り、コードベースでスキャンして、よく起こるセキュリティエラーを検出できる自動ツールを使用してチェックを実行すべきです。一部のツールはここで紹介されています。 https://owasp.org/www-community/Source_Code_Analysis_Tools | -動的プロービング攻撃 | よく知られているいくつかのサービス攻撃をサービスに対して試すことができる自動ツールがいくつかあります。これにはSQLインジェクション、CSRF、そしてXSSが含まれます。よく知られている動的解析ツールは[OWASP Zed Attack proxy](https://owasp.org/www-project-zap/)toolです。 | +動的プロービング攻撃 | よく知られているいくつかのサービス攻撃をサービスに対して試すことができる自動ツールがいくつかあります。これにはSQLインジェクション、CSRF、そしてXSSが含まれます。よく知られている動的解析ツールは[OWASP Zed Attack proxy](https://www.zaproxy.org/)toolです。 | {{< /table >}} diff --git a/content/ja/docs/concepts/security/secrets-good-practices.md b/content/ja/docs/concepts/security/secrets-good-practices.md new file mode 100644 index 0000000000000..53bf69959a6de --- /dev/null +++ b/content/ja/docs/concepts/security/secrets-good-practices.md @@ -0,0 +1,81 @@ +--- +title: Kubernetes Secretの適切な使用方法 +description: > + クラスター管理者とアプリケーション開発者向けの適切なSecret管理の原則と実践方法。 +content_type: concept +weight: 70 +--- + + + +{{}} + +以下の適切な使用方法は、クラスター管理者とアプリケーション開発者の両方を対象としています。 +これらのガイドラインに従って、Secretオブジェクト内の機密情報のセキュリティを向上させ、Secretの効果的な管理を行ってください。 + + + +## クラスター管理者 + +このセクションでは、クラスター管理者がクラスター内の機密情報のセキュリティを強化するために使用できる適切な方法を提供します。 + +### データ保存時の暗号化を構成する + +デフォルトでは、Secretオブジェクトは{{}}内で暗号化されていない状態で保存されます。 +`etcd`内のSecretデータを暗号化するように構成する必要があります。 +手順については、[機密データ保存時の暗号化](/docs/tasks/administer-cluster/encrypt-data/)を参照してください。 + +### Secretへの最小特権アクセスを構成する {#least-privilege-secrets} + +Kubernetesの{{}} [(RBAC)](/docs/reference/access-authn-authz/rbac/)などのアクセス制御メカニズムを計画する際、`Secret`オブジェクトへのアクセスに関する以下のガイドラインを考慮してください。 +また、[RBACの適切な使用方法](/docs/concepts/security/rbac-good-practices)の他のガイドラインにも従ってください。 + +- **コンポーネント**: `watch`または`list`アクセスを、最上位の特権を持つシステムレベルのコンポーネントのみに制限してください。コンポーネントの通常の動作が必要とする場合にのみ、Secretへの`get`アクセスを許可してください。 +- **ユーザー**: Secretへの`get`、`watch`、`list`アクセスを制限してください。`etcd`へのアクセスはクラスター管理者にのみ許可し、読み取り専用アクセスも許可してください。特定の注釈を持つSecretへのアクセスを制限するなど、より複雑なアクセス制御については、サードパーティの認証メカニズムを検討してください。 + +{{< caution >}} +Secretへの`list`アクセスを暗黙的に許可すると、サブジェクトがSecretの内容を取得できるようになります。 +{{< /caution >}} + +Secretを使用するPodを作成できるユーザーは、そのSecretの値も見ることができます。 +クラスターのポリシーがユーザーにSecretを直接読むことを許可しない場合でも、同じユーザーがSecretを公開するPodを実行するアクセスを持つかもしれません。 +このようなアクセスを持つユーザーによるSecretデータの意図的または偶発的な公開の影響を検出または制限することができます。 +いくつかの推奨事項には以下があります: + +* 短寿命のSecretを使用する +* 特定のイベントに対してアラートを出す監査ルールを実装する(例:単一ユーザーによる複数のSecretの同時読み取り) + +### etcdの管理ポリシーを改善する + +使用しなくなった場合には、`etcd`が使用する永続ストレージを削除するかシュレッダーで処理してください。 + +複数の`etcd`インスタンスがある場合、インスタンス間の通信を暗号化されたSSL/TLS通信に設定して、転送中のSecretデータを保護してください。 + +### 外部Secretへのアクセスを構成する + +{{% thirdparty-content %}} + +外部のSecretストアプロバイダーを使用して機密データをクラスターの外部に保存し、その情報にアクセスするようにPodを構成できます。 +[Kubernetes Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/)は、kubeletが外部ストアからSecretを取得し、データにアクセスすることを許可された特定のPodにSecretをボリュームとしてマウントするDaemonSetです。 + +サポートされているプロバイダーの一覧については、[Secret Store CSI Driverのプロバイダー](https://secrets-store-csi-driver.sigs.k8s.io/concepts.html#provider-for-the-secrets-store-csi-driver)を参照してください。 + +## 開発者 + 
+このセクションでは、Kubernetesリソースの作成と展開時に機密データのセキュリティを向上させるための開発者向けの適切な使用方法を提供します。 + +### 特定のコンテナへのSecretアクセスを制限する + +Pod内で複数のコンテナを定義し、そのうち1つのコンテナだけがSecretへのアクセスを必要とする場合、他のコンテナがそのSecretにアクセスできないようにボリュームマウントや環境変数の設定を行ってください。 + +### 読み取り後にSecretデータを保護する + +アプリケーションは、環境変数やボリュームから機密情報を読み取った後も、その値を保護する必要があります。 +例えば、アプリケーションは機密情報を平文でログに記録したり、信頼できない第三者に送信したりしないようにする必要があります。 + +### Secretマニフェストの共有を避ける +Secretを{{< glossary_tooltip text="マニフェスト" term_id="manifest" >}}を介して設定し、秘密データをBase64でエンコードしている場合、このファイルを共有したりソースリポジトリにチェックインしたりすると、その秘密はマニフェストを読むことのできる全員に公開されます。 + +{{< caution >}} +Base64エンコードは暗号化方法ではなく、平文と同じく機密性を提供しません。 +{{< /caution >}} diff --git a/content/ja/docs/concepts/storage/storage-limits.md b/content/ja/docs/concepts/storage/storage-limits.md index e3df1f3bc9732..df8519dc1a9f1 100644 --- a/content/ja/docs/concepts/storage/storage-limits.md +++ b/content/ja/docs/concepts/storage/storage-limits.md @@ -53,7 +53,7 @@ Kubernetesスケジューラーには、ノードに接続できるボリュー * Azureでは、ノードの種類に応じて、最大64個のディスクをノードに接続できます。詳細については、[Azureの仮想マシンのサイズ](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes)を参照してください。 -* CSIストレージドライバーが(`NodeGetInfo`を使用して)ノードの最大ボリューム数をアドバタイズする場合、{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}はその制限を尊重します。詳細については、[CSIの仕様](https://github.com/ontainer-storage-interface/spec/blob/master/spec.md#nodegetinfo)を参照してください。 +* CSIストレージドライバーが(`NodeGetInfo`を使用して)ノードの最大ボリューム数をアドバタイズする場合、{{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}はその制限を尊重します。詳細については、[CSIの仕様](https://github.com/container-storage-interface/spec/blob/master/spec.md#nodegetinfo)を参照してください。 * CSIドライバーに移行されたツリー内プラグインによって管理されるボリュームの場合、ボリュームの最大数はCSIドライバーによって報告される数になります。 diff --git a/content/ja/docs/concepts/storage/volumes.md b/content/ja/docs/concepts/storage/volumes.md index c4445d49c49f7..df385712b7a7b 100644 --- a/content/ja/docs/concepts/storage/volumes.md +++ b/content/ja/docs/concepts/storage/volumes.md @@ -899,7 +899,7 @@ spec: Portworxの`CSIMigration`機能が追加されましたが、Kubernetes 1.23ではAlpha状態であるため、デフォルトで無効になっています。 すべてのプラグイン操作を既存のツリー内プラグインから`pxd.portworx.com`Container Storage Interface(CSI)ドライバーにリダイレクトします。 -[Portworx CSIドライバー](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)をクラスターにインストールする必要があります。 +[Portworx CSIドライバー](https://docs.portworx.com/portworx-enterprise/operations/operate-kubernetes/storage-operations/csi)をクラスターにインストールする必要があります。 この機能を有効にするには、kube-controller-managerとkubeletで`CSIMigrationPortworx=true`を設定します。 ## subPathの使用 {#using-subpath} diff --git a/content/ja/docs/concepts/workloads/controllers/job.md b/content/ja/docs/concepts/workloads/controllers/job.md index 9b406420e29cd..c32b23db8353f 100644 --- a/content/ja/docs/concepts/workloads/controllers/job.md +++ b/content/ja/docs/concepts/workloads/controllers/job.md @@ -332,7 +332,7 @@ Pod失敗ポリシーまたはPod失敗のバックオフポリシーのいず これらはAPIの要件と機能です: - `.spec.podFailurePolicy`フィールドをJobに使いたい場合は、`.spec.restartPolicy`を`Never`に設定してそのJobのPodテンプレートも定義する必要があります。 - `spec.podFailurePolicy.rules`で指定したPod失敗ポリシーのルールが順番に評価されます。あるPodの失敗がルールに一致すると、残りのルールは無視されます。Pod失敗に一致するルールがない場合は、デフォルトの処理が適用されます。 - - `spec.podFailurePolicy.rules[*].containerName`を指定することで、ルールを特定のコンテナに制限することができます。指定しない場合、ルールはすべてのコンテナに適用されます。指定する場合は、Pod テンプレート内のコンテナ名または`initContainer`名のいずれかに一致する必要があります。 + - `spec.podFailurePolicy.rules[*].onExitCodes.containerName`を指定することで、ルールを特定のコンテナに制限することができます。指定しない場合、ルールはすべてのコンテナに適用されます。指定する場合は、Pod テンプレート内のコンテナ名または`initContainer`名のいずれかに一致する必要があります。 - 
Pod失敗ポリシーが`spec.podFailurePolicy.rules[*].action`にマッチしたときに実行されるアクションを指定できます。指定可能な値は以下のとおりです。 - `FailJob`: PodのJobを`Failed`としてマークし、実行中の Pod をすべて終了させる必要があることを示します。 - `Ignore`: `.spec.backoffLimit`のカウンターは加算されず、代替のPodが作成すべきであることを示します。 diff --git a/content/ja/docs/concepts/workloads/controllers/replicaset.md b/content/ja/docs/concepts/workloads/controllers/replicaset.md index c3d5282fa5c15..1f73fe842f911 100644 --- a/content/ja/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ja/docs/concepts/workloads/controllers/replicaset.md @@ -275,7 +275,7 @@ ReplicaSetは、ただ`.spec.replicas`フィールドを更新することによ [`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost)アノテーションを使用すると、ReplicaSetをスケールダウンする際に、どのPodを最初に削除するかについて、ユーザーが優先順位を設定することができます。 -アノテーションはPodに設定する必要があり、範囲は[-2147483647, 2147483647]になります。同じReplicaSetに属する他のPodと比較して、Podを削除する際のコストを表しています。削除コストの低いPodは、削除コストの高いPodより優先的に削除されます。 +アノテーションはPodに設定する必要があり、範囲は[-2147483648, 2147483647]になります。同じReplicaSetに属する他のPodと比較して、Podを削除する際のコストを表しています。削除コストの低いPodは、削除コストの高いPodより優先的に削除されます。 このアノテーションを設定しないPodは暗黙的に0と設定され、負の値は許容されます。 無効な値はAPIサーバーによって拒否されます。 diff --git a/content/ja/docs/concepts/workloads/controllers/statefulset.md b/content/ja/docs/concepts/workloads/controllers/statefulset.md index a952c3bee85c4..48aa86f68bc43 100644 --- a/content/ja/docs/concepts/workloads/controllers/statefulset.md +++ b/content/ja/docs/concepts/workloads/controllers/statefulset.md @@ -142,7 +142,7 @@ Cluster Domain | Service (ns/name) | StatefulSet (ns/name) | StatefulSet Domain ### 安定したストレージ -Kubernetesは各VolumeClaimTemplateに対して、1つの[PersistentVolume](/docs/concepts/storage/persistent-volumes/)を作成します。上記のnginxの例において、各Podは`my-storage-class`というStorageClassをもち、1Gibのストレージ容量を持った単一のPersistentVolumeを受け取ります。もしStorageClassが指定されていない場合、デフォルトのStorageClassが使用されます。PodがNode上にスケジュール(もしくは再スケジュール)されたとき、その`volumeMounts`はPersistentVolume Claimに関連したPersistentVolumeをマウントします。 +Kubernetesは各VolumeClaimTemplateに対して、1つの[PersistentVolume](/docs/concepts/storage/persistent-volumes/)を作成します。上記のnginxの例において、各Podは`my-storage-class`というStorageClassをもち、1GiBのストレージ容量を持った単一のPersistentVolumeを受け取ります。もしStorageClassが指定されていない場合、デフォルトのStorageClassが使用されます。PodがNode上にスケジュール(もしくは再スケジュール)されたとき、その`volumeMounts`はPersistentVolume Claimに関連したPersistentVolumeをマウントします。 注意点として、PodのPersistentVolume Claimと関連したPersistentVolumeは、PodやStatefulSetが削除されたときに削除されません。 削除する場合は手動で行わなければなりません。 diff --git a/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md index 89d521098c809..a975102d76207 100644 --- a/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ja/docs/concepts/workloads/pods/pod-lifecycle.md @@ -92,7 +92,7 @@ Podの`spec`には、Always、OnFailure、またはNeverのいずれかの値を PodにはPodStatusがあります。それにはPodが成功したかどうかの情報を持つ[PodCondition](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podcondition-v1-core)の配列が含まれています。kubeletは、下記のPodConditionを管理します: * `PodScheduled`: PodがNodeにスケジュールされました。 -* `PodHasNetwork`: (アルファ版機能; [明示的に有効](#pod-has-network)にしなければならない) Podサンドボックスが正常に成功され、ネットワークの設定が完了しました。 +* `PodHasNetwork`: (アルファ版機能; [明示的に有効](#pod-has-network)にしなければならない) Podサンドボックスが正常に作成され、ネットワークの設定が完了しました。 * `ContainersReady`: Pod内のすべてのコンテナが準備できた状態です。 * `Initialized`: すべての[Initコンテナ](/ja/docs/concepts/workloads/pods/init-containers)が正常に終了しました。 * `Ready`: Podはリクエストを処理でき、一致するすべてのサービスの負荷分散プールに追加されます。 @@ -206,7 +206,7 @@ probeを使ってコンテナをチェックする4つの異なる方法があ : コンテナの診断が失敗しました。 `Unknown` -: コンテナの診断が失敗しました(何も実行する必要はなく、kubeletはさらにチェックを行います)。 +: 
コンテナの診断自体が失敗しました(何も実行する必要はなく、kubeletはさらにチェックを行います)。 ### Probeの種類 {#types-of-probe} diff --git a/content/ja/docs/home/_index.md b/content/ja/docs/home/_index.md index 097c6f6fce7f2..91959b469c126 100644 --- a/content/ja/docs/home/_index.md +++ b/content/ja/docs/home/_index.md @@ -5,7 +5,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "ホーム" +linkTitle: "ドキュメント" main_menu: true weight: 10 hide_feedback: true diff --git a/content/ja/docs/reference/glossary/addons.md b/content/ja/docs/reference/glossary/addons.md new file mode 100644 index 0000000000000..ee781366e0231 --- /dev/null +++ b/content/ja/docs/reference/glossary/addons.md @@ -0,0 +1,16 @@ +--- +title: Add-ons +id: addons +date: 2019-12-15 +full_link: /ja/docs/concepts/cluster-administration/addons/ +short_description: > + Kubernetesの機能を拡張するリソース。 + +aka: +tags: +- tool +--- + Kubernetesの機能を拡張するリソース。 + + +[アドオンのインストール](/ja/docs/concepts/cluster-administration/addons/)では、クラスターのアドオン使用について詳しく説明し、いくつかの人気のあるアドオンを列挙します。 diff --git a/content/ja/docs/reference/glossary/helm-chart.md b/content/ja/docs/reference/glossary/helm-chart.md new file mode 100644 index 0000000000000..0b142063dec55 --- /dev/null +++ b/content/ja/docs/reference/glossary/helm-chart.md @@ -0,0 +1,19 @@ +--- +title: Helmチャート +id: helm-chart +date: 2018-04-12 +full_link: https://helm.sh/docs/topics/charts/ +short_description: > + Helmツールで管理できる、事前構成されたKubernetesリソースのパッケージ。 + +aka: +tags: +- tool +--- + Helmツールで管理できる、事前構成されたKubernetesリソースのパッケージ。 + + + +チャートは、Kubernetesアプリケーションを作成および共有する再現可能な方法を提供します。 +単一のチャートを使用して、memcached Podなどの単純なもの、またはHTTPサーバー、データベース、キャッシュなどを含む完全なWebアプリスタックなどの複雑なものをデプロイできます。 + diff --git a/content/ja/docs/reference/glossary/kubeadm.md b/content/ja/docs/reference/glossary/kubeadm.md new file mode 100644 index 0000000000000..535f26183dc5a --- /dev/null +++ b/content/ja/docs/reference/glossary/kubeadm.md @@ -0,0 +1,18 @@ +--- +title: Kubeadm +id: kubeadm +date: 2018-04-12 +full_link: /ja/docs/reference/setup-tools/kubeadm/ +short_description: > + Kubernetesを迅速にインストールし、安全なクラスターをセットアップするためのツール。 + +aka: +tags: +- tool +- operation +--- + Kubernetesを迅速にインストールし、安全なクラスターをセットアップするためのツール。 + + + +kubeadmを使用して、コントロールプレーンとワーカーノード{{< glossary_tooltip text="ワーカーノード" term_id="node" >}}コンポーネントの両方をインストールできます。 diff --git a/content/ja/docs/reference/glossary/secret.md b/content/ja/docs/reference/glossary/secret.md index 3324279bd65f9..8f7196f0c6d9c 100644 --- a/content/ja/docs/reference/glossary/secret.md +++ b/content/ja/docs/reference/glossary/secret.md @@ -15,4 +15,6 @@ tags: -機密情報の取り扱い方法を細かく制御することができ、保存時には[暗号化](/ja/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted)するなど、誤って公開してしまうリスクを減らすことができます。{{< glossary_tooltip text="Pod" term_id="pod" >}}は、ボリュームマウントされたファイルとして、またはPodのイメージをPullするkubeletによって、Secretを参照します。Secretは機密情報を扱うのに最適で、機密でない情報には[ConfigMap](/ja/docs/tasks/configure-pod-container/configure-pod-configmap/)が適しています。 +Secretは、機密情報の使用方法をより管理しやすくし、偶発的な漏洩のリスクを減らすことができます。Secretの値はbase64文字列としてエンコードされ、デフォルトでは暗号化されずに保存されますが、[保存時に暗号化](/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted)するように設定することもできます。 + +{{< glossary_tooltip text="Pod" term_id="pod" >}}は、ボリュームマウントや環境変数など、さまざまな方法でSecretを参照できます。Secretは機密データ用に設計されており、[ConfigMap](/ja/docs/tasks/configure-pod-container/configure-pod-configmap/)は非機密データ用に設計されています。 \ No newline at end of file diff --git a/content/ja/docs/setup/best-practices/certificates.md b/content/ja/docs/setup/best-practices/certificates.md index 
a782b52fee1e9..a499631875a05 100644 --- a/content/ja/docs/setup/best-practices/certificates.md +++ b/content/ja/docs/setup/best-practices/certificates.md @@ -67,7 +67,7 @@ CAの秘密鍵をクラスターにコピーしたくない場合、自身で全 | kube-etcd | etcd-ca | | server, client | ``, ``, `localhost`, `127.0.0.1` | | kube-etcd-peer | etcd-ca | | server, client | ``, ``, `localhost`, `127.0.0.1` | | kube-etcd-healthcheck-client | etcd-ca | | client | | -| kube-apiserver-etcd-client | etcd-ca | system:masters | client | | +| kube-apiserver-etcd-client | etcd-ca | | client | | | kube-apiserver | kubernetes-ca | | server | ``, ``, ``, `[1]` | | kube-apiserver-kubelet-client | kubernetes-ca | system:masters | client | | | front-proxy-client | kubernetes-front-proxy-ca | | client | | diff --git a/content/ja/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/ja/docs/tasks/configure-pod-container/configure-pod-configmap.md index 9974dea10ce28..d42c4871676ee 100644 --- a/content/ja/docs/tasks/configure-pod-container/configure-pod-configmap.md +++ b/content/ja/docs/tasks/configure-pod-container/configure-pod-configmap.md @@ -534,6 +534,9 @@ kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-env-var-valu ``` `test-container`コンテナで以下の出力結果を表示します: +```shell +kubectl logs dapi-test-pod +``` ```shell very charm diff --git a/content/ja/docs/tasks/debug/debug-application/get-shell-running-container.md b/content/ja/docs/tasks/debug/debug-application/get-shell-running-container.md index 519a9fdf604e4..da344b4888967 100644 --- a/content/ja/docs/tasks/debug/debug-application/get-shell-running-container.md +++ b/content/ja/docs/tasks/debug/debug-application/get-shell-running-container.md @@ -111,7 +111,7 @@ exit # コンテナ内のシェルを終了する シェルではない通常のコマンドウインドウ内で、実行中のコンテナの環境変数の一覧を表示します: ```shell -kubectl exec shell-demo env +kubectl exec shell-demo -- env ``` 他のコマンドを試します。以下がいくつかの例です: diff --git a/content/ja/docs/tasks/tools/_index.md b/content/ja/docs/tasks/tools/_index.md index 72a8c1361fad9..0d78f6ccd82e1 100644 --- a/content/ja/docs/tasks/tools/_index.md +++ b/content/ja/docs/tasks/tools/_index.md @@ -13,16 +13,16 @@ Kubernetesのコマンドラインツール`kubectl`を使用すると、Kuberne また、[`kubectl`リファレンスドキュメント](/ja/docs/reference/kubectl/)も参照できます。 -## Minikube +## minikube -[Minikube](https://minikube.sigs.k8s.io/)は、Kubernetesをローカルで実行するツールです。MinikubeはシングルノードのKubernetesクラスターをパーソナルコンピューター上(Windows、macOS、Linux PCを含む)で実行することで、Kubernetesを試したり、日常的な開発作業のために利用できます。 +[minikube](https://minikube.sigs.k8s.io/)は、Kubernetesをローカルで実行するツールです。minikubeはシングルノードのKubernetesクラスターをパーソナルコンピューター上(Windows、macOS、Linux PCを含む)で実行することで、Kubernetesを試したり、日常的な開発作業のために利用できます。 ツールのインストールについて知りたい場合は、公式の[Get Started!](https://minikube.sigs.k8s.io/docs/start/)のガイドに従ってください。 -Minikubeが起動したら、[サンプルアプリケーションの実行](/ja/docs/tutorials/hello-minikube/)を試すことができます。 +minikubeが起動したら、[サンプルアプリケーションの実行](/ja/docs/tutorials/hello-minikube/)を試すことができます。 ## kind -Minikubeと同じように、[kind](https://kind.sigs.k8s.io/docs/)もローカルコンピューター上でKubernetesを実行するツールです。Minikubeとは違い、kindは1種類のコンテナランタイム上でしか動作しません。実行には[Docker](https://docs.docker.com/get-docker/)のインストールと設定が必要です。 +minikubeと同じように、[kind](https://kind.sigs.k8s.io/docs/)もローカルコンピューター上でKubernetesを実行するツールです。minikubeとは違い、kindは1種類のコンテナランタイム上でしか動作しません。実行には[Docker](https://docs.docker.com/get-docker/)のインストールと設定が必要です。 [Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/)に、kindの起動に必要な手順が説明されています。 diff --git a/content/ja/docs/tutorials/_index.md b/content/ja/docs/tutorials/_index.md index d02acef023a12..b315c781d5c9c 100644 --- a/content/ja/docs/tutorials/_index.md +++ 
b/content/ja/docs/tutorials/_index.md @@ -54,7 +54,7 @@ content_type: concept * [クラスターレベルのPod Securityの標準の適用](/docs/tutorials/security/cluster-level-pss/) * [NamespaceレベルのPod Securityの標準の適用](/docs/tutorials/security/ns-level-pss/) * [AppArmor](/docs/tutorials/security/apparmor/) -* [seccomp](/docs/tutorials/security/seccomp/) +* [Seccomp](/docs/tutorials/security/seccomp/) ## {{% heading "whatsnext" %}} diff --git a/content/ja/docs/tutorials/hello-minikube.md b/content/ja/docs/tutorials/hello-minikube.md index b144c8d64eba9..33d0d4ab01b3b 100644 --- a/content/ja/docs/tutorials/hello-minikube.md +++ b/content/ja/docs/tutorials/hello-minikube.md @@ -15,10 +15,10 @@ card: -このチュートリアルでは、[Minikube](/ja/docs/setup/learning-environment/minikube)とKatacodaを使用して、Kubernetes上でサンプルアプリケーションを動かす方法を紹介します。Katacodaはブラウザで無償のKubernetes環境を提供します。 +このチュートリアルでは、[minikube](/ja/docs/setup/learning-environment/minikube)とKatacodaを使用して、Kubernetes上でサンプルアプリケーションを動かす方法を紹介します。Katacodaはブラウザで無償のKubernetes環境を提供します。 {{< note >}} -[Minikubeをローカルにインストール](https://minikube.sigs.k8s.io/docs/start/)している場合もこのチュートリアルを進めることが可能です。 +[minikubeをローカルにインストール](https://minikube.sigs.k8s.io/docs/start/)している場合もこのチュートリアルを進めることが可能です。 {{< /note >}} @@ -26,7 +26,7 @@ card: ## {{% heading "objectives" %}} -* Minikubeへのサンプルアプリケーションのデプロイ +* minikubeへのサンプルアプリケーションのデプロイ * アプリケーションの実行 * アプリケーションログの確認 @@ -43,14 +43,14 @@ card: -## Minikubeクラスターの作成 +## minikubeクラスターの作成 1. **Launch Terminal** をクリックしてください {{< kat-button >}} {{< note >}} - Minikubeをローカルにインストール済みの場合は、`minikube start`を実行してください。 + minikubeをローカルにインストール済みの場合は、`minikube start`を実行してください。 {{< /note >}} 2. ブラウザーでKubernetesダッシュボードを開いてください: @@ -142,7 +142,7 @@ Kubernetesの[*Pod*](/ja/docs/concepts/workloads/pods/) は、コンテナの管 ``` ロードバランサーをサポートするクラウドプロバイダーでは、Serviceにアクセスするための外部IPアドレスが提供されます。 - Minikube では、`LoadBalancer`タイプは`minikube service`コマンドを使用した接続可能なServiceを作成します。 + minikube では、`LoadBalancer`タイプは`minikube service`コマンドを使用した接続可能なServiceを作成します。 3. 次のコマンドを実行します: @@ -158,7 +158,7 @@ Kubernetesの[*Pod*](/ja/docs/concepts/workloads/pods/) は、コンテナの管 ## アドオンの有効化 -Minikubeはビルトインの{{< glossary_tooltip text="アドオン" term_id="addons" >}}があり、有効化、無効化、あるいはローカルのKubernetes環境に公開することができます。 +minikubeはビルトインの{{< glossary_tooltip text="アドオン" term_id="addons" >}}があり、有効化、無効化、あるいはローカルのKubernetes環境に公開することができます。 1. 
サポートされているアドオンをリストアップします: @@ -250,13 +250,13 @@ kubectl delete service hello-node kubectl delete deployment hello-node ``` -(オプション)Minikubeの仮想マシン(VM)を停止します: +(オプション)minikubeの仮想マシン(VM)を停止します: ```shell minikube stop ``` -(オプション)MinikubeのVMを削除します: +(オプション)minikubeのVMを削除します: ```shell minikube delete diff --git a/content/ja/docs/tutorials/security/cluster-level-pss.md b/content/ja/docs/tutorials/security/cluster-level-pss.md index 34622a32d94e3..1ae584543846c 100644 --- a/content/ja/docs/tutorials/security/cluster-level-pss.md +++ b/content/ja/docs/tutorials/security/cluster-level-pss.md @@ -20,7 +20,7 @@ v{{< skew currentVersion >}}以外のKubernetesバージョンを実行してい ワークステーションに以下をインストールしてください: -- [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +- [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) - [kubectl](/ja/docs/tasks/tools/) このチュートリアルでは、完全な制御下にあるKubernetesクラスターの何を設定できるかをデモンストレーションします。 @@ -230,7 +230,7 @@ v{{< skew currentVersion >}}以外のKubernetesバージョンを実行してい ``` {{}} - macOSでDocker DesktopとKinDを利用している場合は、**Preferences > Resources > File Sharing**のメニュー項目からShared Directoryとして`/tmp`を追加できます。 + macOSでDocker Desktopと*kind*を利用している場合は、**Preferences > Resources > File Sharing**のメニュー項目からShared Directoryとして`/tmp`を追加できます。 {{}} 1. 目的のPodセキュリティの標準を適用するために、Podセキュリティアドミッションを使うクラスターを作成します: diff --git a/content/ko/community/_index.html b/content/ko/community/_index.html index b0e700ec195f0..662ad115f0ce6 100644 --- a/content/ko/community/_index.html +++ b/content/ko/community/_index.html @@ -178,6 +178,6 @@

      글로벌 커뮤니티

      diff --git a/content/ko/docs/home/_index.md b/content/ko/docs/home/_index.md index 247a75802120d..24bd1b5d6857e 100644 --- a/content/ko/docs/home/_index.md +++ b/content/ko/docs/home/_index.md @@ -6,7 +6,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "홈" +linkTitle: "문서" main_menu: true weight: 10 hide_feedback: true diff --git a/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md b/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md index a112f71f2568a..38c2ac1bb637b 100644 --- a/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy.md @@ -10,7 +10,7 @@ weight: 20 이 페이지는 어떻게 네트워크 폴리시(NetworkPolicy)로 실리움(Cilium)를 사용하는지 살펴본다. -실리움의 배경에 대해서는 [실리움 소개](https://docs.cilium.io/en/stable/intro)를 읽어보자. +실리움의 배경에 대해서는 [실리움 소개](https://docs.cilium.io/en/stable/overview/intro)를 읽어보자. ## {{% heading "prerequisites" %}} diff --git a/content/pl/_index.html b/content/pl/_index.html index 1fa0f72c4cd08..f1982741a8294 100644 --- a/content/pl/_index.html +++ b/content/pl/_index.html @@ -4,9 +4,10 @@ cid: home sitemap: priority: 1.0 - --- +{{< site-searchbar >}} + {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} [Kubernetes]({{< relref "/docs/concepts/overview/" >}}), znany też jako K8s, to otwarte oprogramowanie służące do automatyzacji procesów uruchamiania, skalowania i zarządzania aplikacjami w kontenerach. @@ -46,17 +47,15 @@

      The Challenges of Migrating 150+ Microservices to Kubernetes



      - Weź udział w KubeCon + CloudNativeCon Europe 18-21.04.2023 + Weź udział w KubeCon + CloudNativeCon Europe 19-22.03.2024



      - Weź udział w KubeCon + CloudNativeCon North America 6-9.11.2023 + Weź udział w KubeCon + CloudNativeCon North America 12-15.11.2024
{{< /blocks/section >}} - -{{< blocks/kubernetes-features >}} diff --git a/content/pl/docs/concepts/overview/components.md b/content/pl/docs/concepts/overview/components.md index c4b2700452bd8..a4a81935a9819 100644 --- a/content/pl/docs/concepts/overview/components.md +++ b/content/pl/docs/concepts/overview/components.md @@ -4,7 +4,7 @@ content_type: concept description: > Klaster Kubernetesa tworzą: komponenty warstwy sterowania oraz zbiór maszyn nazywanych węzłami. -weight: 20 +weight: 30 card: name: concepts weight: 20 @@ -47,8 +47,8 @@ Przykładowe kontrolery: * Node controller: Odpowiada za rozpoznawanie i reagowanie na sytuacje, kiedy węzeł staje się z jakiegoś powodu niedostępny. * Job controller: Czeka na obiekty typu *Job*, które definiują zadania uruchamiane jednorazowo i startuje Pody, odpowiadające za wykonanie tych zadań. -* Endpoints controller: Dostarcza informacji do obiektów typu *Endpoints* (tzn. łączy ze sobą Serwisy i Pody). -* Service Account & Token controllers: Tworzy domyślne konta i tokeny dostępu API dla nowych przestrzeni nazw (*namespaces*). +* EndpointSlice controller: Dostarcza informacji do obiektów typu *EndpointSlice* (aby zapewnić połączenie pomiędzy Serwisami i Podami). +* ServiceAccount controllers: Tworzy domyślne konta dla nowych przestrzeni nazw (*namespaces*). ### cloud-controller-manager diff --git a/content/pl/docs/concepts/overview/kubernetes-api.md b/content/pl/docs/concepts/overview/kubernetes-api.md index b769630458969..7e8c842ff0edd 100644 --- a/content/pl/docs/concepts/overview/kubernetes-api.md +++ b/content/pl/docs/concepts/overview/kubernetes-api.md @@ -1,7 +1,7 @@ --- title: API Kubernetesa content_type: concept -weight: 30 +weight: 40 description: > API Kubernetesa służy do odpytywania i zmiany stanu obiektów Kubernetesa. Sercem warstwy sterowania Kubernetesa jest serwer API i udostępniane po HTTP API. Przez ten serwer odbywa się komunikacja pomiędzy użytkownikami, różnymi częściami składowymi klastra oraz komponentami zewnętrznymi. @@ -178,7 +178,8 @@ poprzez kilka różnych wersji API. Załóżmy przykładowo, że istnieją dwie wersje `v1` i `v1beta1` tego samego zasobu. Obiekt utworzony przez wersję `v1beta1` może być odczytany, zaktualizowany i skasowany zarówno przez wersję -`v1beta1`, jak i `v1`. +`v1beta1`, jak i `v1`, do czasu aż wersja `v1beta1` będzie przestarzała i usunięta. +Wtedy możesz dalej korzystać i modyfikować obiekt poprzez wersję `v1`. ## Trwałość API diff --git a/content/pl/docs/home/_index.md b/content/pl/docs/home/_index.md index 1e2ecfe14b61b..f72000862d264 100644 --- a/content/pl/docs/home/_index.md +++ b/content/pl/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Strona główna" +linkTitle: "Dokumentacja" main_menu: true weight: 10 hide_feedback: true @@ -39,24 +39,26 @@ cards: description: "Wyszukaj popularne zadania i dowiedz się, jak sobie z nimi efektywnie poradzić." button: "Przegląd zadań" button_path: "/docs/tasks" -- name: training - title: "Szkolenia" - description: "Uzyskaj certyfikat Kubernetes i spraw, aby Twoje projekty cloud native zakończyły się sukcesem!" - button: "Oferta szkoleń" - button_path: "/training" - name: reference title: Dokumentacja źródłowa description: Zapoznaj się z terminologią, składnią poleceń, typami zasobów API i dokumentacją narzędzi instalacyjnych.
button: Zajrzyj do źródeł button_path: /docs/reference - name: contribute - title: Weź udział w tworzeniu dokumentacji - description: Każdy może przyczynić się do tworzenia dokumentacji - zarówno nowicjusze, jak i starzy wyjadacze. - button: Weź udział + title: Weź udział w tworzeniu Kubernetesa + description: Każdy może pomóc - zarówno nowicjusze, jak i starzy wyjadacze. + button: Zobacz, jak możesz pomóc button_path: /docs/contribute -- name: release-notes - title: Informacje o wydaniu K8s - description: Jeśli instalujesz lub aktualizujesz Kubernetesa, zajrzyj do informacji o najnowszym wydaniu. +- name: training + title: "Szkolenia" + description: "Uzyskaj certyfikat Kubernetes i spraw, aby Twoje projekty cloud native zakończyły się sukcesem!" + button: "Oferta szkoleń" + button_path: "/training" +- name: Download + title: Pobierz Kubernetesa + description: Zainstaluj Kubernetesa lub zaktualizuj do najnowszej wersji. + button: "Pobierz Kubernetesa" + button_path: "/releases/download" - name: about title: O dokumentacji description: Tu znajdziesz dokumentację bieżącej i czterech poprzednich wersji Kubernetes. diff --git a/content/pl/docs/reference/_index.md b/content/pl/docs/reference/_index.md index 9d0e772188ac6..fecda7e8fd454 100644 --- a/content/pl/docs/reference/_index.md +++ b/content/pl/docs/reference/_index.md @@ -4,6 +4,7 @@ linkTitle: "Materiały źródłowe" main_menu: true weight: 70 content_type: concept +no_list: true --- @@ -28,6 +29,7 @@ Aby wywołać Kubernetes API z wybranego języka programowania, możesz skorzystać [bibliotek klienckich](/docs/reference/using-api/client-libraries/). Oficjalnie wspierane biblioteki to: +* [Kubernetes Go client library](https://github.com/kubernetes/client-go/) * [Kubernetes Python client library](https://github.com/kubernetes-client/python) * [Kubernetes Java client library](https://github.com/kubernetes-client/java) * [Kubernetes JavaScript client library](https://github.com/kubernetes-client/javascript) @@ -65,27 +67,44 @@ Kubernetesa lub innych narzędzi. Choć większość tych API nie jest udostępniana przez serwer API w trybie RESTful, są one niezbędne dla użytkowników i administratorów w korzystaniu i zarządzaniu klastrem.
-* [kube-apiserver configuration (v1alpha1)](/docs/reference/config-api/apiserver-config.v1alpha1/) -* [kube-apiserver configuration (v1)](/docs/reference/config-api/apiserver-config.v1/) + +* [kubeconfig (v1)](/docs/reference/config-api/kubeconfig.v1/) +* [kube-apiserver admission (v1)](/docs/reference/config-api/apiserver-admission.v1/) +* [kube-apiserver configuration (v1alpha1)](/docs/reference/config-api/apiserver-config.v1alpha1/) i +* [kube-apiserver configuration (v1beta1)](/docs/reference/config-api/apiserver-config.v1beta1/) i + [kube-apiserver configuration (v1)](/docs/reference/config-api/apiserver-config.v1/) * [kube-apiserver encryption (v1)](/docs/reference/config-api/apiserver-encryption.v1/) * [kube-apiserver event rate limit (v1alpha1)](/docs/reference/config-api/apiserver-eventratelimit.v1alpha1/) -* [kubelet configuration (v1alpha1)](/docs/reference/config-api/kubelet-config.v1alpha1/) oraz - [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) -* [kubelet credential providers (v1alpha1)](/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/) -* [kubelet credential providers (v1beta1)](/docs/reference/config-api/kubelet-credentialprovider.v1beta1/) -* [kube-scheduler configuration (v1beta2)](/docs/reference/config-api/kube-scheduler-config.v1beta2/) oraz - [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) +* [kubelet configuration (v1alpha1)](/docs/reference/config-api/kubelet-config.v1alpha1/), + [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) i + [kubelet configuration (v1)](/docs/reference/config-api/kubelet-config.v1/) +* [kubelet credential providers (v1alpha1)](/docs/reference/config-api/kubelet-credentialprovider.v1alpha1/), + [kubelet credential providers (v1beta1)](/docs/reference/config-api/kubelet-credentialprovider.v1beta1/) i + [kubelet credential providers (v1)](/docs/reference/config-api/kubelet-credentialprovider.v1/) +* [kube-scheduler configuration (v1beta3)](/docs/reference/config-api/kube-scheduler-config.v1beta3/) i + [kube-scheduler configuration (v1)](/docs/reference/config-api/kube-scheduler-config.v1/) +* [kube-controller-manager configuration (v1alpha1)](/docs/reference/config-api/kube-controller-manager-config.v1alpha1/) * [kube-proxy configuration (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/) * [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/) -* [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) oraz +* [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) i [Client authentication API (v1)](/docs/reference/config-api/client-authentication.v1/) * [WebhookAdmission configuration (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/) * [ImagePolicy API (v1alpha1)](/docs/reference/config-api/imagepolicy.v1alpha1/) ## API konfiguracji dla kubeadm -* [v1beta2](/docs/reference/config-api/kubeadm-config.v1beta2/) + * [v1beta3](/docs/reference/config-api/kubeadm-config.v1beta3/) +* [v1beta4](/docs/reference/config-api/kubeadm-config.v1beta4/) + +## Zewnętrzne API + +Istnieją API, które zostały zdefiniowane w ramach projektu Kubernetes, ale nie zostały zaimplementowane +przez główny projekt: + +* [Metrics API (v1beta1)](/docs/reference/external-api/metrics.v1beta1/) +* [Custom Metrics API (v1beta2)](/docs/reference/external-api/custom-metrics.v1beta2) +* [External Metrics API 
(v1beta1)](/docs/reference/external-api/external-metrics.v1beta1) ## Dokumentacja projektowa diff --git a/content/pl/docs/reference/glossary/cloud-controller-manager.md b/content/pl/docs/reference/glossary/cloud-controller-manager.md index 5d09fa4695d73..d0398cb755534 100644 --- a/content/pl/docs/reference/glossary/cloud-controller-manager.md +++ b/content/pl/docs/reference/glossary/cloud-controller-manager.md @@ -12,7 +12,7 @@ tags: - operation --- Element składowy {{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetesa, -który zarządza usługami realizowanymi po stronie chmur obliczeniowych. Cloud controller manager umożliwia +który zarządza usługami realizowanymi po stronie chmur obliczeniowych. [Cloud controller manager](/docs/concepts/architecture/cloud-controller/) umożliwia połączenie Twojego klastra z API operatora usług chmurowych i rozdziela składniki operujące na platformie chmurowej od tych, które dotyczą wyłącznie samego klastra. diff --git a/content/pl/docs/setup/_index.md b/content/pl/docs/setup/_index.md index 5d63a0fdcfec5..a4a5a7713d1ad 100644 --- a/content/pl/docs/setup/_index.md +++ b/content/pl/docs/setup/_index.md @@ -22,7 +22,7 @@ Instalując Kubernetesa, przy wyborze platformy kieruj się: łatwością w utrzymaniu Możesz [pobrać Kubernetesa](/releases/download/), aby zainstalować klaster na lokalnym komputerze, w chmurze czy w prywatnym centrum obliczeniowym. -Niektóre [komponenty Kubernetesa](/docs/concepts/overview/components/), na przykład `kube-apiserver` czy `kube-proxy` mogą być +Niektóre [komponenty Kubernetesa](/docs/concepts/overview/components/), na przykład {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} czy {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} mogą być uruchamiane jako [kontenery](/releases/download/#container-images) wewnątrz samego klastra. **Zalecamy** uruchamianie komponentów Kubernetesa jako kontenery zawsze, @@ -59,9 +59,6 @@ jest [kubeadm](/docs/setup/production-environment/tools/kubeadm/). - Wybierz [środowisko uruchomieniowe dla kontenerów](/docs/setup/production-environment/container-runtimes/) w nowym klastrze - Naucz się [najlepszych praktyk](/docs/setup/best-practices/) przy konfigurowaniu klastra -Na stronie [Partnerów Kubernetesa](https://kubernetes.io/partners/#conformance) znajdziesz listę dostawców posiadających -[certyfikację Kubernetes](https://github.com/cncf/k8s-conformance/#certified-kubernetes). - Kubernetes zaprojektowano w ten sposób, że {{< glossary_tooltip term_id="control-plane" text="warstwa sterowania" >}} wymaga do działania systemu Linux. W ramach klastra aplikacje mogą być uruchamiane na systemie Linux i innych, w tym Windows.
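The setup page above points to kubeadm as the recommended deployment tool; as a minimal sketch of that flow (the pod network CIDR, token, and hash below are placeholders, not values from the linked guide):

```shell
# On the control-plane node: initialize a new cluster
# (the CIDR is only an example; pick one that matches your network add-on)
sudo kubeadm init --pod-network-cidr=10.244.0.0/16

# On each worker node: join the cluster using the values printed by 'kubeadm init'
sudo kubeadm join <control-plane-host>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>
```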
diff --git a/content/pl/docs/tutorials/_index.md b/content/pl/docs/tutorials/_index.md index ea951644a5d83..e1ab178de9663 100644 --- a/content/pl/docs/tutorials/_index.md +++ b/content/pl/docs/tutorials/_index.md @@ -55,7 +55,7 @@ Przed zapoznaniem się z samouczkami warto stworzyć zakładkę do * [Apply Pod Security Standards at Cluster level](/docs/tutorials/security/cluster-level-pss/) * [Apply Pod Security Standards at Namespace level](/docs/tutorials/security/ns-level-pss/) * [AppArmor](/docs/tutorials/security/apparmor/) -* [seccomp](/docs/tutorials/security/seccomp/) +* [Seccomp](/docs/tutorials/security/seccomp/) ## {{% heading "whatsnext" %}} diff --git a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html index db80f40b444e0..181dec226aa3a 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -9,8 +9,6 @@ - - {{< katacoda-tutorial >}}
      @@ -18,9 +16,6 @@
      -
      - Do pracy z terminalem użyj wersji na desktop/tablet -
      diff --git a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html index 2ed9484e8ba5d..84af97f5345e3 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -9,8 +9,6 @@ - -
      @@ -39,7 +37,7 @@

      Kubernetes Services - przegląd

    • LoadBalancer - Tworzy zewnętrzny load balancer u bieżącego dostawcy usług chmurowych (o ile jest taka możliwość) i przypisuje serwisowi stały, zewnętrzny adres IP. Nadzbiór NodePort.
    • ExternalName - Przypisuje Service do externalName (np. foo.bar.example.com), zwracając rekord CNAME wraz z zawartością. W tym przypadku nie jest wykorzystywany proces przekierowania ruchu metodą proxy. Ta metoda wymaga kube-dns w wersji v1.7 lub wyższej lub CoreDNS w wersji 0.0.8 lub wyższej.
    -

    Więcej informacji na temat różnych typów serwisów znajduje się w samouczku Używanie adresu źródłowego (Source IP). Warto też zapoznać się z Łączeniem Aplikacji z Serwisami.

    +

    Więcej informacji na temat różnych typów serwisów znajduje się w samouczku Używanie adresu źródłowego (Source IP). Warto też zapoznać się z Łączeniem Aplikacji z Serwisami.

    W pewnych przypadkach w serwisie nie specyfikuje się selector. Serwis, który został stworzony bez pola selector, nie utworzy odpowiedniego obiektu Endpoints. W ten sposób użytkownik ma możliwość ręcznego przyporządkowania serwisu do konkretnych endpoints. Inny przypadek, kiedy nie używa się selektora, ma miejsce, kiedy stosujemy type: ExternalName.
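To make the selector-less case above concrete, here is a minimal sketch pairing a Service that has no `selector` with a manually managed Endpoints object; the name `my-external-db`, the port, and the IP address are purely illustrative:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-external-db        # illustrative name
spec:
  ports:
    - protocol: TCP
      port: 5432
      targetPort: 5432
---
apiVersion: v1
kind: Endpoints
metadata:
  name: my-external-db        # must match the Service name
subsets:
  - addresses:
      - ip: 192.0.2.42        # example backend outside the cluster
    ports:
      - port: 5432
```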

    diff --git a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html index 5a3115092757c..db522eacc9aa8 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html +++ b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -9,8 +9,6 @@ - - {{< katacoda-tutorial >}}
    @@ -18,9 +16,6 @@
    -
    - Do pracy z terminalem użyj wersji na desktop/tablet -
    diff --git a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html index bb2c40ffeefcb..07afc984e4aba 100644 --- a/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/pl/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -9,8 +9,6 @@ - -
    @@ -86,7 +84,7 @@

    Ogólnie o skalowaniu

    -

    Kiedy zwiększamy skalę Deploymentu, uruchomienie nowych Podów jest zlecane na Węzłach, które posiadają odpowiednio dużo zasobów. Operacja skalowania zwiększy liczbę Podów do oczekiwanej wartości. W Kubernetes możliwe jest również autoskalowanie Podów, ale jest ono poza zakresem niniejszego samouczka. Istnieje także możliwość skalowania do zera — w ten sposób zatrzymane zostaną wszystkie Pody należące do konkretnego Deploymentu.

    +

    Kiedy zwiększamy skalę Deploymentu, uruchomienie nowych Podów jest zlecane na Węzłach, które posiadają odpowiednio dużo zasobów. Operacja skalowania zwiększy liczbę Podów do oczekiwanej wartości. W Kubernetes możliwe jest również autoskalowanie Podów, ale jest ono poza zakresem niniejszego samouczka. Istnieje także możliwość skalowania do zera — w ten sposób zatrzymane zostaną wszystkie Pody należące do konkretnego Deploymentu.

    Kiedy działa jednocześnie wiele instancji jednej aplikacji, należy odpowiednio rozłożyć ruch pomiędzy każdą z nich. Serwisy posiadają zintegrowany load-balancer, który dystrybuuje ruch na wszystkie Pody w Deployment wystawionym na zewnątrz. Serwis prowadzi ciągły monitoring Podów poprzez ich punkty dostępowe (endpoints), aby zapewnić, że ruch kierowany jest tylko do tych Podów, które są faktycznie dostępne.
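The scaling and load-balancing behaviour described above can be exercised with two commands; a minimal sketch, assuming the `kubernetes-bootcamp` Deployment used elsewhere in this tutorial series:

```shell
# Scale the Deployment out to 4 replicas; the Service spreads traffic across all of them
kubectl scale deployments/kubernetes-bootcamp --replicas=4

# Inspect the endpoints the Service is load-balancing over
kubectl get endpoints kubernetes-bootcamp
```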

diff --git a/content/pl/releases/_index.md b/content/pl/releases/_index.md index 515dbf2f85c92..2add7bf6e7c77 100644 --- a/content/pl/releases/_index.md +++ b/content/pl/releases/_index.md @@ -2,15 +2,20 @@ linktitle: Historia wydań title: Wydania type: docs +layout: release-info +notoc: true --- - -Projekt Kubernetes zapewnia wsparcie dla trzech ostatnich wydań _minor_ ({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). Poprawki do wydania 1.19 i nowszych [będą publikowane przez około rok](/releases/patch-releases/#support-period). Kubernetes w wersji 1.18 i wcześniejszych otrzymywał poprawki przez 9 miesięcy. +Projekt Kubernetes zapewnia wsparcie dla trzech ostatnich wydań _minor_ +({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}}). +Poprawki do wydania 1.19 i nowszych [będą publikowane przez około rok](/releases/patch-releases/#support-period). +Kubernetes w wersji 1.18 i wcześniejszych otrzymywał poprawki przez 9 miesięcy. Wersje Kubernetesa oznaczane są jako **x.y.z**, -gdzie **x** jest oznaczeniem wersji głównej (_major_), **y** — podwersji (_minor_), a **z** — numer poprawki (_patch_), zgodnie z terminologią [Semantic Versioning](https://semver.org/). +gdzie **x** jest oznaczeniem wersji głównej (_major_), **y** — podwersji (_minor_), a **z** — numer poprawki (_patch_), +zgodnie z terminologią [Semantic Versioning](https://semver.org/). Więcej informacji można znaleźć w dokumencie [version skew policy](/releases/version-skew-policy/). @@ -22,6 +27,7 @@ Więcej informacji można znaleźć w dokumencie [version skew policy](/releases/version-skew-policy/) ## Nadchodzące wydania -Zajrzyj na [harmonogram](https://github.com/kubernetes/sig-release/tree/master/releases/release-{{< skew nextMinorVersion >}}) nadchodzącego wydania Kubernetesa numer **{{< skew nextMinorVersion >}}**! +Zajrzyj na [harmonogram](https://github.com/kubernetes/sig-release/tree/master/releases/release-{{< skew nextMinorVersion >}}) +nadchodzącego wydania Kubernetesa numer **{{< skew nextMinorVersion >}}**! ## Przydatne zasoby diff --git a/content/pl/releases/download.md b/content/pl/releases/download.md new file mode 100644 index 0000000000000..36679b6f55904 --- /dev/null +++ b/content/pl/releases/download.md @@ -0,0 +1,49 @@ +--- +title: Ściągnij Kubernetesa +type: docs +--- + +Klaster Kubernetesa dostępny jest w formie plików binarnych dla każdego z jego komponentów i zestawu standardowych aplikacji klienckich wspomagających proces jego szybkiego rozruchu lub obsługi. Składniki Kubernetesa takie jak serwer API mogą być uruchamiane z poziomu obrazów kontenerowych wewnątrz klastra - te ostatnie są także częścią oficjalnego wydania Kubernetesa. Wszystkie pliki binarne i obrazy kontenerowe Kubernetesa udostępniane są dla różnych systemów operacyjnych i architektur sprzętowych. + +### kubectl + + + +[kubectl](/docs/reference/kubectl/kubectl/) to narzędzie powłoki umożliwiające wykonywanie komend w klastrze Kubernetesa służących m.in. do uruchamiania aplikacji, zarządzania zasobami klastra i przeglądania logów. Więcej informacji na temat kubectl, w tym pełną listę operacji, jakie możesz za jego pomocą wykonać, znajdziesz w [Dokumentacji `kubectl`](/docs/reference/kubectl/). + +kubectl można zainstalować w rozmaitych systemach z rodziny Linuxa, jak również w systemach macOS i Windows.
Niżej znajdziesz odnośniki do instrukcji instalacji dla preferowanego przez siebie systemu: +- [Instalacja kubectl w Linuxie](/docs/tasks/tools/install-kubectl-linux) +- [Instalacja kubectl w macOS-ie](/docs/tasks/tools/install-kubectl-macos) +- [Instalacja kubectl w Windowsie](/docs/tasks/tools/install-kubectl-windows) + +## Obrazy kontenerów + +Wszystkie obrazy kontenerowe umieszczane są w rejestrze `registry.k8s.io`. + +{{< feature-state for_k8s_version="v1.24" state="alpha" >}} + +Dla wersji Kubernetesa {{< param "version">}} następujące obrazy kontenerów opatrzone są podpisem [cosign](https://github.com/sigstore/cosign): + +| Obraz kontenera | Wspierana architektura | +| ------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| registry.k8s.io/kube-apiserver:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | +| registry.k8s.io/kube-controller-manager:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | +| registry.k8s.io/kube-proxy:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | +| registry.k8s.io/kube-scheduler:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | +| registry.k8s.io/conformance:v{{< skew currentPatchVersion >}} | amd64, arm, arm64, ppc64le, s390x | + +Obrazy kontenerów Kubernetesa obsługują rozmaite architektury sprzętowe, ich wyboru powinno zaś dokonać środowisko uruchomieniowe w zależności od wybranej platformy. Istnieje też możliwość pobrania obrazu kontenera dla konkretnej architektury poprzez dodanie do jego nazwy odpowiedniego przyrostka, np. `registry.k8s.io/kube-apiserver-arm64:v{{< skew currentPatchVersion >}}`. Wszystkie te warianty obrazów Kubernetesa są podpisane w taki sam sposób jak w przypadku listy manifestów wieloarchitekturowych. + +Wydawcy Kubernetesa publikują listę podpisanych obrazów kontenerowych w formacie [SPDX 2.3](https://spdx.dev/specifications/). Możesz ją pobrać wpisując w powłoce: + +```shell +curl -Ls "https://sbom.k8s.io/$(curl -Ls https://dl.k8s.io/release/stable.txt)/release" | grep "SPDXID: SPDXRef-Package-registry.k8s.io" | grep -v sha256 | cut -d- -f3- | sed 's/-/\//' | sed 's/-v1/:v1/' +``` + +Dla wersji {{< skew currentVersion >}} Kubernetesa jedynym typem artefaktu kodu, którego integralność możesz zweryfikować, jest obraz kontenera (korzystając z eksperymentalnej opcji podpisu). + +By ręcznie zweryfikować podpisane obrazy kontenerów głównych komponentów Kubernetesa, zobacz [Zweryfikuj podpisane obrazy kontenerów](/docs/tasks/administer-cluster/verify-signed-artifacts). + +## Pliki binarne + +{{< release-binaries >}} \ No newline at end of file diff --git a/content/pt-br/_index.html b/content/pt-br/_index.html index 15782b8423d87..1131ee38e7956 100644 --- a/content/pt-br/_index.html +++ b/content/pt-br/_index.html @@ -4,6 +4,7 @@ cid: home --- +{{< site-searchbar >}} {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} @@ -42,12 +43,12 @@

Os desafios da migração de mais de 150 microsserviços para o Kubernetes (Assista Video)

    - KubeCon na Europa de 16 a 20 de maio de 2022 + KubeCon + CloudNativeCon na Europa de 19 a 22 de março de 2024



    - KubeCon na América do Norte de 24 a 28 de outubro de 2022 + KubeCon + CloudNativeCon na América do Norte de 12 a 15 de novembro de 2024

    diff --git a/content/pt-br/community/_index.html b/content/pt-br/community/_index.html index 2ad2cc20ee9d8..fa7726379f2b7 100644 --- a/content/pt-br/community/_index.html +++ b/content/pt-br/community/_index.html @@ -194,6 +194,6 @@

    Comunidade global

    diff --git a/content/pt-br/docs/concepts/extend-kubernetes/operator.md b/content/pt-br/docs/concepts/extend-kubernetes/operator.md index 771ba008e8b65..d7d7df1816625 100644 --- a/content/pt-br/docs/concepts/extend-kubernetes/operator.md +++ b/content/pt-br/docs/concepts/extend-kubernetes/operator.md @@ -93,7 +93,7 @@ A seguir estão algumas bibliotecas e ferramentas que você pode usar para escre ## {{% heading "whatsnext" %}} -* Leia o [whitepaper sobre operadores](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md) da {{< glossary_tooltip text="CNCF" term_id="cncf" >}} +* Leia o [whitepaper sobre operadores](https://github.com/cncf/tag-app-delivery/blob/163962c4b1cd70d085107fc579e3e04c2e14d59c/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md) da {{< glossary_tooltip text="CNCF" term_id="cncf" >}} * Saiba mais sobre [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) * Encontre operadores prontos em [OperatorHub.io](https://operatorhub.io/) para atender ao seu caso de uso * [Publique](https://operatorhub.io/) seu operador para outras pessoas usarem diff --git a/content/pt-br/docs/concepts/storage/volumes.md b/content/pt-br/docs/concepts/storage/volumes.md index 8cfb3cd4af93d..7ed655567a5dd 100644 --- a/content/pt-br/docs/concepts/storage/volumes.md +++ b/content/pt-br/docs/concepts/storage/volumes.md @@ -193,6 +193,7 @@ spec: containers: - name: test image: busybox:1.28 + command: ['sh', '-c', 'echo "The app is running!" && tail -f /dev/null'] volumeMounts: - name: config-vol mountPath: /etc/config diff --git a/content/pt-br/docs/concepts/workloads/controllers/replicaset.md b/content/pt-br/docs/concepts/workloads/controllers/replicaset.md index dcd22d1f77000..440187b4b9460 100644 --- a/content/pt-br/docs/concepts/workloads/controllers/replicaset.md +++ b/content/pt-br/docs/concepts/workloads/controllers/replicaset.md @@ -280,7 +280,7 @@ Se o Pod obedecer todos os items acima simultaneamente, a seleção é aleatóri Utilizando a anotação [`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost), usuários podem definir uma preferência em relação à quais pods serão removidos primeiro caso o ReplicaSet precise escalonar para baixo. -A anotação deve ser definida no pod, com uma variação de [-2147483647, 2147483647]. Isso representa o custo de deletar um pod comparado com outros pods que pertencem à esse mesmo ReplicaSet. Pods com um custo de deleção menor são eleitos para deleção antes de pods com um custo maior. +A anotação deve ser definida no pod, com uma variação de [-2147483648, 2147483647]. Isso representa o custo de deletar um pod comparado com outros pods que pertencem à esse mesmo ReplicaSet. Pods com um custo de deleção menor são eleitos para deleção antes de pods com um custo maior. O valor implícito para essa anotação para pods que não a tem definida é 0; valores negativos são permitidos. Valores inválidos serão rejeitados pelo servidor API. 
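To illustrate the annotation described above, a minimal sketch of a Pod that marks itself as comparatively cheap to delete during a ReplicaSet scale-down (the name, image, and cost value are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-1                  # illustrative name
  annotations:
    # lower cost than sibling Pods, so this Pod is preferred for deletion
    controller.kubernetes.io/pod-deletion-cost: "-100"
spec:
  containers:
    - name: web
      image: nginx:1.25        # placeholder image
```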
diff --git a/content/pt-br/docs/home/_index.md b/content/pt-br/docs/home/_index.md index 61c2921ce9bb7..74b80af76d325 100644 --- a/content/pt-br/docs/home/_index.md +++ b/content/pt-br/docs/home/_index.md @@ -6,7 +6,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Home" +linkTitle: "Documentação" main_menu: true weight: 10 hide_feedback: true diff --git a/content/pt-br/docs/reference/glossary/manifest.md b/content/pt-br/docs/reference/glossary/manifest.md new file mode 100644 index 0000000000000..d553c0c57056a --- /dev/null +++ b/content/pt-br/docs/reference/glossary/manifest.md @@ -0,0 +1,13 @@ +--- +title: Manifesto +id: manifest +date: 2019-06-28 +short_description: > + Uma especificação serializada de um ou mais objetos da API do Kubernetes. +aka: +tags: +- fundamental +--- + Especificação de um objeto da API do Kubernetes em formato JSON ou YAML. + +Um manifesto especifica o estado desejado de um objeto que o Kubernetes manterá quando você aplicar o manifesto. Cada arquivo de configuração pode conter vários manifestos. diff --git a/content/pt-br/docs/reference/glossary/shuffle-sharding.md b/content/pt-br/docs/reference/glossary/shuffle-sharding.md new file mode 100644 index 0000000000000..a72019df569f6 --- /dev/null +++ b/content/pt-br/docs/reference/glossary/shuffle-sharding.md @@ -0,0 +1,42 @@ +--- +title: Shuffle-sharding +id: shuffle-sharding +date: 2020-03-04 +full_link: +short_description: > + Uma técnica para atribuir requisições para filas que proporciona melhor isolamento do que efetuar a operação módulo (resto da divisão) do _hash_ da requisição pelo número de filas. + +aka: +tags: +- fundamental +--- +Uma técnica para atribuir requisições para filas que proporciona melhor isolamento do que efetuar a operação módulo (resto da divisão) do _hash_ da requisição pelo número de filas. + + + +Nos preocupamos frequentemente em isolar diferentes fluxos de requisições +uma das outras, de modo que um fluxo de alta intensidade não afete um fluxo de baixa intensidade. +Uma forma simples de colocar requisições em fila é gerar um _hash_ baseado +em características da requisição, efetuar a operação módulo, ou resto da divisão, do _hash_ calculado pelo número de filas, para ter o +índice da fila à ser usada. A função _hash_ usa como entrada características da requisição +que se alinha com o fluxo. Por exemplo, na Internet isso é frequentemente a tupla de 5 elementos de +endereço de origem e destino, protocolo, e portas de origem e destino. + +Esse simples esquema baseado em _hash_ tem a propriedade de que qualquer fluxo de alta intensidade +irá tirar todos fluxos de baixa intensidade dessa mesma fila. +Fornecer um bom isolamento para um grande número de fluxos requer um grande +número de filas, o que é problemático. Shuffle-sharding é uma +técnica mais ágil que pode fazer um trabalho melhor de isolar fluxos de baixa intensidade +dos fluxos de alta intensidade. A terminologia do shuffle-sharding faz uso +da metáfora de distribuição da mão de cartas; cada fila é uma carta metafórica. +A técnica de shuffle-sharding começa fazendo _hashing_ das características de identificação +do fluxo da solicitação, para produzir um valor _hash_ com dezenas ou mais bits. +Em seguida, o valor _hash_ é usado como uma fonte de entropia para embaralhar o baralho e +dar uma mão de cartas (filas). Todas as filas tratadas são examinadas e a solicitação +é colocada em uma das filas examinadas com menor tamanho. 
+Com um número modesto de cartas, não custa examinar todas as cartas distribuídas, e um dado +fluxo de baixa intensidade tem uma boa chance de se esquivar dos efeitos de um dado +fluxo de alta intensidade. +Com um número maior de cartas é caro examinar as filas tratadas e mais difícil para que os +fluxos de baixa intensidade se esquivem dos efeitos coletivos de um conjunto de fluxos de alta +intensidade. Assim, o tamanho da mão do baralho deve ser escolhido criteriosamente. diff --git a/content/pt-br/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/pt-br/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 679431374a983..de08f48e37470 100644 --- a/content/pt-br/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/pt-br/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -133,7 +133,7 @@ Para mais detalhes sobre compatibilidade entre as versões, veja: ```shell sudo apt-get update - sudo apt-get install -y apt-transport-https ca-certificates curl + sudo apt-get install -y apt-transport-https ca-certificates curl gpg ``` 2. Faça o download da chave de assinatura pública da Google Cloud: diff --git a/content/pt-br/docs/tasks/tools/install-kubectl-linux.md b/content/pt-br/docs/tasks/tools/install-kubectl-linux.md index 4c37e5f96b0fb..2656115f22b4c 100644 --- a/content/pt-br/docs/tasks/tools/install-kubectl-linux.md +++ b/content/pt-br/docs/tasks/tools/install-kubectl-linux.md @@ -44,7 +44,7 @@ Por exemplo, para fazer download da versão {{< skew currentPatchVersion >}} no Faça download do arquivo checksum de verificação do kubectl: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" ``` Valide o binário kubectl em relação ao arquivo de verificação: @@ -215,7 +215,7 @@ Abaixo estão os procedimentos para configurar o autocompletar para Bash, Fish e Faça download do arquivo checksum de verificação do kubectl-convert: ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" ``` Valide o binário kubectl-convert com o arquivo de verificação: diff --git a/content/pt-br/docs/tutorials/_index.md b/content/pt-br/docs/tutorials/_index.md index b0a9fe75af293..9df0be602d96f 100644 --- a/content/pt-br/docs/tutorials/_index.md +++ b/content/pt-br/docs/tutorials/_index.md @@ -64,7 +64,7 @@ Antes de iniciar um tutorial, é interessante que você salve a página do * [AppArmor](/docs/tutorials/security/apparmor/) -* [seccomp](/docs/tutorials/security/seccomp/) +* [Seccomp](/docs/tutorials/security/seccomp/) ## {{% heading "whatsnext" %}} diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/_index.html b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html index 20d0529c6258d..8fa280c8ce306 100644 --- a/content/pt-br/docs/tutorials/kubernetes-basics/_index.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html @@ -62,25 +62,25 @@

    Módulos básicos do Kubernetes

    @@ -90,17 +90,17 @@

    Módulos básicos do Kubernetes

    diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index c1b06dedca975..e181a606b76ad 100644 --- a/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -132,7 +132,7 @@

Implante seu primeiro aplicativo no Kubernetes, empacotado em um contêiner que utiliza o NGINX para repetir todas as requisições. (Se você ainda não tentou criar o aplicativo hello-node e implantá-lo usando um contêiner, você pode fazer isso primeiro seguindo as - instruções do tutorial Olá, Minikube!). + instruções do tutorial Olá, Minikube!).

    diff --git a/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html index ee6b9d3241498..dbff93774c91f 100644 --- a/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -1,46 +1,71 @@ --- -title: Executando múltiplas instâncias de seu aplicativo +title: Executando Múltiplas Instâncias da sua Aplicação weight: 10 +description: |- + Escalone uma aplicação existente de forma manual utilizando kubectl. --- - + - -

    -
    -

    Objetivos

    +
    +

    Objetivos

      -
    • Escalar uma aplicação usando kubectl.
    • +
    • Escalonar uma aplicação usando kubectl.
    -

    Escalando uma aplicação

    - -

    Nos módulos anteriores nós criamos um Deployment, e então o expusemos publicamente através de um serviço (Service). O Deployment criou apenas um único Pod para executar nossa aplicação. Quando o tráfego aumentar nós precisaremos escalar a aplicação para suportar a demanda de usuários.

    +

    Escalonando uma aplicação

    -

    O escalonamento é obtido pela mudança do número de réplicas em um Deployment

    +

    + Nos módulos anteriores, criamos um + Deployment, + e então o expusemos publicamente através de um serviço + (Service). + O Deployment criou apenas um único Pod para executar nossa aplicação. + Quando o tráfego aumentar, precisaremos escalonar a aplicação para + suportar a demanda de usuários. +

    +

    + Se você ainda não tiver estudado as seções anteriores, inicie + pelo tutorial + Usando Minikube para criar um cluster. +

    +

    + O escalonamento é obtido pela mudança do número de + réplicas em um Deployment +

    +

    + NOTA Se você estiver seguindo este tutorial após a + seção anterior, + poderá ser necessário refazer a seção criando um cluster, + pois os serviços podem ter sido removidos. +

    Resumo:

      -
    • Escalando um Deployment
    • +
    • Escalonando um Deployment
    -

    Você pode criar desde o início um Deployment com múltiplas instâncias usando o parâmetro --replicas para que o kubectl crie o comando de deployment

    +

    + Você pode criar desde o início um Deployment com + múltiplas instâncias usando o parâmetro --replicas + do comando kubectl create deployment +

    @@ -86,14 +111,34 @@

    Visão geral sobre escalonamento

    -

    Escalar um Deployment garantirá que novos Pods serão criados e agendados para nós de processamento com recursos disponíveis. O escalonamento aumentará o número de Pods para o novo estado desejado. O Kubernetes também suporta o auto-escalonamento (autoscaling) de Pods, mas isso está fora do escopo deste tutorial. Escalar para zero também é possível, e isso terminará todos os Pods do Deployment especificado.

    +

    + Escalonar um Deployment garantirá que novos Pods serão criados + e alocados em nós de processamento com recursos disponíveis. O + escalonamento aumentará o número de Pods para o novo estado + desejado. O Kubernetes também suporta o auto-escalonamento + (autoscaling) + de Pods, mas isso está fora do escopo deste tutorial. Escalonar + para zero também é possível, e encerrará todos os Pods do + Deployment especificado. +

    -

    Executar múltiplas instâncias de uma aplicação irá requerer uma forma de distribuir o tráfego entre todas elas. Serviços possuem um balanceador de carga integrado que distribuirá o tráfego de rede entre todos os Pods de um Deployment exposto. Serviços irão monitorar continuamente os Pods em execução usando endpoints para garantir que o tráfego seja enviado apenas para Pods disponíveis.

    +

    + Executar múltiplas instâncias de uma aplicação requer uma forma + de distribuir o tráfego entre todas elas. Serviços possuem um + balanceador de carga integrado que distribui o tráfego de rede + entre todos os Pods de um Deployment exposto. Serviços irão + monitorar continuamente os Pods em execução usando endpoints + para garantir que o tráfego seja enviado apenas para Pods + disponíveis. +

    -

    O Escalonamento é obtido pela mudança do número de réplicas em um Deployment.

    +

    + O escalonamento é obtido pela mudança do número de + réplicas em um Deployment. +

    @@ -102,17 +147,121 @@

    Visão geral sobre escalonamento

    -

    No momento em que múltiplas instâncias de uma aplicação estiverem em execução será possível realizar atualizações graduais no cluster sem que ocorra indisponibilidade. Nós cobriremos isso no próximo módulo. Agora, vamos ao terminal online e escalar nossa aplicação.

    +

    + Uma vez que você tenha múltiplas instâncias de uma aplicação + em execução será possível realizar atualizações graduais no + cluster sem que ocorra indisponibilidade. Cobriremos isso no + próximo módulo. Agora, vamos ao terminal escalonar nossa aplicação. +

    +
    +
    + +
    +
    +

    Escalonando um Deployment

    +

    + Para listar seus Deployments, utilize o subcomando + get deployments: + kubectl get deployments +

    +

    A saída deve ser semelhante a:

    +
    +                NAME                  READY   UP-TO-DATE   AVAILABLE   AGE
    +                kubernetes-bootcamp   1/1     1            1           11m
    +                
    +

    + Teremos um único Pod. Se nenhum Pod aparecer, tente rodar o + comando novamente. +

    +
      +
    • NAME lista os nomes dos Deployments no cluster.
    • +
    • + READY exibe a proporção de réplicas atuais/desejadas + (CURRENT/DESIRED). +
    • +
    • + UP-TO-DATE exibe o número de réplicas que foram + atualizadas para atingir o estado desejado. +
    • +
    • + AVAILABLE exibe o número de réplicas da aplicação + que estão disponíveis para seus usuários. +
    • +
    • + AGE exibe há quanto tempo a aplicação está rodando. +
    • +
    +

    Para ver o ReplicaSet criado pelo Deployment, execute + kubectl get rs

    +

    Observe que o nome do ReplicaSet sempre é exibido no formato + [NOME-DO-DEPLOYMENT]-[TEXTO-ALEATÓRIO]. O texto aleatório + é gerado e utiliza o valor do pod-template-hash como semente.

    +

    Duas colunas importantes desta saída são:

    +
      +
    • DESIRED exibe o número desejado de réplicas da aplicação, + que você define quando cria o objeto Deployment. Este é o estado + desejado.
    • +
    • CURRENT exibe quantas réplicas estão em execução atualmente.
    • +
    +

    A seguir, vamos escalonar o Deployment para 4 réplicas. Utilizaremos + o comando kubectl scale, seguido pelo tipo Deployment, + nome e o número desejado de instâncias:

    +

    kubectl scale deployments/kubernetes-bootcamp --replicas=4

    +

    Para listar seus Deployments mais uma vez, utilize get deployments:

    +

    kubectl get deployments

    +

    A mudança foi aplicada, e temos 4 instâncias da aplicação disponíveis. A seguir, + vamos verificar se o número de Pods mudou:

    +

    kubectl get pods -o wide

    +

    Temos 4 Pods agora, com endereços IP diferentes. A mudança foi registrada no log + de eventos do Deployment. Para verificar esta mudança, utilize o subcomando describe:

    +

    kubectl describe deployments/kubernetes-bootcamp

    +

    Você pode ver na saída deste comando que temos 4 réplicas agora.

    -
    - Iniciar tutorial interativo +

    Balanceamento de carga

    +

    Vamos verificar que o Service está efetuando o balanceamento de carga + do tráfego recebido. Para encontrar o endereço IP exposto e a porta podemos + utilizar o comando para descrever o serviço como aprendemos na seção anterior:

    +

    kubectl describe services/kubernetes-bootcamp

    +

    Crie uma variável de ambiente chamada NODE_PORT que possui + o valor da porta do nó:

    +

    export NODE_PORT="$(kubectl get services/kubernetes-bootcamp -o go-template='{{(index .spec.ports 0).nodePort}}')"

    +

    echo NODE_PORT=$NODE_PORT

    +

    A seguir, iremos executar o comando curl para efetuar + uma requisição para o endereço IP e porta expostos. Rode este comando + múltiplas vezes:

    +

    curl http://"$(minikube ip):$NODE_PORT"

    +

    Cada requisição é atendida por um Pod diferente. Isso demonstra que o + balanceamento de carga está funcionando.

    +
    +
    +

    Reduzir o número de réplicas

    +

    Para reduzir o número de réplicas do Deployment para 2, execute + o subcomando scale novamente:

    +

    kubectl scale deployments/kubernetes-bootcamp --replicas=2

    +

    Liste os Deployments para verificar se a mudança foi aplicada + com o subcomando get deployments:

    +

    kubectl get deployments

    +

    O número de réplicas reduziu para 2. Liste o número de Pods com + o comando get pods:

    +

    kubectl get pods -o wide

    +

    Isso confirma que 2 Pods foram encerrados.

    +
    +
    + +
    +

    + Assim que você finalizar este tutorial, vá para + Performing a Rolling Update (em inglês).

    +

    +
    +
    diff --git a/content/ru/_index.html b/content/ru/_index.html index 305f652aea368..3054abc629044 100644 --- a/content/ru/_index.html +++ b/content/ru/_index.html @@ -43,12 +43,12 @@

О сложности миграции 150+ микросервисов в Kubernetes

    - Посетите KubeCon + CloudNativeCon в Европе, 18-21 апреля 2023 года + Посетите KubeCon + CloudNativeCon в Европе, 19-22 марта 2024 года



    - Посетите KubeCon + CloudNativeCon в Северной Америке, 6-9 ноября 2023 года + Посетите KubeCon + CloudNativeCon в Северной Америке, 12-15 ноября 2024 года

    diff --git a/content/ru/community/_index.html b/content/ru/community/_index.html index 0d7b49b5a5051..9cd10318719b5 100644 --- a/content/ru/community/_index.html +++ b/content/ru/community/_index.html @@ -226,7 +226,7 @@

    Последние новости



    diff --git a/content/ru/docs/concepts/workloads/_index.md b/content/ru/docs/concepts/workloads/_index.md new file mode 100644 index 0000000000000..d5f653fb25564 --- /dev/null +++ b/content/ru/docs/concepts/workloads/_index.md @@ -0,0 +1,102 @@ +--- +title: "Рабочие нагрузки" +weight: 55 +description: > + Поймите под, самый маленький развертываемый вычислительный объект в Kubernetes, и абстракции более высокого уровня, которые помогут вам их запускать. +no_list: true +card: + title: Рабочие нагрузки и поды + name: concepts + weight: 60 +--- + +Рабочая нагрузка — это приложение, работающее в Kubernetes. +Независимо от того, представляет ли ваша рабочая нагрузка один компонент +или несколько, которые работают вместе, в Kubernetes вы +запускаете ее внутри набора [_подов_](/docs/concepts/workloads/pods). +В Kubernetes под представляет собой набор работающих +{{< glossary_tooltip text="контейнеров" term_id="container" >}} в кластере. + +Поды Kubernetes имеют определенный [жизненный цикл](/docs/concepts/workloads/pods/pod-lifecycle/). +Например, если у вас запущен под в кластере, то критическая ошибка на +{{< glossary_tooltip text="узле" term_id="node" >}}, где этот модуль работает, +означает, что все поды на этом узле выходят из строя. Kubernetes считает +этот уровень сбоя безвозвратным: потребуется создать новый под для восстановления, +даже если узел позже станет работоспособным. + +Однако, чтобы значительно облегчить жизнь, не нужно +непосредственно контролировать каждый под. Вместо этого можно использовать +_ресурсы рабочей нагрузки_, которые управляют набором подов за вас. +Эти ресурсы настраивают {{< glossary_tooltip term_id="controller" text="контроллеры" >}}, +которые обеспечивают запуск нужного количества модулей +нужного типа в соответствии с указанным вами состоянием. + +Kubernetes предоставляет несколько встроенных ресурсов для рабочих нагрузок: + +* Деплоймент ([Deployment](/docs/concepts/workloads/controllers/deployment/)) и [ReplicaSet](/docs/concepts/workloads/controllers/replicaset/) + (замена устаревшего ресурса + {{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}}). + Deployment хорошо подходит для управления неизменными (stateless) приложениями + в кластере, то есть для случаев, когда любой под + в деплойменте не содержит изменяемых данных + и может быть заменен при необходимости. +* [StatefulSet](/docs/concepts/workloads/controllers/statefulset/) позволяет запускать один или несколько + связанных подов, которые как-то отслеживают состояние (являются stateful). Например, если ваше + приложение записывает постоянные данные, вы можете использовать StatefulSet, + который сопоставляет каждый под с [PersistentVolume](/docs/concepts/storage/persistent-volumes/). + Ваш код, работающий в подах этого StatefulSet, может + копировать данные в другие поды в том же StatefulSet, + чтобы повысить общую отказоустойчивость. +* [DaemonSet](/docs/concepts/workloads/controllers/daemonset/) создает поды, которые + предоставляют инструменты, которые доступны локально для узлов. + Каждый раз, когда вы добавляете в кластер узел, + соответствующий спецификации DaemonSet, + слой управления (control plane) планирует (т.е. запускает) под с этим DaemonSet + на новом узле. Каждый под в DaemonSet выполняет работу, + аналогичную работе системного демона на классическом сервере Unix/POSIX. 
+ DaemonSet может иметь основополагающее значение для работы вашего кластера, + например, в случае плагина для запуска сети кластера ([cluster networking](/ru/docs/concepts/cluster-administration/networking/#реализация-сетевой-модели-kubernetes)). + Этот ресурс может помочь управлять узлом + или предоставить дополнительные возможности + для используемой контейнерной платформы. +* [Job](/docs/concepts/workloads/controllers/job/) и + [CronJob](/docs/concepts/workloads/controllers/cron-jobs/) предоставляют различные + способы запуска задач, которые выполняются до своего завершения, а затем останавливаются. + [Job](/docs/concepts/workloads/controllers/job/) используется для задачи, + которая выполняется только один раз. Вы можете + использовать [CronJob](/docs/concepts/workloads/controllers/cron-jobs/) для запуска этого же задания + несколько раз по расписанию. + +В экосистеме вокруг проекта Kubernetes можно найти сторонние ресурсы для +рабочих нагрузок, которые предоставляют дополнительные функции. Используя +[custom resource definition](/docs/concepts/extend-kubernetes/api-extension/custom-resources/), +вы можете добавить сторонний ресурс рабочей нагрузки, если нужно определенное +поведение, не являющееся частью стандартного Kubernetes. Например, если вы хотите +запустить группу подов для своего приложения, но хотите свернуть работу в случае, +когда не все поды доступны (возможно, для распределенной задачи с высокой пропускной способностью), +можно реализовать или установить расширение, которое предоставляет эту функцию. + +## {{% heading "whatsnext" %}} + +В дополнение к информации о каждом виде API для управления рабочей нагрузкой вы можете прочитать, +как выполнять конкретные задачи: + +* [Запустите stateless-приложение, используя деплоймент](/docs/tasks/run-application/run-stateless-application-deployment/) +* Запустите stateful-приложение в [единственном экземпляре](/docs/tasks/run-application/run-single-instance-stateful-application/) + или в виде [множества реплик](/docs/tasks/run-application/run-replicated-stateful-application/) +* [Запустите задачи автоматизации с помощью CronJob](/docs/tasks/job/automated-tasks-with-cron-jobs/) + +Чтобы узнать о механизмах отделения кода от конфигурации в Kubernetes, посетите +раздел [Configuration](/docs/concepts/configuration/). + +Есть два вспомогательных концепта, которые дают представление о том, +как Kubernetes управляет подами для приложений: +* Сборщик мусора ([Garbage collection](/ru/docs/concepts/architecture/garbage-collection/)) вычищает объекты из + вашего кластера после удаления _ресурса-владельца_. +* Контроллер времени существования после завершения ([_time-to-live after finished_ controller](/docs/concepts/workloads/controllers/ttlafterfinished/)) + удаляет задания (Jobs) по истечении определенного времени с момента их завершения. + +После запуска вашего приложения можно сделать его доступным в интернете +с помощью [Service](/docs/concepts/services-networking/service/) или, только в случае веб-приложения, +используя [Ingress](/docs/concepts/services-networking/ingress). 
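For illustration, a minimal sketch of the simplest workload resource described above: a Deployment that keeps three replicas of a stateless app running (all names and the image are placeholders):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-web              # placeholder name
spec:
  replicas: 3                  # desired number of identical Pods
  selector:
    matchLabels:
      app: hello-web
  template:
    metadata:
      labels:
        app: hello-web
    spec:
      containers:
        - name: web
          image: nginx:1.25    # placeholder image
          ports:
            - containerPort: 80
```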
+ diff --git a/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md b/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md index ba529883efe92..ba9a74a20a2a0 100644 --- a/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md +++ b/content/ru/docs/contribute/generate-ref-docs/contribute-upstream.md @@ -22,7 +22,7 @@ weight: 20 - Установленные инструменты: - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) - - [Golang](https://golang.org/doc/install) версии 1.13+ + - [Golang](https://go.dev/doc/install) версии 1.13+ - [Docker](https://docs.docker.com/engine/installation/) - [etcd](https://github.com/coreos/etcd/) - [make](https://www.gnu.org/software/make/) diff --git a/content/ru/docs/home/_index.md b/content/ru/docs/home/_index.md index 13c08174b2e40..a9108d47d76f2 100644 --- a/content/ru/docs/home/_index.md +++ b/content/ru/docs/home/_index.md @@ -6,7 +6,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Главная" +linkTitle: "Документация" main_menu: true weight: 10 hide_feedback: true diff --git a/content/ru/docs/tasks/administer-cluster/certificates.md b/content/ru/docs/tasks/administer-cluster/certificates.md index 3170f5b4120b5..304ad57e96c3b 100644 --- a/content/ru/docs/tasks/administer-cluster/certificates.md +++ b/content/ru/docs/tasks/administer-cluster/certificates.md @@ -17,7 +17,7 @@ weight: 20 1. Скачайте, распакуйте и инициализируйте пропатченную версию `easyrsa3`. ```shell - curl -LO curl -LO https://dl.k8s.io/easy-rsa/easy-rsa.tar.gz + curl -LO https://dl.k8s.io/easy-rsa/easy-rsa.tar.gz tar xzf easy-rsa.tar.gz cd easy-rsa-master/easyrsa3 ./easyrsa init-pki diff --git a/content/ru/docs/tutorials/_index.md b/content/ru/docs/tutorials/_index.md index 6cbe48cbe440f..e0b402f750aba 100644 --- a/content/ru/docs/tutorials/_index.md +++ b/content/ru/docs/tutorials/_index.md @@ -47,7 +47,7 @@ content_type: concept * [AppArmor](/docs/tutorials/clusters/apparmor/) -* [seccomp](/docs/tutorials/clusters/seccomp/) +* [Seccomp](/docs/tutorials/clusters/seccomp/) ## Сервисы diff --git a/content/uk/_index.html b/content/uk/_index.html index a8b05fa13c528..ff0b5312422af 100644 --- a/content/uk/_index.html +++ b/content/uk/_index.html @@ -4,6 +4,8 @@ cid: home --- +{{< site-searchbar >}} + {{< blocks/section id="oceanNodes" >}} {{% blocks/feature image="flower" %}} -Давайте повернемось назад у часі та дізнаємось, завдяки чому Kubernetes став таким корисним. +Повернімось назад у часі та дізнаємось, завдяки чому Kubernetes став таким корисним. ![Еволюція розгортання](/images/docs/Container_Evolution.svg) diff --git a/content/uk/docs/home/_index.md b/content/uk/docs/home/_index.md index 5a8cfc3c51a09..d534d5f3ceeea 100644 --- a/content/uk/docs/home/_index.md +++ b/content/uk/docs/home/_index.md @@ -4,7 +4,7 @@ noedit: true cid: docsHome layout: docsportal_home class: gridPage gridPageHome -linkTitle: "Головна" +linkTitle: "Документація" main_menu: true weight: 10 hide_feedback: true diff --git a/content/uk/docs/tutorials/_index.md b/content/uk/docs/tutorials/_index.md index a249e4814ce93..7f8e0024403bc 100644 --- a/content/uk/docs/tutorials/_index.md +++ b/content/uk/docs/tutorials/_index.md @@ -28,11 +28,8 @@ Before walking through each tutorial, you may want to bookmark the * [Основи Kubernetes](/docs/tutorials/kubernetes-basics/) - детальний навчальний матеріал з інтерактивними уроками, що допоможе вам зрозуміти Kubernetes і спробувати його базову функціональність. 
- * [Масштабовані мікросервіси з Kubernetes (Udacity)](https://www.udacity.com/course/scalable-microservices-with-kubernetes--ud615) - * [Вступ до Kubernetes (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) - * [Привіт Minikube](/docs/tutorials/hello-minikube/) -#### 处处适用 +#### 永不过时 @@ -45,8 +45,8 @@ {{% blocks/feature image="suitcase" %}} - -#### 永不过时 + +#### 处处适用 - 参加 2023 年 4 月 18-21 日的欧洲 KubeCon + CloudNativeCon + + 参加 2024 年 3 月 19-22 日的欧洲 KubeCon + CloudNativeCon



    - - 参加 2023 年 11 月 6-9 日的北美 KubeCon + CloudNativeCon + + 参加 2024 年 11 月 12-15 日的北美 KubeCon + CloudNativeCon
    diff --git a/content/zh-cn/blog/2023-11-24-sig-testing-spotlight.md b/content/zh-cn/blog/2023-11-24-sig-testing-spotlight.md new file mode 100644 index 0000000000000..59072b6d93eec --- /dev/null +++ b/content/zh-cn/blog/2023-11-24-sig-testing-spotlight.md @@ -0,0 +1,388 @@ +--- +layout: blog +title: "聚焦 SIG Testing" +slug: sig-testing-spotlight-2023 +date: 2023-11-24 +--- + + +**作者:** Sandipan Panda + +**译者:** [Michael Yao](https://github.com/windsonsea) + + +欢迎阅读又一期的 “SIG 聚光灯” 系列博客,这些博客重点介绍 Kubernetes +项目中各个特别兴趣小组(SIG)所从事的令人赞叹的工作。这篇博客将聚焦 +[SIG Testing](https://github.com/kubernetes/community/tree/master/sig-testing#readme), +这是一个致力于有效测试 Kubernetes,让此项目的繁琐工作实现自动化的兴趣小组。 +SIG Testing 专注于创建和运行工具和基础设施,使社区更容易编写和运行测试,并对测试结果做贡献、分析和处理。 + + +为了深入了解 SIG Testing 的情况, +[Sandipan Panda](https://github.com/sandipanpanda) +采访了 Google 高级软件工程师兼 SIG Testing 主席 +[Michelle Shepardson](https://github.com/michelle192837) +以及英特尔软件工程师、架构师兼 SIG Testing 技术负责人 +[Patrick Ohly](https://github.com/pohly)。 + + +## 会见贡献者 + +**Sandipan:** 你能简单介绍一下自己吗,谈谈你的职责角色以及你是如何参与 +Kubernetes 项目和 SIG Testing 的? + + +**Michelle:** 嗨!我是 Michelle,是 Google 高级软件工程师。 +我最初是为 SIG Testing 开发工具(如 TestGrid 的外部实例)而参与到 Kubernetes 项目的。 +我是 TestGrid 和 Prow 的轮值人员,现在也是这个 SIG 的主席。 + + +**Patrick:** 你好!我在英特尔的一个团队中担任软件工程师和架构师,专注于开源云原生项目。 +当我开始学习 Kubernetes 开发存储驱动时,我最初的问题是“如何在集群中进行测试以及如何记录信息?” +这个兴趣点引发了各种增强提案,直到我(重新)编写了足够多的代码,也正式担任了 SIG Testing 技术负责人 +(负责 [E2E 框架](https://github.com/kubernetes-sigs/e2e-framework))兼结构化日志工作组负责人。 + + +## 测试实践和工具 + +**Sandipan:** 测试是一个存在多种方法和工具的领域,你们是如何形成现有实践方式的? + + +**Patrick:** 我没法谈论早期情况,因为那时我还未参与其中 😆,但回顾一些提交历史可以明显看出, +当时开发人员只是看看有什么可用的工具并开始使用这些工具。对于 E2E 测试来说,使用的是 +[Ginkgo + Gomega](https://github.com/onsi/ginkgo)。集成一些黑科技是必要的, +例如在测试运行后进行清理和对测试进行分类。最终形成了 Ginkgo v2 +和[重新修订的 E2E 测试最佳实践](/blog/2023/04/12/e2e-testing-best-practices-reloaded/)。 +关于单元测试,意见非常多样化:一些维护者倾向于只使用 Go 标准库和手动检查。 +而其他人使用 stretchr/testify 这类辅助工具包。这种多样性是可以接受的,因为单元测试是自包含的: +贡献者只需在处理许多不同领域时保持灵活。集成测试介于二者之间,它基于 Go 单元测试, +但需要复杂的辅助工具包来启动 API 服务器和其他组件,然后运行更像是 E2E 测试的测试。 + + +## SIG Testing 拥有的子项目 + +**Sandipan:** SIG Testing 非常多样化。你能简要介绍一下 SIG Testing 拥有的各个子项目吗? + + +**Michelle:** 广义上来说,我们拥有与测试框架相关的子项目和基础设施,尽管它们肯定存在重叠。 +我们的子项目包括: + +- [e2e-framework](https://pkg.go.dev/sigs.k8s.io/e2e-framework)(外部使用) +- [test/e2e/framework](https://pkg.go.dev/k8s.io/kubernetes/test/e2e/framework) + (用于 Kubernetes 本身) +- kubetest2(用于端到端测试) +- boskos(用于 e2e 测试的资源租赁) +- [KIND](https://kind.sigs.k8s.io/)(在 Docker 中运行 Kubernetes,用于本地测试和开发) +- 以及 KIND 的云驱动。 + +我们的基础设施包括: + +- [Prow](https://docs.prow.k8s.io/)(基于 K8s 的 CI/CD 和 chatops) +- test-infra 仓库中用于分类、分析、覆盖率、Prow/TestGrid 配置生成等的其他工具和实用程序。 + + +**如果你有兴趣了解更多并参与到 SIG Testing 的任何子项目中,查阅 +[SIG Testing 的 README](https://github.com/kubernetes/community/tree/master/sig-testing#subprojects)。** + + +## 主要挑战和成就 + +**Sandipan:** 你们面临的一些主要挑战是什么? + + +**Michelle:** Kubernetes 从贡献者到代码再到用户等各方面看都是一个庞大的项目。 +测试和基础设施必须满足这种规模,跟上 Kubernetes 每个仓库的所有变化, +同时尽可能地促进开发、改进和发布项目,尽管当然我们并不是唯一参与其中的 SIG。 +我认为另一个挑战是子项目的人员配置。SIG Testing 有一些已经存在多年的子项目, +但其中许多最初的维护者已经转到其他领域或者没有时间继续维护它们。 +我们需要在这些子项目中培养长期的专业知识和 Owner。 + + +**Patrick:** 正如 Michelle 所说,规模本身可能就是一个挑战。 +不仅基础设施要与之匹配,我们的流程也必须与贡献者数量相匹配。 +记录最佳实践是好的,但还不够好:我们有许多新的贡献者,这是好事, +但是让 Reviewer 靠人工解释最佳实践并不可行,这前提是 Reviewer 了解这些最佳实践! +如果现有代码不能被立即更新也无济于事,因为代码实在太多了,特别是对于 E2E 测试来说更是如此。 +在接受现有代码无法通过同样的 linter 检查的同时, +[为新代码或代码修改应用更严格的 lint 检查](https://groups.google.com/a/kubernetes.io/g/dev/c/myGiml72IbM/m/QdO5bgQiAQAJ)对于改善情况会有所帮助。 + + +**Sandipan:** 有没有一些 SIG 成就使你感到自豪,想要重点说一下? 
+ + +**Patrick:** 我有一些拙见,因为我一直在推动这个项目,但我认为现在 +[E2E 框架](https://github.com/kubernetes-sigs/e2e-framework)和 lint 机制比以前好得多。 +我们可能很快就能在启用竞争检测的情况下运行集成测试,这很重要, +因为目前我们只能对单元测试进行竞争检测,而那些往往不太复杂。 + + +**Sandipan:** 测试始终很重要,但在 Kubernetes 发布过程中,你的工作是否有任何特殊之处? + + +**Patrick:** [测试不稳定](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/flaky-tests.md)…… +如果我们有太多这样的不稳定测试,开发速度就会下降,因为我们无法在没有干净测试运行环境的情况下合并 PR, +并且这些环境会越来越少。开发者也会失去对测试的信任,只是“重新测试”直到有了一个干净的运行环境为止, +而不会检查失败是否确实与当前更改中的回归有关。 + + +## 人员和范围 + +**Sandipan:** 这个 SIG 中有哪些让你热爱的? + + +**Michelle:** 当然是人 🙂。除此之外,我喜欢 SIG Testing 的宽广范围。 +我觉得即使是小的改动也可以对其他贡献者产生重大影响,即使随着时间的推移我的兴趣发生变化, +我也永远不会缺少项目可供我参与。 + + +**Patrick:** 我的工作是为了让我和其他开发人员的工作变得更好, +比如建设在其他地方开发新特性时每天必须使用的工具。 + +**Sandipan:** 你们有没有任何好玩/酷炫/日常趣事可以告诉我们? + + +**Patrick:** 五年前,我开始致力于 E2E 框架的增强,然后在一段时间内参与活动较少。 +当我回来并想要测试一些新的增强功能时,我询问如何为新代码编写单元测试, +并被指向了一些看起来有些熟悉的、好像以前**见过**的现有测试。 +我查看了提交历史,发现这些测试是我自己**编写的**! +你可以决定这是否说明了我的长期记忆力衰退还是这很正常... +无论如何,伙计们,要谨记让每个 Commit 的消息和注释明确、友好; +某一刻会有人需要看这些消息和注释 - 甚至可能就是你自己! + + +## 展望未来 + +**Sandipan:** 在哪些领域和/或子项目上,你们的 SIG 需要帮助? + + +**Michelle:** 目前有一些子项目没有人员配置,需要有意愿了解更多的人参与进来。 +[boskos](https://github.com/kubernetes-sigs/boskos#boskos) 和 +[kubetest2](https://github.com/kubernetes-sigs/kubetest2#kubetest2) 对我来说尤其突出, +因为它们对于测试非常重要,但却缺乏专门的负责人。 + + +**Sandipan:** 新的 SIG Testing 贡献者可以带来哪些有用的技能? +如果他们的背景与编程没有直接关联,有哪些方面可以帮助到这个 SIG? + + +**Michelle:** 我认为具备用户共情、清晰反馈和识别模式的能力非常有用。 +有人使用测试框架或工具,并能用清晰的示例概述痛点,或者能够识别项目中的更广泛的问题并提供数据来支持解决方案。 + + +**Sandipan:** SIG Testing 的下一步是什么? + +**Patrick:** 对于新代码,更严格的 lint 检查很快将成为强制要求。 +如果有人愿意承担这项工作,我们可以对一些 E2E 框架的子工具包进行现代化改造。 +我还看到一个机会,可以统一一些 E2E 和集成测试的辅助代码,但这需要更多的思考和讨论。 + + +**Michelle:** 我期待为我们的工具和基础设施进行一些可用性改进, +并支持更多长期贡献者的贡献和成长,使他们在 SIG 中担任长期角色。如果你有兴趣,请联系我们! + + +展望未来,SIG Testing 有令人兴奋的计划。你可以通过他们的 +[Slack 频道](https://kubernetes.slack.com/messages/sig-testing)与 SIG Testing 的人员取得联系, +或参加他们定期举行的[每两周的周二会议](https://github.com/kubernetes/community/tree/master/sig-testing#meetings)。 +如果你有兴趣为社区更轻松地运行测试并贡献测试结果,确保 Kubernetes +在各种集群配置和云驱动中保持稳定,请立即加入 SIG Testing 社区! diff --git a/content/zh-cn/blog/_posts/2019-04-24-Hardware-Accelerated-SSLTLS-Termination-in-Ingress-Controllers-using-Kubernetes-Device-Plugins-and-RuntimeClass.md b/content/zh-cn/blog/_posts/2019-04-24-Hardware-Accelerated-SSLTLS-Termination-in-Ingress-Controllers-using-Kubernetes-Device-Plugins-and-RuntimeClass.md index e5876c0ac7b39..94a10b5d6c0ca 100644 --- a/content/zh-cn/blog/_posts/2019-04-24-Hardware-Accelerated-SSLTLS-Termination-in-Ingress-Controllers-using-Kubernetes-Device-Plugins-and-RuntimeClass.md +++ b/content/zh-cn/blog/_posts/2019-04-24-Hardware-Accelerated-SSLTLS-Termination-in-Ingress-Controllers-using-Kubernetes-Device-Plugins-and-RuntimeClass.md @@ -171,12 +171,12 @@ underlying host devices. 
基于 PCIe 的加密加速设备功能 可以受益于 IO 硬件虚拟化,通过 I/O 内存管理单元(IOMMU),提供隔离:IOMMU 将设备分组,为工作负载提供隔离的资源 (假设加密卡不与其他设备共享 **IOMMU 组**)。如果PCIe设备支持单根 I/O 虚拟化(SR-IOV)规范,则可以进一步增加隔离资源的数量。 -SR-IOV 允许将 PCIe 设备将**物理功能项(Physical Functions,PF)**设备进一步拆分为 +SR-IOV 允许将 PCIe 设备将 **物理功能项(Physical Functions,PF)** 设备进一步拆分为 **虚拟功能项(Virtual Functions, VF)**,并且每个设备都属于自己的 IOMMU 组。 要将这些借助 IOMMU 完成隔离的设备功能项暴露给用户空间和容器,主机内核应该将它们绑定到特定的设备驱动程序。 在 Linux 中,这个驱动程序是 vfio-pci, 它通过字符设备将设备提供给用户空间。内核 vfio-pci 驱动程序使用一种称为 -**PCI 透传(PCI Passthrough)**的机制, +**PCI 透传(PCI Passthrough)** 的机制, 为用户空间应用程序提供了对 PCIe 设备与功能项的直接的、IOMMU 支持的访问。 用户空间框架,如数据平面开发工具包(Data Plane Development Kit,DPDK)可以利用该接口。 此外,虚拟机(VM)管理程序可以向 VM 提供这些用户空间设备节点,并将它们作为 PCI 设备暴露给寄宿内核。 diff --git a/content/zh-cn/blog/_posts/2022-07-13-gateway-api-in-beta.md b/content/zh-cn/blog/_posts/2022-07-13-gateway-api-in-beta.md index f57e9b816f52e..e3872f7c5468e 100644 --- a/content/zh-cn/blog/_posts/2022-07-13-gateway-api-in-beta.md +++ b/content/zh-cn/blog/_posts/2022-07-13-gateway-api-in-beta.md @@ -326,7 +326,7 @@ please join us in the #sig-network-gateway-api channel on Kubernetes Slack or ou 请通过 Kubernetes Slack 的 #sig-network-gateway-api 频道或我们每周的 [社区电话会议](https://gateway-api.sigs.k8s.io/contributing/community/#meetings)加入我们。 -[gep1016]:https://github.com/kubernetes-sigs/gateway-api/blob/master/site-src/geps/gep-1016.md +[gep1016]:https://github.com/kubernetes-sigs/gateway-api/blob/main/geps/gep-1016.md [grpc]:https://grpc.io/ [pr1085]:https://github.com/kubernetes-sigs/gateway-api/pull/1085 [tcpr]:https://github.com/kubernetes-sigs/gateway-api/blob/main/apis/v1alpha2/tcproute_types.go diff --git a/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/deviceplugin-framework-overview.svg b/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/deviceplugin-framework-overview.svg new file mode 100644 index 0000000000000..64d4288a202b4 --- /dev/null +++ b/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/deviceplugin-framework-overview.svg @@ -0,0 +1,4 @@ + + + +
[deviceplugin-framework-overview.svg 图表文本:Kubelet 与 Device Plugin 各自运行一个 gRPC 服务 — Device Plugin gRPC server 实现 GetDevicePluginOptions、ListAndWatch、GetPreferredAllocation、Allocate、PreStartContainer 等端点,由 Kubelet gRPC client 调用;Kubelet gRPC server 实现 Register 端点,由 Device Plugin gRPC client 调用。]
    \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/index.md b/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/index.md new file mode 100644 index 0000000000000..5198f696237d9 --- /dev/null +++ b/content/zh-cn/blog/_posts/2022-12-19-devicemanager-ga.md/index.md @@ -0,0 +1,211 @@ +--- +layout: blog +title: 'Kubernetes 1.26:设备管理器正式发布' +date: 2022-12-19 +slug: devicemanager-ga +--- + + + + +**作者**: Swati Sehgal (Red Hat) + +**译者**: Jin Li (UOS) + + +设备插件框架是在 Kubernetes v1.8 版本中引入的,它是一个与供应商无关的框架, +旨在实现对外部设备的发现、公布和分配,而无需修改核心 Kubernetes。 +该功能在 v1.10 版本中升级为 Beta 版本。随着 Kubernetes v1.26 的最新发布, +设备管理器现已正式发布(GA)。 + + +在 kubelet 中,设备管理器通过 Unix 套接字使用 gRPC 实现与设备插件的通信。 +设备管理器和设备插件都充当 gRPC 服务器和客户端的角色,分别提供暴露的 gRPC 服务并进行连接。 +设备插件提供 gRPC 服务,kubelet 连接该服务进行设备的发现、公布(作为扩展资源)和分配。 +设备管理器连接到 kubelet 提供的 `Registration` gRPC 服务,以向 kubelet 注册自身。 + + +请查阅文档中的[示例](/zh-cn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#example-pod), +了解一个 Pod 如何通过设备插件请求集群中暴露的设备。 + + +以下是设备插件的一些示例实现: +- [AMD GPU 设备插件](https://github.com/RadeonOpenCompute/k8s-device-plugin) +- [用于 Kubernetes 的 Intel 设备插件集合](https://github.com/intel/intel-device-plugins-for-kubernetes) +- [用于 Kubernetes 的 NVIDIA 设备插件](https://github.com/NVIDIA/k8s-device-plugin) +- [用于 Kubernetes 的 SRIOV 网络设备插件](https://github.com/k8snetworkplumbingwg/sriov-network-device-plugin) + + +## 自设备插件框架引入以来的重要进展 + + +### Kubelet APIs 移至 kubelet 暂存库 +在 v1.17 版本中,面向外部的 `deviceplugin` API 包已从 `k8s.io/kubernetes/pkg/kubelet/apis/` +移动到了 `k8s.io/kubelet/pkg/apis/`。有关此变更背后的更多详细信息, +请参阅 [Move external facing kubelet apis to staging](https://github.com/kubernetes/kubernetes/pull/83551) + + +### 设备插件 API 更新 + + +新增了额外的 gRPC 端点: + +1. `GetDevicePluginOptions` 用于设备插件向 `DeviceManager` 传递选项,以指示是否支持 + `PreStartContainer`、`GetPreferredAllocation` 或其他将来的可选调用, + 并可在向容器提供设备之前进行调用。 + + +2. `GetPreferredAllocation` 允许设备插件将优先分配信息传递给 `DeviceManager`, + 使其能够将此信息纳入其分配决策中。`DeviceManager` 在 Pod + 准入时向插件请求指定大小的优选设备分配,以便做出更明智的决策。 + 例如,在为容器分配设备时,指定设备间的约束条件以表明对最佳连接设备集合的偏好。 + + +3. 在注册阶段由设备插件指示时,`PreStartContainer` 会在每次容器启动之前被调用。 + 它允许设备插件在所请求的设备上执行特定的设备操作。 + 例如,在容器启动前对 FPGA 进行重新配置或重新编程。 + + +引入这些更改的 PR 为: +1. [Invoke preStart RPC call before container start, if desired by plugin](https://github.com/kubernetes/kubernetes/pull/58282) +1. [Add GetPreferredAllocation() call to the v1beta1 device plugin API](https://github.com/kubernetes/kubernetes/pull/92665) + + +引入上述端点后,kubelet 中的设备管理器与设备管理器之间的交互如下所示: + + +{{< figure src="deviceplugin-framework-overview.svg" +alt="展示设备插件框架,显示 kubelet 与设备插件之间的关系" +class="diagram-large" caption="设备插件框架概述" >}} + + +### 设备插件注册流程的语义变更 +设备插件的代码经过重构,将 'plugin' 包独立于 `devicemanager` 包之外, +为引入 `v1beta2` 设备插件 API 做好了前期准备。 +这将允许在 `devicemanager` 中添加支持,以便同时为多个设备插件 API 提供服务。 + + +通过这次重构工作,现在设备插件必须在向 kubelet 注册之前开始提供其 gRPC 服务。 +之前这两个操作是异步的,设备插件可以在启动其 gRPC 服务器之前注册自己,但现在不再允许。 +更多细节请参考 [PR #109016](https://github.com/kubernetes/kubernetes/pull/109016) 和 +[Issue #112395](https://github.com/kubernetes/kubernetes/issues/112395)。 + + +### 动态资源分配 + +在 Kubernetes 1.26 中,受 Kubernetes +处理[持久卷](/zh-cn/docs/concepts/storage/persistent-volumes)方式的启发, +引入了[动态资源分配](/zh-cn/docs/concepts/scheduling-eviction/dynamic-resource-allocation/), +以满足那些具有更复杂资源需求的设备,例如: + + +1. 将设备的初始化和分配与 Pod 生命周期解耦。 +1. 促进容器和 Pod 之间设备的动态共享。 +1. 支持自定义特定资源参数。 +1. 启用特定资源的设置和清理操作。 +1. 实现对网络附加资源的支持,不再局限于节点本地资源。 + + +## 设备插件 API 目前已经稳定了吗? 
+不,设备插件 API 仍然不稳定;目前最新的可用设备插件 API 版本是 `v1beta1`。 +社区计划引入 `v1beta2` API,以便同时为多个插件 API 提供服务。 +对每个 API 的调用都具有请求/响应类型,可以在不明确升级 API 的情况下添加对新 API 版本的支持。 + + +除此之外,社区中存在一些提案,打算引入额外的端点 +[KEP-3162: Add Deallocate and PostStopContainer to Device Manager API](https://github.com/kubernetes/enhancements/issues/3162)。 diff --git a/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/decision-tree.svg b/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/decision-tree.svg index c9e57f34b6c5f..fea40a3e3b56b 100644 --- a/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/decision-tree.svg +++ b/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/decision-tree.svg @@ -1,3 +1,580 @@ -PriorityClasses and their values
[decision-tree.svg 原英文图表文本已删除,标签翻译为中文后重新添加]
\ No newline at end of file
[decision-tree.svg 图表文本:PriorityClass 及其值 — 1. dev-pc (value:1000000);2. preprod-pc (value:2000000);3. prod-pc (value:4000000)。调度队列:根据 PriorityClass 值,调度程序将所有 Pod 放入队列中,dev-nginx 最低,prod-nginx 最高。工作节点是否有足够的资源来运行 Pod?否 → 抢占逻辑启动并抢占优先级最低的 Pod,最低优先级的 Pod 被体面地终止,为高优先级的 Pod 腾出空间(本例中因资源不足,dev-nginx 被驱逐);是 → Pod 调度正常进行,调度程序调度高优先级的 Pod(本例中 prod-nginx 被成功调度到 node01);随后再判断工作节点现在是否有足够的资源来运行高优先级的 Pod,如果资源仍然有限,该过程将继续进行。]
    \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/kube-scheduler.svg b/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/kube-scheduler.svg index 53f5c1fb7b7a3..aa7b8400aa8b2 100644 --- a/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/kube-scheduler.svg +++ b/content/zh-cn/blog/_posts/2023-01-12-protect-mission-critical-pods-priorityclass/kube-scheduler.svg @@ -1,4 +1,651 @@ -









[kube-scheduler.svg 原英文图表文本已删除,标签翻译为中文后重新添加]
\ No newline at end of file
[kube-scheduler.svg 图表文本:组件包括 kube-apiserver、etcd、kube-scheduler、kube-controller-manager、三个 kubelet、客户端,以及 purple-pod、brown-pod、indigo-pod、blue-pod、red-pod、pink-pod、green-pod 等 Pod。说明文字(步骤标记 1-4):新进入的 Pod;etcd 是 k8s 对象持久化的地方,在这个例子中,它将保存关于新 Pod 的信息;kube-scheduler 监视未分配 nodeName 的新 Pod,一旦找到,就会更新 nodeName 键并对其进行调度;根据 nodeName 的值,kubelet 启动 Pod。]
    \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-best-effort.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-best-effort.svg new file mode 100644 index 0000000000000..e35b2f39509bb --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-best-effort.svg @@ -0,0 +1,395 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-limit.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-limit.svg new file mode 100644 index 0000000000000..a2ba00c58fd4e --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-limit.svg @@ -0,0 +1,1072 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-no-limits.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-no-limits.svg new file mode 100644 index 0000000000000..57b207b80a0be --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high-no-limits.svg @@ -0,0 +1,951 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high.svg new file mode 100644 index 0000000000000..4ba0b15957a28 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-high.svg @@ -0,0 +1,1195 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-max.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-max.svg new file mode 100644 index 0000000000000..5d4602069b957 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-max.svg @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-min.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-min.svg new file mode 100644 index 0000000000000..f9711a641c521 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/container-memory-min.svg @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md new file mode 100644 index 0000000000000..c61a6f08bb3d4 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/index.md @@ -0,0 +1,548 @@ +--- +layout: blog +title: 'Kubernetes 1.27:内存资源的服务质量(QoS)Alpha' +date: 2023-05-05 +slug: qos-memory-resources +--- + + + +**作者**:Dixita Narang (Google) + +**译者**:Wilson Wu (DaoCloud) + + +Kubernetes v1.27 于 2023 年 4 月发布,引入了对内存 QoS(Alpha)的更改,用于提高 Linux 节点中的内存管理功能。 + + +对内存 QoS 的支持最初是在 Kubernetes v1.22 中添加的,后来发现了关于计算 `memory.high` +公式的一些[不足](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos#reasons-for-changing-the-formula-of-memoryhigh-calculation-in-alpha-v127)。 +这些不足在 Kubernetes v1.27 中得到解决。 + + +## 背景 {#background} + + +Kubernetes 允许你在 Pod 规约中设置某容器对每类资源的需求。通常要设置的资源是 CPU 和内存。 + + +例如,定义容器资源需求的 Pod 清单可能如下所示: + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: example +spec: + containers: + - name: nginx + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "64Mi" + cpu: "500m" +``` + +* `spec.containers[].resources.requests` + + + 当你为 Pod 中的容器设置资源请求时, + [Kubernetes 调度器](/zh-cn/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler)使用此信息来决定将 Pod 放置在哪个节点上。 + 调度器确保对于每种资源类型,已调度容器的资源请求总和小于节点上可分配资源的总量。 + +* `spec.containers[].resources.limits` + + + 当你为 Pod 中的容器设置资源限制时,kubelet 会强制实施这些限制, + 以便运行的容器使用的资源不得超过你设置的限制。 + + +当 kubelet 将容器作为 Pod 的一部分启动时,kubelet 会将容器的 CPU 和内存请求和限制传递给容器运行时。 +容器运行时将 CPU 请求和 CPU 限制设置到容器上。如果系统有空闲的 CPU 时间, +就保证为容器分配它们请求的 CPU 数量。容器使用的 CPU 数量不能超过配置的限制, +即,如果容器在给定时间片内使用的 CPU 数量超过指定的限制,则容器的 CPU 使用率将受到限制。 + + +在内存 QoS 特性出现之前,容器运行时仅使用内存限制并忽略内存的 `request` +(请求值从前到现在一直被用于影响[调度](/zh-cn/docs/concepts/scheduling-eviction/#scheduling))。 +如果容器使用的内存超过所配置的限制,则会调用 Linux 内存不足(OOM)杀手机制。 + + +让我们比较一下在有和没有内存 QoS 特性时,Linux 上的容器运行时通常如何在 cgroup 中配置内存请求和限制: + + +* **内存请求** + + + 内存请求主要由 kube-scheduler 在(Kubernetes)Pod 调度时使用。 + 在 cgroups v1 中,没有任何控件来设置 cgroup 必须始终保留的最小内存量。 + 因此,容器运行时不使用 Pod 规约中设置的内存请求值。 + + + cgroups v2 中引入了一个 `memory.min` 设置,用于设置给定 cgroup 中的进程确定可用的最小内存量。 + 如果 cgroup 的内存使用量在其有效最小边界内,则该 cgroup 的内存在任何情况下都不会被回收。 + 如果内核无法为 cgroup 中的进程维护至少 `memory.min` 字节的内存,内核将调用其 OOM 杀手机制。 + 换句话说,内核保证至少有这么多内存可用,或者终止进程(可能在 cgroup 之外)以腾出更多内存。 + MemoryQoS 机制将 `memory.min` 映射到 `spec.containers[].resources.requests.memory`, + 以确保 Kubernetes Pod 中容器的内存可用性。 + + +* **内存限制** + + + `memory.limit` 指定内存限制,如果容器尝试分配更多内存,超出该限制, + Linux 内核将通过 OOM(内存不足)来杀死并终止进程。如果终止的进程是容器内的主 + (或唯一)进程,则容器可能会退出。 + + + 在 cgroups v1 中,`memory.limit_in_bytes` 接口用于设置内存用量限制。 + 然而,与 CPU 不同的是,内存用量是无法抑制的:一旦容器超过内存限制,它就会被 OOM 杀死。 + + + 在 cgroups v2 中,`memory.max` 类似于 cgroupv1 中的 `memory.limit_in_bytes`。 + MemoryQoS 机制将 `memory.max` 映射到 `spec.containers[].resources.limits.memory` + 
以设置内存用量的硬性限制。如果内存消耗超过此水平,内核将调用其 OOM 杀手机制。 + + + cgroups v2 中还添加了 `memory.high` 配置。MemoryQoS 机制使用 `memory.high` 来设置内存用量抑制上限。 + 如果超出了 `memory.high` 限制,则违规的 cgroup 会受到抑制,并且内核会尝试回收内存,这可能会避免 OOM 终止。 + + +## 如何工作 {#how-it-works} + + +### Cgroups v2 内存控制器接口和 Kubernetes 容器资源映 {#cgroups-v2-memory-controller-interfaces-kubernetes-container-resources-mapping} + + +MemoryQoS 机制使用 cgroups v2 的内存控制器来保证 Kubernetes 中的内存资源。 +此特性使用的 cgroupv2 接口有: + +* `memory.max` +* `memory.min` +* `memory.high` + + +{{< figure src="/blog/2023/05/05/qos-memory-resources/memory-qos-cal.svg" title="内存 QoS 级别" alt="内存 QoS 级别" >}} + + +`memory.max` 映射到 Pod 规约中指定的 `limits.memory`。 +kubelet 和容器运行时在对应的 cgroup 中配置限制值。内核强制执行限制机制以防止容器用量超过所配置的资源限制。 +如果容器中的进程尝试消耗的资源超过所设置的限制值,内核将终止进程并报告内存不足(OOM)错误。 + + +{{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-max.svg" title="memory.max 映射到 limit.memory" alt="memory.max 映射到 limit.memory" >}} + + +`memory.min` 被映射到 `requests.memory`,这会导致内存资源被预留而永远不会被内核回收。 +这就是 MemoryQoS 机制确保 Kubernetes Pod 内存可用性的方式。 +如果没有不受保护的、可回收的内存,则内核会调用 OOM 杀手以提供更多可用内存。 + + +{{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-min.svg" title="memory.min 映射到 requests.memory" alt="memory.min 映射到 requests.memory" >}} + + +对于内存保护,除了原来的限制内存用量的方式之外,MemoryQoS 机制还会对用量接近其内存限制的工作负载进行抑制, +确保系统不会因内存使用的零星增加而不堪重负。当你启用 MemoryQoS 特性时, +KubeletConfiguration 中将提供一个新字段 `memoryThrottlingFactor`。默认设置为 0.9。 +`memory.high` 被映射到通过 `memoryThrottlingFactor`、`requests.memory` 和 `limits.memory` +计算得出的抑制上限,计算方法如下式所示,所得的值向下舍入到最接近的页面大小: + + +{{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high.svg" title="memory.high 公式" alt="memory.high 公式" >}} + + +**注意**:如果容器没有指定内存限制,则 `limits.memory` 将被替换为节点可分配内存的值。 + + +**总结:** + + + + + + + + + + + + + + + + + + + + + +
| 文件 | 描述 |
| ------ | ------ |
| `memory.max` | `memory.max` 指定允许容器使用的最大内存限制。如果容器内的进程尝试使用的内存量超过所配置的限制值,内核将终止该进程并报告内存不足(OOM)错误。此配置映射到 Pod 清单中指定的容器内存限制。 |
| `memory.min` | `memory.min` 指定 cgroup 必须始终保留的最小内存量,即系统永远不应回收的内存。如果没有可用的、未受保护的可回收内存,则会调用 OOM 终止程序。此配置映射到 Pod 清单中指定的容器内存请求。 |
| `memory.high` | `memory.high` 指定内存用量抑制上限。这是控制 cgroup 内存用量的主要机制。如果 cgroup 的内存使用量超过此处指定的上限,该 cgroup 中的进程将受到抑制,并被标记为面临较大的回收压力。Kubernetes 使用一个公式来计算 `memory.high`,该公式取决于容器的内存请求、内存限制或节点可分配内存(如果容器的内存限制为空)以及抑制因子。有关公式的更多详细信息,请参阅 [KEP-2570](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2570-memory-qos)。 |
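下面给出一个简化的计算示例(数值仅为示意):假设某容器的 `requests.memory` 为 64Mi、`limits.memory` 为 128Mi,`memoryThrottlingFactor` 取默认值 0.9,页大小按 4096 字节计,则按上述公式:

$memory.high = floor[(64Mi + 0.9 \times (128Mi - 64Mi)) / 4096] \times 4096 \approx 121.6Mi$

也就是说,该容器的内存用量达到约 121.6Mi 时即开始被抑制并触发内存回收,而不是等到 128Mi 的硬限制才触发 OOM。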
    + + +**注意**:`memory.high` 仅可在容器级别的 cgroups 上设置, +而 `memory.min` 则可在容器、Pod 和节点级别的 cgroups 上设置。 + + +### 针对 cgroup 层次结构的 `memory.min` 计算 {#memory-min-calculations-for-cgroups-heirarchy} + + +当发出容器内存请求时,kubelet 在创建容器期间通过 CRI 中的 `Unified` 字段将 `memory.min` +传递给后端 CRI 运行时(例如 containerd 或 CRI-O)。容器级别 cgroup 中的 `memory.min` 将设置为: + +$memory.min = pod.spec.containers[i].resources.requests[memory]$ + +对于 Pod 中每个 ith 容器 +
    +
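在已启用该特性且使用 cgroup v2 的节点上,可以通过读取容器 cgroup 目录下的接口文件来验证上述取值。下面是一个验证思路的示意(实际路径取决于 cgroup 驱动、容器运行时和 Pod 的 QoS 类别,`<pod-cgroup>` 与 `<container-cgroup>` 均为假设的占位符):

```shell
# 示例:Burstable Pod 的容器级别 cgroup(路径仅为示意)
CGROUP_DIR=/sys/fs/cgroup/kubepods.slice/kubepods-burstable.slice/<pod-cgroup>/<container-cgroup>

# memory.min 应等于该容器的内存请求(以字节为单位)
cat "$CGROUP_DIR/memory.min"

# memory.max 应等于该容器的内存限制(以字节为单位)
cat "$CGROUP_DIR/memory.max"

# memory.high 应等于按公式计算出的抑制上限
cat "$CGROUP_DIR/memory.high"
```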
+ +由于 `memory.min` 接口要求祖先 cgroups 目录全部被设置, +因此需要正确设置 Pod 和节点的 cgroups 目录。 + +Pod 级别 cgroup 中的 `memory.min`: + +$memory.min = \sum_{i=0}^{no. of containers}pod.spec.containers[i].resources.requests[memory]$ + +对于 Pod 中每个 ith 容器 +
    +
+ 节点级别 cgroup 中的 `memory.min`: + +$memory.min = \sum_{i}^{no. of pods}\sum_{j}^{no. of containers}pod[i].spec.containers[j].resources.requests[memory]$ + +对于节点中每个 ith Pod 中的每个 jth 容器 +
    +
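举一个简单的数值例子(仅为示意):若某 Pod 包含两个容器,内存请求分别为 64Mi 和 128Mi,则该 Pod 级别 cgroup 的 `memory.min` 为:

$memory.min = 64Mi + 128Mi = 192Mi$

节点级别 cgroup 的 `memory.min` 则是该节点上所有 Pod 的容器内存请求之和。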
    + +Kubelet 将直接使用 libcontainer 库(来自 runc 项目)管理 Pod 级别和节点级别 +cgroups 的层次结构,而容器 cgroups 限制由容器运行时管理。 + + +### 支持 Pod QoS 类别 {#support-for-pod-qos-classes} + + +根据用户对 Kubernetes v1.22 中 Alpha 特性的反馈,一些用户希望在 Pod 层面选择不启用 MemoryQoS, +以确保不会出现早期内存抑制现象。因此,在 Kubernetes v1.27 中 MemoryQoS 还支持根据 +服务质量(QoS)对 Pod 类设置 memory.high。以下是按 QoS 类设置 memory.high 的几种情况: + + +1. **Guaranteed Pods**:根据其 QoS 定义,要求 Pod 的内存请求等于其内存限制,并且不允许超配。 + 因此,通过不设置 memory.high,MemoryQoS 特性会针对这些 Pod 被禁用。 + 这样做可以确保 **Guaranteed Pod** 充分利用其内存请求,也就是其内存限制,并且不会被抑制。 + + +2. **Burstable Pod**:根据其 QoS 定义,要求 Pod 中至少有一个容器具有 CPU 或内存请求或限制设置。 + + + * 当 requests.memory 和 limits.memory 都被设置时,公式按原样使用: + + + {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high-limit.svg" title="当请求和限制被设置时的 memory.high" alt="当请求和限制被设置时的 memory.high" >}} + + + * 当设置了 requests.memory 但未设置 limits.memory 时,公式中的 limits.memory 替换为节点可分配内存: + + + {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high-no-limits.svg" title="当请求和限制未被设置时的 memory.high" alt="当请求和限制未被设置时的 memory.high" >}} + + +3. **BestEffort Pod**:根据其 QoS 定义,不需要设置内存或 CPU 限制或请求。对于这种情况, + kubernetes 设置 requests.memory = 0 并将公式中的 limits.memory 替换为节点可分配内存: + + + {{< figure src="/blog/2023/05/05/qos-memory-resources/container-memory-high-best-effort.svg" title="BestEffort Pod 的 memory.high" alt="BestEffort Pod 的 memory.high" >}} + + +**总结**:只有 Burstable 和 BestEffort QoS 类别中的 Pod 才会设置 `memory.high`。 +Guaranteed QoS 的 Pod 不会设置 `memory.high`,因为它们的内存是有保证的。 + + +## 我该如何使用它? {#how-do-i-use-it} + + +在 Linux 节点上启用 MemoryQoS 特性的先决条件是: + + +1. 验证是否满足 + [Kubernetes 对 cgroup v2 支持](/zh-cn/docs/concepts/architecture/cgroups)的相关[要求](/zh-cn/docs/concepts/architecture/cgroups/#requirements)。 + +2. 确保 CRI 运行时支持内存 QoS。在撰写本文时, + 只有 Containerd 和 CRI-O 提供与内存 QoS(alpha)兼容的支持。是在以下 PR 中实现的: + * Containerd:[Feature: containerd-cri support LinuxContainerResources.Unified #5627](https://github.com/containerd/containerd/pull/5627)。 + * CRI-O:[implement kube alpha features for 1.22 #5207](https://github.com/cri-o/cri-o/pull/5207)。 + + +MemoryQoS 在 Kubernetes v1.27 中仍然是 Alpha 特性。 +你可以通过在 kubelet 配置文件中设置 `MemoryQoS=true` 来启用该特性: + +```yaml +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +featureGates: + MemoryQoS: true +``` + + +## 我如何参与? 
{#how-do-i-get-involved} + + +非常感谢所有帮助设计、实施和审查此功能的贡献者: + +* Dixita Narang ([ndixita](https://github.com/ndixita)) +* Tim Xu ([xiaoxubeii](https://github.com/xiaoxubeii)) +* Paco Xu ([pacoxu](https://github.com/pacoxu)) +* David Porter([bobbypage](https://github.com/bobbypage)) +* Mrunal Patel([mrunalp](https://github.com/mrunalp)) + + +对于那些有兴趣参与未来内存 QoS 特性讨论的人,你可以通过多种方式联系 SIG Node: + + +- Slack:[#sig-node](https://kubernetes.slack.com/messages/sig-node) +- [邮件列表](https://groups.google.com/forum/#!forum/kubernetes-sig-node) +- [开放社区 Issue/PR](https://github.com/kubernetes/community/labels/sig%2Fnode) diff --git a/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/memory-qos-cal.svg b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/memory-qos-cal.svg new file mode 100644 index 0000000000000..a85a2b1ea257b --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-05-05-memory-qos-cgroups-v2/memory-qos-cal.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/zh-cn/blog/_posts/2023-07-06-confidential-kubernetes.md b/content/zh-cn/blog/_posts/2023-07-06-confidential-kubernetes.md index 0898a03d9cf74..7ff90e03b1913 100644 --- a/content/zh-cn/blog/_posts/2023-07-06-confidential-kubernetes.md +++ b/content/zh-cn/blog/_posts/2023-07-06-confidential-kubernetes.md @@ -256,11 +256,11 @@ SEV 还可以计算内存内容的签名,该签名可以作为证明初始客 -SEV 的第二代,称为[加密状态](https://www.amd.com/system/files/TechDocs/Protecting%20VM%20Register%20State%20with%20SEV-ES.pdf) +SEV 的第二代,称为[加密状态](https://www.amd.com/content/dam/amd/en/documents/epyc-business-docs/white-papers/Protecting-VM-Register-State-with-SEV-ES.pdf) 或 SEV-ES,通过在发生上下文切换时加密所有 CPU 寄存器内容,提供了对虚拟机管理程序的额外保护。 + + +**作者**:Marko Mudrinić (Kubermatic) + +**译者**:Wilson Wu (DaoCloud) + + +我很高兴代表 Kubernetes SIG Release 介绍 Kubernetes +社区自有的 Debian 和 RPM 软件仓库:`pkgs.k8s.io`! +这些全新的仓库取代了我们自 Kubernetes v1.5 以来一直使用的托管在 +Google 的仓库(`apt.kubernetes.io` 和 `yum.kubernetes.io`)。 + + +这篇博文包含关于这些新的包仓库的信息、它对最终用户意味着什么以及如何迁移到新仓库。 + + +**ℹ️ 更新(2023 年 8 月 31 日):旧版托管在 Google 的仓库已被弃用,并将于 2023 年 9 月 13 日开始被冻结。** +查看[弃用公告](/zh-cn/blog/2023/08/31/legacy-package-repository-deprecation/)了解有关此更改的更多详细信息。 + + +## 关于新的包仓库,你需要了解哪些信息? {#what-you-need-to-know-about-the-new-package-repositories} + + +**(更新于 2023 年 8 月 31 日)** + + +- 这是一个**明确同意的更改**;你需要手动从托管在 Google 的仓库迁移到 + Kubernetes 社区自有的仓库。请参阅本公告后面的[如何迁移](#how-to-migrate), + 了解迁移信息和说明。 + +- 旧版托管在 Google 的仓库**自 2023 年 8 月 31 日起被弃用**, + 并将**于 2023 年 9 月 13 日左右被冻结**。 + 冻结将在计划于 2023 年 9 月发布补丁之后立即发生。 + 冻结旧仓库意味着我们在 2023 年 9 月 13 日这个时间点之后仅将 Kubernetes + 项目的包发布到社区自有的仓库。有关此更改的更多详细信息, + 请查看[弃用公告](/zh-cn/blog/2023/08/31/legacy-package-repository-deprecation/)。 + +- 旧仓库中的现有包将在可预见的未来一段时间内可用。 + 然而,Kubernetes 项目无法保证这会持续多久。 + 已弃用的旧仓库及其内容可能会在未来随时被删除,恕不另行通知。 + + +- 鉴于在 2023 年 9 月 13 日这个截止时间点之后不会向旧仓库发布任何新版本, + 如果你不在该截止时间点迁移至新的 Kubernetes 仓库, + 你将无法升级到该日期之后发布的任何补丁或次要版本。 + 也就是说,我们建议**尽快**迁移到新的 Kubernetes 仓库。 + +- 新的 Kubernetes 仓库中包含社区开始接管包构建以来仍在支持的 Kubernetes 版本的包。 + 这意味着 v1.24.0 之前的任何内容都只存在于托管在 Google 的仓库中。 + +- 每个 Kubernetes 次要版本都有一个专用的仓库。 + 当升级到不同的次要版本时,你必须记住,仓库详细信息也会发生变化。 + + +## 为什么我们要引入新的包仓库? 
{#why-are-we-introducing-new-package-repositories} + + +随着 Kubernetes 项目的不断发展,我们希望确保最终用户获得最佳体验。 +托管在 Google 的仓库多年来一直为我们提供良好的服务, +但我们开始面临一些问题,需要对发布包的方式进行重大变更。 +我们的另一个目标是对所有关键组件使用社区拥有的基础设施,其中包括仓库。 + + +将包发布到托管在 Google 的仓库是一个手动过程, +只能由名为 [Google 构建管理员](/zh-cn/releases/release-managers/#build-admins)的 Google 员工团队来完成。 +[Kubernetes 发布管理员团队](/zh-cn/releases/release-managers/#release-managers)是一个非常多元化的团队, +尤其是在我们工作的时区方面。考虑到这一限制,我们必须对每个版本进行非常仔细的规划, +确保我们有发布经理和 Google 构建管理员来执行发布。 + + +另一个问题是由于我们只有一个包仓库。因此,我们无法发布预发行版本 +(Alpha、Beta 和 RC)的包。这使得任何有兴趣测试的人都更难测试 Kubernetes 预发布版本。 +我们从测试这些版本的人员那里收到的反馈对于确保版本的最佳质量至关重要, +因此我们希望尽可能轻松地测试这些版本。最重要的是,只有一个仓库限制了我们对 +`cri-tools` 和 `kubernetes-cni` 等依赖进行发布, + + +尽管存在这些问题,我们仍非常感谢 Google 和 Google 构建管理员这些年来的参与、支持和帮助! + + +## 新的包仓库如何工作? {#how-the-new-package-repositories-work} + + +新的 Debian 和 RPM 仓库托管在 `pkgs.k8s.io`。 +目前,该域指向一个 CloudFront CDN,其后是包含仓库和包的 S3 存储桶。 +然而,我们计划在未来添加更多的镜像站点,让其他公司有可能帮助我们提供软件包服务。 + + +包通过 [OpenBuildService(OBS)平台](http://openbuildservice.org)构建和发布。 +经过长时间评估不同的解决方案后,我们决定使用 OpenBuildService 作为管理仓库和包的平台。 +首先,OpenBuildService 是一个开源平台,被大量开源项目和公司使用, +如 openSUSE、VideoLAN、Dell、Intel 等。OpenBuildService 具有许多功能, +使其非常灵活且易于与我们现有的发布工具集成。 +它还允许我们以与托管在 Google 的仓库类似的方式构建包,从而使迁移过程尽可能无缝。 + + +SUSE 赞助 Kubernetes 项目并且支持访问其引入的 OpenBuildService 环境 +([`build.opensuse.org`](http://build.opensuse.org)), +还提供将 OBS 与我们的发布流程集成的技术支持。 + + +我们使用 SUSE 的 OBS 实例来构建和发布包。构建新版本后, +我们的工具会自动将所需的制品和包设置推送到 `build.opensuse.org`。 +这将触发构建过程,为所有支持的架构(AMD64、ARM64、PPC64LE、S390X)构建包。 +最后,生成的包将自动推送到我们社区拥有的 S3 存储桶,以便所有用户都可以使用它们。 + + +我们想借此机会感谢 SUSE 允许我们使用 `build.opensuse.org` +以及他们的慷慨支持,使这种集成成为可能! + + +## 托管在 Google 的仓库和 Kubernetes 仓库之间有哪些显著差异? {#what-are-significant-differences-between-the-google-hosted-and-kubernetes-package-repositories} + + +你应该注意三个显著差异: + + +- 每个 Kubernetes 次要版本都有一个专用的仓库。例如, + 名为 `core:/stable:/v1.28` 的仓库仅托管稳定 Kubernetes v1.28 版本的包。 + 这意味着你可以从此仓库安装 v1.28.0,但无法安装 v1.27.0 或 v1.28 之外的任何其他次要版本。 + 升级到另一个次要版本后,你必须添加新的仓库并可以选择删除旧的仓库 + +- 每个 Kubernetes 仓库中可用的 `cri-tools` 和 `kubernetes-cni` 包版本有所不同 + - 这两个包是 `kubelet` 和 `kubeadm` 的依赖项 + - v1.24 到 v1.27 的 Kubernetes 仓库与托管在 Google 的仓库具有这些包的相同版本 + - v1.28 及更高版本的 Kubernetes 仓库将仅发布该 Kubernetes 次要版本 + - 就 v1.28 而言,Kubernetes v1.28 的仓库中仅提供 kubernetes-cni 1.2.0 和 cri-tools v1.28 + - 与 v1.29 类似,我们只计划发布 cri-tools v1.29 以及 Kubernetes v1.29 将使用的 kubernetes-cni 版本 + +- 包版本的修订部分(`1.28.0-00` 中的 `-00` 部分)现在由 OpenBuildService + 平台自动生成,并具有不同的格式。修订版本现在采用 `-x.y` 格式,例如 `1.28.0-1.1` + + +## 这是否会影响现有的托管在 Google 的仓库? {#does-this-in-any-way-affect-existing-google-hosted-repositories} + + +托管在 Google 的仓库以及发布到其中的所有包仍然可用,与之前一样。 +我们构建包并将其发布到托管在 Google 仓库的方式没有变化, +所有新引入的更改仅影响发布到社区自有仓库的包。 + + +然而,正如本文开头提到的,我们计划将来停止将包发布到托管在 Google 的仓库。 + + +## 如何迁移到 Kubernetes 社区自有的仓库? {#how-to-migrate} + + +### 使用 `apt`/`apt-get` 的 Debian、Ubuntu 一起其他操作系统 {#how-to-migrate-deb} + + +1. 替换 `apt` 仓库定义,以便 `apt` 指向新仓库而不是托管在 Google 的仓库。 + 确保将以下命令中的 Kubernetes 次要版本替换为你当前使用的次要版本: + + ```shell + echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list + ``` + + +2. 下载 Kubernetes 仓库的公共签名密钥。所有仓库都使用相同的签名密钥, + 因此你可以忽略 URL 中的版本: + + ```shell + curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + ``` + + +3. 更新 `apt` 包索引: + + ```shell + sudo apt-get update + ``` + + +### 使用 `rpm`/`dnf` 的 CentOS、Fedora、RHEL 以及其他操作系统 {#how-to-migrate-rpm} + + +1. 
替换 `yum` 仓库定义,使 `yum` 指向新仓库而不是托管在 Google 的仓库。 + 确保将以下命令中的 Kubernetes 次要版本替换为你当前使用的次要版本: + + ```shell + cat < +## 迁移到 Kubernetes 仓库后是否可以回滚到托管在 Google 的仓库? {#can-i-rollback-to-the-google-hosted-repository-after-migrating-to-the-kubernetes-repositories} + + +一般来说,可以。只需执行与迁移时相同的步骤,但使用托管在 Google 的仓库参数。 +你可以在[“安装 kubeadm”](/zh-cn/docs/setup/production-environment/tools/kubeadm/install-kubeadm)等文档中找到这些参数。 + + +## 为什么没有固定的域名/IP 列表?为什么我无法限制包下载? {#why-isn-t-there-a-stable-list-of-domains-ips-why-can-t-i-restrict-package-downloads} + + +我们对 `pkgs.k8s.io` 的计划是使其根据用户位置充当一组后端(包镜像)的重定向器。 +此更改的本质意味着下载包的用户可以随时重定向到任何镜像。 +鉴于架构和我们计划在不久的将来加入更多镜像,我们无法提供给你可以添加到允许列表中的 +IP 地址或域名列表。 + + +限制性控制机制(例如限制访问特定 IP/域名列表的中间人代理或网络策略)将随着此更改而中断。 +对于这些场景,我们鼓励你将包的发布版本与你可以严格控制的本地仓库建立镜像。 + + +## 如果我发现新的仓库有异常怎么办? {#what-should-i-do-if-i-detect-some-abnormality-with-the-new-repositories} + + +如果你在新的 Kubernetes 仓库中遇到任何问题, +请在 [`kubernetes/release` 仓库](https://github.com/kubernetes/release/issues/new/choose)中提交问题。 diff --git a/content/zh-cn/blog/_posts/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades.md b/content/zh-cn/blog/_posts/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades.md index 3dc95e7699ca8..353d4cd7112b5 100644 --- a/content/zh-cn/blog/_posts/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades.md +++ b/content/zh-cn/blog/_posts/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades.md @@ -55,7 +55,7 @@ apiserver 中;因此,它会收到 404("Not Found")的响应报错,这 --> ## 如何解决此问题? -{{< figure src="/images/blog/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades/mvp-flow-diagram.svg" class="diagram-large" >}} +{{< figure src="/images/blog/2023-08-28-a-new-alpha-mechanism-for-safer-cluster-upgrades/mvp-flow-diagram_zh.svg" class="diagram-large" >}} ## 我可以继续使用旧软件包仓库吗? {#can-i-continue-to-use-the-legacy-package-repositories} -旧仓库中的现有软件包将在可预见的未来内保持可用。然而, +~~旧仓库中的现有软件包将在可预见的未来内保持可用。然而, Kubernetes 项目无法对这会持续多久提供**任何**保证。 -已弃用的旧仓库及其内容可能会在未来随时删除,恕不另行通知。 +已弃用的旧仓库及其内容可能会在未来随时删除,恕不另行通知。~~ + +**更新**: 旧版软件包预计将于 2024 年 1 月被删除。 1. **增强了弹性**:etcd learner 节点是非投票成员,在完全进入角色之前会追随领导者的日志。 这样可以防止新的集群成员干扰投票结果或引起领导者选举,从而使集群在成员变更期间更具弹性。 -2. **减少了集群不可用时间**:传统的添加新成员的方法通常会造成一段时间集群不可用,特别是在基础设施迟缓或误配的情况下更为明显。 +1. **减少了集群不可用时间**:传统的添加新成员的方法通常会造成一段时间集群不可用,特别是在基础设施迟缓或误配的情况下更为明显。 而 etcd learner 模式可以最大程度地减少此类干扰。 -3. **简化了维护**:learner 节点提供了一种更安全、可逆的方式来添加或替换集群成员。 +1. **简化了维护**:learner 节点提供了一种更安全、可逆的方式来添加或替换集群成员。 这降低了由于误配或在成员添加过程中出错而导致集群意外失效的风险。 -4. **改进了网络容错性**:在涉及网络分区的场景中,learner 模式允许更优雅的处理。 +1. **改进了网络容错性**:在涉及网络分区的场景中,learner 模式允许更优雅的处理。 根据新成员所落入的分区,它可以无缝地与现有集群集成,而不会造成中断。 要检查 Kubernetes 控制平面是否健康,运行 `kubectl get node -l node-role.kubernetes.io/control-plane=` 并检查节点是否就绪。 -注意:建议在 etcd 集群中的成员个数为奇数。 +{{< note >}} + +建议在 etcd 集群中的成员个数为奇数。 +{{< /note >}} + 在将工作节点接入新的 Kubernetes 集群之前,确保控制平面节点健康。 ## 接下来的步骤 {#whats-next} @@ -190,7 +194,9 @@ Before joining a worker node to the new Kubernetes cluster, ensure that the cont Was this guide helpful? If you have any feedback or encounter any issues, please let us know. Your feedback is always welcome! Join the bi-weekly [SIG Cluster Lifecycle meeting](https://docs.google.com/document/d/1Gmc7LyCIL_148a9Tft7pdhdee0NBHdOfHS1SAF0duI4/edit) -or weekly [kubeadm office hours](https://docs.google.com/document/d/130_kiXjG7graFNSnIAgtMS1G8zPDwpkshgfRYS0nggo/edit). Or reach us via [Slack](https://slack.k8s.io/) (channel **#kubeadm**), or the [SIG's mailing list](https://groups.google.com/g/kubernetes-sig-cluster-lifecycle). 
+or weekly [kubeadm office hours](https://docs.google.com/document/d/130_kiXjG7graFNSnIAgtMS1G8zPDwpkshgfRYS0nggo/edit). +Or reach us via [Slack](https://slack.k8s.io/) (channel **#kubeadm**), or the +[SIG's mailing list](https://groups.google.com/g/kubernetes-sig-cluster-lifecycle). --> ## 反馈 {#feedback} diff --git a/content/zh-cn/blog/_posts/2023-10-02-steering-committee-results-2023.md b/content/zh-cn/blog/_posts/2023-10-02-steering-committee-results-2023.md new file mode 100644 index 0000000000000..092eec2101ef7 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-10-02-steering-committee-results-2023.md @@ -0,0 +1,130 @@ +--- +layout: blog +title: "公布 2023 年指导委员会选举结果" +date: 2023-10-02 +slug: steering-committee-results-2023 +--- + + + + +**作者**:Kaslin Fields + +**译者**:Xin Li(DaoCloud) + + +[2023 年指导委员会选举](https://github.com/kubernetes/community/tree/master/events/elections/2023)现已完成。 +Kubernetes 指导委员会由 7 个席位组成,其中 4 个席位于 2023 年进行选举。 +新任委员会成员的任期为 2 年,所有成员均由 Kubernetes 社区选举产生。 + + +这个社区机构非常重要,因为它负责监督整个 Kubernetes 项目的治理。 +权力越大责任越大,你可以在其 +[章程](https://github.com/kubernetes/steering/blob/master/charter.md)中了解有关指导委员会角色的更多信息。 + + +感谢所有在选举中投票的人;你们的参与有助于支持社区的持续健康和成功。 + + +## 结果 + +祝贺当选的委员会成员,其两年任期立即开始(按 GitHub 名称字母顺序列出): + +* **Stephen Augustus ([@justaugustus](https://github.com/justaugustus)), Cisco** +* **Paco Xu 徐俊杰 ([@pacoxu](https://github.com/pacoxu)), DaoCloud** +* **Patrick Ohly ([@pohly](https://github.com/pohly)), Intel** +* **Maciej Szulik ([@soltysh](https://github.com/soltysh)), Red Hat** + + +他们将与一下连任成员一起工作: + +* **Benjamin Elder ([@bentheelder](https://github.com/bentheelder)), Google** +* **Bob Killen ([@mrbobbytables](https://github.com/mrbobbytables)), Google** +* **Nabarun Pal ([@palnabarun](https://github.com/palnabarun)), VMware** + + +Stephen Augustus 是回归的指导委员会成员。 + + +## 十分感谢! + +感谢并祝贺本轮选举官员成功完成选举工作: + +* Bridget Kromhout ([@bridgetkromhout](https://github.com/bridgetkromhout)) +* Davanum Srinavas ([@dims](https://github.com/dims)) +* Kaslin Fields ([@kaslin](https://github.com/kaslin)) + + +感谢名誉指导委员会成员,你们的服务受到社区的赞赏: + +* Christoph Blecker ([@cblecker](https://github.com/cblecker)) +* Carlos Tadeu Panato Jr. 
([@cpanato](https://github.com/cpanato)) +* Tim Pepper ([@tpepper](https://github.com/tpepper)) + + +感谢所有前来竞选的候选人。 + + +## 参与指导委员会 + +你可以关注指导委员会[积压的项目](https://github.com/orgs/kubernetes/projects/40), +并通过提交 Issue 或针对其 [repo](https://github.com/kubernetes/steering) 创建 PR 来参与。 +他们在[太平洋时间每月第一个周一上午 9:30](https://github.com/kubernetes/steering) 举行开放的会议。 +你还可以通过其公共邮件列表 steering@kubernetes.io 与他们联系。 + + +你可以通过在 [YouTube 播放列表](https://www.youtube.com/playlist?list=PL69nYSiGNLP1yP1B_nd9-drjoxp0Q14qM)上观看过去的会议来了解指导委员会会议的全部内容。 + +如果你想认识一些新当选的指导委员会成员,请参加我们在[芝加哥 Kubernetes 贡献者峰会](https://k8s.dev/summit)举行的 Steering AMA。 + +--- + + +**这篇文章是由[贡献者通信子项目](https://github.com/kubernetes/community/tree/master/communication/contributor-comms)撰写的。 +如果你想撰写有关 Kubernetes 社区的故事,请了解有关我们的更多信息。** diff --git a/content/zh-cn/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md b/content/zh-cn/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md new file mode 100644 index 0000000000000..99aefd4cce49c --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-10-05-sig-architecture-conformance-spotlight.md @@ -0,0 +1,341 @@ +--- +layout: blog +title: "聚焦 SIG Architecture: Conformance" +slug: sig-architecture-conformance-spotlight-2023 +date: 2023-10-05 +--- + + + +**作者**:Frederico Muñoz (SAS Institute) + +**译者**:[Michael Yao](https://github.com/windsonsea) (DaoCloud) + + +**这是 SIG Architecture 焦点访谈系列的首次采访,这一系列访谈将涵盖多个子项目。 +我们从 SIG Architecture:Conformance 子项目开始。** + +在本次 [SIG Architecture](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md) +访谈中,我们与 [Riaan Kleinhans](https://github.com/Riaankl) (ii-Team) 进行了对话,他是 +[Conformance 子项目](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md#conformance-definition-1)的负责人。 + + +## 关于 SIG Architecture 和 Conformance 子项目 + +**Frederico (FSM)**:你好 Riaan,欢迎!首先,请介绍一下你自己,你的角色以及你是如何参与 Kubernetes 的。 + +**Riaan Kleinhans (RK)**:嗨!我叫 Riaan Kleinhans,我住在南非。 +我是新西兰 [ii-Team](ii.nz) 的项目经理。在我加入 ii 时,本来计划在 2020 年 4 月搬到新西兰, +然后新冠疫情爆发了。幸运的是,作为一个灵活和富有活力的团队,我们能够在各个不同的时区以远程方式协作。 + + +ii 团队负责管理 Kubernetes Conformance 测试的技术债务,并编写测试内容来消除这些技术债务。 +我担任项目经理的角色,成为监控、测试内容编写和社区之间的桥梁。通过这项工作,我有幸在最初的几个月里结识了 +[Dan Kohn](https://github.com/dankohn),他对我们的工作充满热情,给了我很大的启发。 + + +**FSM**:谢谢!所以,你参与 SIG Architecture 是因为合规性的工作? + +**RK**:SIG Architecture 负责管理 Kubernetes Conformance 子项目。 +最初,我大部分时间直接与 SIG Architecture 交流 Conformance 子项目。 +然而,随着我们开始按 SIG 来组织工作任务,我们开始直接与各个 SIG 进行协作。 +与拥有未被测试的 API 的这些 SIG 的协作帮助我们加快了工作进度。 + + +**FSM**:你如何描述 Conformance 子项目的主要目标和介入的领域? + +**RM**: Kubernetes Conformance 子项目专注于通过开发和维护全面的合规性测试套件来确保兼容性并遵守 +Kubernetes 规范。其主要目标包括确保不同 Kubernetes 实现之间的兼容性,验证 API 规范的遵守情况, +通过鼓励合规性认证来支持生态体系,并促进 Kubernetes 社区内的合作。 +通过提供标准化的测试并促进一致的行为和功能, +Conformance 子项目为开发人员和用户提供了一个可靠且兼容的 Kubernetes 生态体系。 + + +## 关于 Conformance Test Suite 的更多内容 + +**FSM**:我认为,提供这些标准化测试的一部分工作在于 +[Conformance Test Suite](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/conformance-tests.md)。 +你能解释一下它是什么以及其重要性吗? + +**RK**:Kubernetes Conformance Test Suite 检查 Kubernetes 发行版是否符合项目的规范, +确保在不同的实现之间的兼容性。它涵盖了诸如 API、联网、存储、调度和安全等各个特性。 +能够通过测试,则表示实现合理,便于推动构建一致且可移植的容器编排平台。 + + +**FSM**:是的,这些测试很重要,因为它们定义了所有 Kubernetes 集群必须支持的最小特性集合。 +你能描述一下决定将哪些特性包含在内的过程吗?在最小特性集的思路与其他 SIG 提案之间是否有所冲突? 
+ +**RK**:SIG Architecture 针对经受合规性测试的每个端点的要求,都有明确的定义。 +API 端点只有正式发布且不是可选的特性,才会被(进一步)考虑是否合规。 +多年来,关于合规性配置文件已经进行了若干讨论, +探讨将被大多数终端用户广泛使用的可选端点(例如 RBAC)纳入特定配置文件中的可能性。 +然而,这一方面仍在不断改进中。 + + +不满足合规性标准的端点被列在 +[ineligible_endpoints.yaml](https://github.com/kubernetes/kubernetes/blob/master/test/conformance/testdata/ineligible_endpoints.yaml) 中, +该文件放在 Kubernetes 代码仓库中,是被公开访问的。 +随着这些端点的状态或要求发生变化,此文件可能会被更新以添加或删除端点。 +不合格的端点也可以在 [APISnoop](https://apisnoop.cncf.io/) 上看到。 + +对于 SIG Architecture 来说,确保透明度并纳入社区意见以确定端点的合格或不合格状态是至关重要的。 + + +**FSM**:为新特性编写测试内容通常需要某种强制执行方式。 +你如何看待 Kubernetes 中这方面的演变?是否有人在努力改进这个流程, +使得必须具备测试成为头等要务,或许这从来都不是一个问题? + +**RK**:在 2018 年开始围绕 Kubernetes 合规性计划进行讨论时,只有大约 11% 的端点被测试所覆盖。 +那时,CNCF 的管理委员会提出一个要求,如果要提供资金覆盖缺失的合规性测试,Kubernetes 社区应采取一个策略, +即如果新特性没有包含稳定 API 的合规性测试,则不允许添加此特性。 + + +SIG Architecture 负责监督这一要求,[APISnoop](https://apisnoop.cncf.io/) +在此方面被证明是一个非常有价值的工具。通过自动化流程,APISnoop 在每个周末生成一个 PR, +以突出 Conformance 覆盖范围的变化。如果有端点在没有进行合规性测试的情况下进阶至正式发布, +将会被迅速识别发现。这种方法有助于防止积累新的技术债务。 + +此外,我们计划在不久的将来创建一个发布通知任务,作用是添加额外一层防护,以防止产生新的技术债务。 + + +**FSM**:我明白了,工具化和自动化在其中起着重要的作用。 +在你看来,就合规性而言,还有哪些领域需要做一些工作? +换句话说,目前标记为优先改进的领域有哪些? + +**RK**:在 1.27 版本中,我们已完成了 “100% 合规性测试” 的里程碑! + + +当时,社区重新审视了所有被列为不合规的端点。这个列表是收集多年的社区意见后填充的。 +之前被认为不合规的几个端点已被挑选出来并迁移到一个新的专用列表中, +该列表中包含目前合规性测试开发的焦点。同样,可以在 apisnoop.cncf.io 上查阅此列表。 + + +为了确保在合规性项目中避免产生新的技术债务,我们计划建立一个发布通知任务作为额外的预防措施。 + +虽然 APISnoop 目前被托管在 CNCF 基础设施上,但此项目已慷慨地捐赠给了 Kubernetes 社区。 +因此,它将在 2023 年底之前转移到社区自治的基础设施上。 + + +**FSM**:这是个好消息!对于想要提供帮助的人们,你能否重点说明一下协作的价值所在? +参与贡献是否需要对 Kubernetes 有很扎实的知识,或否有办法让一些新人也能为此项目做出贡献? + +**RK**:参与合规性测试就像 "洗碗" 一样,它可能不太显眼,但仍然非常重要。 +这需要对 Kubernetes 有深入的理解,特别是在需要对端点进行测试的领域。 +这就是为什么与负责测试 API 端点的每个 SIG 进行协作会如此重要。 + + +我们的承诺是让所有人都能参与测试内容编写,作为这一承诺的一部分, +ii 团队目前正在开发一个 “点击即部署(click and deploy)” 的解决方案。 +此解决方案旨在使所有人都能在几分钟内快速创建一个在真实硬件上工作的环境。 +我们将在准备好后分享有关此项开发的更新。 + + +**FSM**:那会非常有帮助,谢谢。最后你还想与我们的读者分享些什么见解吗? 
+ +**RK**:合规性测试是一个协作性的社区工作,涉及各个 SIG 之间的广泛合作。 +SIG Architecture 在推动倡议并提供指导方面起到了领头作用。然而, +工作的进展在很大程度上依赖于所有 SIG 在审查、增强和认可测试方面的支持。 + + +我要衷心感谢 ii 团队多年来对解决技术债务的坚定承诺。 +特别要感谢 [Hippie Hacker](https://github.com/hh) 的指导和对愿景的引领作用,这是非常宝贵的。 +此外,我还要特别表扬 Stephen Heywood 在最近几个版本中承担了大部分测试内容编写工作而做出的贡献, +还有 Zach Mandeville 对 APISnoop 也做了很好的贡献。 + + +**FSM**:非常感谢你参加本次访谈并分享你的深刻见解,我本人从中获益良多,我相信读者们也会同样受益。 diff --git a/content/zh-cn/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md b/content/zh-cn/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md new file mode 100644 index 0000000000000..af91a47f32952 --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-10-10-cri-o-community-package-infrastructure.md @@ -0,0 +1,284 @@ +--- +layout: blog +title: "CRI-O 正迁移至 pkgs.k8s.io" +date: 2023-10-10 +slug: cri-o-community-package-infrastructure +--- + + + +**作者**:Sascha Grunert + + +**译者**:Wilson Wu (DaoCloud) + + +Kubernetes 社区[最近宣布](/zh-cn/blog/2023/08/31/legacy-package-repository-deprecation/)旧的软件包仓库已被冻结, +现在这些软件包将被迁移到由 [OpenBuildService(OBS)](https://build.opensuse.org/project/subprojects/isv:kubernetes) +提供支持的[社区自治软件包仓库](/blog/2023/08/15/pkgs-k8s-io-introduction)中。 +很久以来,CRI-O 一直在利用 [OBS 进行软件包构建](https://github.com/cri-o/cri-o/blob/e292f17/install.md#install-packaged-versions-of-cri-o), +但到目前为止,所有打包工作都是手动完成的。 + + +CRI-O 社区非常喜欢 Kubernetes,这意味着他们很高兴地宣布: + + +**所有未来的 CRI-O 包都将作为在 pkgs.k8s.io 上托管的官方支持的 Kubernetes 基础设施的一部分提供!** + + +现有软件包将进入一个弃用阶段,目前正在 +[CRI-O 社区中讨论](https://github.com/cri-o/cri-o/discussions/7315)。 +新的基础设施将仅支持 CRI-O `>= v1.28.2` 的版本以及比 `release-1.28` 新的版本分支。 + + +## 如何使用新软件包 {#how-to-use-the-new-packages} + + +与 Kubernetes 社区一样,CRI-O 提供 `deb` 和 `rpm` 软件包作为 OBS 中专用子项目的一部分, +被称为 [`isv:kubernetes:addons:cri-o`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o)。 +这个项目是一个集合,提供 `stable`(针对 CRI-O 标记)以及 `prerelease`(针对 CRI-O `release-1.y` 和 `main` 分支)版本的软件包。 + + +**稳定版本:** + + +- [`isv:kubernetes:addons:cri-o:stable`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable):稳定软件包 + - [`isv:kubernetes:addons:cri-o:stable:v1.29`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable:v1.29 ):`v1.29.z` 标记 + - [`isv:kubernetes:addons:cri-o:stable:v1.28`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:stable:v1.28 ):`v1.28.z` 标记 + + +**预发布版本:** + + +- [`isv:kubernetes:addons:cri-o:prerelease`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease):预发布软件包 + - [`isv:kubernetes:addons:cri-o:prerelease:main`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:main): + [`main`](https://github.com/cri-o/cri-o/commits/main) 分支 + - [`isv:kubernetes:addons:cri-o:prerelease:v1.29`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:v1.29): + [`release-1.29`](https://github.com/cri-o/cri-o/commits/release-1.29) 分支 + - [`isv:kubernetes:addons:cri-o:prerelease:v1.28`](https://build.opensuse.org/project/show/isv:kubernetes:addons:cri-o:prerelease:v1.28): + [`release-1.28`](https://github.com/cri-o/cri-o/commits/release-1.28) 分支 + + +v1.29 仓库中尚无可用的稳定版本,因为 v1.29.0 将于 12 月发布。 +CRI-O 社区也**不**支持早于 `release-1.28` 的版本分支, +因为已经有 CI 需求合并到 `main` 中,只有通过适当的努力才能向后移植到 `release-1.28`。 + + +例如,如果最终用户想要安装 CRI-O `main` 分支的最新可用版本, +那么他们可以按照与 Kubernetes 相同的方式添加仓库。 + + +### 基于 `rpm` 的发行版 {#rpm-based-distributions} + + +对于基于 `rpm` 的发行版,您可以以 `root` +用户身份运行以下命令来将 CRI-O 与 Kubernetes 一起安装: + + +#### 添加 Kubernetes 仓库 {#add-the-kubernetes-repo} + +```bash +cat < +#### 添加 CRI-O 
仓库 {#add-the-cri-o-repo} + +```bash +cat < +#### 安装官方包依赖 {#install-official-package-dependencies} + +```bash +dnf install -y \ + conntrack \ + container-selinux \ + ebtables \ + ethtool \ + iptables \ + socat +``` + + +#### 从添加的仓库中安装软件包 {#install-the-packages-from-the-added-repos} + +```bash +dnf install -y --repo cri-o --repo kubernetes \ + cri-o \ + kubeadm \ + kubectl \ + kubelet +``` + + +### 基于 `deb` 的发行版 {#deb-based-distributions} + + +对于基于 `deb` 的发行版,您可以以 `root` 用户身份运行以下命令: + + +#### 安装用于添加仓库的依赖项 {#install-dependencies-for-adding-the-repositories} + +```bash +apt-get update +apt-get install -y software-properties-common curl +``` + + +#### 添加 Kubernetes 仓库 {#add-the-kubernetes-repository} + +```bash +curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | + gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.28/deb/ /" | + tee /etc/apt/sources.list.d/kubernetes.list +``` + + +#### 添加 CRI-O 仓库 {#add-the-cri-o-repository} + +```bash +curl -fsSL https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/Release.key | + gpg --dearmor -o /etc/apt/keyrings/cri-o-apt-keyring.gpg +echo "deb [signed-by=/etc/apt/keyrings/cri-o-apt-keyring.gpg] https://pkgs.k8s.io/addons:/cri-o:/prerelease:/main/deb/ /" | + tee /etc/apt/sources.list.d/cri-o.list +``` + + +#### 安装软件包 {#install-the-packages} + +```bash +apt-get update +apt-get install -y cri-o kubelet kubeadm kubectl +``` + + +#### 启动 CRI-O {#start-cri-o} + +```bash +systemctl start crio.service +``` + + +如果使用的是另一个包序列,CRI-O 包路径中项目的 `prerelease:/main` +前缀可以替换为 `stable:/v1.28`、`stable:/v1.29`、`prerelease:/v1.28` 或 `prerelease :/v1.29`。 + + +你可以使用 `kubeadm init` 命令来[引导集群](/docs/setup/product-environment/tools/kubeadm/install-kubeadm/), +该命令会自动检测后台正在运行 CRI-O。还有适用于 +[Fedora 38](https://github.com/cri-o/packaging/blob/91df5f7/test/rpm/Vagrantfile) +以及 [Ubuntu 22.04](https://github.com/cri-o/packaging/blob/91df5f7/test/deb/Vagrantfile) +的 `Vagrantfile` 示例,可在使用 `kubeadm` 的场景中测试下载的软件包。 + + +## 它是如何工作的 {#how-it-works-under-the-hood} + + +与这些包相关的所有内容都位于新的 [CRI-O 打包仓库](https://github.com/cri-o/packaging)中。 +它包含 [Daily Reconciliation](https://github.com/cri-o/packaging/blob/91df5f7/.github/workflows/schedule.yml) GitHub 工作流, +支持所有发布分支以及 CRI-O 标签。 +OBS 工作流程中的[测试管道](https://github.com/cri-o/packaging/actions/workflows/obs.yml)确保包在发布之前可以被正确安装和使用。 +所有包的暂存和发布都是在 [Kubernetes 发布工具箱(krel)](https://github.com/kubernetes/release/blob/1f85912/docs/krel/README.md)的帮助下完成的, +这一工具箱也被用于官方 Kubernetes `deb` 和 `rpm` 软件包。 + + +包构建的输入每天都会被动态调整,并使用 CRI-O 的静态二进制包。 +这些包是基于 CRI-O CI 中的每次提交来构建和签名的, +并且包含 CRI-O 在特定架构上运行所需的所有内容。静态构建是可重复的, +由 [nixpkgs](https://github.com/NixOS/nixpkgs) 提供支持, +并且仅适用于 `x86_64`、`aarch64` 以及 `ppc64le` 架构。 + + +CRI-O 维护者将很乐意听取有关新软件包工作情况的任何反馈或建议! 
+感谢您阅读本文,请随时通过 Kubernetes [Slack 频道 #crio](https://kubernetes.slack.com/messages/CAZH62UR1) +联系维护人员或在[打包仓库](https://github.com/cri-o/packaging/issues)中创建 Issue。 diff --git a/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/index.md b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/index.md new file mode 100644 index 0000000000000..34896fb95f96b --- /dev/null +++ b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/index.md @@ -0,0 +1,225 @@ +--- +layout: blog +title: "2023 中国 Kubernetes 贡献者峰会简要回顾" +slug: kcs-shanghai +date: 2023-10-20 +--- + + + +**作者:** Paco Xu 和 Michael Yao (DaoCloud) + +2023 年 9 月 26 日,即 +[KubeCon + CloudNativeCon + Open Source Summit China 2023](https://www.lfasiallc.com/kubecon-cloudnativecon-open-source-summit-china/) +第一天,近 50 位社区贡献者济济一堂,在上海聚首 Kubernetes 贡献者峰会。 + + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs04.jpeg" alt="2023 Kubernetes 贡献者峰会与会者集体合影" caption="2023 Kubernetes 贡献者峰会与会者集体合影" >}} + + +这是疫情三年之后,首次在中国本土召开的面对面线下聚会。 + +## 开心遇见 + +首先是本次 KubeCon 活动的联席主席、来自华为云的 [Kevin Wang](https://github.com/kevin-wangzefeng) +和来自 Gaint Swarm 的 [Puja](https://github.com/puja108) 做了欢迎致辞。 + + +随后在座的几十位贡献者分别做了简单的自我介绍,80% 以上的与会者来自中国,还有一些贡献者专程从欧美飞到上海参会。 +其中不乏来自微软、Intel、华为的技术大咖,也有来自 DaoCloud 这样的新锐中坚力量。 +欢声笑语齐聚一堂,无论是操着欧美口音的英语,还是地道的中国话,都在诠释着舒心与欢畅,表达着尊敬和憧憬。 +是曾经做出的贡献拉近了彼此,是互相的肯定和成就赋予了这次线下聚会的可能。 + + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs06.jpeg" alt="Face to face meeting in Shanghai" caption="Face to face meeting in Shanghai" >}} + + +与会的贡献者不再是简单的 GitHub ID,而是进阶为一个个鲜活的面孔, +从静坐一堂,到合照留影,到寻觅彼此辨别 Who is Who 的那一刻起,我们事实上已形成了一个松散的集体。 +这个 team 结构松散、自由开放,却是为了追逐梦想而成立。 + +一分耕耘一分收获,每一份努力都已清晰地记录在 Kubernetes 社区贡献中。 +无论时光如何流逝,社区中不会抹去那些发光的痕迹,璀璨可能是你的 PR、Issue 或 comments, +也可能是某次 Meetup 的合影笑脸,还可能是贡献者口口相传的故事。 + + +## 技术分享和讨论 + +接下来是 3 个技术分享: + +- [sig-multi-cluster](https://github.com/kubernetes/community/blob/master/sig-multicluster/README.md): + Karmada 的维护者 [Hongcai Ren](https://github.com/RainbowMango) 介绍了这个 SIG 的职责和作用。 + 这个 SIG 负责设计、讨论、实现和维护多集群管理相关的 API、工具和文档。 + 其中涉及的 Cluster Federation 也是 Karmada 的核心概念之一。 + +- [helmfile](https://github.com/helmfile/helmfile):来自[极狐 GitLab](https://gitlab.cn/) 的 + [yxxhero](https://github.com/yxxhero) 介绍了如何声明式部署 Kubernetes 清单,如何自定义配置, + 如何使用 Helm 的最新特性 Helmfile 等内容。 +- [sig-scheduling](https://github.com/kubernetes/community/blob/master/sig-scheduling/README.md): + 来自华为云的 [william-wang](https://github.com/william-wang) 介绍了 + [SIG Scheduling](https://github.com/kubernetes/community/blob/master/sig-scheduling/README.md) + 最近更新的特性以及未来的规划。SIG Scheduling 负责设计、开发和测试 Pod 调度相关的组件。 + + +{{< figure src="/blog/2023/10/20/kcs-shanghai/kcs03.jpeg" alt="有关 sig-multi-cluster 的技术主题演讲" caption="有关 sig-multi-cluster 的技术主题演讲" >}} + + +随后播放了来自 SIG-Node Chair [Sergey Kanzhelev](https://github.com/SergeyKanzhelev) +的贡献者招募视频,希望更多贡献者参与到 Kubernetes 社区,特别是社区热门的 SIG-Node 方向。 + +最后,Kevin 主持了 Unconference 的集体讨论活动,主要涉及到多集群、调度、弹性、AI 等方向。 +有关 Unconference 会议纪要,参阅 + + +## 中国贡献者数据 + +本次贡献者峰会在上海举办,有 90% 的与会者为华人。而在 CNCF 生态体系中,来自中国的贡献数据也在持续增长,目前: + +- 中国贡献者占比 9% +- 中国贡献量占比 11.7% +- 全球贡献排名第 2 + +{{< note >}} + +以上数据来自 CNCF 首席技术官 Chris Aniszczyk 在 2023 年 9 月 26 日 KubeCon 的主题演讲。 +另外,由于大量中国贡献者使用 VPN 连接社区,这些统计数据可能与真实数据有所差异。 +{{< /note >}} + + +Kubernetes 贡献者峰会是一个自由开放的 Meetup,欢迎社区所有贡献者参与: + +- 新人 +- 老兵 + - 文档 + - 代码 + - 社区管理 +- 子项目 Owner 和参与者 +- 特别兴趣小组(SIG)或工作小组(WG)人员 +- 活跃的贡献者 +- 临时贡献者 + + +## 致谢 + +感谢本次活动的组织者: + +- [Kevin Wang](https://github.com/kevin-wangzefeng) 是本次 KubeCon 活动的联席主席,也是贡献者峰会的负责人 +- [Paco Xu](https://github.com/pacoxu) 积极联络场地餐食,联系和邀请国内外贡献者,建立微信群征集议题, + 
[会前会后公示活动细节](https://github.com/kubernetes/community/issues/7510)等
+- [Mengjiao Liu](https://github.com/mengjiao-liu) 负责组织协调和联络事宜
+
+我们衷心感谢所有参加在上海举办的中国 Kubernetes 贡献者峰会的贡献者们。
+你们对 Kubernetes 社区的奉献和承诺是无价之宝。
+让我们携手共进,继续推动云原生技术的边界,塑造这个生态系统的未来。
diff --git a/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg
new file mode 100644
index 0000000000000..c6131bfc911f2
Binary files /dev/null and b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs03.jpeg differ
diff --git a/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg
new file mode 100644
index 0000000000000..61cb7ef8526fe
Binary files /dev/null and b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs04.jpeg differ
diff --git a/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg
new file mode 100644
index 0000000000000..f66c505e7a8d4
Binary files /dev/null and b/content/zh-cn/blog/_posts/2023-10-20-kcs-shanghai/kcs06.jpeg differ
diff --git a/content/zh-cn/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md b/content/zh-cn/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md
new file mode 100644
index 0000000000000..71a5fdabe04c0
--- /dev/null
+++ b/content/zh-cn/blog/_posts/2023-10-23-pv-last-phase-transtition-time.md
@@ -0,0 +1,208 @@
+---
+layout: blog
+title: Kubernetes 中 PersistentVolume 的最后阶段转换时间
+date: 2023-10-23
+slug: persistent-volume-last-phase-transition-time
+---
+
+**作者:** Roman Bednář (Red Hat)
+
+**译者:** Xin Li (DaoCloud)
+
+在最近的 Kubernetes v1.28 版本中,我们(SIG Storage)引入了一项新的 Alpha 级别特性,
+旨在改进 PersistentVolume(PV)存储管理并帮助集群管理员更好地了解 PV 的生命周期。
+通过将 `lastPhaseTransitionTime` 字段添加到 PV 的状态中,集群管理员现在可以跟踪
+PV 上次转换到不同[阶段](/zh-cn/docs/concepts/storage/persistent-volumes/#phase)的时间,
+从而实现更高效、更明智的资源管理。
+
+## 我们为什么需要新的 PV 字段? {#why-new-field}
+
+Kubernetes 中的 PersistentVolume 在为集群中运行的工作负载提供存储资源方面发挥着至关重要的作用。
+然而,有效管理这些 PV 可能具有挑战性,特别是在确定 PV 在不同阶段(`Pending`、`Bound` 或 `Released`)之间转换的最后时间时。
+管理员通常需要知道 PV 上次使用或转换到某些阶段的时间;例如,实施保留策略、执行清理或监控存储运行状况时。
+
+过去,Kubernetes 用户在使用 `Delete` 保留策略时面临数据丢失问题,不得不使用更安全的 `Retain` 策略。
+当我们计划引入新的 `lastPhaseTransitionTime` 字段时,我们希望提供一个更通用的解决方案,
+可用于各种用例,包括根据卷上次使用时间进行手动清理或根据阶段转换时间生成警报。
+
+## lastPhaseTransitionTime 如何提供帮助
+
+如果你已启用特性门控(请参阅[如何使用它](#how-to-use-it)),则每次 PV 从一个阶段转换到另一阶段时,
+PersistentVolume(PV)的新字段 `.status.lastPhaseTransitionTime` 都会被更新。
+
+无论是从 `Pending` 转换到 `Bound`、`Bound` 到 `Released`,还是任何其他阶段转换,都会记录 `lastPhaseTransitionTime`。
+新创建的 PV 会被置于 `Pending` 阶段,此时 `lastPhaseTransitionTime` 也会被记录。
+
+此功能允许集群管理员:
+
+1. 实施保留策略
+
+   通过 `lastPhaseTransitionTime`,管理员可以跟踪 PV 上次使用或转换到 `Released` 阶段的时间。
+   此信息对于实施保留策略、清理处于 `Released` 阶段超过特定时长的资源至关重要。
+   例如,现在可以很容易地编写一个脚本或一条策略,删除已处于 `Released` 阶段达一周的所有 PV。
+
+2. 监控存储运行状况
+
+   通过分析 PV 的阶段转换时间,管理员可以更有效地监控存储运行状况。
+   例如,他们可以识别处于 `Pending` 阶段时间异常长的 PV,这可能表明存储制备程序存在潜在问题。
+
+## 如何使用它 {#how-to-use-it}
+
+从 Kubernetes v1.28 开始,`lastPhaseTransitionTime` 为 Alpha 特性字段,因此需要启用
+`PersistentVolumeLastPhaseTransitionTime` 特性门控。
+
+如果你想在该特性处于 Alpha 阶段时对其进行测试,则需要在 `kube-controller-manager`
+和 `kube-apiserver` 上启用此特性门控。
+
+使用 `--feature-gates` 命令行参数:
+
+```shell
+--feature-gates="...,PersistentVolumeLastPhaseTransitionTime=true"
+```
+
+请记住,该特性启用后不会立即生效;而是在 PV 更新以及阶段之间转换时,填充新字段。
+然后,管理员可以通过查看 PV 状态访问新字段,此状态可以使用标准 Kubernetes API
+调用或通过 Kubernetes 客户端库进行检索。
+
+以下示例展示了如何使用 `kubectl` 命令行工具检索特定 PV 的 `lastPhaseTransitionTime`:
+
+```shell
+kubectl get pv <pv-name> -o jsonpath='{.status.lastPhaseTransitionTime}'
+```
+
+## 未来发展
+
+此特性最初是作为 Alpha 特性引入的,位于默认情况下禁用的特性门控之下。
+在 Alpha 阶段,我们(Kubernetes SIG Storage)将收集最终用户的反馈并解决发现的任何问题或改进点。
+
+一旦收到足够的反馈,或者没有发现问题,该特性就可以进入 Beta 阶段。
+Beta 阶段将使我们能够进一步验证实现并确保其稳定性。
+
+在该字段升级到 Beta 级别和将该字段升级为正式发布(GA)的版本之间,至少会经过两个 Kubernetes 版本。
+这意味着该字段 GA 的最早版本是 Kubernetes 1.32,可能计划于 2025 年初发布。
+
+## 欢迎参与
+
+我们始终欢迎新的贡献者,因此如果你想参与其中,可以加入我们的
+[Kubernetes 存储特殊兴趣小组](https://github.com/kubernetes/community/tree/master/sig-storage)(SIG)。
+
+如果你想分享反馈,可以在我们的[公共 Slack 频道](https://app.slack.com/client/T09NY5SBT/C09QZFCE5)上分享。
+如果你尚未加入 Slack 工作区,可以访问 https://slack.k8s.io/ 获取邀请。
+
+特别感谢所有提供精彩评论、分享宝贵意见并帮助实现此特性的贡献者(按字母顺序排列):
+
+- Han Kang ([logicalhan](https://github.com/logicalhan))
+- Jan Šafránek ([jsafrane](https://github.com/jsafrane))
+- Jordan Liggitt ([liggitt](https://github.com/liggitt))
+- Kiki ([carlory](https://github.com/carlory))
+- Michelle Au ([msau42](https://github.com/msau42))
+- Tim Bannister ([sftim](https://github.com/sftim))
+- Wojciech Tyczynski ([wojtek-t](https://github.com/wojtek-t))
+- Xing Yang ([xing-yang](https://github.com/xing-yang))
diff --git a/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png b/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png
new file mode 100644
index 0000000000000..5a2215397f327
Binary files /dev/null and b/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/gateway-api-logo.png differ
diff --git a/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/index.md b/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/index.md
new file mode 100644
index 0000000000000..05774dfd8a98e
--- /dev/null
+++ b/content/zh-cn/blog/_posts/2023-10-31-Gateway-API-GA/index.md
@@ -0,0 +1,294 @@
+---
+layout: blog
+title: "Gateway API v1.0:正式发布(GA)"
+date: 2023-10-31T10:00:00-08:00
+slug: gateway-api-ga
+---
+
+**作者:** Shane Utt (Kong), Nick Young (Isovalent), Rob Scott (Google)
+
+**译者:** Xin Li (DaoCloud)
+
+我们代表 Kubernetes SIG Network 很高兴地宣布 [Gateway API](https://gateway-api.sigs.k8s.io/)
+v1.0 版本发布!此版本是该项目的一个重要里程碑。几个关键的 API 正在逐步进入 GA(正式发布)阶段,
+同时其他重要特性已添加到实验(Experimental)通道中。
+
+## 新增内容
+
+### 升级到 v1
+
+此版本将 [Gateway](https://gateway-api.sigs.k8s.io/api-types/gateway/)、
+[GatewayClass](https://gateway-api.sigs.k8s.io/api-types/gatewayclass/) 和
+[HTTPRoute](https://gateway-api.sigs.k8s.io/api-types/httproute/) 升级到 v1 版本,
+这意味着它们现在是正式发布(GA)的版本。这个 API 版本表明我们对 API 的对外接口(API surface)具有较高的信心,并提供向后兼容的保证。
+需要注意的是,虽然标准(Standard)通道中所包含的这个版本的 API 集合现在被认为是稳定的,但这并不意味着它们是完整的。
+即便这些 API 已满足毕业标准,仍将继续通过实验(Experimental)通道接收新特性。要了解相关工作的组织方式的进一步信息,请参阅
+[Gateway API 版本控制策略](https://gateway-api.sigs.k8s.io/concepts/versioning/)。
+
+Gateway API 现在有了自己的 Logo!这个 Logo 是通过协作方式设计的,
+旨在表达这是一组用于路由南北向和东西向流量的 Kubernetes API:
+
+![Gateway API Logo](gateway-api-logo.png "Gateway API Logo")
+
+### CEL 验证
+
+过去,Gateway API 在安装 API 时绑定了一个验证性质(Validation)的 Webhook。
+从 v1.0 开始,Webhook 的安装是可选的,仅建议在 Kubernetes 1.24 及更早版本上使用。
+Gateway API 现在将 [CEL](/zh-cn/docs/reference/using-api/cel/) 验证规则包含在
+[CRD](/zh-cn/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
+中。Kubernetes 1.25 及以上版本支持这种新形式的验证,因此大多数安装中不再需要验证性质的 Webhook。
+
+### 标准(Standard)通道
+
+此发行版本主要侧重于确保现有 Beta 级别 API 定义良好且足够稳定,可以升级为 GA。
+这背后包括为了提高与 Gateway API 交互时的整体用户体验而对规范所作的多处澄清和改进。
+
+### 实验(Experimental)通道
+
+此发行版本中包含的大部分更改都限于实验通道。这些更改包括 HTTPRoute
+超时、用于 Gateway 访问后端的 TLS 配置、WebSocket 支持、Gateway 基础设施的标签等等。
+请继续关注后续博客,我们将详细介绍这些新特性。
+
+## 其他内容
+
+有关此版本中包含的所有更改的完整列表,请参阅
+[v1.0.0 版本说明](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v1.0.0)。
+
+## 发展历程
+
+Gateway API 的想法最初是在 4 年前的 KubeCon 圣地亚哥上作为下一代 Ingress
+API 被[提出](https://youtu.be/Ne9UJL6irXY?si=wgtC9w8PMB5ZHil2)的。那次会议之后,
+诞生了一个令人难以置信的社区,致力于开发一个可能是 Kubernetes 历史上协作关系最密切的 API。
+迄今为止,已有超过 170 人为此 API 做出了贡献,而且这个数字还在不断增长。
+
+特别感谢 20 多位[愿意在项目中担任正式角色](https://github.com/kubernetes-sigs/gateway-api/blob/main/OWNERS_ALIASES)的社区成员,
+他们付出了时间进行评审并分担项目维护的负担!
+
+我们特别要强调那些在项目早期发展中起到关键作用的荣誉维护者:
+
+* [Bowei Du](https://github.com/bowei)
+* [Daneyon Hansen](https://github.com/danehans)
+* [Harry Bagdi](https://github.com/hbagdi)
+
+## 尝试一下
+
+与其他 Kubernetes API 不同,你无需升级到最新版本的 Kubernetes 即可获取最新版本的
+Gateway API。只要运行的是 Kubernetes 最新的 5 个次要版本之一(1.24+),
+就可以使用最新版本的 Gateway API。
+
+要尝试此 API,请参照我们的[入门指南](https://gateway-api.sigs.k8s.io/guides/)。
+
+## 下一步
+
+此版本只是 Gateway API 更广阔前景的开始,将来的 API 版本中还有很多新特性和新想法。
+
+我们未来的一个关键目标是努力稳定和升级 API 的其他实验级特性。
+这些特性包括支持[服务网格](https://gateway-api.sigs.k8s.io/concepts/gamma/)、
+额外的路由类型([GRPCRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRoute)、
+[TCPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute)、
+[TLSRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TLSRoute)、
+[UDPRoute](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.UDPRoute))以及各种实验级特性。
+
+我们还致力于将 [ReferenceGrant](https://gateway-api.sigs.k8s.io/api-types/referencegrant/)
+移入内置的 Kubernetes API 中,使其不仅仅可用于 Gateway API。在 Gateway API 中,我们使用这个资源来安全地实现跨命名空间引用,
+而这个概念现在正被其他 SIG 采纳。这个 API 的新版本将归 SIG Auth 所有,在移到内置的
+Kubernetes API 时可能至少包含一些修改。
+
+### Gateway API 现身于 KubeCon + CloudNativeCon
+
+在 [KubeCon 北美(芝加哥)](https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/)
+和同场的[贡献者峰会](https://www.kubernetes.dev/events/2023/kcsna/)上,
+有几个与 Gateway API 相关的演讲将详细介绍这些主题。如果你今年要参加其中的一场活动,
+请考虑将它们添加到你的日程安排中。
+
+**贡献者峰会:**
+
+- [使用 CRD 构建 GA API 的经验教训](https://sched.co/1Sp9u)
+- [合规性配置文件:构建通用合规性测试报告框架](https://sched.co/1Sp9l)
+- [Gateway API:GA 以后](https://sched.co/1SpA9)
+
+**KubeCon 主要活动:**
+
+- [Gateway API:Kubernetes 历史上协作性最强的 API 已经正式发布](https://sched.co/1R2qM)
+
+**KubeCon 办公时间:**
+
+如果你想就相关主题发起讨论或参与头脑风暴,请参加 Gateway API 维护人员在 KubeCon 上举行的办公时间会议。
+要获取有关这些会议的最新更新,请加入 [Kubernetes Slack](https://slack.kubernetes.io/)
+上的 `#sig-network-gateway-api` 频道。
+
+## 参与其中
+
+我们只是初步介绍了 Gateway API 正在进行的工作。
+有很多机会参与并帮助定义 Ingress 和 Mesh 的 Kubernetes 路由 API 的未来。
+
+如果你对此感兴趣,请[加入我们的社区](https://gateway-api.sigs.k8s.io/contributing/)并帮助我们共同构建
+Gateway API 的未来!
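+
+作为快速上手的参考,下面给出一个最小的 HTTPRoute 清单示意(仅为示例:其中的
+`example-gateway`、`example-svc` 等名称都是假设的占位符,实际字段用法请以上文的入门指南和官方规范为准)。
+它把发往 `www.example.com` 的 HTTP 流量经由一个已有的 Gateway 转发到后端 Service:
+
+```yaml
+apiVersion: gateway.networking.k8s.io/v1
+kind: HTTPRoute
+metadata:
+  name: example-route
+spec:
+  parentRefs:
+  - name: example-gateway   # 假设集群中已存在的 Gateway
+  hostnames:
+  - "www.example.com"
+  rules:
+  - backendRefs:
+    - name: example-svc     # 假设的后端 Service
+      port: 8080
+```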
diff --git a/content/zh-cn/blog/_posts/2023-11-07-introducing-sig-etcd.md b/content/zh-cn/blog/_posts/2023-11-07-introducing-sig-etcd.md
new file mode 100644
index 0000000000000..e3b9992674cfe
--- /dev/null
+++ b/content/zh-cn/blog/_posts/2023-11-07-introducing-sig-etcd.md
@@ -0,0 +1,145 @@
+---
+layout: blog
+title: "介绍 SIG etcd"
+slug: introducing-sig-etcd
+date: 2023-11-07
+canonicalUrl: https://etcd.io/blog/2023/introducing-sig-etcd/
+---
+
+**作者**:Han Kang (Google), Marek Siarkowicz (Google), Frederico Muñoz (SAS Institute)
+
+**译者**:Xin Li (DaoCloud)
+
+特殊兴趣小组(SIG)是 Kubernetes 项目的基本组成部分,很大一部分的 Kubernetes 社区活动都在其中进行。
+当有需要时,可以创建[新的 SIG](https://github.com/kubernetes/community/blob/master/sig-wg-lifecycle.md),
+而这正是最近发生的事情。
+
+[SIG etcd](https://github.com/kubernetes/community/blob/master/sig-etcd/README.md)
+是 Kubernetes SIG 列表中的最新成员。在这篇文章中,我们将更好地认识它,了解它的起源、职责和计划。
+
+## etcd 的关键作用
+
+如果我们查看 Kubernetes 集群的控制平面内部,我们会发现
+[etcd](https://kubernetes.io/zh-cn/docs/concepts/overview/components/#etcd),
+一个一致且高可用的键值存储,用作 Kubernetes 所有集群数据的后台数据库 -- 仅此描述就突出了
+etcd 所扮演的关键角色,以及它在 Kubernetes 生态系统中的重要性。
+
+由于 etcd 在生态中的关键作用,其项目和社区的健康成为了一个重要的考虑因素,
+并且人们 2022 年初[对项目状态的担忧](https://groups.google.com/a/kubernetes.io/g/steering/c/e-O-tVSCJOk/m/N9IkiWLEAgAJ)
+并没有被忽视。维护团队的变动以及其他因素,导致一些问题亟需解决。
+
+## 为什么要设立特殊兴趣小组
+
+考虑到 etcd 的关键作用,有人提出未来的方向是创建一个新的特殊兴趣小组。
+如果 etcd 已经成为 Kubernetes 的核心,创建专门的 SIG 不仅是对这一角色的认可,
+还会使 etcd 成为 Kubernetes 社区的一等公民。
+
+SIG etcd 的成立为明确 etcd 和 Kubernetes API 机制之间的契约关系创造了一个专门的空间,
+并防止在 etcd 级别上发生违反此契约的更改。此外,etcd 将能够采用 Kubernetes 提供的 SIG
+流程([KEP](https://www.kubernetes.dev/resources/keps/)、
+[PRR](https://github.com/kubernetes/community/blob/master/sig-architecture/production-readiness.md)、
+[分阶段特性门控](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/)以及其他流程)
+以提高代码库的一致性和可靠性,这将为 etcd 社区带来巨大的好处。
+
+作为 SIG,etcd 还能够从 Kubernetes 获得贡献者的支持:Kubernetes 维护者对 etcd
+的积极贡献将通过增加潜在审核者数量以及与现有测试框架的集成,来降低出现破坏 Kubernetes 的变更的可能性。
+这不仅有利于 Kubernetes,由于它能够更好地参与并塑造 etcd 所发挥的关键作用,从而也将有利于整个 etcd。
+
+## 关于 SIG etcd
+
+最近创建的 SIG 已经在努力实现其[章程](https://github.com/kubernetes/community/blob/master/sig-etcd/charter.md)
+和[愿景](https://github.com/kubernetes/community/blob/master/sig-etcd/vision.md)中定义的目标。
+其目的很明确:确保 etcd 是一个可靠、简单且可扩展的生产就绪存储,用于构建云原生分布式系统并通过 Kubernetes 等编排器管理云原生基础设施。
+
+SIG etcd 的范围不仅仅涉及将 etcd 作为 Kubernetes 组件,还涵盖将 etcd 作为标准解决方案。
+我们的目标是使 etcd 成为可在任何地方使用的最可靠的键值存储,不受任何 Kubernetes 特定限制的约束,并且可以扩展以满足许多不同用例的需求。
+
+我们相信,SIG etcd 的创建将成为项目生命周期中的一个重要里程碑,同时改进 etcd 本身以及
+etcd 与 Kubernetes 的集成。我们欢迎所有对 etcd
+感兴趣的人[访问我们的页面](https://github.com/kubernetes/community/blob/master/sig-etcd/README.md)、
+[加入我们的 Slack 频道](https://kubernetes.slack.com/messages/etcd),并参与 etcd 的这一新篇章。
diff --git a/content/zh-cn/blog/_posts/2023-11-16-mid-cycle-1.29.md b/content/zh-cn/blog/_posts/2023-11-16-mid-cycle-1.29.md
new file mode 100644
index 0000000000000..67cabde7fcefb
--- /dev/null
+++ b/content/zh-cn/blog/_posts/2023-11-16-mid-cycle-1.29.md
@@ -0,0 +1,181 @@
+---
+layout: blog
+title: 'Kubernetes 1.29 中的移除、弃用和主要变更'
+date: 2023-11-16
+slug: kubernetes-1-29-upcoming-changes
+---
+
+**作者:** Carol Valencia, Kristin Martin, Abigail McCarthy, James Quigley, Hosam Kamel
+
+**译者:** [Michael Yao](https://github.com/windsonsea) (DaoCloud)
+
+和其他每次发布一样,Kubernetes v1.29 将弃用和移除一些特性。
+一贯以来生成高质量发布版本的能力是开发周期稳健和社区健康的证明。
+下文列举即将发布的 Kubernetes 1.29 中的一些弃用和移除事项。
+
+## Kubernetes API 移除和弃用流程
+
+Kubernetes 项目对特性有一个文档完备的弃用策略。此策略规定,只有当同一 API 有了较新的、稳定的版本可用时,
+原有的稳定 API 才可以被弃用,各个不同稳定级别的 API 都有一个最短的生命周期。
+弃用的 API 指的是已标记为将在后续某个 Kubernetes 发行版本中被移除的 API;
+移除之前该 API 将继续发挥作用(从被弃用起至少一年时间),但使用时会显示一条警告。
+被移除的 API 将在当前版本中不再可用,此时你必须转为使用替代的 API。
+
+- 正式发布(GA)或稳定的 API 版本可能被标记为已弃用,但只有在 Kubernetes 主版本变化时才会被移除。
+- 测试版(Beta)或预发布 API 版本在弃用后必须在后续 3 个版本中继续支持。
+- Alpha 或实验性 API 版本可以在任何版本中被移除,不另行通知。
+
+无论一个 API 是因为某特性从 Beta 进阶至稳定阶段而被移除,还是因为该 API 根本没有成功,
+所有移除均遵从上述弃用策略。无论何时移除一个 API,文档中都会列出迁移选项。
+
+## k8s.gcr.io 重定向到 registry.k8s.io 相关说明
+
+Kubernetes 项目为了托管其容器镜像,使用社区自治的一个名为 registry.k8s.io 的镜像仓库。
+从 2023 年 3 月起,所有流向 k8s.gcr.io 旧仓库的请求开始被重定向到 registry.k8s.io。
+已弃用的 k8s.gcr.io 仓库最终将被淘汰。有关这一变更的细节或若想查看你是否受到影响,参阅
+[k8s.gcr.io 重定向到 registry.k8s.io - 用户须知](/zh-cn/blog/2023/03/10/image-registry-redirect/)。
+
+## Kubernetes 社区自治软件包仓库相关说明
+
+在 2023 年年初,Kubernetes 项目[引入了](/zh-cn/blog/2023/08/15/pkgs-k8s-io-introduction/) `pkgs.k8s.io`,
+这是 Debian 和 RPM 软件包所用的社区自治软件包仓库。这些社区自治的软件包仓库取代了先前由 Google 管理的仓库
+(`apt.kubernetes.io` 和 `yum.kubernetes.io`)。在 2023 年 9 月 13 日,这些老旧的仓库被正式弃用,其内容被冻结。
+
+有关这一变更的细节或你若想查看是否受到影响,
+请参阅[弃用公告](/zh-cn/blog/2023/08/31/legacy-package-repository-deprecation/)。
+
+## Kubernetes v1.29 的弃用和移除说明
+
+有关 Kubernetes v1.29 计划弃用的完整列表,
+参见官方 [API 移除](/zh-cn/docs/reference/using-api/deprecation-guide/#v1-29)列表。
+
+### 移除与云驱动的内部集成([KEP-2395](https://kep.k8s.io/2395))
+
+在 Kubernetes v1.29 中,特性门控 `DisableCloudProviders` 和 `DisableKubeletCloudCredentialProviders`
+都将默认被设置为 `true`。这个变更将要求当前正在使用内部云驱动集成(Azure、GCE 或 vSphere)的用户启用外部云控制器管理器,
+或者将关联的特性门控设置为 `false` 以选择传统的集成方式。
+
+启用外部云控制器管理器意味着你必须在集群的控制平面中运行一个合适的云控制器管理器;
+同时还需要为 kubelet(在每个相关节点上)及整个控制平面(kube-apiserver 和 kube-controller-manager)
+设置命令行参数 `--cloud-provider=external`。
+
+有关如何启用和运行外部云控制器管理器的细节,
+参阅[管理云控制器管理器](/zh-cn/docs/tasks/administer-cluster/running-cloud-controller/)和
+[迁移多副本的控制面以使用云控制器管理器](/zh-cn/docs/tasks/administer-cluster/controller-manager-leader-migration/)。
+
+有关云控制器管理器的常规信息,请参阅 Kubernetes
+文档中的[云控制器管理器](/zh-cn/docs/concepts/architecture/cloud-controller/)。
+
+### 移除 `v1beta2` 流量控制 API 组
+
+在 Kubernetes v1.29 中,将[不再提供](/zh-cn/docs/reference/using-api/deprecation-guide/#v1-29)
+FlowSchema 和 PriorityLevelConfiguration 的 **flowcontrol.apiserver.k8s.io/v1beta2** API 版本。
+
+为了做好准备,你可以编辑现有的清单(Manifest)并重写客户端软件,以使用自 v1.26 起可用的
+`flowcontrol.apiserver.k8s.io/v1beta3` API 版本。所有现有的持久化对象都可以通过新的 API 访问。
+`flowcontrol.apiserver.k8s.io/v1beta3` 中的显著变化包括将 PriorityLevelConfiguration 的
+`spec.limited.assuredConcurrencyShares` 字段更名为 `spec.limited.nominalConcurrencyShares`。
+
+### 弃用针对 Node 的 `status.nodeInfo.kubeProxyVersion` 字段
+
+在 v1.29 中,针对 Node 对象的 `.status.kubeProxyVersion` 字段将被
+[标记为弃用](https://github.com/kubernetes/enhancements/issues/4004),
+准备在未来某个发行版本中移除。这是因为此字段并不准确,它由 kubelet 设置,
+而 kubelet 实际上并不知道 kube-proxy 版本,甚至不知道 kube-proxy 是否在运行。
+
+## 了解更多
+
+弃用信息是在 Kubernetes 发布说明(Release Notes)中公布的。你可以在以下版本的发布说明中看到待弃用的公告:
+
+* [Kubernetes v1.25](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#deprecation)
+* [Kubernetes v1.26](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.26.md#deprecation)
+* [Kubernetes v1.27](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#deprecation)
+* [Kubernetes v1.28](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md#deprecation)
+
+我们将在
+[Kubernetes v1.29](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md#deprecation)
+的 CHANGELOG 中正式宣布与该版本相关的弃用信息。
+
+有关弃用和移除流程的细节,参阅 Kubernetes
+官方[弃用策略](/zh-cn/docs/reference/using-api/deprecation-policy/#deprecating-parts-of-the-api)文档。
diff --git a/content/zh-cn/blog/_posts/2023-12-15-volume-attributes-class/index.md b/content/zh-cn/blog/_posts/2023-12-15-volume-attributes-class/index.md
new file mode 100644
index 0000000000000..c95a8bb3d7c4d
--- /dev/null
+++ b/content/zh-cn/blog/_posts/2023-12-15-volume-attributes-class/index.md
@@ -0,0 +1,257 @@
+---
+layout: blog
+title: "Kubernetes 1.29:修改卷之 VolumeAttributesClass"
+date: 2023-12-15
+slug: kubernetes-1-29-volume-attributes-class
+---
+
+**作者**:Sunny Song (Google)
+
+**译者**:[Baofa Fan](https://github.com/carlory) (DaoCloud)
+
+Kubernetes v1.29 版本引入了一个 Alpha 功能,支持通过变更 PersistentVolumeClaim(PVC)的
+`volumeAttributesClassName` 字段来修改卷。启用该功能后,Kubernetes 可以处理除容量以外的卷属性的更新。
+该功能允许在不直接调用各个存储提供商 API 的情况下更改卷属性,从而简化了当前的流程。
+
+你可以在 Kubernetes 文档中,阅读有关 VolumeAttributesClass 的详细使用信息,或者继续阅读了解
+Kubernetes 项目为什么支持此功能。
+
+## VolumeAttributesClass
+
+新的 `storage.k8s.io/v1alpha1` API 组提供了两种新类型:
+
+**VolumeAttributesClass**
+
+表示由 CSI 驱动程序定义的可变卷属性的规约。你可以在 PersistentVolumeClaim 动态制备时指定它,
+并且允许在制备完成后在 PersistentVolumeClaim 规约中进行更改。
+
+**ModifyVolumeStatus**
+
+表示 `ControllerModifyVolume` 操作的状态对象。
+
+启用此 Alpha 功能后,PersistentVolumeClaim 的 `spec.volumeAttributesClassName` 字段指明了在 PVC 中使用的 VolumeAttributesClass。
+在制备卷时,`CreateVolume` 操作将应用 VolumeAttributesClass 中的参数以及 StorageClass 中的参数。
+
+当 PVC 的 `spec.volumeAttributesClassName` 发生变化时,external-resizer sidecar 将会收到一个 informer 事件。
+基于当前的配置状态,resizer 将触发 CSI ControllerModifyVolume。更多细节可以在
+[KEP-3751](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/3751-volume-attributes-class/README.md) 中找到。
+
+## 如何使用它
+
+如果你想在 Alpha 版本中测试该功能,需要在 `kube-controller-manager` 和 `kube-apiserver` 中启用相关的特性门控。
+使用 `--feature-gates` 命令行参数:
+
+```
+--feature-gates="...,VolumeAttributesClass=true"
+```
+
+它还要求 CSI 驱动程序实现了 ModifyVolume API。
+
+### 用户流程
+
+如果你想看到该功能的运行情况,并验证它在你的集群中是否正常工作,可以尝试以下操作:
+
+1. 定义 StorageClass 和 VolumeAttributesClass
+
+   ```yaml
+   apiVersion: storage.k8s.io/v1
+   kind: StorageClass
+   metadata:
+     name: csi-sc-example
+   provisioner: pd.csi.storage.gke.io
+   parameters:
+     disk-type: "hyperdisk-balanced"
+   volumeBindingMode: WaitForFirstConsumer
+   ```
+
+   ```yaml
+   apiVersion: storage.k8s.io/v1alpha1
+   kind: VolumeAttributesClass
+   metadata:
+     name: silver
+   driverName: pd.csi.storage.gke.io
+   parameters:
+     provisioned-iops: "3000"
+     provisioned-throughput: "50"
+   ```
+
+2. 定义并创建 PersistentVolumeClaim
+
+   ```yaml
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: test-pv-claim
+   spec:
+     storageClassName: csi-sc-example
+     volumeAttributesClassName: silver
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 64Gi
+   ```
+
+3. 验证 PersistentVolumeClaim 是否已正确制备:
+
+   ```
+   kubectl get pvc
+   ```
+
+4. 创建一个新的名为 gold 的 VolumeAttributesClass:
+
+   ```yaml
+   apiVersion: storage.k8s.io/v1alpha1
+   kind: VolumeAttributesClass
+   metadata:
+     name: gold
+   driverName: pd.csi.storage.gke.io
+   parameters:
+     iops: "4000"
+     throughput: "60"
+   ```
+
+5. 使用新的 VolumeAttributesClass 更新 PVC 并应用:
+
+   ```yaml
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: test-pv-claim
+   spec:
+     storageClassName: csi-sc-example
+     volumeAttributesClassName: gold
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 64Gi
+   ```
+
+6. 验证 PersistentVolumeClaims 是否具有更新的 VolumeAttributesClass 参数:
+
+   ```
+   kubectl describe pvc
+   ```
+
+## 后续步骤
+
+* 有关设计的更多信息,请参阅 [VolumeAttributesClass KEP](https://kep.k8s.io/3751)
+* 你可以在[项目看板](https://github.com/orgs/kubernetes-csi/projects/72)上查看或评论 VolumeAttributesClass
+* 为了将此功能推向 Beta 版本,我们需要社区的反馈,因此这里有一个行动倡议:为 CSI 驱动程序添加支持,
+  尝试此功能,考虑它如何帮助解决你的用户遇到的问题...
+
+## 参与其中
+
+我们始终欢迎新的贡献者。因此,如果你想参与其中,可以加入我们的
+[Kubernetes 存储特别兴趣小组](https://github.com/kubernetes/community/tree/master/sig-storage)(SIG)。
+
+如果你想分享反馈意见,可以在我们的[公共 Slack 频道](https://app.slack.com/client/T09NY5SBT/C09QZFCE5)上留言。
+
+特别感谢所有为此功能提供了很好的评论、分享了宝贵见解并帮助实现此功能的贡献者(按字母顺序):
+
+* Baofa Fan (carlory)
+* Ben Swartzlander (bswartz)
+* Connor Catlett (ConnorJC3)
+* Hemant Kumar (gnufied)
+* Jan Šafránek (jsafrane)
+* Joe Betz (jpbetz)
+* Jordan Liggitt (liggitt)
+* Matthew Cary (mattcary)
+* Michelle Au (msau42)
+* Xing Yang (xing-yang)
\ No newline at end of file
diff --git a/content/zh-cn/community/_index.html b/content/zh-cn/community/_index.html
index 506e2a0fc77ca..8c46769d3c323 100644
--- a/content/zh-cn/community/_index.html
+++ b/content/zh-cn/community/_index.html
@@ -2,259 +2,241 @@
 title: 社区
 layout: basic
 cid: community
+community_styles_migrated: true
 ---
-
-
-
    -
    - Kubernetes 会议一览 - Kubernetes 会议一览 -
    - -
    -
    - -

    -Kubernetes 社区 -- 用户、贡献者以及我们一起塑造的文化 -- 是这个开源项目持续增长的最重要原因。 -我们的文化和价值观随着项目自身的成长和变化而成长着、变化着。 -我们一起努力地持续改进项目本身,以及我们在这个项目中的工作方式。 -

    我们是登记缺陷、提出拉取请求、参加 SIG 会议、Kubernetes Meetup、KubeCon, -为了技术采纳和创新四处宣讲、运行 kubectl get pods -并通过难以计数的其他重要方式作出贡献的一群人。 -如果想要了解如何参与进来并成为这一令人赞叹的社区的一员,请继续阅读。

    -
    - -
    - -贡献者社区      -社区价值观      -行为规范      -视频      -讨论      -活动与聚会      -新闻      -发行版本 - -
    -

    -
    -
    -
    - Kubernetes 会议一览 + + +
    + +

    + Kubernetes 社区 -- 用户、贡献者以及我们一起塑造的文化 -- + 是这个开源项目持续增长的最重要原因。我们的文化和价值观随着项目自身的成长和变化而成长着、变化着。我们一起努力地持续改进项目本身,以及我们在这个项目中的工作方式。 +

    +

    + 我们是登记缺陷、提出拉取请求、参加 SIG 会议、Kubernetes 聚会、KubeCon、为了技术采纳和创新四处宣讲、运行 + kubectl get pods + 并通过难以计数的其他重要方式作出贡献的一群人。如果想要了解如何参与进来并成为这一令人赞叹的社区的一员,请继续阅读。 +

    +
    + +