diff --git a/Makefile b/Makefile index 12b6f67d5ab5b..a6f987547be7a 100644 --- a/Makefile +++ b/Makefile @@ -68,7 +68,7 @@ container-build: module-check $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify" container-serve: module-check ## Boot the development server using container. Run `make container-image` before this. - $(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir + $(CONTAINER_RUN) --cap-drop=ALL --cap-add=AUDIT_WRITE --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir test-examples: scripts/test_examples.sh install @@ -91,4 +91,4 @@ clean-api-reference: ## Clean all directories in API reference directory, preser api-reference: clean-api-reference ## Build the API reference pages. go needed cd api-ref-generator/gen-resourcesdocs && \ - go run cmd/main.go kwebsite --config-dir config/v1.20/ --file api/v1.20/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates templates + go run cmd/main.go kwebsite --config-dir config/v1.21/ --file api/v1.21/swagger.json --output-dir ../../content/en/docs/reference/kubernetes-api --templates templates diff --git a/OWNERS b/OWNERS index f352793ec1bd7..9b12305b4b026 100644 --- a/OWNERS +++ b/OWNERS @@ -11,7 +11,7 @@ emeritus_approvers: # - jaredbhatti, commented out to disable PR assignments # - steveperry-53, commented out to disable PR assignments - stewart-yu -- zacharysarah +# - zacharysarah, commented out to disable PR assignments labels: - sig/docs diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index c31b04d5b0a60..b958f84bac368 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -3,7 +3,6 @@ aliases: - castrojo - kbarnard10 - onlydole - - zacharysarah - mrbobbytables sig-docs-blog-reviewers: # Reviewers for blog content - castrojo @@ -33,7 +32,6 @@ aliases: - sftim - steveperry-53 - tengqm - - zacharysarah - zparnold sig-docs-en-reviews: # PR reviews for English content - bradtopol @@ -50,11 +48,9 @@ aliases: - zparnold sig-docs-es-owners: # Admins for Spanish content - raelga - - alexbrand + - electrocucaracha sig-docs-es-reviews: # PR reviews for Spanish content - raelga - - alexbrand - # glo-pena - electrocucaracha sig-docs-fr-owners: # Admins for French content - remyleone diff --git a/README-de.md b/README-de.md index b570f43671591..b6f4491e70c9b 100644 --- a/README-de.md +++ b/README-de.md @@ -9,7 +9,7 @@ Herzlich willkommen! Dieses Repository enthält alle Assets, die zur Erstellung Sie können auf die Schaltfläche **Fork** im oberen rechten Bereich des Bildschirms klicken, um eine Kopie dieses Repositorys in Ihrem GitHub-Konto zu erstellen. Diese Kopie wird als *Fork* bezeichnet. Nehmen Sie die gewünschten Änderungen an Ihrem Fork vor. Wenn Sie bereit sind, diese Änderungen an uns zu senden, gehen Sie zu Ihrem Fork und erstellen Sie eine neue Pull-Anforderung, um uns darüber zu informieren. -Sobald Ihre Pull-Anfrage erstellt wurde, übernimmt ein Rezensent von Kubernetes die Verantwortung für klares, umsetzbares Feedback. 
Als Eigentümer des Pull-Request **liegt es in Ihrer Verantwortung Ihren Pull-Reqest enstsprechend des Feedbacks, dass Sie vom Kubernetes-Reviewer erhalten haben abzuändern.** Beachten Sie auch, dass Sie am Ende mehr als einen Rezensenten von Kubernetes erhalten, der Ihnen Feedback gibt, oder dass Sie Rückmeldungen von einem Rezensenten von Kubernetes erhalten, der sich von demjenigen unterscheidet, der ursprünglich für das Feedback zugewiesen wurde. In einigen Fällen kann es vorkommen, dass einer Ihrer Prüfer bei Bedarf eine technische Überprüfung von einem [Kubernetes Tech-Reviewer](https://github.com/kubernetes/website/wiki/tech-reviewers) anfordert. Reviewer geben ihr Bestes, um zeitnah Feedback zu geben, die Antwortzeiten können jedoch je nach den Umständen variieren. +Sobald Ihre Pull-Anfrage erstellt wurde, übernimmt ein Rezensent von Kubernetes die Verantwortung für klares, umsetzbares Feedback. Als Eigentümer des Pull-Request **liegt es in Ihrer Verantwortung Ihren Pull-Reqest entsprechend des Feedbacks, dass Sie vom Kubernetes-Reviewer erhalten haben abzuändern.** Beachten Sie auch, dass Sie am Ende mehr als einen Rezensenten von Kubernetes erhalten, der Ihnen Feedback gibt, oder dass Sie Rückmeldungen von einem Rezensenten von Kubernetes erhalten, der sich von demjenigen unterscheidet, der ursprünglich für das Feedback zugewiesen wurde. In einigen Fällen kann es vorkommen, dass einer Ihrer Prüfer bei Bedarf eine technische Überprüfung von einem [Kubernetes Tech-Reviewer](https://github.com/kubernetes/website/wiki/tech-reviewers) anfordert. Reviewer geben ihr Bestes, um zeitnah Feedback zu geben, die Antwortzeiten können jedoch je nach den Umständen variieren. Weitere Informationen zum Beitrag zur Kubernetes-Dokumentation finden Sie unter: diff --git a/README-pl.md b/README-pl.md index 7d89d518cb5f4..5426aef445ca8 100644 --- a/README-pl.md +++ b/README-pl.md @@ -1,60 +1,45 @@ # Dokumentacja projektu Kubernetes -[![Build Status](https://api.travis-ci.org/kubernetes/website.svg?branch=master)](https://travis-ci.org/kubernetes/website) -[![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) - -Witamy! +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-master-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) W tym repozytorium znajdziesz wszystko, czego potrzebujesz do zbudowania [strony internetowej Kubernetesa wraz z dokumentacją](https://kubernetes.io/). Bardzo nam miło, że chcesz wziąć udział w jej współtworzeniu! -## Twój wkład w dokumentację ++ [Twój wkład w dokumentację](#twój-wkład-w-dokumentację) ++ [Informacje o wersjach językowych](#informacje-o-wersjach-językowych) -Możesz kliknąć w przycisk **Fork** w prawym górnym rogu ekranu, aby stworzyć kopię tego repozytorium na swoim koncie GitHub. Taki rodzaj kopii (odgałęzienia) nazywa się *fork*. Zmieniaj w nim, co chcesz, a kiedy będziesz już gotowy/a przesłać te zmiany do nas, przejdź do swojej kopii i stwórz nowy *pull request*, abyśmy zostali o tym poinformowani. +# Jak używać tego repozytorium -Po stworzeniu *pull request*, jeden z recenzentów projektu Kubernetes podejmie się przekazania jasnych wskazówek pozwalających podjąć następne działania. 
Na Tobie, jako właścicielu *pull requesta*, **spoczywa odpowiedzialność za wprowadzenie poprawek zgodnie z uwagami recenzenta.** Może też się zdarzyć, że swoje uwagi zgłosi więcej niż jeden recenzent, lub że recenzję będzie robił ktoś inny, niż ten, kto został przydzielony na początku. W niektórych przypadkach, jeśli zajdzie taka potrzeba, recenzent może poprosić dodatkowo o recenzję jednego z [recenzentów technicznych](https://github.com/kubernetes/website/wiki/Tech-reviewers). Recenzenci zrobią wszystko, aby odpowiedzieć sprawnie, ale konkretny czas odpowiedzi zależy od wielu czynników. +Możesz uruchomić serwis lokalnie poprzez Hugo (Extended version) lub ze środowiska kontenerowego. Zdecydowanie zalecamy korzystanie z kontenerów, bo dzięki temu lokalna wersja będzie spójna z tym, co jest na oficjalnej stronie. -Więcej informacji na temat współpracy przy tworzeniu dokumentacji znajdziesz na stronach: +## Wymagania wstępne -* [Jak rozpocząć współpracę](https://kubernetes.io/docs/contribute/start/) -* [Podgląd wprowadzanych zmian w dokumentacji](http://kubernetes.io/docs/contribute/intermediate#view-your-changes-locally) -* [Szablony stron](https://kubernetes.io/docs/contribute/style/page-content-types/) -* [Styl pisania dokumentacji](http://kubernetes.io/docs/contribute/style/style-guide/) -* [Lokalizacja dokumentacji Kubernetes](https://kubernetes.io/docs/contribute/localization/) - -## Różne wersje językowe `README.md` +Aby móc skorzystać z tego repozytorium, musisz lokalnie zainstalować: -| | | -|----------------------------------------|----------------------------------------| -| [README po angielsku](README.md) | [README po francusku](README-fr.md) | -| [README po koreańsku](README-ko.md) | [README po niemiecku](README-de.md) | -| [README po portugalsku](README-pt.md) | [README w hindi](README-hi.md) | -| [README po hiszpańsku](README-es.md) | [README po indonezyjsku](README-id.md) | -| [README po chińsku](README-zh.md) | [README po japońsku](README-ja.md) | -| [README po wietnamsku](README-vi.md) | [README po rosyjsku](README-ru.md) | -| [README po włosku](README-it.md) | [README po ukraińsku](README-uk.md) | -| | | +- [npm](https://www.npmjs.com/) +- [Go](https://golang.org/) +- [Hugo (Extended version)](https://gohugo.io/) +- Środowisko obsługi kontenerów, np. [Docker-a](https://www.docker.com/). -## Jak uruchomić lokalną kopię strony przy pomocy Dockera? +Przed rozpoczęciem zainstaluj niezbędne zależności. Sklonuj repozytorium i przejdź do odpowiedniego katalogu: -Zalecaną metodą uruchomienia serwisu internetowego Kubernetesa lokalnie jest użycie specjalnego obrazu [Dockera](https://docker.com), który zawiera generator stron statycznych [Hugo](https://gohugo.io). +``` +git clone https://github.com/kubernetes/website.git +cd website +``` -> Użytkownicy Windows będą potrzebowali dodatkowych narzędzi, które mogą zainstalować przy pomocy [Chocolatey](https://chocolatey.org). +Strona Kubernetesa używa [Docsy Hugo theme](https://github.com/google/docsy#readme). Nawet jeśli planujesz uruchomić serwis w środowisku kontenerowym, zalecamy pobranie podmodułów i innych zależności za pomocą polecenia: -```bash -choco install make +``` +# pull in the Docsy submodule +git submodule update --init --recursive --depth 1 ``` -> Jeśli wolisz uruchomić serwis lokalnie bez Dockera, przeczytaj [jak uruchomić serwis lokalnie przy pomocy Hugo](#jak-uruchomić-lokalną-kopię-strony-przy-pomocy-hugo) poniżej. 
+## Uruchomienie serwisu w kontenerze -Jeśli [zainstalowałeś i uruchomiłeś](https://www.docker.com/get-started) już Dockera, zbuduj obraz `kubernetes-hugo` lokalnie: +Aby zbudować i uruchomić serwis wewnątrz środowiska kontenerowego, wykonaj następujące polecenia: -```bash -make container-image ``` - -Po zbudowaniu obrazu, możesz uruchomić serwis lokalnie: - -```bash +make container-image make container-serve ``` @@ -62,29 +47,106 @@ Aby obejrzeć zawartość serwisu otwórz w przeglądarce adres http://localhost ## Jak uruchomić lokalną kopię strony przy pomocy Hugo? -Zajrzyj do [oficjalnej dokumentacji Hugo](https://gohugo.io/getting-started/installing/) po instrukcję instalacji. Upewnij się, że instalujesz rozszerzoną wersję Hugo, określoną przez zmienną środowiskową `HUGO_VERSION` w pliku [`netlify.toml`](netlify.toml#L9). +Upewnij się, że zainstalowałeś odpowiednią wersję Hugo "extended", określoną przez zmienną środowiskową `HUGO_VERSION` w pliku [`netlify.toml`](netlify.toml#L10). -Aby uruchomić serwis lokalnie po instalacji Hugo, napisz: +Aby uruchomić i przetestować serwis lokalnie, wykonaj: ```bash +# install dependencies +npm ci make serve ``` Zostanie uruchomiony lokalny serwer Hugo na porcie 1313. Otwórz w przeglądarce adres http://localhost:1313, aby obejrzeć zawartość serwisu. Po każdej zmianie plików źródłowych, Hugo automatycznie aktualizuje stronę i odświeża jej widok w przeglądarce. -## Społeczność, listy dyskusyjne, uczestnictwo i wsparcie +## Budowanie dokumentacji źródłowej API + +Budowanie dokumentacji źródłowej API zostało opisane w [angielskiej wersji pliku README.md](README.md#building-the-api-reference-pages). + +## Rozwiązywanie problemów +### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version + +Z przyczyn technicznych, Hugo jest rozprowadzany w dwóch wersjach. Aktualny serwis używa tylko wersji **Hugo Extended**. Na stronie z [wydaniami](https://github.com/gohugoio/hugo/releases) poszukaj archiwum z `extended` w nazwie. Dla potwierdzenia, uruchom `hugo version` i poszukaj słowa `extended`. + +### Błąd w środowisku macOS: "too many open files" + +Jeśli po uruchomieniu `make serve` na macOS widzisz następujący błąd: + +``` +ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files +make: *** [serve] Error 1 +``` + +sprawdź aktualny limit otwartych plików: -Zajrzyj na stronę [społeczności](http://kubernetes.io/community/), aby dowiedzieć się, jak możesz zaangażować się w jej działania. +`launchctl limit maxfiles` + +Uruchom następujące polecenia: (na podstawie https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c): + +```shell +#!/bin/sh + +# These are the original gist links, linking to my gists now. 
+# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist + +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist + +sudo mv limit.maxfiles.plist /Library/LaunchDaemons +sudo mv limit.maxproc.plist /Library/LaunchDaemons + +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist + +sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist +``` + +Przedstawiony sposób powinien działać dla MacOS w wersji Catalina i Mojave. + + +# Zaangażowanie w prace SIG Docs + +O społeczności SIG Docs i terminach spotkań dowiesz z [jej strony](https://github.com/kubernetes/community/tree/master/sig-docs#meetings). Możesz kontaktować się z gospodarzami projektu za pomocą: -* [Komunikatora Slack](https://kubernetes.slack.com/messages/sig-docs) -* [List dyskusyjnych](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) +- [Komunikatora Slack](https://kubernetes.slack.com/messages/sig-docs) [Tutaj możesz dostać zaproszenie do tej grupy Slack-a](https://slack.k8s.io/) +- [List dyskusyjnych](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) + +# Twój wkład w dokumentację + +Możesz kliknąć w przycisk **Fork** w prawym górnym rogu ekranu, aby stworzyć kopię tego repozytorium na swoim koncie GitHub. Taki rodzaj kopii (odgałęzienia) nazywa się *fork*. Zmieniaj w nim, co chcesz, a kiedy będziesz już gotowy/a przesłać te zmiany do nas, przejdź do swojej kopii i stwórz nowy *pull request*, abyśmy zostali o tym poinformowani. + +Po stworzeniu *pull request*, jeden z recenzentów projektu Kubernetes podejmie się przekazania jasnych wskazówek pozwalających podjąć następne działania. Na Tobie, jako właścicielu *pull requesta*, **spoczywa odpowiedzialność za wprowadzenie poprawek zgodnie z uwagami recenzenta.** + +Może też się zdarzyć, że swoje uwagi zgłosi więcej niż jeden recenzent, lub że recenzję będzie robił ktoś inny, niż ten, kto został przydzielony na początku. + +W niektórych przypadkach, jeśli zajdzie taka potrzeba, recenzent może poprosić dodatkowo o recenzję jednego z [recenzentów technicznych](https://github.com/kubernetes/website/wiki/Tech-reviewers). Recenzenci zrobią wszystko, aby odpowiedzieć sprawnie, ale konkretny czas odpowiedzi zależy od wielu czynników. 
+ +Więcej informacji na temat współpracy przy tworzeniu dokumentacji znajdziesz na stronach: + +* [Udział w rozwijaniu dokumentacji](https://kubernetes.io/docs/contribute/) +* [Rodzaje stron](https://kubernetes.io/docs/contribute/style/page-content-types/) +* [Styl pisania dokumentacji](http://kubernetes.io/docs/contribute/style/style-guide/) +* [Lokalizacja dokumentacji Kubernetes](https://kubernetes.io/docs/contribute/localization/) + +# Różne wersje językowe `README.md` + +| Język | Język | +|---|---| +| [angielski](README.md) | [francuski](README-fr.md) | +| [koreański](README-ko.md) | [niemiecki](README-de.md) | +| [portugalski](README-pt.md) | [hindi](README-hi.md) | +| [hiszpański](README-es.md) | [indonezyjski](README-id.md) | +| [chiński](README-zh.md) | [japoński](README-ja.md) | +| [wietnamski](README-vi.md) | [rosyjski](README-ru.md) | +| [włoski](README-it.md) | [ukraiński](README-uk.md) | -### Zasady postępowania +# Zasady postępowania -Udział w działaniach społeczności Kubernetes jest regulowany przez [Kodeks postępowania](code-of-conduct.md). +Udział w działaniach społeczności Kubernetesa jest regulowany przez [Kodeks postępowania CNCF](https://github.com/cncf/foundation/blob/master/code-of-conduct-languages/pl.md). -## Dziękujemy! +# Dziękujemy! Kubernetes rozkwita dzięki zaangażowaniu społeczności — doceniamy twój wkład w tworzenie naszego serwisu i dokumentacji! diff --git a/README.md b/README.md index 8ec876eef283d..d548cd32aab7a 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,8 @@ make container-image make container-serve ``` +If you see errors, it probably means that the hugo container did not have enough computing resources available. To solve it, increase the amount of allowed CPU and memory usage for Docker on your machine ([MacOSX](https://docs.docker.com/docker-for-mac/#resources) and [Windows](https://docs.docker.com/docker-for-windows/#resources)). + Open up your browser to http://localhost:1313 to view the website. As you make changes to the source files, Hugo updates the website and forces a browser refresh. ## Running the website locally using Hugo diff --git a/api-ref-generator b/api-ref-generator index ce97454e557b2..78e64febda1b5 160000 --- a/api-ref-generator +++ b/api-ref-generator @@ -1 +1 @@ -Subproject commit ce97454e557b2b164f77326cb06ef619ab623599 +Subproject commit 78e64febda1b53cafc79979c5978b42162cea276 diff --git a/config.toml b/config.toml index c9efbe9c561d7..7f5d255b9cea3 100644 --- a/config.toml +++ b/config.toml @@ -196,7 +196,7 @@ fullversion = "v1.20.7" version = "v1.20" githubbranch = "v1.20.7" docsbranch = "release-1.20" -url = "https://v1-20.kubernetes.io" +url = "https://v1-20.docs.kubernetes.io" [[params.versions]] fullversion = "v1.19.11" @@ -391,15 +391,15 @@ time_format_blog = "02.01.2006" # A list of language codes to look for untranslated content, ordered from left to right. language_alternatives = ["en"] -[languages.pt] +[languages.pt-br] title = "Kubernetes" description = "Orquestração de contêineres em nível de produção" languageName ="Português" weight = 9 -contentDir = "content/pt" +contentDir = "content/pt-br" languagedirection = "ltr" -[languages.pt.params] +[languages.pt-br.params] time_format_blog = "02.01.2006" # A list of language codes to look for untranslated content, ordered from left to right. 
language_alternatives = ["en"] diff --git a/content/de/docs/concepts/architecture/nodes.md b/content/de/docs/concepts/architecture/nodes.md index b790e68035be7..933346a4d5dfb 100644 --- a/content/de/docs/concepts/architecture/nodes.md +++ b/content/de/docs/concepts/architecture/nodes.md @@ -147,7 +147,8 @@ Die zweite ist, die interne Node-Liste des Node Controllers mit der Liste der ve Wenn ein Node in einer Cloud-Umgebung ausgeführt wird und sich in einem schlechten Zustand befindet, fragt der Node Controller den Cloud-Anbieter, ob die virtuelle Maschine für diesen Node noch verfügbar ist. Wenn nicht, löscht der Node Controller den Node aus seiner Node-Liste. Der dritte ist die Überwachung des Zustands der Nodes. Der Node Controller ist dafür verantwortlich, -die NodeReady-Bedingung von NodeStatus auf ConditionUnknown zu aktualisieren, wenn ein wenn ein Node unerreichbar wird (der Node Controller empfängt aus irgendeinem Grund keine Herzschläge mehr, z.B. weil der Node heruntergefahren ist) und später alle Pods aus dem Node zu entfernen (und diese ordnungsgemäss zu beenden), wenn der Node weiterhin unzugänglich ist. (Die Standard-Timeouts sind 40s, um ConditionUnknown zu melden und 5 Minuten, um mit der Evakuierung der Pods zu beginnen). +die NodeReady-Bedingung von NodeStatus auf ConditionUnknown zu aktualisieren, wenn ein Node unerreichbar wird (der Node Controller empfängt aus irgendeinem Grund keine Herzschläge mehr, z.B. weil der Node heruntergefahren ist) und später alle Pods aus dem Node zu entfernen (und diese ordnungsgemäss zu beenden), wenn der Node weiterhin unzugänglich ist. (Die Standard-Timeouts sind 40s, um ConditionUnknown zu melden und 5 Minuten, um mit der Evakuierung der Pods zu beginnen). + Der Node Controller überprüft den Zustand jedes Nodes alle `--node-monitor-period` Sekunden. diff --git a/content/de/docs/concepts/workloads/pods/_index.md b/content/de/docs/concepts/workloads/pods/_index.md new file mode 100644 index 0000000000000..956190e6c760b --- /dev/null +++ b/content/de/docs/concepts/workloads/pods/_index.md @@ -0,0 +1,369 @@ +--- +title: Pods +content_type: concept +weight: 10 +no_list: true +card: + name: concepts + weight: 60 +--- + + + +_Pods_ sind die kleinsten einsetzbaren Einheiten, die in Kubernetes +erstellt und verwaltet werden können. + +Ein _Pod_ (übersetzt Gruppe/Schote, wie z. B. eine Gruppe von Walen oder eine +Erbsenschote) ist eine Gruppe von einem oder mehreren +{{< glossary_tooltip text="Containern" term_id="container" >}} mit gemeinsam +genutzten Speicher- und Netzwerkressourcen und einer Spezifikation für die +Ausführung der Container. Die Ressourcen eines Pods befinden sich immer auf dem +gleichen (virtuellen) Server, werden gemeinsam geplant und in einem +gemeinsamen Kontext ausgeführt. Ein Pod modelliert einen anwendungsspezifischen +"logischen Server": Er enthält eine oder mehrere containerisierte Anwendungen, +die relativ stark voneinander abhängen. +In Nicht-Cloud-Kontexten sind Anwendungen, die auf +demselben physischen oder virtuellen Server ausgeführt werden, vergleichbar zu +Cloud-Anwendungen, die auf demselben logischen Server ausgeführt werden. + +Ein Pod kann neben Anwendungs-Containern auch sogenannte +[Initialisierungs-Container](/docs/concepts/workloads/pods/init-containers/) +enthalten, die beim Starten des Pods ausgeführt werden. +Es können auch +kurzlebige/[ephemere Container](/docs/concepts/workloads/pods/ephemeral-containers/) +zum Debuggen gestartet werden, wenn dies der Cluster anbietet. + + + +## Was ist ein Pod? 
+ +{{< note >}} +Obwohl Kubernetes abgesehen von [Docker](https://www.docker.com/) auch andere +{{}} unterstützt, ist Docker am bekanntesten und + es ist hilfreich, Pods mit der Terminologie von Docker zu beschreiben. +{{< /note >}} + +Der gemeinsame Kontext eines Pods besteht aus einer Reihe von Linux-Namespaces, +Cgroups und möglicherweise anderen Aspekten der Isolation, also die gleichen +Dinge, die einen Dockercontainer isolieren. Innerhalb des Kontexts eines Pods +können die einzelnen Anwendungen weitere Unterisolierungen haben. + +Im Sinne von Docker-Konzepten ähnelt ein Pod einer Gruppe von Docker-Containern, +die gemeinsame Namespaces und Dateisystem-Volumes nutzen. + +## Pods verwenden + +Normalerweise müssen keine Pods erzeugt werden, auch keine Singleton-Pods. +Stattdessen werden sie mit Workload-Ressourcen wie {{}} oder {{}} erzeugt. Für Pods, die von einem Systemzustand +abhängen, ist die Nutzung von {{}}-Ressourcen zu erwägen. + +Pods in einem Kubernetes-Cluster werden hauptsächlich auf zwei Arten verwendet: + +* **Pods, die einen einzelnen Container ausführen**. Das +"Ein-Container-per-Pod"-Modell ist der häufigste Kubernetes-Anwendungsfall. In +diesem Fall kannst du dir einen einen Pod als einen Behälter vorstellen, der einen +einzelnen Container enthält; Kubernetes verwaltet die Pods anstatt die +Container direkt zu verwalten. +* **Pods, in denen mehrere Container ausgeführt werden, die zusammenarbeiten +müssen**. Wenn eine Softwareanwendung aus co-lokaliserten Containern besteht, +die sich gemeinsame Ressourcen teilen und stark voneinander abhängen, kann ein +Pod die Container verkapseln. +Diese Container bilden eine einzelne zusammenhängende +Serviceeinheit, z. B. ein Container, der Daten in einem gemeinsam genutzten +Volume öffentlich verfügbar macht, während ein separater _Sidecar_-Container +die Daten aktualisiert. Der Pod fasst die Container, die Speicherressourcen +und eine kurzlebiges Netzwerk-Identität als eine Einheit zusammen. + +{{< note >}} +Das Gruppieren mehrerer gemeinsam lokalisierter und gemeinsam verwalteter +Container in einem einzigen Pod ist ein relativ fortgeschrittener +Anwendungsfall. Du solltest diese Architektur nur in bestimmten Fällen +verwenden, wenn deine Container stark voneinander abhängen. +{{< /note >}} + +Jeder Pod sollte eine einzelne Instanz einer gegebenen Anwendung ausführen. Wenn +du deine Anwendung horizontal skalieren willst (um mehr Instanzen auszuführen +und dadurch mehr Gesamtressourcen bereitstellen), solltest du mehrere Pods +verwenden, einen für jede Instanz. +In Kubernetes wird dies typischerweise als Replikation bezeichnet. +Replizierte Pods werden normalerweise als eine Gruppe durch eine +Workload-Ressource und deren +{{}} erstellt +und verwaltet. + +Der Abschnitt [Pods und Controller](#pods-und-controller) beschreibt, wie +Kubernetes Workload-Ressourcen und deren Controller verwendet, um Anwendungen +zu skalieren und zu heilen. + +### Wie Pods mehrere Container verwalten + +Pods unterstützen mehrere kooperierende Prozesse (als Container), die eine +zusammenhängende Serviceeinheit bilden. Kubernetes plant und stellt automatisch +sicher, dass sich die Container in einem Pod auf demselben physischen oder +virtuellen Server im Cluster befinden. Die Container können Ressourcen und +Abhängigkeiten gemeinsam nutzen, miteinander kommunizieren und +ferner koordinieren wann und wie sie beendet werden. + +Zum Beispiel könntest du einen Container haben, der als Webserver für Dateien in +einem gemeinsamen Volume arbeitet. 
Und ein separater "Sidecar" -Container +aktualisiert die Daten von einer externen Datenquelle, siehe folgenden +Abbildung: + +{{< figure src="/images/docs/pod.svg" alt="Pod-Beispieldiagramm" width="50%" >}} + +Einige Pods haben sowohl {{}} als auch {{}}. +Initialisierungs-Container werden gestartet und beendet bevor die +Anwendungs-Container gestartet werden. + +Pods stellen standardmäßig zwei Arten von gemeinsam Ressourcen für die +enthaltenen Container bereit: +[Netzwerk](#pod-netzwerk) und [Speicher](#datenspeicherung-in-pods). + + +## Mit Pods arbeiten + +Du wirst selten einzelne Pods direkt in Kubernetes erstellen, selbst +Singleton-Pods. Das liegt daran, dass Pods als relativ kurzlebige +Einweg-Einheiten konzipiert sind. Wann Ein Pod erstellt wird (entweder direkt +von Ihnen oder indirekt von einem +{{}}), wird die +Ausführung auf einem {{}} in Ihrem Cluster +geplant. Der Pod bleibt auf diesem (virtuellen) Server, bis entweder der Pod die +Ausführung beendet hat, das Pod-Objekt gelöscht wird, der Pod aufgrund +mangelnder Ressourcen *evakuiert* wird oder oder der Node ausfällt. + +{{< note >}} +Das Neustarten eines Containers in einem Pod sollte nicht mit dem Neustarten +eines Pods verwechselt werden. Ein Pod ist kein Prozess, sondern eine Umgebung +zur Ausführung von Containern. Ein Pod bleibt bestehen bis er gelöscht wird. +{{< /note >}} + +Stelle beim Erstellen des Manifests für ein Pod-Objekt sicher, dass der +angegebene Name ein gültiger +[DNS-Subdomain-Name](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) +ist. + +### Pods und Controller + +Mit Workload-Ressourcen kannst du mehrere Pods erstellen und verwalten. Ein +Controller für die Ressource kümmert sich um Replikation, Roll-Out sowie +automatische Wiederherstellung im Fall von versagenden Pods. Wenn beispielsweise ein Node +ausfällt, bemerkt ein Controller, dass die Pods auf dem Node nicht mehr laufen +und plant die Ausführung eines Ersatzpods auf einem funktionierenden Node. +Hier sind einige Beispiele für Workload-Ressourcen, die einen oder mehrere Pods +verwalten: + +* {{< glossary_tooltip text="Deployment" term_id="deployment" >}} +* {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}} +* {{< glossary_tooltip text="DaemonSet" term_id="daemonset" >}} + +### Pod Vorlagen + +Controller für +{{}}-Ressourcen +erstellen Pods von einer _Pod Vorlage_ und verwalten diese Pods für dich. + +Pod Vorlagen sind Spezifikationen zum Erstellen von Pods und sind in +Workload-Ressourcen enthalten wie z. B. +[Deployments](/docs/concepts/workloads/controllers/deployment/), +[Jobs](/docs/concepts/workloads/controllers/job/), and +[DaemonSets](/docs/concepts/workloads/controllers/daemonset/). + +Jeder Controller für eine Workload-Ressource verwendet die Pod Vorlage innerhalb +des Workload-Objektes, um Pods zu erzeugen. Die Pod Vorlage ist Teil des +gewünschten Zustands der Workload-Ressource, mit der du deine Anwendung +ausgeführt hast. + +Das folgende Beispiel ist ein Manifest für einen einfachen Job mit einer +`Vorlage`, die einen Container startet. Der Container in diesem Pod druckt +eine Nachricht und pausiert dann. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: hello +spec: + template: + # Dies is the Pod Vorlage + spec: + containers: + - name: hello + image: busybox + command: ['sh', '-c', 'echo "Hello, Kubernetes!" 
&& sleep 3600'] + restartPolicy: OnFailure + # Die Pod Vorlage endet hier +``` +Das Ändern der Pod Vorlage oder der Wechsel zu einer neuen Pod Vorlage hat keine +direkten Auswirkungen auf bereits existierende Pods. Wenn du die Pod Vorlage für +eine Workload-Ressource änderst, dann muss diese Ressource die Ersatz-Pods +erstellen, welche die aktualisierte Vorlage verwenden. + +Beispielsweise stellt der StatefulSet-Controller sicher, dass für jedes +StatefulSet-Objekt die ausgeführten Pods mit der aktueller Pod Vorlage +übereinstimmen. Wenn du das StatefulSet bearbeitest und die Vorlage änderst, +beginnt das StatefulSet mit der Erstellung neuer Pods basierend auf der +aktualisierten Vorlage. Schließlich werden alle alten Pods durch neue Pods +ersetzt, und das Update ist abgeschlossen. + +Jede Workload-Ressource implementiert eigenen Regeln für die Umsetzung von +Änderungen der Pod Vorlage. Wenn du mehr über StatefulSet erfahren möchtest, +dann lese die Seite +[Update-Strategien](/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) +im Tutorial StatefulSet Basics. + + +Auf Nodes beobachtet oder verwaltet das +{{< glossary_tooltip term_id="kubelet" text="Kubelet" >}} +nicht direkt die Details zu Pod Vorlagen und Updates. Diese Details sind +abstrahiert. Die Abstraktion und Trennung von Aufgaben vereinfacht die +Systemsemantik und ermöglicht so das Verhalten des Clusters zu ändern ohne +vorhandenen Code zu ändern. + +## Pod Update und Austausch + +Wie im vorherigen Abschnitt erwähnt, erstellt der Controller neue Pods basierend +auf der aktualisierten Vorlage, wenn die Pod Vorlage für eine Workload-Ressource +geändert wird anstatt die vorhandenen Pods zu aktualisieren oder zu patchen. + +Kubernetes hindert dich nicht daran, Pods direkt zu verwalten. Es ist möglich, +einige Felder eines laufenden Pods zu aktualisieren. Allerdings haben +Pod-Aktualisierungsvorgänge wie zum Beispiel +[`patch`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#patch-pod-v1-core), +und +[`replace`](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#replace-pod-v1-core) +einige Einschränkungen: + +- Die meisten Metadaten zu einem Pod können nicht verändert werden. Zum Beispiel kannst + du nicht die Felder `namespace`, `name`, `uid`, oder `creationTimestamp` + ändern. Das `generation`-Feld muss eindeutig sein. Es werden nur Aktualisierungen + akzeptiert, die den Wert des Feldes inkrementieren. +- Wenn das Feld `metadata.deletionTimestamp` gesetzt ist, kann kein neuer + Eintrag zur Liste `metadata.finalizers` hinzugefügt werden. +- Pod-Updates dürfen keine Felder ändern, die Ausnahmen sind + `spec.containers[*].image`, + `spec.initContainers[*].image`,` spec.activeDeadlineSeconds` oder + `spec.tolerations`. Für `spec.tolerations` kannnst du nur neue Einträge + hinzufügen. +- Für `spec.activeDeadlineSeconds` sind nur zwei Änderungen erlaubt: + + 1. ungesetztes Feld in eine positive Zahl + 1. positive Zahl in eine kleinere positive Zahl, die nicht negativ ist + +## Gemeinsame Nutzung von Ressourcen und Kommunikation + +Pods ermöglichen den Datenaustausch und die Kommunikation zwischen den +Containern, die im Pod enthalten sind. + +### Datenspeicherung in Pods + +Ein Pod kann eine Reihe von gemeinsam genutzten Speicher- +{{}} spezifizieren. Alle +Container im Pod können auf die gemeinsamen Volumes zugreifen und dadurch Daten +austauschen. Volumes ermöglichen auch, dass Daten ohne Verlust gespeichert +werden, falls einer der Container neu gestartet werden muss. 
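Zur Veranschaulichung ein minimales, hypothetisches Beispiel (Name, Images und Pfade sind frei gewählt und nicht Teil der Originalseite): zwei Container in einem Pod teilen sich ein `emptyDir`-Volume.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: volume-beispiel   # hypothetischer Name
spec:
  volumes:
  - name: shared-data
    emptyDir: {}
  containers:
  - name: webserver
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: sidecar
    image: busybox
    # schreibt regelmäßig Daten in das gemeinsam genutzte Volume
    command: ["sh", "-c", "while true; do date > /pod-data/index.html; sleep 10; done"]
    volumeMounts:
    - name: shared-data
      mountPath: /pod-data
```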
+Im Kapitel [Datenspeicherung](/docs/concepts/storage/) findest du weitere +Informationen, wie Kubernetes gemeinsam genutzten Speicher implementiert und +Pods zur Verfügung stellt. + +### Pod-Netzwerk + +Jedem Pod wird für jede Adressenfamilie eine eindeutige IP-Adresse zugewiesen. +Jeder Container in einem Pod nutzt den gemeinsamen Netzwerk-Namespace, +einschließlich der IP-Adresse und der Ports. In einem Pod (und **nur** dann) +können die Container, die zum Pod gehören, über `localhost` miteinander +kommunizieren. Wenn Container in einem Pod mit Entitäten *außerhalb des Pods* +kommunizieren, müssen sie koordinieren, wie die gemeinsam genutzten +Netzwerkressourcen (z. B. Ports) verwenden werden. Innerhalb eines Pods teilen +sich Container eine IP-Adresse und eine Reihe von Ports und können sich +gegenseitig über `localhost` finden. Die Container in einem Pod können auch die +üblichen Kommunikationsverfahren zwischen Prozessen nutzen, wie z. B. +SystemV-Semaphoren oder "POSIX Shared Memory". Container in verschiedenen Pods +haben unterschiedliche IP-Adressen und können nicht per IPC ohne +[spezielle Konfiguration](/docs/concepts/policy/pod-security-policy/) +kommunizieren. Container, die mit einem Container in einem anderen Pod +interagieren möchten, müssen IP Netzwerke verwenden. + +Für die Container innerhalb eines Pods stimmt der "hostname" mit dem +konfigurierten `Namen` des Pods überein. Mehr dazu im Kapitel +[Netzwerke](/docs/concepts/cluster-administration/networking/). + +## Privilegierter Modus für Container + +Jeder Container in einem Pod kann den privilegierten Modus aktivieren, indem +das Flag `privileged` im +[Sicherheitskontext](/docs/tasks/configure-pod-container/security-context/) +der Container-Spezifikation verwendet wird. +Dies ist nützlich für Container, die Verwaltungsfunktionen des Betriebssystems +verwenden möchten, z. B. das Manipulieren des Netzwerk-Stacks oder den Zugriff +auf Hardware. Prozesse innerhalb eines privilegierten Containers erhalten fast +die gleichen Rechte wie sie Prozessen außerhalb eines Containers zur Verfügung +stehen. + +{{< note >}} +Ihre +{{}} +muss das Konzept eines privilegierten Containers unterstützen, damit diese +Einstellung relevant ist. +{{< /note >}} + + +## Statische Pods + +_Statische Pods_ werden direkt vom Kubelet-Daemon auf einem bestimmten Node +verwaltet ohne dass sie vom +{{}} überwacht +werden. + +Die meisten Pods werden von der Kontrollebene verwaltet (z. B. +{{< glossary_tooltip text="Deployment" term_id="deployment" >}}). Aber für +statische Pods überwacht das Kubelet jeden statischen Pod direkt (und startet +ihn neu, wenn er ausfällt). + +Statische Pods sind immer an ein {{}} auf +einem bestimmten Node gebunden. Der Hauptanwendungsfall für statische Pods +besteht darin, eine selbst gehostete Steuerebene auszuführen. Mit anderen +Worten: Das Kubelet dient zur Überwachung der einzelnen +[Komponenten der Kontrollebene](/docs/concepts/overview/components/#control-plane-components). + +Das Kubelet versucht automatisch auf dem Kubernetes API-Server für jeden +statischen Pod einen spiegelbildlichen Pod +(im Englischen: {{}}) +zu erstellen. +Das bedeutet, dass die auf einem Node ausgeführten Pods auf dem API-Server +sichtbar sind jedoch von dort nicht gesteuert werden können. + +## {{% heading "whatsnext" %}} + +* Verstehe den + [Lebenszyklus eines Pods](/docs/concepts/workloads/pods/pod-lifecycle/). 
+* Erfahre mehr über [RuntimeClass](/docs/concepts/containers/runtime-class/) + und wie du damit verschiedene Pods mit unterschiedlichen + Container-Laufzeitumgebungen konfigurieren kannst. +* Mehr zum Thema + [Restriktionen für die Verteilung von Pods](/docs/concepts/workloads/pods/pod-topology-spread-constraints/). +* Lese + [Pod-Disruption-Budget](/docs/concepts/workloads/pods/disruptions/) + und wie du es verwenden kannst, um die Verfügbarkeit von Anwendungen bei + Störungen zu verwalten. Die + [Pod](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core) + -Objektdefinition beschreibt das Objekt im Detail. +* [The Distributed System Toolkit: Patterns for Composite Containers](https://kubernetes.io/blog/2015/06/the-distributed-system-toolkit-patterns) + erläutert allgemeine Layouts für Pods mit mehr als einem Container. + +Um den Hintergrund zu verstehen, warum Kubernetes eine gemeinsame Pod-API in +andere Ressourcen, wie z. B. +{{< glossary_tooltip text="StatefulSets" term_id="statefulset" >}} +oder {{< glossary_tooltip text="Deployments" term_id="deployment" >}} einbindet, +kannst du Artikel zu früheren Technologien lesen, unter anderem: + * [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema) + * [Borg](https://research.google.com/pubs/pub43438.html) + * [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html) + * [Omega](https://research.google/pubs/pub41684/) + * [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/). \ No newline at end of file diff --git a/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md b/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md index 2474d07a95052..80926b6bc4d29 100644 --- a/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md +++ b/content/en/blog/_posts/2019-11-26-cloud-native-java-controller-sdk.md @@ -58,7 +58,7 @@ Take maven project as example, adding the following dependencies into your depen Then we can make use of the provided builder libraries to write your own controller. For example, the following one is a simple controller prints out node information -on watch notification, see complete example [here](https://github.com/kubernetes-client/java/blob/master/examples/src/main/java/io/kubernetes/client/examples/ControllerExample.java): +on watch notification, see complete example [here](https://github.com/kubernetes-client/java/blob/master/examples/examples-release-13/src/main/java/io/kubernetes/client/examples/ControllerExample.java): ```java ... diff --git a/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md b/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md index f5c6761eab373..983d7ba31ee09 100644 --- a/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md +++ b/content/en/blog/_posts/2019-12-09-kubernetes-1.17-release-announcement.md @@ -31,9 +31,9 @@ Standard labels are used by Kubernetes components to support some features. For The labels are reaching general availability in this release. Kubernetes components have been updated to populate the GA and beta labels and to react to both. However, if you are using the beta labels in your pod specs for features such as node affinity, or in your custom controllers, we recommend that you start migrating them to the new GA labels. 
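For example (an illustrative sketch, not part of the original announcement; zone and instance-type values are placeholders), a node affinity rule migrated from the beta labels to the GA labels might look like this:

```yaml
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
      - matchExpressions:
        # formerly failure-domain.beta.kubernetes.io/zone
        - key: topology.kubernetes.io/zone
          operator: In
          values: ["us-east-1a"]
        # formerly beta.kubernetes.io/instance-type
        - key: node.kubernetes.io/instance-type
          operator: In
          values: ["m5.large"]
```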
You can find the documentation for the new labels here: -- [node.kubernetes.io/instance-type](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#nodekubernetesioinstance-type) -- [topology.kubernetes.io/region](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesioregion) -- [topology.kubernetes.io/zone](https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#topologykubernetesiozone) +- [node.kubernetes.io/instance-type](/docs/reference/labels-annotations-taints/#nodekubernetesioinstance-type) +- [topology.kubernetes.io/region](/docs/reference/labels-annotations-taints/#topologykubernetesioregion) +- [topology.kubernetes.io/zone](/docs/reference/labels-annotations-taints/#topologykubernetesiozone) ## Volume Snapshot Moves to Beta diff --git a/content/en/blog/_posts/2020-09-03-warnings/index.md b/content/en/blog/_posts/2020-09-03-warnings/index.md index d88dd8328dba8..50576c032939e 100644 --- a/content/en/blog/_posts/2020-09-03-warnings/index.md +++ b/content/en/blog/_posts/2020-09-03-warnings/index.md @@ -325,7 +325,7 @@ Now that we have a way to communicate helpful information to users in context, we're already considering other ways we can use this to improve people's experience with Kubernetes. A couple areas we're looking at next are warning about [known problematic values](http://issue.k8s.io/64841#issuecomment-395141013) we cannot reject outright for compatibility reasons, and warning about use of deprecated fields or field values -(like selectors using beta os/arch node labels, [deprecated in v1.14](/docs/reference/kubernetes-api/labels-annotations-taints/#beta-kubernetes-io-arch-deprecated)). +(like selectors using beta os/arch node labels, [deprecated in v1.14](/docs/reference/labels-annotations-taints/#beta-kubernetes-io-arch-deprecated)). I'm excited to see progress in this area, continuing to make it easier to use Kubernetes. --- diff --git a/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md b/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md index e0fef7ab90f2c..deb459c4bea0d 100644 --- a/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md +++ b/content/en/blog/_posts/2020-12-08-kubernetes-release-1.20.md @@ -64,7 +64,7 @@ The Kubernetes community has written a [detailed blog post about deprecation](ht A longstanding bug regarding exec probe timeouts that may impact existing pod definitions has been fixed. Prior to this fix, the field `timeoutSeconds` was not respected for exec probes. Instead, probes would run indefinitely, even past their configured deadline, until a result was returned. With this change, the default value of `1 second` will be applied if a value is not specified and existing pod definitions may no longer be sufficient if a probe takes longer than one second. A feature gate, called `ExecProbeTimeout`, has been added with this fix that enables cluster operators to revert to the previous behavior, but this will be locked and removed in subsequent releases. In order to revert to the previous behavior, cluster operators should set this feature gate to `false`. -Please review the updated documentation regarding [configuring probes](docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) for more details. +Please review the updated documentation regarding [configuring probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) for more details. 
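To illustrate the change (a hypothetical probe fragment, not taken from the release notes), an exec probe that previously ran without an effective deadline should now declare the timeout it actually needs:

```yaml
livenessProbe:
  exec:
    command: ["cat", "/tmp/healthy"]
  periodSeconds: 10
  # Before this fix, timeoutSeconds was silently ignored for exec probes.
  # With the fix in place, an unset value defaults to 1 second, so set it
  # explicitly if your command can take longer than that.
  timeoutSeconds: 5
```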
## Other Updates diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg new file mode 100644 index 0000000000000..9357c89d40a74 --- /dev/null +++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/controller-flowchart.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md new file mode 100644 index 0000000000000..bb9214b027aa1 --- /dev/null +++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/index.md @@ -0,0 +1,105 @@ +--- +layout: blog +title: 'Kubernetes 1.21: CronJob Reaches GA' +date: 2021-04-09 +slug: kubernetes-release-1.21-cronjob-ga +--- + + **Authors:** Alay Patel (Red Hat), and Maciej Szulik (Red Hat) + +In Kubernetes v1.21, the +[CronJob](/docs/concepts/workloads/controllers/cron-jobs/) resource +reached general availability (GA). We've also substantially improved the +performance of CronJobs since Kubernetes v1.19, by implementing a new +controller. + +In Kubernetes v1.20 we launched a revised v2 controller for CronJobs, +initially as an alpha feature. Kubernetes 1.21 uses the newer controller by +default, and the CronJob resource itself is now GA (group version: `batch/v1`). + +In this article, we'll take you through the driving forces behind this new +development, give you a brief description of controller design for core +Kubernetes, and we'll outline what you will gain from this improved controller. + +The driving force behind promoting the API was Kubernetes' policy choice to +[ensure APIs move beyond beta](/blog/2020/08/21/moving-forward-from-beta/). +That policy aims to prevent APIs from being stuck in a “permanent beta” state. +Over the years the old CronJob controller implementation had received healthy +feedback from the community, with reports of several widely recognized +[issues](https://github.com/kubernetes/kubernetes/issues/82659). + +If the beta API for CronJob was to be supported as GA, the existing controller +code would need substantial rework. Instead, the SIG Apps community decided +to introduce a new controller and gradually replace the old one. + +## How do controllers work? + +Kubernetes [controllers](/docs/concepts/architecture/controller/) are control +loops that watch the state of resource(s) in your cluster, then make or +request changes where needed. Each controller tries to move part of the +current cluster state closer to the desired state. + +The v1 CronJob controller works by performing a periodic poll and sweep of all +the CronJob objects in your cluster, in order to act on them. It is a single +worker implementation that gets all CronJobs every 10 seconds, iterates over +each one of them, and syncs them to their desired state. This was the default +way of doing things almost 5 years ago when the controller was initially +written. In hindsight, we can certainly say that such an approach can +overload the API server at scale. + +These days, every core controller in kubernetes must follow the guidelines +described in [Writing Controllers](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/controllers.md#readme). +Among many details, that document prescribes using +[shared informers](https://www.cncf.io/blog/2019/10/15/extend-kubernetes-via-a-shared-informer/) +to “receive notifications of adds, updates, and deletes for a particular +resource”. 
Upon any such events, the related object(s) is placed in a queue. +Workers pull items from the queue and process them one at a time. This +approach ensures consistency and scalability. + +The picture below shows the flow of information from kubernetes API server, +through shared informers and queue, to the main part of a controller - a +reconciliation loop which is responsible for performing the core functionality. + +![Controller flowchart](controller-flowchart.svg) + +The CronJob controller V2 uses a queue that implements the DelayingInterface to +handle the scheduling aspect. This queue allows processing an element after a +specific time interval. Every time there is a change in a CronJob or its related +Jobs, the key that represents the CronJob is pushed to the queue. The main +handler pops the key, processes the CronJob, and after completion +pushes the key back into the queue for the next scheduled time interval. This is +immediately a more performant implementation, as it no longer requires a linear +scan of all the CronJobs. On top of that, this controller can be scaled by +increasing the number of workers processing the CronJobs in parallel. + +## Performance impact of the new controller {#performance-impact} + +In order to test the performance difference of the two controllers a VM instance +with 128 GiB RAM and 64 vCPUs was used to set up a single node Kubernetes cluster. +Initially, a sample workload was created with 20 CronJob instances with a schedule +to run every minute, and 2100 CronJobs running every 20 hours. Additionally, +over the next few minutes we added 1000 CronJobs with a schedule to run every +20 hours, until we reached a total of 5120 CronJobs. + +![Visualization of performance](performance-impact-graph.svg) + +We observed that for every 1000 CronJobs added, the old controller used +around 90 to 120 seconds more wall-clock time to schedule 20 Jobs every cycle. +That is, at 5120 CronJobs, the old controller took approximately 9 minutes +to create 20 Jobs. Hence, during each cycle, about 8 schedules were missed. +The new controller, implemented with architectural change explained above, +created 20 Jobs without any delay, even when we created an additional batch +of 1000 CronJobs reaching a total of 6120. + +As a closing remark, the new controller exposes a histogram metric +`cronjob_controller_cronjob_job_creation_skew_duration_seconds` which helps +monitor the time difference between when a CronJob is meant to run and when +the actual Job is created. + +Hopefully the above description is a sufficient argument to follow the +guidelines and standards set in the Kubernetes project, even for your own +controllers. As mentioned before, the new controller is on by default starting +from Kubernetes v1.21; if you want to check it out in the previous release (1.20), +you can enable the `CronJobControllerV2` +[feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +for the kube-controller-manager: `--feature-gate="CronJobControllerV2=true"`. 
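As a rough sketch of the informer-plus-delaying-queue pattern described above (hypothetical code using client-go's `workqueue` package, not the actual CronJob controller implementation):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// A rate-limiting queue also implements DelayingInterface, so items can
	// be re-queued to be processed after a delay.
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	defer queue.ShutDown()

	// In a real controller, shared informers would call queue.Add(key)
	// whenever a CronJob or one of its Jobs changes.
	queue.Add("default/my-cronjob")

	for {
		key, shutdown := queue.Get()
		if shutdown {
			return
		}

		// Reconcile the CronJob identified by key: create any Jobs that are due, etc.
		fmt.Printf("syncing %v\n", key)

		// Mark the item as processed for this round.
		queue.Done(key)

		// Re-queue the key so it is handled again at its next scheduled time,
		// instead of re-scanning every CronJob on a fixed poll interval.
		queue.AddAfter(key, time.Minute)
	}
}
```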
diff --git a/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg new file mode 100644 index 0000000000000..976b428d91aa1 --- /dev/null +++ b/content/en/blog/_posts/2021-04-08-cronjob-reaches-ga/performance-impact-graph.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md b/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md index 1a253ffbbe3db..ed0da32f2506d 100644 --- a/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md +++ b/content/en/blog/_posts/2021-04-08-kubernetes-release-1.21.md @@ -9,7 +9,7 @@ slug: kubernetes-1-21-release-announcement We’re pleased to announce the release of Kubernetes 1.21, our first release of 2021! This release consists of 51 enhancements: 13 enhancements have graduated to stable, 16 enhancements are moving to beta, 20 enhancements are entering alpha, and 2 features have been deprecated. -This release cycle, we saw a major shift in ownership of processes around the release team. We moved from a synchronous mode of communcation, where we periodically asked the community for inputs, to a mode where the community opts-in features and/or blogs to the release. These changes have resulted in an increase in collaboration and teamwork across the community. The result of all that is reflected in Kubernetes 1.21 having the most number of features in the recent times. +This release cycle, we saw a major shift in ownership of processes around the release team. We moved from a synchronous mode of communication, where we periodically asked the community for inputs, to a mode where the community opts-in to contribute features and/or blogs to the release. These changes have resulted in an increase in collaboration and teamwork across the community. The result of all that is reflected in Kubernetes 1.21 having the most number of features in the recent times. ## Major Themes diff --git a/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md b/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md new file mode 100644 index 0000000000000..d03c9d0c25d2c --- /dev/null +++ b/content/en/blog/_posts/2021-04-12-introducing-suspended-jobs.md @@ -0,0 +1,110 @@ +--- +title: "Introducing Suspended Jobs" +date: 2021-04-12 +slug: introducing-suspended-jobs +layout: blog +--- + +**Author:** Adhityaa Chandrasekar (Google) + +[Jobs](/docs/concepts/workloads/controllers/job/) are a crucial part of +Kubernetes' API. While other kinds of workloads such as [Deployments](/docs/concepts/workloads/controllers/deployment/), +[ReplicaSets](/docs/concepts/workloads/controllers/replicaset/), +[StatefulSets](/docs/concepts/workloads/controllers/statefulset/), and +[DaemonSets](/docs/concepts/workloads/controllers/daemonset/) +solve use-cases that require Pods to run forever, Jobs are useful when Pods need +to run to completion. Commonly used in parallel batch processing, Jobs can be +used in a variety of applications ranging from video rendering and database +maintenance to sending bulk emails and scientific computing. + +While the amount of parallelism and the conditions for Job completion are +configurable, the Kubernetes API lacked the ability to suspend and resume Jobs. +This is often desired when cluster resources are limited and a higher priority +Job needs to execute in the place of another Job. 
Deleting the lower priority +Job is a poor workaround as Pod completion history and other metrics associated +with the Job will be lost. + +With the recent Kubernetes 1.21 release, you will be able to suspend a Job by +updating its spec. The feature is currently in **alpha** and requires you to +enable the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) +and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) +in order to use it. + +## API changes + +We introduced a new boolean field `suspend` into the `.spec` of Jobs. Let's say +I create the following Job: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: my-job +spec: + suspend: true + parallelism: 2 + completions: 10 + template: + spec: + containers: + - name: my-container + image: busybox + command: ["sleep", "5"] + restartPolicy: Never +``` + +Jobs are not suspended by default, so I'm explicitly setting the `suspend` field +to _true_ in the `.spec` of the above Job manifest. In the above example, the +Job controller will refrain from creating Pods until I'm ready to start the Job, +which I can do by updating `suspend` to false. + +As another example, consider a Job that was created with the `suspend` field +omitted. The Job controller will happily create Pods to work towards Job +completion. However, before the Job completes, if I explicitly set the field to +true with a Job update, the Job controller will terminate all active Pods that +are running and will wait indefinitely for the flag to be flipped back to false. +Typically, Pod termination is done by sending a SIGTERM signal to all container +processes in the Pod; the [graceful termination period](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination) +defined in the Pod spec will be honoured. Pods terminated this way will not be +counted as failures by the Job controller. + +It is important to understand that succeeded and failed Pods from the past will +continue to exist after you suspend a Job. That is, that they will count towards +Job completion once you resume it. You can verify this by looking at Job's +status before and after suspension. + +Read the [documentation](/docs/concepts/workloads/controllers/job#suspending-a-job) +for a full overview of this new feature. + +## Where is this useful? + +Let's say I'm the operator of a large cluster. I have many users submitting Jobs +to the cluster, but not all Jobs are created equal — some Jobs are more +important than others. Cluster resources aren't infinite either, so all users +must share resources. If all Jobs were created in the suspended state and placed +in a pending queue, I can achieve priority-based Job scheduling by resuming Jobs +in the right order. + +As another motivational use-case, consider a cloud provider where compute +resources are cheaper at night than in the morning. If I have a long-running Job +that takes multiple days to complete, being able to suspend the Job in the +morning and then resume it in the evening every day can reduce costs. + +Since this field is a part of the Job spec, [CronJobs](/docs/concepts/workloads/controllers/cron-jobs/) +automatically get this feature for free too. 
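For example, reusing the `my-job` manifest above, suspending and later resuming the Job with a standard `kubectl patch` could look like this:

```shell
# Suspend: the Job controller terminates any running Pods and creates no new ones.
kubectl patch job/my-job --type=strategic --patch '{"spec":{"suspend":true}}'

# Resume: Pod creation starts again; previously succeeded and failed Pods
# still count toward the Job's completion.
kubectl patch job/my-job --type=strategic --patch '{"spec":{"suspend":false}}'
```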
+ +## References and next steps + +If you're interested in a deeper dive into the rationale behind this feature and +the decisions we have taken, consider reading the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-apps/2232-suspend-jobs). +There's more detail on suspending and resuming jobs in the documentation for [Job](/docs/concepts/workloads/controllers/job#suspending-a-job). + +As previously mentioned, this feature is currently in alpha and is available +only if you explicitly opt-in through the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). +If this is a feature you're interested in, please consider testing suspended +Jobs in your cluster and providing feedback. You can discuss this enhancement [on GitHub](https://github.com/kubernetes/enhancements/issues/2232). +The SIG Apps community also [meets regularly](https://github.com/kubernetes/community/tree/master/sig-apps#meetings) +and can be reached through [Slack or the mailing list](https://github.com/kubernetes/community/tree/master/sig-apps#contact). +Barring any unexpected changes to the API, we intend to graduate the feature to +beta in Kubernetes 1.22, so that the feature becomes available by default. diff --git a/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md b/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md new file mode 100644 index 0000000000000..822880547d0c2 --- /dev/null +++ b/content/en/blog/_posts/2021-04-13-kube-state-metrics-goes-v-2-0.md @@ -0,0 +1,45 @@ +--- +layout: blog +title: "kube-state-metrics goes v2.0" +date: 2021-04-13 +slug: kube-state-metrics-v-2-0 +--- + +**Authors:** Lili Cosic (Red Hat), Frederic Branczyk (Polar Signals), Manuel Rüger (Sony Interactive Entertainment), Tariq Ibrahim (Salesforce) + +## What? + +[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics), a project under the Kubernetes organization, generates Prometheus format metrics based on the current state of the Kubernetes native resources. It does this by listening to the Kubernetes API and gathering information about resources and objects, e.g. Deployments, Pods, Services, and StatefulSets. A full list of resources is available in the [documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs) of kube-state-metrics. + +## Why? + +There are numerous useful metrics and insights provided by `kube-state-metrics` right out of the box! These metrics can be used to serve as an insight into your cluster: Either through metrics alone, in the form of dashboards, or through an alerting pipeline. To provide a few examples: + +* `kube_pod_container_status_restarts_total` can be used to alert on a crashing pod. +* `kube_deployment_status_replicas` which together with `kube_deployment_status_replicas_available` can be used to alert on whether a deployment is rolled out successfully or stuck. +* `kube_pod_container_resource_requests` and `kube_pod_container_resource_limits` can be used in capacity planning dashboards. + +And there are many more metrics available! To learn more about the other metrics and their details, please check out the [documentation](https://github.com/kubernetes/kube-state-metrics/tree/master/docs#readme). + +## What is new in v2.0? + +So now that we know what kube-state-metrics is, we are excited to announce the next release: kube-state-metrics v2.0! This release was long-awaited and started with an alpha release in September 2020. 
To ease maintenance we removed tech debt and also adjusted some confusing wording around user-facing flags and APIs. We also removed some metrics that caused unnecessarily high cardinality in Prometheus! For the 2.0 release, we took the time to set up scale and performance testing. This allows us to better understand if we hit any issues in large clusters and also to document resource request recommendations for your clusters. In this release (and v1.9.8) container builds providing support for multiple architectures were introduced, allowing you to run kube-state-metrics on ARM, ARM64, PPC64 and S390x as well! + +So without further ado, here is the list of the more noteworthy user-facing breaking changes. A full list of changes, features and bug fixes is available in the changelog at the end of this post. + +* Flag `--namespace` was renamed to `--namespaces`. If you are using the former, please make sure to update the flag before deploying the latest release. +* Flag `--collectors` was renamed to `--resources`. +* Flags `--metric-blacklist` and `--metric-whitelist` were renamed to `--metric-denylist` and `--metric-allowlist` (see the illustrative before/after snippet at the end of this post). +* Flag `--metric-labels-allowlist` allows you to specify a list of Kubernetes labels that get turned into the dimensions of the `kube_<resource>_labels` metrics. By default, the metric contains only name and namespace labels. +* All metrics with a prefix of `kube_hpa_*` were renamed to `kube_horizontalpodautoscaler_*`. +* Metric labels that relate to Kubernetes were converted to snake_case. +* If you are importing kube-state-metrics as a library, we have updated our Go module path to `k8s.io/kube-state-metrics/v2`. +* All deprecated stable metrics were removed as per the [notice in the v1.9 release](https://github.com/kubernetes/kube-state-metrics/tree/release-1.9/docs#metrics-deprecation). +* `quay.io/coreos/kube-state-metrics` images will no longer be updated. `k8s.gcr.io/kube-state-metrics/kube-state-metrics` is the new canonical location. +* The helm chart that is part of the kubernetes/kube-state-metrics repository is deprecated. https://github.com/prometheus-community/helm-charts will be its new location. + +For the full list of v2.0 release changes, including features, bug fixes and other breaking changes, see the full [CHANGELOG](https://github.com/kubernetes/kube-state-metrics/blob/master/CHANGELOG.md). + +## Found a problem? + +Thanks to all our users for testing so far and thank you to all our contributors for your issue reports as well as code and documentation changes! If you find any problems, we, the [maintainers](https://github.com/kubernetes/kube-state-metrics/blob/master/OWNERS), are more than happy to look into them, so please report them by opening a [GitHub issue](https://github.com/kubernetes/kube-state-metrics/issues/new/choose).
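+
+As a concrete illustration of the flag renames listed earlier, here is how a kube-state-metrics container's `args` might look after moving to v2.0. This is a hypothetical snippet — the resource, namespace, and metric names are placeholders; adjust them to your own deployment:
+
+```yaml
+# kube-state-metrics container args after upgrading to v2.0
+# (hypothetical values; adjust to your own deployment)
+args:
+  - --resources=pods,deployments            # was: --collectors
+  - --namespaces=production                 # was: --namespace
+  - --metric-allowlist=kube_pod_info,kube_deployment_status_replicas   # was: --metric-whitelist
+```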
diff --git a/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md b/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md new file mode 100644 index 0000000000000..457e9238f3c8d --- /dev/null +++ b/content/en/blog/_posts/2021-04-14-local-storage-features-go-beta.md @@ -0,0 +1,216 @@ +--- +layout: blog +title: "Local Storage: Storage Capacity Tracking, Distributed Provisioning and Generic Ephemeral Volumes hit Beta" +date: 2021-04-14 +slug: local-storage-features-go-beta +--- + + **Authors:** Patrick Ohly (Intel) + +The ["generic ephemeral +volumes"](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) +and ["storage capacity +tracking"](/docs/concepts/storage/storage-capacity/) +features in Kubernetes are getting promoted to beta in Kubernetes +1.21. Together with the [distributed provisioning +support](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node) +in the CSI external-provisioner, development and deployment of +Container Storage Interface (CSI) drivers which manage storage locally +on a node become a lot easier. + +This blog post explains how such drivers worked before and how these +features can be used to make drivers simpler. + +## Problems we are solving + +There are drivers for local storage, like +[TopoLVM](https://github.com/cybozu-go/topolvm) for traditional disks +and [PMEM-CSI](https://intel.github.io/pmem-csi/latest/README.html) +for [persistent memory](https://pmem.io/). They work and are ready for +usage today also on older Kubernetes releases, but making that possible +was not trivial. + +### Central component required + +The first problem is volume provisioning: it is handled through the +Kubernetes control plane. Some component must react to +[PersistentVolumeClaims](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) +(PVCs) +and create volumes. Usually, that is handled by a central deployment +of the [CSI +external-provisioner](https://kubernetes-csi.github.io/docs/external-provisioner.html) +and a CSI driver component that then connects to the storage +backplane. But for local storage, there is no such backplane. + +TopoLVM solved this by having its different components communicate +with each other through the Kubernetes API server by creating and +reacting to custom resources. So although TopoLVM is based on CSI, a +standard that is independent of a particular container orchestrator, +TopoLVM only works on Kubernetes. + +PMEM-CSI created its own storage backplane with communication through +gRPC calls. Securing that communication depends on TLS certificates, +which made driver deployment more complicated. + +### Informing Pod scheduler about capacity + +The next problem is scheduling. When volumes get created independently +of pods ("immediate binding"), the CSI driver must pick a node without +knowing anything about the pod(s) that are going to use it. Topology +information then forces those pods to run on the node where the volume +was created. If other resources like RAM or CPU are exhausted there, +the pod cannot start. This can be avoided by configuring in the +StorageClass that volume creation is meant to wait for the first pod +that uses a volume (`volumeBinding: WaitForFirstConsumer`). In that +mode, the Kubernetes scheduler tentatively picks a node based on other +constraints and then the external-provisioner is asked to create a +volume such that it is usable there. 
If local storage is exhausted, +the provisioner [can +ask](https://github.com/kubernetes-csi/external-provisioner/blob/master/doc/design.md) +for another scheduling round. But without information about available +capacity, the scheduler might always pick the same unsuitable node. + +Both TopoLVM and PMEM-CSI solved this with scheduler extenders. This +works, but it is hard to configure when deploying the driver because +communication between kube-scheduler and the driver is very dependent +on how the cluster was set up. + +### Rescheduling + +A common use case for local storage is scratch space. A better fit for +that use case than persistent volumes are ephemeral volumes that get +created for a pod and destroyed together with it. The initial API for +supporting ephemeral volumes with CSI drivers (hence called ["*CSI* +ephemeral +volumes"](/docs/concepts/storage/ephemeral-volumes/#csi-ephemeral-volumes)) +was [designed for light-weight +volumes](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md) +where volume creation is unlikely to fail. Volume creation happens +after pods have been permanently scheduled onto a node, in contrast to +the traditional provisioning where volume creation is tried before +scheduling a pod onto a node. CSI drivers must be modified to support +"CSI ephemeral volumes", which was done for TopoLVM and PMEM-CSI. But +due to the design of the feature in Kubernetes, pods can get stuck +permanently if storage capacity runs out on a node. The scheduler +extenders try to avoid that, but cannot be 100% reliable. + +## Enhancements in Kubernetes 1.21 + +### Distributed provisioning + +Starting with [external-provisioner +v2.1.0](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v2.1.0), +released for Kubernetes 1.20, provisioning can be handled by +external-provisioner instances that get [deployed together with the +CSI driver on each +node](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node) +and then cooperate to provision volumes ("distributed +provisioning"). There is no need any more to have a central component +and thus no need for communication between nodes, at least not for +provisioning. + +### Storage capacity tracking + +A scheduler extender still needs some way to find out about capacity +on each node. When PMEM-CSI switched to distributed provisioning in +v0.9.0, this was done by querying the metrics data exposed by the +local driver containers. But it is better also for users to eliminate +the need for a scheduler extender completely because the driver +deployment becomes simpler. [Storage capacity +tracking](/docs/concepts/storage/storage-capacity/), [introduced in +1.19](/blog/2020/09/01/ephemeral-volumes-with-storage-capacity-tracking/) +and promoted to beta in Kubernetes 1.21, achieves that. It works by +publishing information about capacity in `CSIStorageCapacity` +objects. The scheduler itself then uses that information to filter out +unsuitable nodes. Because information might be not quite up-to-date, +pods may still get assigned to nodes with insufficient storage, it's +just less likely and the next scheduling attempt for a pod should work +better once the information got refreshed. + +### Generic ephemeral volumes + +So CSI drivers still need the ability to recover from a bad scheduling +decision, something that turned out to be impossible to implement for +"CSI ephemeral volumes". 
["*Generic* ephemeral +volumes"](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes), +another feature that got promoted to beta in 1.21, don't have that +limitation. This feature adds a controller that will create and manage +PVCs with the lifetime of the Pod and therefore the normal recovery +mechanism also works for them. Existing storage drivers will be able +to process these PVCs without any new logic to handle this new +scenario. + +## Known limitations + +Both generic ephemeral volumes and storage capacity tracking increase +the load on the API server. Whether that is a problem depends a lot on +the kind of workload, in particular how many pods have volumes and how +often those need to be created and destroyed. + +No attempt was made to model how scheduling decisions affect storage +capacity. That's because the effect can vary considerably depending on +how the storage system handles storage. The effect is that multiple +pods with unbound volumes might get assigned to the same node even +though there is only sufficient capacity for one pod. Scheduling +should recover, but it would be more efficient if the scheduler knew +more about storage. + +Because storage capacity gets published by a running CSI driver and +the cluster autoscaler needs information about a node that hasn't been +created yet, it will currently not scale up a cluster for pods that +need volumes. There is an [idea how to provide that +information](https://github.com/kubernetes/autoscaler/pull/3887), but +more work is needed in that area. + +Distributed snapshotting and resizing are not currently supported. It +should be doable to adapt the respective sidecar and there are +tracking issues for external-snapshotter and external-resizer open +already, they just need some volunteer. + +The recovery from a bad scheduling decising can fail for pods with +multiple volumes, in particular when those volumes are local to nodes: +if one volume can be created and then storage is insufficient for +another volume, the first volume continues to exist and forces the +scheduler to put the pod onto the node of that volume. There is an +idea how do deal with this, [rolling back the provision of the +volume](https://github.com/kubernetes/enhancements/pull/1703), but +this is only in the very early stages of brainstorming and not even a +merged KEP yet. For now it is better to avoid creating pods with more +than one persistent volume. + +## Enabling the new features and next steps + +With the feature entering beta in the 1.21 release, no additional actions are needed to enable it. Generic +ephemeral volumes also work without changes in CSI drivers. For more +information, see the +[documentation](/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) +and the [previous blog +post](/blog/2020/09/01/ephemeral-volumes-with-storage-capacity-tracking/) +about it. The API has not changed at all between alpha and beta. + +For the other two features, the external-provisioner documentation +explains how CSI driver developers must change how their driver gets +deployed to support [storage capacity +tracking](https://github.com/kubernetes-csi/external-provisioner#capacity-support) +and [distributed +provisioning](https://github.com/kubernetes-csi/external-provisioner#deployment-on-each-node). +These two features are independent, therefore it is okay to enable +only one of them. 
+ +[SIG +Storage](https://github.com/kubernetes/community/tree/master/sig-storage) +would like to hear from you if you are using these new features. We +can be reached through +[email](https://groups.google.com/forum/#!forum/kubernetes-sig-storage), +[Slack](https://slack.k8s.io/) (channel [`#sig-storage`](https://kubernetes.slack.com/messages/sig-storage)) and in the +[regular SIG +meeting](https://github.com/kubernetes/community/tree/master/sig-storage#meeting). +A description of your workload would be very useful to validate design +decisions, set up performance tests and eventually promote these +features to GA. + +## Acknowledgements + +Thanks a lot to the members of the community who have contributed to these +features or given feedback including members of SIG Scheduling, SIG Auth, +and of course SIG Storage! diff --git a/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md b/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md new file mode 100644 index 0000000000000..a3687cf5fd111 --- /dev/null +++ b/content/en/blog/_posts/2021-04-15-Three-Tenancy-Models-for-Kubernetes.md @@ -0,0 +1,80 @@ +--- +layout: blog +title: 'Three Tenancy Models For Kubernetes' +date: 2021-04-15 +slug: three-tenancy-models-for-kubernetes +--- + +**Authors:** Ryan Bezdicek (Medtronic), Jim Bugwadia (Nirmata), Tasha Drew (VMware), Fei Guo (Alibaba), Adrian Ludwin (Google) + +Kubernetes clusters are typically used by several teams in an organization. In other cases, Kubernetes may be used to deliver applications to end users requiring segmentation and isolation of resources across users from different organizations. Secure sharing of Kubernetes control plane and worker node resources allows maximizing productivity and saving costs in both cases. + +The Kubernetes Multi-Tenancy Working Group is chartered with defining tenancy models for Kubernetes and making it easier to operationalize tenancy related use cases. This blog post, from the working group members, describes three common tenancy models and introduces related working group projects. + +We will also be presenting on this content and discussing different use cases at our Kubecon EU 2021 panel session, [Multi-tenancy vs. Multi-cluster: When Should you Use What?](https://sched.co/iE66). + +## Namespaces as a Service + +With the *namespaces-as-a-service* model, tenants share a cluster and tenant workloads are restricted to a set of Namespaces assigned to the tenant. The cluster control plane resources like the API server and scheduler, and worker node resources like CPU, memory, etc. are available for use across all tenants. + +To isolate tenant workloads, each namespace must also contain: +* **[role bindings](/docs/reference/access-authn-authz/rbac/#rolebinding-and-clusterrolebinding):** for controlling access to the namespace +* **[network policies](/docs/concepts/services-networking/network-policies/):** to prevent network traffic across tenants +* **[resource quotas](/docs/concepts/policy/resource-quotas/):** to limit usage and ensure fairness across tenants + +With this model, tenants share cluster-wide resources like ClusterRoles and CustomResourceDefinitions (CRDs) and hence cannot create or update these cluster-wide resources. 
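+
+As an illustrative sketch of the per-namespace guardrails listed above (the namespace name and quota values are assumptions, not recommendations), a tenant namespace might carry a resource quota and a default-deny network policy such as the following; a RoleBinding scoping the tenant's access would accompany them:
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: tenant-a-quota
+  namespace: tenant-a        # assumed tenant namespace
+spec:
+  hard:
+    requests.cpu: "10"
+    requests.memory: 20Gi
+    pods: "50"
+---
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-all
+  namespace: tenant-a
+spec:
+  podSelector: {}            # selects every Pod in the namespace
+  policyTypes:
+    - Ingress
+    - Egress                 # no rules listed, so all traffic is denied by default
+```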
+ +The [Hierarchical Namespace Controller (HNC)](/blog/2020/08/14/introducing-hierarchical-namespaces/) project makes it easier to manage namespace based tenancy by allowing users to create additional namespaces under a namespace, and propagating resources within the namespace hierarchy. This allows self-service namespaces for tenants, without requiring cluster-wide permissions. + +The [Multi-Tenancy Benchmarks (MTB)](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks) project provides benchmarks and a command-line tool that performs several configuration and runtime checks to report if tenant namespaces are properly isolated and the necessary security controls are implemented. + +## Clusters as a Service + +With the *clusters-as-a-service* usage model, each tenant gets their own cluster. This model allows tenants to have different versions of cluster-wide resources such as CRDs, and provides full isolation of the Kubernetes control plane. + +The tenant clusters may be provisioned using projects like [Cluster API (CAPI)](https://cluster-api.sigs.k8s.io/) where a management cluster is used to provision multiple workload clusters. A workload cluster is assigned to a tenant and tenants have full control over cluster resources. Note that in most enterprises a central platform team may be responsible for managing required add-on services such as security and monitoring services, and for providing cluster lifecycle management services such as patching and upgrades. A tenant administrator may be restricted from modifying the centrally managed services and other critical cluster information. + +## Control planes as a Service + +In a variation of the *clusters-as-a-service* model, the tenant cluster may be a **virtual cluster** where each tenant gets their own dedicated Kubernetes control plane but share worker node resources. As with other forms of virtualization, users of a virtual cluster see no significant differences between a virtual cluster and other Kubernetes clusters. This is sometimes referred to as `Control Planes as a Service` (CPaaS). + +A virtual cluster of this type shares worker node resources and workload state independent control plane components, like the scheduler. Other workload aware control-plane components, like the API server, are created on a per-tenant basis to allow overlaps, and additional components are used to synchronize and manage state across the per-tenant control plane and the underlying shared cluster resources. With this model users can manage their own cluster-wide resources. + +The [Virtual Cluster](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/virtualcluster) project implements this model, where a `supercluster` is shared by multiple `virtual clusters`. The [Cluster API Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested) project is extending this work to conform to the CAPI model, allowing use of familiar API resources to provision and manage virtual clusters. + +## Security considerations + +Cloud native security involves different system layers and lifecycle phases as described in the [Cloud Native Security Whitepaper](/blog/2020/11/18/cloud-native-security-for-your-clusters) from CNCF SIG Security. Without proper security measures implemented across all layers and phases, Kubernetes tenant isolation can be compromised and a security breach with one tenant can threaten other tenants. 
+ +It is important for anyone new to Kubernetes to realize that the default installation of a new upstream Kubernetes cluster is not secure, and you are going to need to invest in hardening it in order to avoid security issues. + +At a minimum, the following security measures are required: +* image scanning: container image vulnerabilities can be exploited to execute commands and access additional resources. +* [RBAC](/docs/reference/access-authn-authz/rbac/): for *namespaces-as-a-service*, user roles and permissions must be properly configured at a per-namespace level; for other models tenants may need to be restricted from accessing centrally managed add-on services and other cluster-wide resources. +* [network policies](/docs/concepts/services-networking/network-policies/): for *namespaces-as-a-service*, default network policies that deny all ingress and egress traffic are recommended to prevent cross-tenant network traffic and may also be used as a best practice for other tenancy models. +* [Kubernetes Pod Security Standards](/docs/concepts/security/pod-security-standards/): to enforce Pod hardening best practices, the `Restricted` policy is recommended as the default for tenant workloads with exclusions configured only as needed. +* [CIS Benchmarks for Kubernetes](https://www.cisecurity.org/benchmark/kubernetes/): the CIS Benchmarks for Kubernetes guidelines should be used to properly configure Kubernetes control-plane and worker node components. + +Additional recommendations include using: +* policy engines: for configuration security best practices, such as only allowing trusted registries. +* runtime scanners: to detect and report runtime security events. +* VM-based container sandboxing: for stronger data plane isolation. + +While proper security is required independently of tenancy models, not having essential security controls like [pod security](/docs/concepts/security/pod-security-standards/) in a shared cluster provides attackers with means to compromise tenancy models and possibly access sensitive information across tenants, increasing the overall risk profile. + +## Summary + +A 2020 CNCF survey showed that production Kubernetes usage has increased by over 300% since 2016. As an increasing number of Kubernetes workloads move to production, organizations are looking for ways to share Kubernetes resources across teams for agility and cost savings. + +The **namespaces as a service** tenancy model allows sharing clusters and hence enables resource efficiencies. However, it requires proper security configurations and has limitations as all tenants share the same cluster-wide resources. + +The **clusters as a service** tenancy model addresses these limitations, but with higher management and resource overhead. + +The **control planes as a service** model provides a way to share resources of a single Kubernetes cluster and also lets tenants manage their own cluster-wide resources. Sharing worker node resources increases resource efficiencies, but also exposes cross-tenant security and isolation concerns that exist for shared clusters. + +In many cases, organizations will use multiple tenancy models to address different use cases, as different product and development teams have varying needs. Following security and management best practices, such as applying [Pod Security Standards](/docs/concepts/security/pod-security-standards/) and not using the `default` namespace, makes it easier to switch from one model to another.
+ +The [Kubernetes Multi-Tenancy Working Group](https://github.com/kubernetes-sigs/multi-tenancy) has created several projects like [Hierarchical Namespaces Controller](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/hnc), [Virtual Cluster](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/virtualcluster) / [CAPI Nested](https://github.com/kubernetes-sigs/cluster-api-provider-nested), and [Multi-Tenancy Benchmarks](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks) to make it easier to provision and manage multi-tenancy models. + +If you are interested in multi-tenancy topics, or would like to share your use cases, please join us in an upcoming [community meeting](https://github.com/kubernetes/community/blob/master/wg-multitenancy/README.md) or reach out on the *wg-multitenancy channel* on the [Kubernetes slack](https://slack.k8s.io/). + diff --git a/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md b/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md new file mode 100644 index 0000000000000..99b2fd65b2e32 --- /dev/null +++ b/content/en/blog/_posts/2021-04-16-volume-health-monitoring-alpha.md @@ -0,0 +1,95 @@ +--- +layout: blog +title: "Volume Health Monitoring Alpha Update" +date: 2021-04-16 +slug: volume-health-monitoring-alpha-update +--- + +**Author:** Xing Yang (VMware) + +The CSI Volume Health Monitoring feature, originally introduced in 1.19 has undergone a large update for the 1.21 release. + +## Why add Volume Health Monitoring to Kubernetes? + +Without Volume Health Monitoring, Kubernetes has no knowledge of the state of the underlying volumes of a storage system after a PVC is provisioned and used by a Pod. Many things could happen to the underlying storage system after a volume is provisioned in Kubernetes. For example, the volume could be deleted by accident outside of Kubernetes, the disk that the volume resides on could fail, it could be out of capacity, the disk may be degraded which affects its performance, and so on. Even when the volume is mounted on a pod and used by an application, there could be problems later on such as read/write I/O errors, file system corruption, accidental unmounting of the volume outside of Kubernetes, etc. It is very hard to debug and detect root causes when something happened like this. + +Volume health monitoring can be very beneficial to Kubernetes users. It can communicate with the CSI driver to retrieve errors detected by the underlying storage system. PVC events can be reported up to the user to take action. For example, if the volume is out of capacity, they could request a volume expansion to get more space. + +## What is Volume Health Monitoring? + +CSI Volume Health Monitoring allows CSI Drivers to detect abnormal volume conditions from the underlying storage systems and report them as events on PVCs or Pods. + +The Kubernetes components that monitor the volumes and report events with volume health information include the following: + +* Kubelet, in addition to gathering the existing volume stats will watch the volume health of the PVCs on that node. If a PVC has an abnormal health condition, an event will be reported on the pod object using the PVC. If multiple pods are using the same PVC, events will be reported on all pods using that PVC. +* An [External Volume Health Monitor Controller](https://github.com/kubernetes-csi/external-health-monitor) watches volume health of the PVCs and reports events on the PVCs. 
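+
+On a cluster where the feature is enabled and the CSI driver supports it, one way to look for these events is with the usual event tooling. This is a sketch; the PVC and Pod names are placeholders:
+
+```shell
+# Events reported by the external health monitor controller land on the PVC.
+kubectl describe pvc my-pvc
+
+# Events reported by the kubelet land on the Pods that use the PVC.
+kubectl describe pod my-pod
+
+# Or list events attached to PersistentVolumeClaims across the namespace.
+kubectl get events --field-selector involvedObject.kind=PersistentVolumeClaim
+```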
+ +Note that the node side volume health monitoring logic was an external agent when this feature was first introduced in the Kubernetes 1.19 release. In Kubernetes 1.21, the node side volume health monitoring logic was moved from the external agent into the Kubelet, to avoid making duplicate CSI function calls. With this change in 1.21, a new alpha [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) `CSIVolumeHealth` was introduced for the volume health monitoring logic in Kubelet. + +Currently the Volume Health Monitoring feature is informational only as it only reports abnormal volume health events on PVCs or Pods. Users will need to check these events and manually fix the problems. This feature serves as a stepping stone towards programmatic detection and resolution of volume health issues by Kubernetes in the future. + +## How do I use Volume Health on my Kubernetes Cluster? + +To use the Volume Health feature, first make sure the CSI driver you are using supports this feature. Refer to this [CSI drivers doc](https://kubernetes-csi.github.io/docs/drivers.html) to find out which CSI drivers support this feature. + +To enable Volume Health Monitoring from the node side, the alpha feature gate `CSIVolumeHealth` needs to be enabled. + +If a CSI driver supports the Volume Health Monitoring feature from the controller side, events regarding abnormal volume conditions will be recorded on PVCs. + +If a CSI driver supports the Volume Health Monitoring feature from the controller side, user can also get events regarding node failures if the `enable-node-watcher` flag is set to true when deploying the External Health Monitor Controller. When a node failure event is detected, an event will be reported on the PVC to indicate that pods using this PVC are on a failed node. + +If a CSI driver supports the Volume Health Monitoring feature from the node side, events regarding abnormal volume conditions will be recorded on pods using the PVCs. + +## As a storage vendor, how do I add support for volume health to my CSI driver? + +Volume Health Monitoring includes two parts: +* An External Volume Health Monitoring Controller monitors volume health from the controller side. +* Kubelet monitors volume health from the node side. + +For details, see the [CSI spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) and the [Kubernetes-CSI Driver Developer Guide](https://kubernetes-csi.github.io/docs/volume-health-monitor.html). + +There is a sample implementation for volume health in [CSI host path driver](https://github.com/kubernetes-csi/csi-driver-host-path). + +### Controller Side Volume Health Monitoring + +To learn how to deploy the External Volume Health Monitoring controller, see [CSI external-health-monitor-controller](https://kubernetes-csi.github.io/docs/external-health-monitor-controller.html) in the CSI documentation. + +The External Health Monitor Controller calls either `ListVolumes` or `ControllerGetVolume` CSI RPC and reports VolumeConditionAbnormal events with messages on PVCs if abnormal volume conditions are detected. Only CSI drivers with `LIST_VOLUMES` and `VOLUME_CONDITION` controller capability or `GET_VOLUME` and `VOLUME_CONDITION` controller capability support Volume Health Monitoring in the external controller. + +To implement the volume health feature from the controller side, a CSI driver **must** add support for the new controller capabilities. 
If a CSI driver supports `LIST_VOLUMES` and `VOLUME_CONDITION` controller capabilities, it **must** implement the controller RPC `ListVolumes` and report the volume condition in the response. + +If a CSI driver supports `GET_VOLUME` and `VOLUME_CONDITION` controller capabilities, it **must** implement the controller RPC `ControllerGetVolume` and report the volume condition in the response. + +If a CSI driver supports `LIST_VOLUMES`, `GET_VOLUME`, and `VOLUME_CONDITION` controller capabilities, only the `ListVolumes` CSI RPC will be invoked by the External Health Monitor Controller. + +### Node Side Volume Health Monitoring + +Kubelet calls the `NodeGetVolumeStats` CSI RPC and reports VolumeConditionAbnormal events with messages on Pods if abnormal volume conditions are detected. Only CSI drivers with `VOLUME_CONDITION` node capability support Volume Health Monitoring in Kubelet. + +To implement the volume health feature from the node side, a CSI driver **must** add support for the new node capabilities. + +If a CSI driver supports `VOLUME_CONDITION` node capability, it **must** report the volume condition in the node RPC `NodeGetVolumeStats`. + +## What’s next? + +Depending on feedback and adoption, the Kubernetes team plans to push the CSI volume health implementation to beta in either 1.22 or 1.23. + +We are also exploring how to use volume health information for programmatic detection and automatic reconciliation in Kubernetes. + +## How can I learn more? + +To learn the design details for Volume Health Monitoring, read the [Volume Health Monitor](https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1432-volume-health-monitor) enhancement proposal. + +The Volume Health Monitor controller source code is at [https://github.com/kubernetes-csi/external-health-monitor](https://github.com/kubernetes-csi/external-health-monitor). + +There are also more details about volume health checks in the [Container Storage Interface Documentation](https://kubernetes-csi.github.io/docs/). + +## How do I get involved? + +The [Kubernetes Slack channel #csi](https://kubernetes.slack.com/messages/csi) and any of the [standard SIG Storage communication channels](https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact) are great ways to reach out to SIG Storage and the CSI team. + +We offer a huge thank you to the contributors who helped release this feature in 1.21. We want to thank Yuquan Ren ([NickrenREN](https://github.com/nickrenren)), who implemented the initial volume health monitor controller and agent in the external health monitor repo, and Ran Xu ([fengzixu](https://github.com/fengzixu)), who moved the volume health monitoring logic from the external agent to Kubelet in 1.21. We also offer special thanks to the following people for their insightful reviews: David Ashpole ([dashpole](https://github.com/dashpole)), Michelle Au ([msau42](https://github.com/msau42)), David Eads ([deads2k](https://github.com/deads2k)), Elana Hashman ([ehashman](https://github.com/ehashman)), Seth Jennings ([sjenning](https://github.com/sjenning)), and Jiawei Wang ([Jiawei0227](https://github.com/Jiawei0227)). + +If you are interested in getting involved with the design and development of CSI or any part of the Kubernetes Storage system, join the [Kubernetes Storage Special Interest Group](https://github.com/kubernetes/community/tree/master/sig-storage) (SIG). We’re rapidly growing and always welcome new contributors.
diff --git a/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md b/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md new file mode 100644 index 0000000000000..990dd6308b825 --- /dev/null +++ b/content/en/blog/_posts/2021-04-19-introducing-indexed-jobs.md @@ -0,0 +1,95 @@ +--- +title: "Introducing Indexed Jobs" +date: 2021-04-19 +slug: introducing-indexed-jobs +--- + +**Author:** Aldo Culquicondor (Google) + +Once you have containerized a non-parallel [Job](/docs/concepts/workloads/controllers/job/), +it is quite easy to get it up and running on Kubernetes without modifications to +the binary. In most cases, when running parallel distributed Jobs, you had +to set a separate system to partition the work among the workers. For +example, you could set up a task queue to [assign one work item to each +Pod](/docs/tasks/job/coarse-parallel-processing-work-queue/) or [multiple items +to each Pod until the queue is emptied](/docs/tasks/job/fine-parallel-processing-work-queue/). + +The Kubernetes 1.21 release introduces a new field to control Job _completion mode_, +a configuration option that allows you to control how Pod completions affect the +overall progress of a Job, with two possible options (for now): + +- `NonIndexed` (default): the Job is considered complete when there has been + a number of successfully completed Pods equal to the specified number in + `.spec.completions`. In other words, each Pod completion is homologous to + each other. Any Job you might have created before the introduction of + completion modes is implicitly NonIndexed. +- `Indexed`: the Job is considered complete when there is one successfully + completed Pod associated with each index from 0 to `.spec.completions-1`. The + index is exposed to each Pod in the `batch.kubernetes.io/job-completion-index` + annotation and the `JOB_COMPLETION_INDEX` environment variable. + +You can start using Jobs with Indexed completion mode, or Indexed Jobs, for +short, to easily start parallel Jobs. Then, each worker Pod can have a statically +assigned partition of the data based on the index. This saves you from having to +set up a queuing system or even having to modify your binary! + +## Creating an Indexed Job + +To create an Indexed Job, you just have to add `completionMode: Indexed` to the +Job spec and make use of the `JOB_COMPLETION_INDEX` environment variable. + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: 'sample-job' +spec: + completions: 3 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + containers: + - command: + - 'bash' + - '-c' + - 'echo "My partition: ${JOB_COMPLETION_INDEX}"' + image: 'docker.io/library/bash' + name: 'sample-load' +``` + +Note that completion mode is an alpha feature in the 1.21 release. To be able to +use it in your cluster, make sure to enable the `IndexedJob` [feature +gate](/docs/reference/command-line-tools-reference/feature-gates/) on the +[API server](docs/reference/command-line-tools-reference/kube-apiserver/) and +the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). + +When you run the example, you will see that each of the three created Pods gets a +different completion index. 
For the user's convenience, the control plane sets the +`JOB_COMPLETION_INDEX` environment variable, but you can choose to [set your +own](/docs/tasks/inject-data-application/environment-variable-expose-pod-information/) +or [expose the index as a file](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/). + +See [Indexed Job for parallel processing with static work +assignment](/docs/tasks/job/indexed-parallel-processing-static/) for a +step-by-step guide, and a few more examples. + +## Future plans + +SIG Apps envisions that there might be more completion modes that enable more +use cases for the Job API. We welcome you to open issues in +[kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) with your +suggestions. + +In particular, we are considering an `IndexedAndUnique` mode where the indexes +are not just available as annotation, but they are part of the Pod names, +similar to {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}. +This should facilitate inter-Pod communication for tightly coupled Pods. +You can join the discussion in the [open issue](https://github.com/kubernetes/kubernetes/issues/99497). + +## Wrap-up + +Indexed Jobs allows you to statically partition work among the workers of your +parallel Jobs. SIG Apps hopes that this feature facilitates the migration of +more batch workloads to Kubernetes. \ No newline at end of file diff --git a/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md b/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md new file mode 100644 index 0000000000000..86c005eed1136 --- /dev/null +++ b/content/en/blog/_posts/2021-04-20-Defining-NetworkPolicy-Conformance-For-CNIs.md @@ -0,0 +1,479 @@ +--- +layout: blog +title: "Defining Network Policy Conformance for Container Network Interface (CNI) providers" +date: 2021-04-20 +slug: defining-networkpolicy-conformance-cni-providers +--- + +Authors: Matt Fenwick (Synopsys), Jay Vyas (VMWare), Ricardo Katz, Amim Knabben (Loadsmart), Douglas Schilling Landgraf (Red Hat), Christopher Tomkins (Tigera) + +Special thanks to Tim Hockin and Bowie Du (Google), Dan Winship and Antonio Ojea (Red Hat), +Casey Davenport and Shaun Crampton (Tigera), and Abhishek Raut and Antonin Bas (VMware) for +being supportive of this work, and working with us to resolve issues in different Container Network Interfaces (CNIs) over time. + +A brief conversation around "node local" Network Policies in April of 2020 inspired the creation of a NetworkPolicy subproject from SIG Network. It became clear that as a community, +we need a rock-solid story around how to do pod network security on Kubernetes, and this story needed a community around it, so as to grow the cultural adoption of enterprise security patterns in K8s. + +In this post we'll discuss: + +- Why we created a subproject for [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) +- How we changed the Kubernetes e2e framework to `visualize` NetworkPolicy implementation of your CNI provider +- The initial results of our comprehensive NetworkPolicy conformance validator, _Cyclonus_, built around these principles +- Improvements subproject contributors have made to the NetworkPolicy user experience + +## Why we created a subproject for NetworkPolicies + +In April of 2020 it was becoming clear that many CNIs were emerging, and many vendors +implement these CNIs in subtly different ways. 
Users were beginning to express a little bit +of confusion around how to implement policies for different scenarios, and asking for new features. +It was clear that we needed to begin unifying the way we think about Network Policies +in Kubernetes, to avoid API fragmentation and unnecessary complexity. + +For example: +- In order to be flexible to the user’s environment, Calico as a CNI provider can be run using IPIP or VXLAN mode, or without encapsulation overhead. CNIs such as Antrea + and Cilium offer similar configuration options as well. +- Some CNI plugins offer iptables for NetworkPolicies amongst other options, whereas other CNIs use a completely + different technology stack (for example, the Antrea project uses Open vSwitch rules). +- Some CNI plugins only implement a subset of the Kubernetes NetworkPolicy API, and some a superset. For example, certain plugins don't support the + ability to target a named port; others don't work with certain IP address types, and there are diverging semantics for similar policy types. +- Some CNI plugins combine with OTHER CNI plugins in order to implement NetworkPolicies (canal), some CNI's might mix implementations (multus), and some clouds do routing separately from NetworkPolicy implementation. + +Although this complexity is to some extent necessary to support different environments, end-users find that they need to follow a multistep process to implement Network Policies to secure their applications: +- Confirm that their network plugin supports NetworkPolicies (some don't, such as Flannel) +- Confirm that their cluster's network plugin supports the specific NetworkPolicy features that they are interested in (again, the named port or port range examples come to mind here) +- Confirm that their application's Network Policy definitions are doing the right thing +- Find out the nuances of a vendor's implementation of policy, and check whether or not that implementation has a CNI neutral implementation (which is sometimes adequate for users) + +The NetworkPolicy project in upstream Kubernetes aims at providing a community where +people can learn about, and contribute to, the Kubernetes NetworkPolicy API and the surrounding ecosystem. + +## The First step: A validation framework for NetworkPolicies that was intuitive to use and understand + +The Kubernetes end to end suite has always had NetworkPolicy tests, but these weren't +run in CI, and the way they were implemented didn't provide holistic, easily consumable +information about how a policy was working in a cluster. +This is because the original tests didn't provide any kind of visual summary of connectivity +across a cluster. We thus initially set out to make it easy to confirm CNI support for NetworkPolicies by +making the end to end tests (which are often used by administrators or users to diagnose cluster conformance) easy to interpret. + +To solve the problem of confirming that CNIs support the basic features most users care about +for a policy, we built a new NetworkPolicy validation tool into the Kubernetes e2e +framework which allows for visual inspection of policies and their effect on a standard set of pods in a cluster. +For example, take the following test output. We found a bug in +[OVN Kubernetes](https://github.com/ovn-org/ovn-kubernetes/issues/1782). This bug has now been resolved. 
With this tool the bug was really +easy to characterize, wherein certain policies caused a state-modification that, +later on, caused traffic to incorrectly be blocked (even after all Network Policies were deleted from the cluster). + +This is the network policy for the test in question: +```yaml +metadata: + creationTimestamp: null + name: allow-ingress-port-80 +spec: + ingress: + - ports: + - port: serve-80-tcp + podSelector: {} +``` + +These are the expected connectivity results. The test setup is 9 pods (3 namespaces: x, y, and z; +and 3 pods in each namespace: a, b, and c); each pod runs a server on the same port and protocol +that can be reached through HTTP calls in the absence of network policies. Connectivity is verified +by using the [agnhost](https://github.com/kubernetes/kubernetes/tree/master/test/images/agnhost) network utility to issue HTTP calls on a port and protocol that other pods are +expected to be serving. A test scenario first +runs a connectivity check to ensure that each pod can reach each other pod, for 81 (= 9 x 9) data +points. This is the "control". Then perturbations are applied, depending on the test scenario: +policies are created, updated, and deleted; labels are added and removed from pods and namespaces, +and so on. After each change, the connectivity matrix is recollected and compared to the expected +connectivity. + +These results give a visual indication of connectivity in a simple matrix. Going down the leftmost column is the "source" +pod, or the pod issuing the request; going across the topmost row is the "destination" pod, or the pod +receiving the request. A `.` means that the connection was allowed; an `X` means the connection was +blocked. For example: + +``` +Nov 4 16:58:43.449: INFO: expected: + +- x/a x/b x/c y/a y/b y/c z/a z/b z/c +x/a . . . . . . . . . +x/b . . . . . . . . . +x/c . . . . . . . . . +y/a . . . . . . . . . +y/b . . . . . . . . . +y/c . . . . . . . . . +z/a . . . . . . . . . +z/b . . . . . . . . . +z/c . . . . . . . . . +``` + +Below are the observed connectivity results in the case of the OVN Kubernetes bug. Notice how the top three rows indicate that +all requests from namespace x regardless of pod and destination were blocked. Since these +experimental results do not match the expected results, a failure will be reported. Note +how the specific pattern of failure provides clear insight into the nature of the problem -- +since all requests from a specific namespace fail, we have a clear clue to start our +investigation. + +``` +Nov 4 16:58:43.449: INFO: observed: + +- x/a x/b x/c y/a y/b y/c z/a z/b z/c +x/a X X X X X X X X X +x/b X X X X X X X X X +x/c X X X X X X X X X +y/a . . . . . . . . . +y/b . . . . . . . . . +y/c . . . . . . . . . +z/a . . . . . . . . . +z/b . . . . . . . . . +z/c . . . . . . . . . +``` + +This was one of our earliest wins in the Network Policy group, as we were able to +identify and work with the OVN Kubernetes group to fix a bug in egress policy processing. + +However, even though this tool has made it easy to validate roughly 30 common scenarios, +it doesn't validate *all* Network Policy scenarios - because there are an enormous number of possible +permutations that one might create (technically, we might say this number is +infinite given that there's an infinite number of possible namespace/pod/port/protocol variations one can create). 
+ +Once these tests were in play, we worked with the Upstream SIG Network and SIG Testing communities +(thanks to Antonio Ojea and Ben Elder) to put a testgrid Network Policy job in place. This job +continuously runs the entire suite of Network Policy tests against +[GCE with Calico as a Network Policy provider](https://testgrid.k8s.io/sig-network-gce#presubmit-network-policies,%20google-gce). + +Part of our role as a subproject is to help make sure that, when these tests break, we can help triage them effectively. + +## Cyclonus: The next step towards Network Policy conformance {#cyclonus} + +Around the time that we were finishing the validation work, it became clear from the community that, +in general, we needed to solve the overall problem of testing ALL possible Network Policy implementations. +For example, a KEP was recently written which introduced the concept of micro versioning to +Network Policies to accommodate [describing this at the API level](https://github.com/kubernetes/enhancements/pull/2137/files), by Dan Winship. + +In response to this increasingly obvious need to comprehensively evaluate Network +Policy implementations from all vendors, Matt Fenwick decided to evolve our approach to Network Policy validation again by creating Cyclonus. + +Cyclonus is a comprehensive Network Policy fuzzing tool which verifies a CNI provider +against hundreds of different Network Policy scenarios, by defining similar truth table/policy +combinations as demonstrated in the end to end tests, while also providing a hierarchical +representation of policy "categories". We've found some interesting nuances and issues +in almost every CNI we've tested so far, and have even contributed some fixes back. + +To perform a Cyclonus validation run, you create a Job manifest similar to: + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: cyclonus +spec: + template: + spec: + restartPolicy: Never + containers: + - command: + - ./cyclonus + - generate + - --perturbation-wait-seconds=15 + - --server-protocol=tcp,udp + name: cyclonus + imagePullPolicy: IfNotPresent + image: mfenwick100/cyclonus:latest + serviceAccount: cyclonus +``` + +Cyclonus outputs a report of all the test cases it will run: +``` +test cases to run by tag: +- target: 6 +- peer-ipblock: 4 +- udp: 16 +- delete-pod: 1 +- conflict: 16 +- multi-port/protocol: 14 +- ingress: 51 +- all-pods: 14 +- egress: 51 +- all-namespaces: 10 +- sctp: 10 +- port: 56 +- miscellaneous: 22 +- direction: 100 +- multi-peer: 0 +- any-port-protocol: 2 +- set-namespace-labels: 1 +- upstream-e2e: 0 +- allow-all: 6 +- namespaces-by-label: 6 +- deny-all: 10 +- pathological: 6 +- action: 6 +- rule: 30 +- policy-namespace: 4 +- example: 0 +- tcp: 16 +- target-namespace: 3 +- named-port: 24 +- update-policy: 1 +- any-peer: 2 +- target-pod-selector: 3 +- IP-block-with-except: 2 +- pods-by-label: 6 +- numbered-port: 28 +- protocol: 42 +- peer-pods: 20 +- create-policy: 2 +- policy-stack: 0 +- any-port: 14 +- delete-namespace: 1 +- delete-policy: 1 +- create-pod: 1 +- IP-block-no-except: 2 +- create-namespace: 1 +- set-pod-labels: 1 +testing 112 cases +``` + +Note that Cyclonus tags its tests based on the type of policy being created, because +the policies themselves are auto-generated, and thus have no meaningful names to be recognized by. 
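+
+The Job manifest above references a `cyclonus` service account without defining it. A minimal sketch of the RBAC behind it could look like the following — binding to `cluster-admin` keeps the example short, though a narrower ClusterRole is preferable; check the Cyclonus documentation for the exact permissions it needs:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cyclonus
+  namespace: default          # assumes the Job above runs in the default namespace
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cyclonus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin         # simplification; a scoped ClusterRole also works
+subjects:
+  - kind: ServiceAccount
+    name: cyclonus
+    namespace: default
+```
+
+Once the Job has been created, the generated report can be streamed with `kubectl logs -f job/cyclonus`.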
+ +For each test, Cyclonus outputs a truth table, which is again similar to that of the +E2E tests, along with the policy being validated: + +``` +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + creationTimestamp: null + name: base + namespace: x +spec: + egress: + - ports: + - port: 81 + to: + - namespaceSelector: + matchExpressions: + - key: ns + operator: In + values: + - "y" + - z + podSelector: + matchExpressions: + - key: pod + operator: In + values: + - a + - b + - ports: + - port: 53 + protocol: UDP + ingress: + - from: + - namespaceSelector: + matchExpressions: + - key: ns + operator: In + values: + - x + - "y" + podSelector: + matchExpressions: + - key: pod + operator: In + values: + - b + - c + ports: + - port: 80 + protocol: TCP + podSelector: + matchLabels: + pod: a + policyTypes: + - Ingress + - Egress + +0 wrong, 0 ignored, 81 correct ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| TCP/80 | X/A | X/B | X/C | Y/A | Y/B | Y/C | Z/A | Z/B | Z/C | +| TCP/81 | | | | | | | | | | +| UDP/80 | | | | | | | | | | +| UDP/81 | | | | | | | | | | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/a | X | X | X | X | X | X | X | X | X | +| | X | X | X | . | . | X | . | . | X | +| | X | X | X | X | X | X | X | X | X | +| | X | X | X | X | X | X | X | X | X | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/b | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| x/c | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/a | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/b | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| y/c | . | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/a | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/b | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +| z/c | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | +| | X | . | . | . | . | . | . | . | . | ++--------+-----+-----+-----+-----+-----+-----+-----+-----+-----+ +``` + +Both Cyclonus and the e2e tests use the same strategy to validate a Network Policy - probing pods over TCP or UDP, with +SCTP support available as well for CNIs that support it (such as Calico). 
+ +As examples of how we use Cyclonus to help make CNI implementations better from a Network Policy perspective, you can see the following issues: + +- [Antrea: NetworkPolicy: unable to allow ingress by CIDR](https://github.com/vmware-tanzu/antrea/issues/1764) +- [Calico: default missing protocol to TCP; don't let single port overwrite all ports](https://github.com/projectcalico/libcalico-go/pull/1373) +- [Cilium: Egress Network Policy allows traffic that should be denied](https://github.com/cilium/cilium/issues/14678) + +The good news is that Antrea and Calico have already merged fixes for all the issues found and other CNI providers are working on it, +with the support of SIG Network and the Network Policy subproject. + +Are you interested in verifying NetworkPolicy functionality on your cluster? +(if you care about security or offer multi-tenant SaaS, you should be) +If so, you can run the upstream end to end tests, or Cyclonus, or both. +- If you're just getting started with NetworkPolicies and want to simply + verify the "common" NetworkPolicy cases that most CNIs should be + implementing correctly, in a way that is quick to diagnose, then you're + better off running the e2e tests only. +- If you are deeply curious about your CNI provider's NetworkPolicy + implementation, and want to verify it: use Cyclonus. +- If you want to test *hundreds* of policies, and evaluate your CNI plugin + for comprehensive functionality, for deep discovery of potential security + holes: use Cyclonus, and also consider running end-to-end cluster tests. +- If you're thinking of getting involved with the upstream NetworkPolicy efforts: + use Cyclonus, and read at least an outline of which e2e tests are relevant. + +## Where to start with NetworkPolicy testing? + +- Cyclonus is easy to run on your cluster, check out the [instructions on github](https://github.com/mattfenwick/cyclonus#run-as-a-kubernetes-job), + and determine whether *your* specific CNI configuration is fully conformant to the hundreds of different + Kubernetes Network Policy API constructs. +- Alternatively, you can use a tool like [sonobuoy](https://github.com/vmware-tanzu/sonobuoy) + to run the existing E2E tests in Kubernetes, with the `--ginkgo.focus=NetworkPolicy` flag. + Make sure that you use the K8s conformance image for K8s 1.21 or above (for example, by using the `--kube-conformance-image-version v1.21.0` flag), + as older images will not have the *new* Network Policy tests in them. + +## Improvements to the NetworkPolicy API and user experience + +In addition to cleaning up the validation story for CNI plugins that implement NetworkPolicies, +subproject contributors have also spent some time improving the Kubernetes NetworkPolicy API for a few commonly requested features. +After months of deliberation, we eventually settled on a few core areas for improvement: + +- Port Range policies: We now allow you to specify a *range* of ports for a policy. + This allows users interested in scenarios like FTP or virtualization to enable advanced policies. + The port range option for network policies will be available to use in Kubernetes 1.21. + Read more in [targeting a range of ports](/docs/concepts/services-networking/network-policies/#targeting-a-range-of-ports). +- Namespace as name policies: Allowing users in Kubernetes >= 1.21 to target namespaces using names, + when building Network Policy objects. This was done in collaboration with Jordan Liggitt and Tim Hockin on the API Machinery side. 
+  This change allowed us to improve the Network Policy user experience without actually
+  changing the API! For more details, you can read
+  [Automatic labelling](/docs/concepts/overview/working-with-objects/namespaces/#automatic-labelling) in the page about Namespaces.
+  The TL;DR is that for Kubernetes 1.21 and later, **all namespaces** have the following label added by default:
+
+  ```
+  kubernetes.io/metadata.name: <name of the namespace>
+  ```
+
+This means you can write a namespace policy against this namespace, even if you can't edit its labels.
+For example, this policy will 'just work', without needing to run a command such as `kubectl edit namespace`.
+In fact, it will even work if you can't edit or view this namespace's data at all, because of the magic of API server defaulting.
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: test-network-policy
+  namespace: default
+spec:
+  podSelector:
+    matchLabels:
+      role: db
+  policyTypes:
+  - Ingress
+  # Allow inbound traffic to Pods labelled role=db, in the namespace 'default',
+  # provided that the source is a Pod in the namespace 'my-namespace'
+  ingress:
+  - from:
+    - namespaceSelector:
+        matchLabels:
+          kubernetes.io/metadata.name: my-namespace
+```
+
+## Results
+
+In our tests, we found that:
+
+- Antrea and Calico are at a point where they support all of Cyclonus's scenarios, modulo a few very minor tweaks which we've made.
+- Cilium also conformed to the majority of the policies, outside of known features that aren't fully supported (for example, related to the way Cilium deals with pod CIDR policies).
+
+If you are a CNI provider and interested in helping us do a better job of curating large tests of network policies, please reach out! We are continuing to curate the Network Policy conformance results from Cyclonus [here](https://raw.githubusercontent.com/K8sbykeshed/cyclonus-artifacts/), but
+we are not capable of maintaining all of the subtleties in NetworkPolicy testing data on our own. For now, we use GitHub Actions and Kind to test in CI.
+
+## The Future
+
+We're also working on some improvements for the future of Network Policies, including:
+
+- Fully qualified domain policies: The Google Cloud team created a prototype (which
+  we are really excited about) of [FQDN policies](https://github.com/GoogleCloudPlatform/gke-fqdnnetworkpolicies-golang).
+  This tool uses the Network Policy API to enforce policies against L7 URLs, by finding
+  their IPs and blocking them proactively when requests are made.
+- Cluster Administrative policies: We're working hard at enabling *administrative* or
+  *cluster scoped* Network Policies for the future. These are being presented iteratively to the NetworkPolicy subproject.
+  You can read about them in [Cluster Scoped Network Policy](https://docs.google.com/presentation/d/1Jk86jtS3TcGAugVSM_I4Yds5ukXFJ4F1ZCvxN5v2BaY/).
+
+The Network Policy subproject meets on Mondays at 4PM EST. For details, check out the
+[SIG Network community repo](https://github.com/kubernetes/community/tree/master/sig-network). We'd love
+to hang out with you, hack on stuff, and help you adopt K8s Network Policies for your cluster wherever possible.
+
+### A quick note on User Feedback
+
+We've gotten a lot of ideas and feedback from users on Network Policies. A lot of people have interesting ideas,
+but we've found that as a subproject, very few people were deeply interested in implementing them to the full extent.
+
+Almost every change to the NetworkPolicy API includes weeks or months of discussion to cover different cases, and ensure no CVEs are being introduced. Thus, long-term ownership
+is the biggest impediment to improving the NetworkPolicy user experience over time.
+
+- We've documented a lot of the history of the Network Policy dialogue [here](https://github.com/jayunit100/network-policy-subproject/blob/master/history.md).
+- We've also taken a poll of users about what they'd like to see in the Network Policy API [here](https://github.com/jayunit100/network-policy-subproject/blob/master/p0_user_stories.md).
+
+We encourage anyone to provide us with feedback, but our most pressing issues right now
+involve finding *long-term owners to help us drive changes*.
+
+This doesn't require a lot of technical knowledge, but rather a long-term commitment to helping us stay organized, do paperwork,
+and iterate through the many stages of the K8s feature process. If you want to help us and get involved, please reach out on the SIG Network mailing list, or in the SIG Network room in the k8s.io Slack!
+
+Anyone can put an oar in the water and help make NetworkPolicies better!
diff --git a/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md b/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md
new file mode 100644
index 0000000000000..155ff5a3b31f5
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-20-annotating-k8s-for-humans.md
@@ -0,0 +1,100 @@
+---
+layout: blog
+title: 'Annotating Kubernetes Services for Humans'
+date: 2021-04-20
+slug: annotating-k8s-for-humans
+---
+
+**Author:** Richard Li, Ambassador Labs
+
+Have you ever been asked to troubleshoot a failing Kubernetes service and struggled to find basic information about the service, such as the source repository and owner?
+
+One of the problems as Kubernetes applications grow is the proliferation of services. As the number of services grows, developers start to specialize in working with specific services. When it comes to troubleshooting, however, developers need to be able to find the source, understand the service and dependencies, and chat with the owning team for any service.
+
+## Human service discovery
+
+Troubleshooting always begins with information gathering. While much attention has been paid to centralizing machine data (e.g., logs, metrics), much less attention has been given to the human aspect of service discovery. Who owns a particular service? What Slack channel does the team work on? Where is the source for the service? What issues are currently known and being tracked?
+
+## Kubernetes annotations
+
+Kubernetes annotations are designed to solve exactly this problem. Oft-overlooked, annotations let you add metadata to Kubernetes objects. The Kubernetes documentation says annotations can “attach arbitrary non-identifying metadata to objects.” This means that annotations should be used for attaching metadata that is external to Kubernetes (i.e., metadata that Kubernetes won’t use to identify objects). As such, annotations can contain any type of data. This is in contrast to labels, which are designed for uses internal to Kubernetes. As such, label structure and values are [constrained](/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set) so they can be efficiently used by Kubernetes.
+
+
+## Kubernetes annotations in action
+
+Here is an example. Imagine you have a Kubernetes service for quoting, called the quote service. You can do the following:
+
+```
+kubectl annotate service quote a8r.io/owner="@sally"
+```
+
+In this example, we’ve just added an annotation called `a8r.io/owner` with the value of @sally. Now, we can use `kubectl describe` to get the information.
+
+```
+Name:              quote
+Namespace:         default
+Labels:            <none>
+Annotations:       a8r.io/owner: @sally
+Selector:          app=quote
+Type:              ClusterIP
+IP:                10.109.142.131
+Port:              http  80/TCP
+TargetPort:        8080/TCP
+Endpoints:         <none>
+Session Affinity:  None
+Events:            <none>
+```
+
+If you’re practicing GitOps (and you should be!), you’ll want to code these values directly into your Kubernetes manifest, e.g.,
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: quote
+  annotations:
+    a8r.io/owner: "@sally"
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+  selector:
+    app: quote
+```
+
+## A Convention for Annotations
+
+Adopting a common convention for annotations ensures consistency and understandability. Typically, you’ll want to attach the annotation to the service object, as services are the high-level resource that maps most clearly to a team’s responsibility. Namespacing your annotations is also very important. Here is one set of conventions, documented at [a8r.io](https://a8r.io), and reproduced below:
+
+{{< table caption="Annotation convention for human-readable services" >}}
+| Annotation                                 | Description                                  |
+| ------------------------------------------ | ------------------------------------------- |
+| `a8r.io/description` | Unstructured text description of the service for humans. |
+| `a8r.io/owner` | SSO username (GitHub), email address (linked to GitHub account), or unstructured owner description. |
+| `a8r.io/chat` | Slack channel, or link to external chat system. |
+| `a8r.io/bugs` | Link to external bug tracker. |
+| `a8r.io/logs` | Link to external log viewer. |
+| `a8r.io/documentation` | Link to external project documentation. |
+| `a8r.io/repository` | Link to external VCS repository. |
+| `a8r.io/support` | Link to external support center. |
+| `a8r.io/runbook` | Link to external project runbook. |
+| `a8r.io/incidents` | Link to external incident dashboard. |
+| `a8r.io/uptime` | Link to external uptime dashboard. |
+| `a8r.io/performance` | Link to external performance dashboard. |
+| `a8r.io/dependencies` | Unstructured text describing the service dependencies for humans. |
+{{< /table >}}
+
+## Visualizing annotations: Service Catalogs
+
+As the number of microservices and annotations proliferates, running `kubectl describe` can get tedious. Moreover, using `kubectl describe` requires every developer to have some direct access to the Kubernetes cluster. Over the past few years, service catalogs have gained greater visibility in the Kubernetes ecosystem. Popularized by tools such as [Shopify's ServicesDB](https://shopify.engineering/scaling-mobile-development-by-treating-apps-as-services) and [Spotify's System Z](https://dzone.com/articles/modeling-microservices-at-spotify-with-petter-mari), service catalogs are internally-facing developer portals that present critical information about microservices.
+
+Note that these service catalogs should not be confused with the [Kubernetes Service Catalog project](https://svc-cat.io/). Built on the Open Service Broker API, the Kubernetes Service Catalog enables Kubernetes operators to plug in different services (e.g., databases) to their cluster.
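+
+Whether you browse these annotations with `kubectl describe` or through a service catalog, the data comes from the manifests themselves. As a purely illustrative sketch (every value below is invented), the quote Service with a few more of the conventions above filled in might look like:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: quote
+  annotations:
+    a8r.io/owner: "@sally"
+    a8r.io/description: "Returns a random quote over HTTP."
+    a8r.io/chat: "#quote-service"                                   # hypothetical Slack channel
+    a8r.io/repository: "https://github.com/example/quote-service"   # hypothetical repository
+    a8r.io/runbook: "https://wiki.example.com/runbooks/quote"       # hypothetical runbook
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+  selector:
+    app: quote
+```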
+
+## Annotate your services now and thank yourself later
+
+Much like implementing observability within microservice systems, you often don’t realize that you need human service discovery until it’s too late. Don't wait until something is on fire in production to start wishing you had implemented better metrics and also documented how to get in touch with the part of your organization that looks after it.
+
+There are enormous benefits to building an effective “version 0” service: a [_dancing skeleton_](https://containerjournal.com/topics/container-management/dancing-skeleton-apis-and-microservices/) application with a thin slice of complete functionality that can be deployed to production with a minimal yet effective continuous delivery pipeline.
+
+Adding service annotations should be an essential part of your “version 0” for all of your services. Add them now, and you’ll thank yourself later.
diff --git a/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md b/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md
new file mode 100644
index 0000000000000..2c169674723cb
--- /dev/null
+++ b/content/en/blog/_posts/2021-04-21-Graceful-Node-Shutdown-Beta.md
@@ -0,0 +1,80 @@
+---
+layout: blog
+title: 'Graceful Node Shutdown Goes Beta'
+date: 2021-04-21
+slug: graceful-node-shutdown-beta
+---
+
+**Authors:** David Porter (Google), Mrunal Patel (Red Hat), and Tim Bannister (The Scale Factory)
+
+Graceful node shutdown, beta in 1.21, enables kubelet to gracefully evict pods during a node shutdown.
+
+Kubernetes is a distributed system and as such we need to be prepared for inevitable failures — nodes will fail, containers might crash or be restarted, and - ideally - your workloads will be able to withstand these catastrophic events.
+
+One of the common classes of issues is workload failures on node shutdown or restart. The best practice prior to bringing your node down is to [safely drain and cordon your node](/docs/tasks/administer-cluster/safely-drain-node/). This will ensure that all pods running on this node can safely be evicted. An eviction will ensure your pods can follow the expected [pod termination lifecycle](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), meaning your containers receive a SIGTERM and/or run their `preStop` hooks.
+
+Prior to Kubernetes 1.20 (when graceful node shutdown was introduced as an alpha feature), safe node draining was not easy: it required users to manually take action and drain the node beforehand. If someone or something shut down your node without draining it first, most likely your pods would not be safely evicted from your node and would shut down abruptly. Other services talking to those pods might see errors due to the pods exiting abruptly. Examples of this situation include a reboot to apply security patches, or the preemption of short-lived cloud compute instances.
+
+Kubernetes 1.21 brings graceful node shutdown to beta. Graceful node shutdown gives you more control over some of those unexpected shutdown situations. With graceful node shutdown, the kubelet is aware of underlying system shutdown events and can propagate these events to pods, ensuring containers can shut down as gracefully as possible. This gives the containers a chance to checkpoint their state or release back any resources they are holding.
+
+Note that, for the best availability, even with graceful node shutdown, you should still design your deployments to be resilient to node failures.
+
+## How does it work?
+On Linux, your system can shut down in many different situations. For example: +* A user or script running `shutdown -h now` or `systemctl poweroff` or `systemctl reboot`. +* Physically pressing a power button on the machine. +* Stopping a VM instance on a cloud provider, e.g. `gcloud compute instances stop` on GCP. +* A Preemptible VM or Spot Instance that your cloud provider can terminate unexpectedly, but with a brief warning. + + +Many of these situations can be unexpected and there is no guarantee that a cluster administrator drained the node prior to these events. With the graceful node shutdown feature, kubelet uses a systemd mechanism called ["Inhibitor Locks"](https://www.freedesktop.org/wiki/Software/systemd/inhibit) to allow draining in most cases. Using Inhibitor Locks, kubelet instructs systemd to postpone system shutdown for a specified duration, giving a chance for the node to drain and evict pods on the system. + +Kubelet makes use of this mechanism to ensure your pods will be terminated cleanly. When the kubelet starts, it acquires a systemd delay-type inhibitor lock. When the system is about to shut down, the kubelet can delay that shutdown for a configurable, short duration utilizing the delay-type inhibitor lock it acquired earlier. This gives your pods extra time to terminate. As a result, even during unexpected shutdowns, your application will receive a SIGTERM, [preStop hooks](/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) will execute, and kubelet will properly update `Ready` node condition and respective pod statuses to the api-server. + +For example, on a node with graceful node shutdown enabled, you can see that the inhibitor lock is taken by the kubelet: + +``` +kubelet-node ~ # systemd-inhibit --list + Who: kubelet (UID 0/root, PID 1515/kubelet) + What: shutdown + Why: Kubelet needs time to handle node shutdown + Mode: delay + +1 inhibitors listed. +``` + +One important consideration we took when designing this feature is that not all pods are created equal. For example, some of the pods running on a node such as a logging related daemonset should stay running as long as possible to capture important logs during the shutdown itself. As a result, pods are split into two categories: "regular" and "critical". [Critical pods](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical) are those that have `priorityClassName` set to `system-cluster-critical` or `system-node-critical`; all other pods are considered regular. + +In our example, the logging DaemonSet would run as a critical pod. During the graceful node shutdown, regular pods are terminated first, followed by critical pods. As an example, this would allow a critical pod associated with a logging daemonset to continue functioning, and collecting logs during the termination of regular pods. + +We will evaluate during the beta phase if we need more flexibility for different pod priority classes and add support if needed, please let us know if you have some scenarios in mind. + + +## How do I use it? + +Graceful node shutdown is controlled with the `GracefulNodeShutdown` [feature gate](/docs/reference/command-line-tools-reference/feature-gates) and is enabled by default in Kubernetes 1.21. + +You can configure the graceful node shutdown behavior using two kubelet configuration options: `ShutdownGracePeriod` and `ShutdownGracePeriodCriticalPods`. 
To configure these options, you edit the kubelet configuration file that is passed to kubelet via the `--config` flag; for more details, refer to [Set kubelet parameters via a configuration file](/docs/tasks/administer-cluster/kubelet-config-file/). + + +During a shutdown, kubelet terminates pods in two phases. You can configure how long each of these phases lasts. +1. Terminate regular pods running on the node. +2. Terminate critical pods running on the node. + +The settings that control the duration of shutdown are: +* `ShutdownGracePeriod` + * Specifies the total duration that the node should delay the shutdown by. This is the total grace period for pod termination for both regular and critical pods. +* `ShutdownGracePeriodCriticalPods` + * Specifies the duration used to terminate critical pods during a node shutdown. This should be less than `ShutdownGracePeriod`. + +For example, if `ShutdownGracePeriod=30s`, and `ShutdownGracePeriodCriticalPods=10s`, kubelet will delay the node shutdown by 30 seconds. During this time, the first 20 seconds (30-10) would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods. + +Note that by default, both configuration options described above, `ShutdownGracePeriod` and `ShutdownGracePeriodCriticalPods` are set to zero, so you will need to configure them as appropriate for your environment to activate graceful node shutdown functionality. + +## How can I learn more? +* Read the [documentation](/docs/concepts/architecture/nodes/#graceful-node-shutdown) +* Read the enhancement proposal, [KEP 2000](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2000-graceful-node-shutdown) +* View the [code](https://github.com/kubernetes/kubernetes/tree/release-1.21/pkg/kubelet/nodeshutdown) + +## How do I get involved? +Your feedback is always welcome! SIG Node meets regularly and can be reached via [Slack](https://slack.k8s.io) (channel `#sig-node`), or the SIG's [mailing list](https://github.com/kubernetes/community/tree/master/sig-node#contact) diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png b/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png new file mode 100644 index 0000000000000..ef589bc9144e0 Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/gateway-api-resources.png differ diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png b/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png new file mode 100644 index 0000000000000..4dc0cb9457f2f Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/httproute.png differ diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/index.md b/content/en/blog/_posts/2021-04-22-gateway-api/index.md new file mode 100644 index 0000000000000..a7d54ad6450fc --- /dev/null +++ b/content/en/blog/_posts/2021-04-22-gateway-api/index.md @@ -0,0 +1,197 @@ + +--- +layout: blog +title: 'Evolving Kubernetes networking with the Gateway API' +date: 2021-04-22 +slug: evolving-kubernetes-networking-with-the-gateway-api +--- + +**Authors:** Mark Church (Google), Harry Bagdi (Kong), Daneyon Hanson (Red Hat), Nick Young (VMware), Manuel Zapf (Traefik Labs) + +The Ingress resource is one of the many Kubernetes success stories. It created a [diverse ecosystem of Ingress controllers](/docs/concepts/services-networking/ingress-controllers/) which were used across hundreds of thousands of clusters in a standardized and consistent way. 
This standardization helped users adopt Kubernetes. However, five years after the creation of Ingress, there are signs of fragmentation into different but [strikingly similar CRDs](https://dave.cheney.net/paste/ingress-is-dead-long-live-ingressroute.pdf) and [overloaded annotations](https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/). The same portability that made Ingress pervasive also limited its future. + +It was at Kubecon 2019 San Diego when a passionate group of contributors gathered to discuss the [evolution of Ingress](https://static.sched.com/hosted_files/kccncna19/a5/Kubecon%20San%20Diego%202019%20-%20Evolving%20the%20Kubernetes%20Ingress%20APIs%20to%20GA%20and%20Beyond%20%5BPUBLIC%5D.pdf). The discussion overflowed to the hotel lobby across the street and what came out of it would later be known as the [Gateway API](https://gateway-api.sigs.k8s.io). This discussion was based on a few key assumptions: + +1. The API standards underlying route matching, traffic management, and service exposure are commoditized and provide little value to their implementers and users as custom APIs +2. It’s possible to represent L4/L7 routing and traffic management through common core API resources +3. It’s possible to provide extensibility for more complex capabilities in a way that does not sacrifice the user experience of the core API + + +## Introducing the Gateway API + +This led to design principles that allow the Gateway API to improve upon Ingress: + +- **Expressiveness** - In addition to HTTP host/path matching and TLS, Gateway API can express capabilities like HTTP header manipulation, traffic weighting & mirroring, TCP/UDP routing, and other capabilities that were only possible in Ingress through custom annotations. +- **Role-oriented design** - The API resource model reflects the separation of responsibilities that is common in routing and Kubernetes service networking. +- **Extensibility** - The resources allow arbitrary configuration attachment at various layers within the API. This makes granular customization possible at the most appropriate places. +- **Flexible conformance** - The Gateway API defines varying conformance levels - core (mandatory support), extended (portable if supported), and custom (no portability guarantee), known together as [flexible conformance](https://gateway-api.sigs.k8s.io/concepts/guidelines/#conformance). This promotes a highly portable core API (like Ingress) that still gives flexibility for Gateway controller implementers. + +### What does the Gateway API look like? + +The Gateway API introduces a few new resource types: + +- **[GatewayClasses](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.GatewayClass)** are cluster-scoped resources that act as templates to explicitly define behavior for Gateways derived from them. This is similar in concept to StorageClasses, but for networking data-planes. +- **[Gateways](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.Gateway)** are the deployed instances of GatewayClasses. They are the logical representation of the data-plane which performs routing, which may be in-cluster proxies, hardware LBs, or cloud LBs. +- **Routes** are not a single resource, but represent many different protocol-specific Route resources. 
The [HTTPRoute](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.HTTPRoute) has matching, filtering, and routing rules that get applied to Gateways that can process HTTP and HTTPS traffic. Similarly, there are [TCPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TCPRoute), [UDPRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.UDPRoute), and [TLSRoutes](https://gateway-api.sigs.k8s.io/references/spec/#networking.x-k8s.io/v1alpha1.TLSRoute) which also have protocol-specific semantics. This model also allows the Gateway API to incrementally expand its protocol support in the future. + +![The resources of the Gateway API](gateway-api-resources.png) + +### Gateway Controller Implementations + +The good news is that although Gateway is in [Alpha](https://github.com/kubernetes-sigs/gateway-api/releases), there are already several [Gateway controller implementations](https://gateway-api.sigs.k8s.io/references/implementations/) that you can run. Since it’s a standardized spec, the following example could be run on any of them and should function the exact same way. Check out [getting started](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see how to install and use one of these Gateway controllers. + +## Getting Hands-on with the Gateway API + +In the following example, we’ll demonstrate the relationships between the different API Resources and walk you through a common use case: + +* Team foo has their app deployed in the foo Namespace. They need to control the routing logic for the different pages of their app. +* Team bar is running in the bar Namespace. They want to be able to do blue-green rollouts of their application to reduce risk. +* The platform team is responsible for managing the load balancer and network security of all the apps in the Kubernetes cluster. + +The following foo-route does path matching to various Services in the foo Namespace and also has a default route to a 404 server. This exposes foo-auth and foo-home Services via `foo.example.com/login` and `foo.example.com/home` respectively.: + + +```yaml +kind: HTTPRoute +apiVersion: networking.x-k8s.io/v1alpha1 +metadata: + name: foo-route + namespace: foo + labels: + gateway: external-https-prod +spec: + hostnames: + - "foo.example.com" + rules: + - matches: + - path: + type: Prefix + value: /login + forwardTo: + - serviceName: foo-auth + port: 8080 + - matches: + - path: + type: Prefix + value: /home + forwardTo: + - serviceName: foo-home + port: 8080 + - matches: + - path: + type: Prefix + value: / + forwardTo: + - serviceName: foo-404 + port: 8080 +``` + + +The bar team, operating in the bar Namespace of the same Kubernetes cluster, also wishes to expose their application to the internet, but they also want to control their own canary and blue-green rollouts. 
The following HTTPRoute is configured for the following behavior: + +* For traffic to `bar.example.com`: + * Send 90% of the traffic to bar-v1 + * Send 10% of the traffic to bar-v2 +* For traffic to `bar.example.com` with the HTTP header `env: canary`: + + * Send all the traffic to bar-v2 + +![The routing rules configured for the bar-v1 and bar-v2 Services](httproute.png) + + + +```yaml +kind: HTTPRoute +apiVersion: networking.x-k8s.io/v1alpha1 +metadata: + name: bar-route + namespace: bar + labels: + gateway: external-https-prod +spec: + hostnames: + - "bar.example.com" + rules: + - forwardTo: + - serviceName: bar-v1 + port: 8080 + weight: 90 + - serviceName: bar-v2 + port: 8080 + weight: 10 + - matches: + - headers: + values: + env: canary + forwardTo: + - serviceName: bar-v2 + port: 8080 +``` + + + +### Route and Gateway Binding + +So we have two HTTPRoutes matching and routing traffic to different Services. You might be wondering, where are these Services accessible? Through which networks or IPs are they exposed? + +How Routes are exposed to clients is governed by [Route binding](https://gateway-api.sigs.k8s.io/concepts/api-overview/#route-binding), which describes how Routes and Gateways create a bidirectional relationship between each other. When Routes are bound to a Gateway it means their collective routing rules are configured on the underlying load balancers or proxies and the Routes are accessible through the Gateway. Thus, a Gateway is a logical representation of a networking data plane that can be configured through Routes. + + +![How Routes bind with Gateways](route-binding.png ) + +### Administrative Delegation + +The split between Gateway and Route resources allows the cluster administrator to delegate some of the routing configuration to individual teams while still retaining centralized control. The following Gateway resource exposes HTTPS on port 443 and terminates all traffic on the port with a certificate controlled by the cluster administrator. + + +```yaml +kind: Gateway +apiVersion: networking.x-k8s.io/v1alpha1 +metadata: + name: prod-web +spec: + gatewayClassName: acme-lb + listeners: + - protocol: HTTPS + port: 443 + routes: + kind: HTTPRoute + selector: + matchLabels: + gateway: external-https-prod + namespaces: + from: All + tls: + certificateRef: + name: admin-controlled-cert +``` + + +The following HTTPRoute shows how the Route can ensure it matches the Gateway's selector via it’s `kind` (HTTPRoute) and resource labels (`gateway=external-https-prod`). + + +```yaml +# Matches the required kind selector on the Gateway +kind: HTTPRoute +apiVersion: networking.x-k8s.io/v1alpha1 +metadata: + name: foo-route + namespace: foo-ns + labels: + + # Matches the required label selector on the Gateway + gateway: external-https-prod +... +``` + +### Role Oriented Design + +When you put it all together, you have a single load balancing infrastructure that can be safely shared by multiple teams. The Gateway API not only a more expressive API for advanced routing, but is also a role-oriented API, designed for multi-tenant infrastructure. Its extensibility ensures that it will evolve for future use-cases while preserving portability. Ultimately these characteristics will allow Gateway API to adapt to different organizational models and implementations well into the future. + +### Try it out and get involved + +There are many resources to check out to learn more. 
+ +* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides/getting-started/) to see what use-cases can be addressed. +* Try out one of the [existing Gateway controllers ](https://gateway-api.sigs.k8s.io/references/implementations/) +* Or [get involved](https://gateway-api.sigs.k8s.io/contributing/community/) and help design and influence the future of Kubernetes service networking! \ No newline at end of file diff --git a/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png b/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png new file mode 100644 index 0000000000000..4e42643316584 Binary files /dev/null and b/content/en/blog/_posts/2021-04-22-gateway-api/route-binding.png differ diff --git a/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md new file mode 100644 index 0000000000000..686016e3c5cef --- /dev/null +++ b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/index.md @@ -0,0 +1,80 @@ +--- +layout: blog +title: 'Kubernetes 1.21: Metrics Stability hits GA' +date: 2021-04-23 +slug: kubernetes-release-1.21-metrics-stability-ga +--- + +**Authors**: Han Kang (Google), Elana Hashman (Red Hat) + +Kubernetes 1.21 marks the graduation of the metrics stability framework and along with it, the first officially supported stable metrics. Not only do stable metrics come with supportability guarantees, the metrics stability framework brings escape hatches that you can use if you encounter problematic metrics. + +See the list of [stable Kubernetes metrics here](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) + +### What are stable metrics and why do we need them? +A stable metric is one which, from a consumption point of view, can be reliably consumed across a number of Kubernetes versions without risk of ingestion failure. + +Metrics stability is an ongoing community concern. Cluster monitoring infrastructure often assumes the stability of some control plane metrics, so we have introduced a mechanism for versioning metrics as a proper API, with stability guarantees around a formal metrics deprecation process. + +### What are the stability levels for metrics? + +Metrics can currently have one of two stability levels: alpha or stable. + +_Alpha metrics_ have no stability guarantees; as such they can be modified or deleted at any time. At this time, all Kubernetes metrics implicitly fall into this category. + +_Stable metrics_ can be guaranteed to not change, except that the metric may become marked deprecated for a future Kubernetes version. By not change, we mean three things: + +1. the metric itself will not be deleted or renamed +2. the type of metric will not be modified +3. no labels can be added or removed from this metric + +From an ingestion point of view, it is backwards-compatible to add or remove possible values for labels which already do exist, but not labels themselves. Therefore, adding or removing values from an existing label is permitted. Stable metrics can also be marked as deprecated for a future Kubernetes version, since this is tracked in a metadata field and does not actually change the metric itself. + +Removing or adding labels from stable metrics is not permitted. In order to add or remove a label from an existing stable metric, one would have to introduce a new metric and deprecate the stable one; otherwise this would violate compatibility agreements. + + +#### How are metrics deprecated? 
+ +While deprecation policies only affect stability guarantees for stable metrics (and not alpha ones), deprecation information may be optionally provided on alpha metrics to help component owners inform users of future intent and assist with transition plans. + +A stable metric undergoing the deprecation process signals that the metric will eventually be deleted. The metrics deprecation lifecycle looks roughly like this (with each stage representing a Kubernetes release): + +![Stable metric → Deprecated metric → Hidden metric → Deletion](lifecycle-metric.png) + +_Deprecated metrics_ have the same stability guarantees of their stable counterparts. If a stable metric is deprecated, then a deprecated stable metric is guaranteed to not change. When deprecating a stable metric, a future Kubernetes release is specified as the point from which the metric will be considered deprecated. + +Deprecated metrics will have their description text prefixed with a deprecation notice string “(Deprecated from x.y)” and a warning log will be emitted during metric registration, in the spirit of the official Kubernetes deprecation policy. + +Like their stable metric counterparts, deprecated metrics will be automatically registered to the metrics endpoint. On a subsequent release (when the metric's deprecatedVersion is equal to _current\_kubernetes\_version - 4_)), a deprecated metric will become a _hidden_ metric. _Hidden metrics_ are not automatically registered, and hence are hidden by default from end users. These hidden metrics can be explicitly re-enabled for one release after they reach the hidden state, to provide a migration path for cluster operators. + + +#### As an owner of a Kubernetes component, how do I add stable metrics? + +During metric instantiation, stability can be specified by setting the metadata field, StabilityLevel, to “Stable”. When a StabilityLevel is not explicitly set, metrics default to “Alpha” stability. Note that metrics which have fields determined at runtime cannot be marked as Stable. Stable metrics will be detected during static analysis during the pre-commit phase, and must be reviewed by sig-instrumentation. + +```golang +var metricDefinition = kubemetrics.CounterOpts{ + Name: "some_metric", + Help: "some description", + StabilityLevel: kubemetrics.STABLE, +} +``` +For more examples of setting metrics stability and deprecation, see the [Metrics Stability KEP](http://bit.ly/metrics-stability). + + +### How do I get involved? + +This project, like all of Kubernetes, is the result of hard work by many contributors from diverse backgrounds working together. +We offer a huge thank you to all the contributors in Kubernetes community who helped review the design and implementation of the project, including but not limited to the following: + +- Han Kang (logicalhan) +- Frederic Branczyk (brancz) +- Marek Siarkowicz (serathius) +- Elana Hashman (ehashman) +- Solly Ross (DirectXMan12) +- Stefan Schimanski (sttts) +- David Ashpole (dashpole) +- Yuchen Zhou (yoyinzyc) +- Yu Yi (erain) + +If you’re interested in getting involved with the design and development of instrumentation or any part of the Kubernetes metrics system, join the [Kubernetes Instrumentation Special Interest Group (SIG)](https://github.com/kubernetes/community/tree/master/sig-instrumentation). We’re rapidly growing and always welcome new contributors. 
diff --git a/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png new file mode 100644 index 0000000000000..7618a98c5c7c6 Binary files /dev/null and b/content/en/blog/_posts/2021-04-23-metrics-stability-ga/lifecycle-metric.png differ diff --git a/content/en/case-studies/adform/adform_featured_logo.svg b/content/en/case-studies/adform/adform_featured_logo.svg index ce058af82ed09..b31ef3235a471 100644 --- a/content/en/case-studies/adform/adform_featured_logo.svg +++ b/content/en/case-studies/adform/adform_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/adidas/adidas-featured.svg b/content/en/case-studies/adidas/adidas-featured.svg index 07e595356aa78..a53d4675522fe 100644 --- a/content/en/case-studies/adidas/adidas-featured.svg +++ b/content/en/case-studies/adidas/adidas-featured.svg @@ -1 +1 @@ -kubernetes.io-54664 \ No newline at end of file +kubernetes.io-54664 \ No newline at end of file diff --git a/content/en/case-studies/amadeus/amadeus_featured.svg b/content/en/case-studies/amadeus/amadeus_featured.svg index 6b711f9baeccb..9d0c40b8e18c3 100644 --- a/content/en/case-studies/amadeus/amadeus_featured.svg +++ b/content/en/case-studies/amadeus/amadeus_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/ancestry/ancestry_featured.svg b/content/en/case-studies/ancestry/ancestry_featured.svg index 301e6fec92327..9a3e80186b0be 100644 --- a/content/en/case-studies/ancestry/ancestry_featured.svg +++ b/content/en/case-studies/ancestry/ancestry_featured.svg @@ -1 +1 @@ -kubernetes.io-logos-ancestry \ No newline at end of file +kubernetes.io-logos-ancestry \ No newline at end of file diff --git a/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg b/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg index 1d20786a5d2ef..4eb8a51127ea2 100644 --- a/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg +++ b/content/en/case-studies/ant-financial/ant-financial_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/appdirect/appdirect_featured_logo.svg b/content/en/case-studies/appdirect/appdirect_featured_logo.svg index d655c7f2fa0de..36fcba1abba36 100644 --- a/content/en/case-studies/appdirect/appdirect_featured_logo.svg +++ b/content/en/case-studies/appdirect/appdirect_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/babylon/babylon_featured_logo.svg b/content/en/case-studies/babylon/babylon_featured_logo.svg index 8bea0b8fc3392..e84da19268e33 100644 --- a/content/en/case-studies/babylon/babylon_featured_logo.svg +++ b/content/en/case-studies/babylon/babylon_featured_logo.svg @@ -1 +1 @@ -babylon_featured_logo \ No newline at end of file +babylon_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/blablacar/blablacar_featured.svg b/content/en/case-studies/blablacar/blablacar_featured.svg index f66f6ca95485d..5b887f24a8722 100644 --- a/content/en/case-studies/blablacar/blablacar_featured.svg +++ b/content/en/case-studies/blablacar/blablacar_featured.svg @@ -1 +1 @@ 
-kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/blackrock/blackrock_featured.svg b/content/en/case-studies/blackrock/blackrock_featured.svg index d70c169bc8a12..f98ea323d73e1 100644 --- a/content/en/case-studies/blackrock/blackrock_featured.svg +++ b/content/en/case-studies/blackrock/blackrock_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/booking-com/booking.com_featured_logo.svg b/content/en/case-studies/booking-com/booking.com_featured_logo.svg index 298c77c773922..0b245c27001af 100644 --- a/content/en/case-studies/booking-com/booking.com_featured_logo.svg +++ b/content/en/case-studies/booking-com/booking.com_featured_logo.svg @@ -1 +1 @@ -booking.com_featured_logo \ No newline at end of file +booking.com_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg b/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg index b844d998462b0..3ce58c68f7858 100644 --- a/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg +++ b/content/en/case-studies/booz-allen/booz-allen-featured-logo.svg @@ -1 +1 @@ -booz-allen-featured \ No newline at end of file +booz-allen-featured \ No newline at end of file diff --git a/content/en/case-studies/bose/bose_featured_logo.svg b/content/en/case-studies/bose/bose_featured_logo.svg index 13f1ff261622c..58b2add6144b8 100644 --- a/content/en/case-studies/bose/bose_featured_logo.svg +++ b/content/en/case-studies/bose/bose_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/box/box_featured.svg b/content/en/case-studies/box/box_featured.svg index c4ebadaffe526..2b4fb6552b610 100644 --- a/content/en/case-studies/box/box_featured.svg +++ b/content/en/case-studies/box/box_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/buffer/buffer_featured.svg b/content/en/case-studies/buffer/buffer_featured.svg index 6527f94f4d64c..b8e321f31d633 100644 --- a/content/en/case-studies/buffer/buffer_featured.svg +++ b/content/en/case-studies/buffer/buffer_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/capital-one/capitalone_featured_logo.svg b/content/en/case-studies/capital-one/capitalone_featured_logo.svg index 28552e569c441..124adae9af21e 100644 --- a/content/en/case-studies/capital-one/capitalone_featured_logo.svg +++ b/content/en/case-studies/capital-one/capitalone_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg b/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg index a1b39cc109112..aae1978cf2129 100644 --- a/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg +++ b/content/en/case-studies/chinaunicom/chinaunicom_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg 
b/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg index 1d90a6536f014..44ac9b0b1d9fa 100644 --- a/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg +++ b/content/en/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg b/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg index 3c7a565f11563..a4f020161a784 100644 --- a/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg +++ b/content/en/case-studies/crowdfire/crowdfire_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/denso/denso_featured_logo.svg b/content/en/case-studies/denso/denso_featured_logo.svg index 375d9cefbc091..e2b26b2c8c30c 100644 --- a/content/en/case-studies/denso/denso_featured_logo.svg +++ b/content/en/case-studies/denso/denso_featured_logo.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/content/en/case-studies/golfnow/golfnow_featured.svg b/content/en/case-studies/golfnow/golfnow_featured.svg index 761782a7563c2..b5b42d6fcdc08 100644 --- a/content/en/case-studies/golfnow/golfnow_featured.svg +++ b/content/en/case-studies/golfnow/golfnow_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/haufegroup/haufegroup_featured.svg b/content/en/case-studies/haufegroup/haufegroup_featured.svg index b552d117739db..a61b577ab884e 100644 --- a/content/en/case-studies/haufegroup/haufegroup_featured.svg +++ b/content/en/case-studies/haufegroup/haufegroup_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/huawei/huawei_featured.svg b/content/en/case-studies/huawei/huawei_featured.svg index 860f62dd4e7a9..a8a8f22c8f9a1 100644 --- a/content/en/case-studies/huawei/huawei_featured.svg +++ b/content/en/case-studies/huawei/huawei_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/ibm/ibm_featured_logo.svg b/content/en/case-studies/ibm/ibm_featured_logo.svg index 577d8e97d960d..f79fd7847bf7c 100644 --- a/content/en/case-studies/ibm/ibm_featured_logo.svg +++ b/content/en/case-studies/ibm/ibm_featured_logo.svg @@ -1 +1 @@ -ibm_featured_logo \ No newline at end of file +ibm_featured_logo \ No newline at end of file diff --git a/content/en/case-studies/ing/ing_featured_logo.svg b/content/en/case-studies/ing/ing_featured_logo.svg index 20418a0dbfcbf..5a2df497c7bb0 100644 --- a/content/en/case-studies/ing/ing_featured_logo.svg +++ b/content/en/case-studies/ing/ing_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/naic/naic_featured_logo.svg b/content/en/case-studies/naic/naic_featured_logo.svg index 100d158b9bd1c..b4af63931dbb4 100644 --- a/content/en/case-studies/naic/naic_featured_logo.svg +++ b/content/en/case-studies/naic/naic_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git 
a/content/en/case-studies/nav/nav_featured_logo.svg b/content/en/case-studies/nav/nav_featured_logo.svg index 79ae4384598c1..42b4ffa9674d7 100644 --- a/content/en/case-studies/nav/nav_featured_logo.svg +++ b/content/en/case-studies/nav/nav_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/nerdalize/nerdalize_featured_logo.svg b/content/en/case-studies/nerdalize/nerdalize_featured_logo.svg index 43a001461d6c5..aa2661e503bae 100644 --- a/content/en/case-studies/nerdalize/nerdalize_featured_logo.svg +++ b/content/en/case-studies/nerdalize/nerdalize_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/netease/netease_featured_logo.svg b/content/en/case-studies/netease/netease_featured_logo.svg index 7ddb664f1a225..0ea176812dd65 100644 --- a/content/en/case-studies/netease/netease_featured_logo.svg +++ b/content/en/case-studies/netease/netease_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/newyorktimes/newyorktimes_featured.svg b/content/en/case-studies/newyorktimes/newyorktimes_featured.svg index f006841112b8f..e386c15806625 100644 --- a/content/en/case-studies/newyorktimes/newyorktimes_featured.svg +++ b/content/en/case-studies/newyorktimes/newyorktimes_featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/nokia/nokia_featured_logo.svg b/content/en/case-studies/nokia/nokia_featured_logo.svg index bbf2920f7a9a5..1e3cce49565d3 100644 --- a/content/en/case-studies/nokia/nokia_featured_logo.svg +++ b/content/en/case-studies/nokia/nokia_featured_logo.svg @@ -1 +1 @@ -nokia \ No newline at end of file +nokia \ No newline at end of file diff --git a/content/en/case-studies/nordstrom/nordstrom_featured_logo.svg b/content/en/case-studies/nordstrom/nordstrom_featured_logo.svg index 19051f4b21d90..a162e93f03b1e 100644 --- a/content/en/case-studies/nordstrom/nordstrom_featured_logo.svg +++ b/content/en/case-studies/nordstrom/nordstrom_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/northwestern-mutual/northwestern_featured_logo.svg b/content/en/case-studies/northwestern-mutual/northwestern_featured_logo.svg index c907eb6e22aee..7a2f09de54716 100644 --- a/content/en/case-studies/northwestern-mutual/northwestern_featured_logo.svg +++ b/content/en/case-studies/northwestern-mutual/northwestern_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/ocado/ocado_featured_logo.svg b/content/en/case-studies/ocado/ocado_featured_logo.svg index 8f30890dfc7b4..d9e2886e36fda 100644 --- a/content/en/case-studies/ocado/ocado_featured_logo.svg +++ b/content/en/case-studies/ocado/ocado_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/openAI/openai_featured.svg b/content/en/case-studies/openAI/openai_featured.svg index 97cf4dab57eab..cf9b79721e8dd 100644 --- a/content/en/case-studies/openAI/openai_featured.svg +++ 
b/content/en/case-studies/openAI/openai_featured.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/peardeck/peardeck_featured.svg b/content/en/case-studies/peardeck/peardeck_featured.svg index dcaa62f71ea9b..1c42e719207f0 100644 --- a/content/en/case-studies/peardeck/peardeck_featured.svg +++ b/content/en/case-studies/peardeck/peardeck_featured.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/pingcap/pingcap_featured_logo.svg b/content/en/case-studies/pingcap/pingcap_featured_logo.svg index 6beb710c041ef..46d2d2543d784 100644 --- a/content/en/case-studies/pingcap/pingcap_featured_logo.svg +++ b/content/en/case-studies/pingcap/pingcap_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/pinterest/pinterest_feature.svg b/content/en/case-studies/pinterest/pinterest_feature.svg index 32d0c5d60fd40..96cd6ded97560 100644 --- a/content/en/case-studies/pinterest/pinterest_feature.svg +++ b/content/en/case-studies/pinterest/pinterest_feature.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/prowise/prowise_featured_logo.svg b/content/en/case-studies/prowise/prowise_featured_logo.svg index ae03646afc7b7..1f2d5ce41a918 100644 --- a/content/en/case-studies/prowise/prowise_featured_logo.svg +++ b/content/en/case-studies/prowise/prowise_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg b/content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg index 60b79ca30c3d8..caefc4b96f97f 100644 --- a/content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg +++ b/content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/slamtec/slamtec_featured_logo.svg b/content/en/case-studies/slamtec/slamtec_featured_logo.svg index 2a8fde70170ee..7b4f6d6af1683 100644 --- a/content/en/case-studies/slamtec/slamtec_featured_logo.svg +++ b/content/en/case-studies/slamtec/slamtec_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/slingtv/slingtv_featured_logo.svg b/content/en/case-studies/slingtv/slingtv_featured_logo.svg index 36077bc1ace19..764f8ddd884a8 100644 --- a/content/en/case-studies/slingtv/slingtv_featured_logo.svg +++ b/content/en/case-studies/slingtv/slingtv_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/spotify/spotify-featured.svg b/content/en/case-studies/spotify/spotify-featured.svg index fb7d8e750de98..d1cc3418dec97 100644 --- a/content/en/case-studies/spotify/spotify-featured.svg +++ b/content/en/case-studies/spotify/spotify-featured.svg @@ -1 +1 @@ -kubernetes.io-logos \ No newline at end of file +kubernetes.io-logos \ No newline at end of file diff --git a/content/en/case-studies/squarespace/squarespace_featured_logo.svg 
b/content/en/case-studies/squarespace/squarespace_featured_logo.svg index 4ffcbf078ecc5..a69d7ea5c8ebf 100644 --- a/content/en/case-studies/squarespace/squarespace_featured_logo.svg +++ b/content/en/case-studies/squarespace/squarespace_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/thredup/thredup_featured_logo.svg b/content/en/case-studies/thredup/thredup_featured_logo.svg index 48841f9878d25..987e1a55c1203 100644 --- a/content/en/case-studies/thredup/thredup_featured_logo.svg +++ b/content/en/case-studies/thredup/thredup_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/vsco/vsco_featured_logo.svg b/content/en/case-studies/vsco/vsco_featured_logo.svg index e1181f8a957ad..e65dad8c52d1c 100644 --- a/content/en/case-studies/vsco/vsco_featured_logo.svg +++ b/content/en/case-studies/vsco/vsco_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/wikimedia/wikimedia_featured.svg b/content/en/case-studies/wikimedia/wikimedia_featured.svg index b3e654dd641f3..5fa786aaa52ba 100644 --- a/content/en/case-studies/wikimedia/wikimedia_featured.svg +++ b/content/en/case-studies/wikimedia/wikimedia_featured.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/wink/wink_featured.svg b/content/en/case-studies/wink/wink_featured.svg index 3e27cac771a1c..8168ac2b435c2 100644 --- a/content/en/case-studies/wink/wink_featured.svg +++ b/content/en/case-studies/wink/wink_featured.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/woorank/woorank_featured_logo.svg b/content/en/case-studies/woorank/woorank_featured_logo.svg index a5e3736ee1d48..50b64e9a9c6e8 100644 --- a/content/en/case-studies/woorank/woorank_featured_logo.svg +++ b/content/en/case-studies/woorank/woorank_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/workiva/workiva_featured_logo.svg b/content/en/case-studies/workiva/workiva_featured_logo.svg index 76de3f217ce5a..0cde714f2315e 100644 --- a/content/en/case-studies/workiva/workiva_featured_logo.svg +++ b/content/en/case-studies/workiva/workiva_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/yahoo-japan/yahoojapan_featured.svg b/content/en/case-studies/yahoo-japan/yahoojapan_featured.svg index 3efabbfde4930..b0baa4a49ee73 100644 --- a/content/en/case-studies/yahoo-japan/yahoojapan_featured.svg +++ b/content/en/case-studies/yahoo-japan/yahoojapan_featured.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/ygrene/ygrene_featured_logo.svg b/content/en/case-studies/ygrene/ygrene_featured_logo.svg index f8806fa33d505..0b0ab458facd4 100644 --- a/content/en/case-studies/ygrene/ygrene_featured_logo.svg +++ b/content/en/case-studies/ygrene/ygrene_featured_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file 
+kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/case-studies/zalando/zalando_feature_logo.svg b/content/en/case-studies/zalando/zalando_feature_logo.svg index 7560bc54feac2..875d10c030218 100644 --- a/content/en/case-studies/zalando/zalando_feature_logo.svg +++ b/content/en/case-studies/zalando/zalando_feature_logo.svg @@ -1 +1 @@ -kubernetes.io-logos2 \ No newline at end of file +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index 30dc0d4af9b3b..c5830986935e1 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -309,13 +309,6 @@ The node controller also adds {{< glossary_tooltip text="taints" term_id="taint" corresponding to node problems like node unreachable or not ready. This means that the scheduler won't place Pods onto unhealthy nodes. - -{{< caution >}} -`kubectl cordon` marks a node as 'unschedulable', which has the side effect of the service -controller removing the node from any LoadBalancer node target lists it was previously -eligible for, effectively removing incoming load balancer traffic from the cordoned node(s). -{{< /caution >}} - ### Node capacity Node objects track information about the Node's resource capacity: for example, the amount diff --git a/content/en/docs/concepts/cluster-administration/system-metrics.md b/content/en/docs/concepts/cluster-administration/system-metrics.md index 9852f9cc999e0..bfcfec9ecb212 100644 --- a/content/en/docs/concepts/cluster-administration/system-metrics.md +++ b/content/en/docs/concepts/cluster-administration/system-metrics.md @@ -174,4 +174,5 @@ Here is an example: ## {{% heading "whatsnext" %}} * Read about the [Prometheus text format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) for metrics +* See the list of [stable Kubernetes metrics](https://github.com/kubernetes/kubernetes/blob/master/test/instrumentation/testdata/stable-metrics-list.yaml) * Read about the [Kubernetes deprecation policy](/docs/reference/using-api/deprecation-policy/#deprecating-a-feature-or-behavior) diff --git a/content/en/docs/concepts/configuration/configmap.md b/content/en/docs/concepts/configuration/configmap.md index 3af8d6ae2cde4..cb98bf7439cc3 100644 --- a/content/en/docs/concepts/configuration/configmap.md +++ b/content/en/docs/concepts/configuration/configmap.md @@ -224,7 +224,7 @@ When a ConfigMap currently consumed in a volume is updated, projected keys are e The kubelet checks whether the mounted ConfigMap is fresh on every periodic sync. However, the kubelet uses its local cache for getting the current value of the ConfigMap. The type of the cache is configurable using the `ConfigMapAndSecretChangeDetectionStrategy` field in -the [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/)). +the [KubeletConfiguration struct](/docs/reference/config-api/kubelet-config.v1beta1/). A ConfigMap can be either propagated by watch (default), ttl-based, or by redirecting all requests directly to the API server. 
As a result, the total delay from the moment when the ConfigMap is updated to the moment diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index d6acc80a71f69..5e75674d73a3c 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -353,13 +353,15 @@ the removal of the lowest priority Pods is not sufficient to allow the scheduler to schedule the preemptor Pod, or if the lowest priority Pods are protected by `PodDisruptionBudget`. -The only component that considers both QoS and Pod priority is -[kubelet out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/). -The kubelet ranks Pods for eviction first by whether or not their usage of the -starved resource exceeds requests, then by Priority, and then by the consumption -of the starved compute resource relative to the Pods' scheduling requests. -See -[evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods) +The kubelet uses Priority to determine pod order for [out-of-resource eviction](/docs/tasks/administer-cluster/out-of-resource/). +You can use the QoS class to estimate the order in which pods are most likely +to get evicted. The kubelet ranks pods for eviction based on the following factors: + + 1. Whether the starved resource usage exceeds requests + 1. Pod Priority + 1. Amount of resource usage relative to requests + +See [evicting end-user pods](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods) for more details. kubelet out-of-resource eviction does not evict Pods when their @@ -367,7 +369,6 @@ usage does not exceed their requests. If a Pod with lower priority is not exceeding its requests, it won't be evicted. Another Pod with higher priority that exceeds its requests may be evicted. - ## {{% heading "whatsnext" %}} * Read about using ResourceQuotas in connection with PriorityClasses: [limit Priority Class consumption by default](/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index 49cc25ffbdc8b..96569f95189cf 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -50,11 +50,10 @@ A more detailed description of the termination behavior can be found in ### Hook handler implementations Containers can access a hook by implementing and registering a handler for that hook. -There are three types of hook handlers that can be implemented for Containers: +There are two types of hook handlers that can be implemented for Containers: * Exec - Executes a specific command, such as `pre-stop.sh`, inside the cgroups and namespaces of the Container. Resources consumed by the command are counted against the Container. -* TCP - Opens a TCP connecton against a specific port on the Container. * HTTP - Executes an HTTP request against a specific endpoint on the Container. 
### Hook handler execution diff --git a/content/en/docs/concepts/containers/runtime-class.md b/content/en/docs/concepts/containers/runtime-class.md index abfec1ef6cb66..6af609636eddf 100644 --- a/content/en/docs/concepts/containers/runtime-class.md +++ b/content/en/docs/concepts/containers/runtime-class.md @@ -109,7 +109,8 @@ For more details on setting up CRI runtimes, see [CRI installation](/docs/setup/ #### dockershim -Kubernetes built-in dockershim CRI does not support runtime handlers. +RuntimeClasses with dockershim must set the runtime handler to `docker`. Dockershim does not support +custom configurable runtime handlers. #### {{< glossary_tooltip term_id="containerd" >}} @@ -163,7 +164,7 @@ Nodes](/docs/concepts/scheduling-eviction/assign-pod-node/). {{< feature-state for_k8s_version="v1.18" state="beta" >}} You can specify _overhead_ resources that are associated with running a Pod. Declaring overhead allows -the cluster (including the scheduler) to account for it when making decisions about Pods and resources. +the cluster (including the scheduler) to account for it when making decisions about Pods and resources. To use Pod overhead, you must have the PodOverhead [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) enabled (it is on by default). diff --git a/content/en/docs/concepts/extend-kubernetes/_index.md b/content/en/docs/concepts/extend-kubernetes/_index.md index cc5ba809ecf41..083dca6964545 100644 --- a/content/en/docs/concepts/extend-kubernetes/_index.md +++ b/content/en/docs/concepts/extend-kubernetes/_index.md @@ -7,6 +7,10 @@ reviewers: - lavalamp - cheftako - chenopis +feature: + title: Designed for extensibility + description: > + Add features to your Kubernetes cluster without changing upstream source code. content_type: concept no_list: true --- @@ -80,18 +84,15 @@ and by kubectl. Below is a diagram showing how the extension points interact with the Kubernetes control plane. - - - +![Extension Points and the Control Plane](/docs/concepts/extend-kubernetes/control-plane.png) ## Extension Points This diagram shows the extension points in a Kubernetes system. - - +![Extension Points](/docs/concepts/extend-kubernetes/extension-points.png) 1. Users often interact with the Kubernetes API using `kubectl`. [Kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) extend the kubectl binary. They only affect the individual user's local environment, and so cannot enforce site-wide policies. 2. The apiserver handles all requests. Several types of extension points in the apiserver allow authenticating requests, or blocking them based on their content, editing content, and handling deletion. These are described in the [API Access Extensions](#api-access-extensions) section. @@ -103,12 +104,11 @@ This diagram shows the extension points in a Kubernetes system. If you are unsure where to start, this flowchart can help. Note that some solutions may involve several types of extensions. - - - +![Flowchart for Extension](/docs/concepts/extend-kubernetes/flowchart.png) ## API Extensions + ### User-Defined Types Consider adding a Custom Resource to Kubernetes if you want to define new controllers, application configuration objects or other declarative APIs, and to manage them using Kubernetes tools, such as `kubectl`. 
@@ -157,7 +157,6 @@ After a request is authorized, if it is a write operation, it also goes through ## Infrastructure Extensions - ### Storage Plugins [Flex Volumes](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/storage/flexvolume-deployment.md diff --git a/content/en/docs/concepts/extend-kubernetes/control-plane.png b/content/en/docs/concepts/extend-kubernetes/control-plane.png new file mode 100644 index 0000000000000..fa61599e94118 Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/control-plane.png differ diff --git a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md deleted file mode 100644 index 2bdc74e7e96ef..0000000000000 --- a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md +++ /dev/null @@ -1,207 +0,0 @@ ---- -title: Extending your Kubernetes Cluster -reviewers: -- erictune -- lavalamp -- cheftako -- chenopis -content_type: concept -weight: 10 ---- - - - -Kubernetes is highly configurable and extensible. As a result, -there is rarely a need to fork or submit patches to the Kubernetes -project code. - -This guide describes the options for customizing a Kubernetes cluster. It is -aimed at {{< glossary_tooltip text="cluster operators" term_id="cluster-operator" >}} -who want to understand how to adapt their -Kubernetes cluster to the needs of their work environment. Developers who are prospective -{{< glossary_tooltip text="Platform Developers" term_id="platform-developer" >}} -or Kubernetes Project {{< glossary_tooltip text="Contributors" term_id="contributor" >}} -will also find it useful as an introduction to what extension points and -patterns exist, and their trade-offs and limitations. - - - - -## Overview - -Customization approaches can be broadly divided into *configuration*, which only involves changing flags, local configuration files, or API resources; and *extensions*, which involve running additional programs or services. This document is primarily about extensions. - -## Configuration - -*Configuration files* and *flags* are documented in the Reference section of the online documentation, under each binary: - -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) -* [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) -* [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/). - -Flags and configuration files may not always be changeable in a hosted Kubernetes service or a distribution with managed installation. When they are changeable, they are usually only changeable by the cluster administrator. Also, they are subject to change in future Kubernetes versions, and setting them may require restarting processes. For those reasons, they should be used only when there are no other options. - -*Built-in Policy APIs*, such as [ResourceQuota](/docs/concepts/policy/resource-quotas/), [PodSecurityPolicies](/docs/concepts/policy/pod-security-policy/), [NetworkPolicy](/docs/concepts/services-networking/network-policies/) and Role-based Access Control ([RBAC](/docs/reference/access-authn-authz/rbac/)), are built-in Kubernetes APIs. APIs are typically used with hosted Kubernetes services and with managed Kubernetes installations. 
They are declarative and use the same conventions as other Kubernetes resources like pods, so new cluster configuration can be repeatable and be managed the same way as applications. And, where they are stable, they enjoy a [defined support policy](/docs/reference/using-api/deprecation-policy/) like other Kubernetes APIs. For these reasons, they are preferred over *configuration files* and *flags* where suitable. - -## Extensions - -Extensions are software components that extend and deeply integrate with Kubernetes. -They adapt it to support new types and new kinds of hardware. - -Most cluster administrators will use a hosted or distribution -instance of Kubernetes. As a result, most Kubernetes users will not need to -install extensions and fewer will need to author new ones. - -## Extension Patterns - -Kubernetes is designed to be automated by writing client programs. Any -program that reads and/or writes to the Kubernetes API can provide useful -automation. *Automation* can run on the cluster or off it. By following -the guidance in this doc you can write highly available and robust automation. -Automation generally works with any Kubernetes cluster, including hosted -clusters and managed installations. - -There is a specific pattern for writing client programs that work well with -Kubernetes called the *Controller* pattern. Controllers typically read an -object's `.spec`, possibly do things, and then update the object's `.status`. - -A controller is a client of Kubernetes. When Kubernetes is the client and -calls out to a remote service, it is called a *Webhook*. The remote service -is called a *Webhook Backend*. Like Controllers, Webhooks do add a point of -failure. - -In the webhook model, Kubernetes makes a network request to a remote service. -In the *Binary Plugin* model, Kubernetes executes a binary (program). -Binary plugins are used by the kubelet (e.g. -[Flex Volume Plugins](/docs/concepts/storage/volumes/#flexvolume) -and [Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)) -and by kubectl. - -Below is a diagram showing how the extension points interact with the -Kubernetes control plane. - - - - - - -## Extension Points - -This diagram shows the extension points in a Kubernetes system. - - - - - -1. Users often interact with the Kubernetes API using `kubectl`. [Kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) extend the kubectl binary. They only affect the individual user's local environment, and so cannot enforce site-wide policies. -2. The apiserver handles all requests. Several types of extension points in the apiserver allow authenticating requests, or blocking them based on their content, editing content, and handling deletion. These are described in the [API Access Extensions](/docs/concepts/extend-kubernetes/#api-access-extensions) section. -3. The apiserver serves various kinds of *resources*. *Built-in resource kinds*, like `pods`, are defined by the Kubernetes project and can't be changed. You can also add resources that you define, or that other projects have defined, called *Custom Resources*, as explained in the [Custom Resources](/docs/concepts/extend-kubernetes/#user-defined-types) section. Custom Resources are often used with API Access Extensions. -4. The Kubernetes scheduler decides which nodes to place pods on. There are several ways to extend scheduling. These are described in the [Scheduler Extensions](/docs/concepts/extend-kubernetes/#scheduler-extensions) section. -5. 
Much of the behavior of Kubernetes is implemented by programs called Controllers which are clients of the API-Server. Controllers are often used in conjunction with Custom Resources. -6. The kubelet runs on servers, and helps pods appear like virtual servers with their own IPs on the cluster network. [Network Plugins](/docs/concepts/extend-kubernetes/#network-plugins) allow for different implementations of pod networking. -7. The kubelet also mounts and unmounts volumes for containers. New types of storage can be supported via [Storage Plugins](/docs/concepts/extend-kubernetes/#storage-plugins). - -If you are unsure where to start, this flowchart can help. Note that some solutions may involve several types of extensions. - - - - - - -## API Extensions -### User-Defined Types - -Consider adding a Custom Resource to Kubernetes if you want to define new controllers, application configuration objects or other declarative APIs, and to manage them using Kubernetes tools, such as `kubectl`. - -Do not use a Custom Resource as data storage for application, user, or monitoring data. - -For more about Custom Resources, see the [Custom Resources concept guide](/docs/concepts/extend-kubernetes/api-extension/custom-resources/). - - -### Combining New APIs with Automation - -The combination of a custom resource API and a control loop is called the [Operator pattern](/docs/concepts/extend-kubernetes/operator/). The Operator pattern is used to manage specific, usually stateful, applications. These custom APIs and control loops can also be used to control other resources, such as storage or policies. - -### Changing Built-in Resources - -When you extend the Kubernetes API by adding custom resources, the added resources always fall into a new API Groups. You cannot replace or change existing API groups. -Adding an API does not directly let you affect the behavior of existing APIs (e.g. Pods), but API Access Extensions do. - - -### API Access Extensions - -When a request reaches the Kubernetes API Server, it is first Authenticated, then Authorized, then subject to various types of Admission Control. See [Controlling Access to the Kubernetes API](/docs/concepts/security/controlling-access/) for more on this flow. - -Each of these steps offers extension points. - -Kubernetes has several built-in authentication methods that it supports. It can also sit behind an authenticating proxy, and it can send a token from an Authorization header to a remote service for verification (a webhook). All of these methods are covered in the [Authentication documentation](/docs/reference/access-authn-authz/authentication/). - -### Authentication - -[Authentication](/docs/reference/access-authn-authz/authentication/) maps headers or certificates in all requests to a username for the client making the request. - -Kubernetes provides several built-in authentication methods, and an [Authentication webhook](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication) method if those don't meet your needs. - - -### Authorization - -[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. 
- - -### Dynamic Admission Control - -After a request is authorized, if it is a write operation, it also goes through [Admission Control](/docs/reference/access-authn-authz/admission-controllers/) steps. In addition to the built-in steps, there are several extensions: - -* The [Image Policy webhook](/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) restricts what images can be run in containers. -* To make arbitrary admission control decisions, a general [Admission webhook](/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) can be used. Admission Webhooks can reject creations or updates. - -## Infrastructure Extensions - - -### Storage Plugins - -[Flex Volumes](/docs/concepts/storage/volumes/#flexvolume) -allow users to mount volume types without built-in support by having the -Kubelet call a Binary Plugin to mount the volume. - - -### Device Plugins - -Device plugins allow a node to discover new Node resources (in addition to the -builtin ones like cpu and memory) via a -[Device Plugin](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). - -### Network Plugins - -Different networking fabrics can be supported via node-level -[Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). - -### Scheduler Extensions - -The scheduler is a special type of controller that watches pods, and assigns -pods to nodes. The default scheduler can be replaced entirely, while -continuing to use other Kubernetes components, or -[multiple schedulers](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) -can run at the same time. - -This is a significant undertaking, and almost all Kubernetes users find they -do not need to modify the scheduler. - -The scheduler also supports a -[webhook](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/scheduler_extender.md) -that permits a webhook backend (scheduler extension) to filter and prioritize -the nodes chosen for a pod. 
- - -## {{% heading "whatsnext" %}} - -* Learn more about [Custom Resources](/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -* Learn about [Dynamic admission control](/docs/reference/access-authn-authz/extensible-admission-controllers/) -* Learn more about Infrastructure extensions - * [Network Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) - * [Device Plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) -* Learn about [kubectl plugins](/docs/tasks/extend-kubectl/kubectl-plugins/) -* Learn about the [Operator pattern](/docs/concepts/extend-kubernetes/operator/) - - diff --git a/content/en/docs/concepts/extend-kubernetes/extension-points.png b/content/en/docs/concepts/extend-kubernetes/extension-points.png new file mode 100644 index 0000000000000..01fb689e7bc42 Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/extension-points.png differ diff --git a/content/en/docs/concepts/extend-kubernetes/flowchart.png b/content/en/docs/concepts/extend-kubernetes/flowchart.png new file mode 100644 index 0000000000000..e75802c0483cc Binary files /dev/null and b/content/en/docs/concepts/extend-kubernetes/flowchart.png differ diff --git a/content/en/docs/concepts/overview/working-with-objects/labels.md b/content/en/docs/concepts/overview/working-with-objects/labels.md index 811d9fb3f7ced..25eb5da66ec9c 100644 --- a/content/en/docs/concepts/overview/working-with-objects/labels.md +++ b/content/en/docs/concepts/overview/working-with-objects/labels.md @@ -53,8 +53,8 @@ If the prefix is omitted, the label Key is presumed to be private to the user. A The `kubernetes.io/` and `k8s.io/` prefixes are reserved for Kubernetes core components. Valid label value: -* must be 63 characters or less (cannot be empty), -* must begin and end with an alphanumeric character (`[a-z0-9A-Z]`), +* must be 63 characters or less (can be empty), +* unless empty, must begin and end with an alphanumeric character (`[a-z0-9A-Z]`), * could contain dashes (`-`), underscores (`_`), dots (`.`), and alphanumerics between. For example, here's the configuration file for a Pod that has two labels `environment: production` and `app: nginx` : @@ -237,4 +237,3 @@ selector: One use case for selecting over labels is to constrain the set of nodes onto which a pod can schedule. See the documentation on [node selection](/docs/concepts/scheduling-eviction/assign-pod-node/) for more information. 
- diff --git a/content/en/docs/concepts/policy/node-resource-managers.md b/content/en/docs/concepts/policy/node-resource-managers.md index ce9e6be98ba87..719e8b1151f0e 100644 --- a/content/en/docs/concepts/policy/node-resource-managers.md +++ b/content/en/docs/concepts/policy/node-resource-managers.md @@ -19,4 +19,4 @@ The configuration of individual managers is elaborated in dedicated documents: - [CPU Manager Policies](/docs/tasks/administer-cluster/cpu-management-policies/) - [Device Manager](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) -- [Memory Manger Policies](/docs/tasks/administer-cluster/memory-manager/) \ No newline at end of file +- [Memory Manager Policies](/docs/tasks/administer-cluster/memory-manager/) diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 88ca9e8128a0b..3c779dda79965 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -72,7 +72,7 @@ verify that it worked by running `kubectl get pods -o wide` and looking at the ## Interlude: built-in node labels {#built-in-node-labels} In addition to labels you [attach](#step-one-attach-label-to-the-node), nodes come pre-populated -with a standard set of labels. See [Well-Known Labels, Annotations and Taints](/docs/reference/kubernetes-api/labels-annotations-taints/) for a list of these. +with a standard set of labels. See [Well-Known Labels, Annotations and Taints](/docs/reference/labels-annotations-taints/) for a list of these. {{< note >}} The value of these labels is cloud provider specific and is not guaranteed to be reliable. diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md index 079024c9d66a8..946e858a02388 100644 --- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -210,9 +210,9 @@ are true. The following taints are built in: the NodeCondition `Ready` being "`False`". * `node.kubernetes.io/unreachable`: Node is unreachable from the node controller. This corresponds to the NodeCondition `Ready` being "`Unknown`". - * `node.kubernetes.io/out-of-disk`: Node becomes out of disk. * `node.kubernetes.io/memory-pressure`: Node has memory pressure. * `node.kubernetes.io/disk-pressure`: Node has disk pressure. + * `node.kubernetes.io/pid-pressure`: Node has PID pressure. * `node.kubernetes.io/network-unavailable`: Node's network is unavailable. * `node.kubernetes.io/unschedulable`: Node is unschedulable. * `node.cloudprovider.kubernetes.io/uninitialized`: When the kubelet is started @@ -275,7 +275,7 @@ tolerations to all daemons, to prevent DaemonSets from breaking. 
* `node.kubernetes.io/memory-pressure` * `node.kubernetes.io/disk-pressure` - * `node.kubernetes.io/out-of-disk` (*only for critical pods*) + * `node.kubernetes.io/pid-pressure` (1.14 or later) * `node.kubernetes.io/unschedulable` (1.10 or later) * `node.kubernetes.io/network-unavailable` (*host network only*) diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index e41b629f20e0b..de4e665af15c9 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -230,7 +230,7 @@ reference additional implementation-specific configuration for this class. reference a namespace-specific resource for configuration of an Ingress class. `Scope` field defaults to `Cluster`, meaning, the default is cluster-scoped resource. Setting `Scope` to `Namespace` and setting the `Namespace` field -will reference a paramters resource in a specific namespace: +will reference a parameters resource in a specific namespace: {{< codenew file="service/networking/namespaced-params.yaml" >}} diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index d057f05533a8d..2c9e6e89969f5 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -936,11 +936,18 @@ There are other annotations to manage Classic Elastic Load Balancers that are de # value. Defaults to 5, must be between 2 and 60 service.beta.kubernetes.io/aws-load-balancer-security-groups: "sg-53fae93f" - # A list of existing security groups to be added to ELB created. Unlike the annotation - # service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other security groups previously assigned to the ELB. + # A list of existing security groups to be configured on the ELB created. Unlike the annotation + # service.beta.kubernetes.io/aws-load-balancer-extra-security-groups, this replaces all other security groups previously assigned to the ELB and also overrides the creation + # of a uniquely generated security group for this ELB. + # The first security group ID on this list is used as a source to permit incoming traffic to target worker nodes (service traffic and health checks). + # If multiple ELBs are configured with the same security group ID, only a single permit line will be added to the worker node security groups, that means if you delete any + # of those ELBs it will remove the single permit line and block access for all ELBs that shared the same security group ID. + # This can cause a cross-service outage if not used properly service.beta.kubernetes.io/aws-load-balancer-extra-security-groups: "sg-53fae93f,sg-42efd82e" - # A list of additional security groups to be added to the ELB + # A list of additional security groups to be added to the created ELB, this leaves the uniquely generated security group in place, this ensures that every ELB + # has a unique security group ID and a matching permit line to allow traffic to the target worker nodes (service traffic and health checks). + # Security groups defined here can be shared between services. 
service.beta.kubernetes.io/aws-load-balancer-target-node-labels: "ingress-gw,gw-name=public-api" # A comma separated list of key-value pairs which are used @@ -989,7 +996,6 @@ groups are modified with the following IP rules: | Rule | Protocol | Port(s) | IpRange(s) | IpRange Description | |------|----------|---------|------------|---------------------| | Health Check | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | Subnet CIDR | kubernetes.io/rule/nlb/health=\ | - | Client Traffic | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | | MTU Discovery | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | diff --git a/content/en/docs/concepts/storage/ephemeral-volumes.md b/content/en/docs/concepts/storage/ephemeral-volumes.md index dc715d7c46651..e76f76f4922f8 100644 --- a/content/en/docs/concepts/storage/ephemeral-volumes.md +++ b/content/en/docs/concepts/storage/ephemeral-volumes.md @@ -130,7 +130,6 @@ As a cluster administrator, you can use a [PodSecurityPolicy](/docs/concepts/pol ### Generic ephemeral volumes -{{< feature-state for_k8s_version="v1.19" state="alpha" >}} {{< feature-state for_k8s_version="v1.21" state="beta" >}} This feature requires the `GenericEphemeralVolume` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) to be diff --git a/content/en/docs/concepts/storage/persistent-volumes.md b/content/en/docs/concepts/storage/persistent-volumes.md index 54e42bae9ee50..88e468ebfa8c7 100644 --- a/content/en/docs/concepts/storage/persistent-volumes.md +++ b/content/en/docs/concepts/storage/persistent-volumes.md @@ -387,7 +387,7 @@ Kubernetes supports two `volumeModes` of PersistentVolumes: `Filesystem` and `Bl `Filesystem` is the default mode used when `volumeMode` parameter is omitted. A volume with `volumeMode: Filesystem` is *mounted* into Pods into a directory. If the volume -is backed by a block device and the device is empty, Kuberneretes creates a filesystem +is backed by a block device and the device is empty, Kubernetes creates a filesystem on the device before mounting it for the first time. You can set the value of `volumeMode` to `Block` to use a volume as a raw block device. diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index 6a7016dd13922..d693e057efbdc 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -33,10 +33,9 @@ drivers, but the functionality is somewhat limited. Kubernetes supports many types of volumes. A {{< glossary_tooltip term_id="pod" text="Pod" >}} can use any number of volume types simultaneously. Ephemeral volume types have a lifetime of a pod, but persistent volumes exist beyond -the lifetime of a pod. Consequently, a volume outlives any containers -that run within the pod, and data is preserved across container restarts. When a pod -ceases to exist, Kubernetes destroys ephemeral volumes; however, Kubernetes does not -destroy persistent volumes. +the lifetime of a pod. When a pod ceases to exist, Kubernetes destroys ephemeral volumes; +however, Kubernetes does not destroy persistent volumes. +For any kind of volume in a given pod, data is preserved across container restarts. At its core, a volume is a directory, possibly with some data in it, which is accessible to the containers in a pod. 
How that directory comes to be, the diff --git a/content/en/docs/concepts/workloads/controllers/job.md b/content/en/docs/concepts/workloads/controllers/job.md index a23c37ad0de52..9a49e2afd794d 100644 --- a/content/en/docs/concepts/workloads/controllers/job.md +++ b/content/en/docs/concepts/workloads/controllers/job.md @@ -192,7 +192,7 @@ parallelism, for a variety of reasons: {{< note >}} To be able to create Indexed Jobs, make sure to enable the `IndexedJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -on the [API server](docs/reference/command-line-tools-reference/kube-apiserver/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). {{< /note >}} @@ -412,7 +412,7 @@ Here, `W` is the number of work items. {{< note >}} Suspending Jobs is available in Kubernetes versions 1.21 and above. You must enable the `SuspendJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -on the [API server](docs/reference/command-line-tools-reference/kube-apiserver/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) in order to use this feature. {{< /note >}} diff --git a/content/en/docs/concepts/workloads/controllers/replicaset.md b/content/en/docs/concepts/workloads/controllers/replicaset.md index 316b6a3b6ca9d..f47f3be439c3d 100644 --- a/content/en/docs/concepts/workloads/controllers/replicaset.md +++ b/content/en/docs/concepts/workloads/controllers/replicaset.md @@ -325,7 +325,7 @@ If all of the above match, then selection is random. ### Pod deletion cost {{< feature-state for_k8s_version="v1.21" state="alpha" >}} -Using the [`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/command-line-tools-reference/labels-annotations-taints/#pod-deletion-cost) +Using the [`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost) annotation, users can set a preference regarding which pods to remove first when downscaling a ReplicaSet. The annotation should be set on the pod, the range is [-2147483647, 2147483647]. It represents the cost of diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index c791ed42f3b9c..cf0346e9395d0 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -136,7 +136,7 @@ during application updates is configured in the spec for the specific workload r When a pod is evicted using the eviction API, it is gracefully [terminated](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination), honoring the -`terminationGracePeriodSeconds` setting in its [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core).) +`terminationGracePeriodSeconds` setting in its [PodSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#podspec-v1-core). 
## PodDisruptionBudget example {#pdb-example} diff --git a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md index 2e8a915c627b3..8e588da111a23 100644 --- a/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md +++ b/content/en/docs/concepts/workloads/pods/pod-topology-spread-constraints.md @@ -58,7 +58,7 @@ graph TB class zoneA,zoneB cluster; {{< /mermaid >}} -Instead of manually applying labels, you can also reuse the [well-known labels](/docs/reference/kubernetes-api/labels-annotations-taints/) that are created and populated automatically on most clusters. +Instead of manually applying labels, you can also reuse the [well-known labels](/docs/reference/labels-annotations-taints/) that are created and populated automatically on most clusters. ## Spread Constraints for Pods diff --git a/content/en/docs/contribute/localization.md b/content/en/docs/contribute/localization.md index ad0b9420c5bb8..eafc241d40bca 100644 --- a/content/en/docs/contribute/localization.md +++ b/content/en/docs/contribute/localization.md @@ -4,7 +4,6 @@ content_type: concept approvers: - remyleone - rlenferink -- zacharysarah weight: 50 card: name: contribute diff --git a/content/en/docs/contribute/new-content/open-a-pr.md b/content/en/docs/contribute/new-content/open-a-pr.md index d511360e2205f..a49bffb030843 100644 --- a/content/en/docs/contribute/new-content/open-a-pr.md +++ b/content/en/docs/contribute/new-content/open-a-pr.md @@ -123,8 +123,8 @@ Make sure you have [git](https://git-scm.com/book/en/v2/Getting-Started-Installi ```bash origin git@github.com:/website.git (fetch) origin git@github.com:/website.git (push) - upstream https://github.com/kubernetes/website (fetch) - upstream https://github.com/kubernetes/website (push) + upstream https://github.com/kubernetes/website.git (fetch) + upstream https://github.com/kubernetes/website.git (push) ``` 6. Fetch commits from your fork's `origin/master` and `kubernetes/website`'s `upstream/master`: diff --git a/content/en/docs/contribute/style/style-guide.md b/content/en/docs/contribute/style/style-guide.md index 5931422e95844..26df0a85ac4b6 100644 --- a/content/en/docs/contribute/style/style-guide.md +++ b/content/en/docs/contribute/style/style-guide.md @@ -596,8 +596,8 @@ Do | Don't :--| :----- Include one command in ... | Include just one command in ... Run the container ... | Simply run the container ... -You can easily remove ... | You can remove ... -These simple steps ... | These steps ... +You can remove ... | You can easily remove ... +These steps ... | These simple steps ... {{< /table >}} ## {{% heading "whatsnext" %}} diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index d8ed48db8b9e6..a9d7ee3a9bdc2 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -26,7 +26,7 @@ This section of the Kubernetes documentation contains references. * [One-page API Reference for Kubernetes {{< param "version" >}}](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [Using The Kubernetes API](/docs/reference/using-api/) - overview of the API for Kubernetes. 
* [API access control](/docs/reference/access-authn-authz/) - details on how Kubernetes controls API access -* [Well-Known Labels, Annotations and Taints](/docs/reference/kubernetes-api/labels-annotations-taints/) +* [Well-Known Labels, Annotations and Taints](/docs/reference/labels-annotations-taints/) ## Officially supported client libraries @@ -73,16 +73,11 @@ operator to use or manage a cluster. * [kubelet configuration (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) * [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +* [kube-scheduler policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) * [kube-proxy configuration (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/) * [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/) - -## Config APIs - * [Client authentication API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) - -## Config APIs - -* [kube-scheduler policy reference (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) +* [WebhookAdmission configuration (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/) ## Design Docs diff --git a/content/en/docs/reference/access-authn-authz/admission-controllers.md b/content/en/docs/reference/access-authn-authz/admission-controllers.md index 581c218755750..324de2e0dd4eb 100644 --- a/content/en/docs/reference/access-authn-authz/admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/admission-controllers.md @@ -789,7 +789,7 @@ for more detailed information. ### TaintNodesByCondition {#taintnodesbycondition} -{{< feature-state for_k8s_version="v1.12" state="beta" >}} +{{< feature-state for_k8s_version="v1.17" state="stable" >}} This admission controller {{< glossary_tooltip text="taints" term_id="taint" >}} newly created Nodes as `NotReady` and `NoSchedule`. That tainting avoids a race condition that could cause Pods to be scheduled on new Nodes before their taints were updated to accurately reflect their reported conditions. diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index d09ffa23a2395..2e223b023a1b1 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -205,12 +205,10 @@ spec: ``` Service account bearer tokens are perfectly valid to use outside the cluster and - can be used to create identities for long standing jobs that wish to talk to the -Kubernetes API. To manually create a service account, simply use the `kubectl` - -create serviceaccount (NAME)` command. This creates a service account in the -current namespace and an associated secret. +Kubernetes API. To manually create a service account, use the `kubectl create +serviceaccount (NAME)` command. This creates a service account in the current +namespace and an associated secret. ```bash kubectl create serviceaccount jenkins diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 018195f817508..26a7634c2a9b0 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -57,7 +57,7 @@ In the following, we describe how to quickly experiment with admission webhooks. 
### Write an admission webhook server Please refer to the implementation of the [admission webhook -server](https://github.com/kubernetes/kubernetes/blob/v1.13.0/test/images/webhook/main.go) +server](https://github.com/kubernetes/kubernetes/blob/release-1.21/test/images/agnhost/webhook/main.go) that is validated in a Kubernetes e2e test. The webhook handles the `AdmissionReview` request sent by the apiservers, and sends back its decision as an `AdmissionReview` object in the same version it received. @@ -147,7 +147,7 @@ webhooks: {{< /tabs >}} The scope field specifies if only cluster-scoped resources ("Cluster") or namespace-scoped -resources ("Namespaced") will match this rule. "*" means that there are no scope restrictions. +resources ("Namespaced") will match this rule. "∗" means that there are no scope restrictions. {{< note >}} When using `clientConfig.service`, the server cert must be valid for @@ -225,7 +225,7 @@ plugins: {{< /tabs >}} For more information about `AdmissionConfiguration`, see the -[AdmissionConfiguration schema](https://github.com/kubernetes/kubernetes/blob/v1.17.0/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go#L27). +[AdmissionConfiguration (v1) reference](/docs/reference/config-api/apiserver-webhookadmission.v1/). See the [webhook configuration](#webhook-configuration) section for details about each config field. * In the kubeConfig file, provide the credentials: diff --git a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md index 4aaf6da0a2edc..ea04f462b1a4a 100644 --- a/content/en/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/en/docs/reference/access-authn-authz/service-accounts-admin.md @@ -1,23 +1,24 @@ --- reviewers: -- bprashanth -- davidopp -- lavalamp -- liggitt + - bprashanth + - davidopp + - lavalamp + - liggitt title: Managing Service Accounts content_type: concept weight: 50 --- + This is a Cluster Administrator guide to service accounts. You should be familiar with [configuring Kubernetes service accounts](/docs/tasks/configure-pod-container/configure-service-account/). -Support for authorization and user accounts is planned but incomplete. Sometimes +Support for authorization and user accounts is planned but incomplete. Sometimes incomplete features are referred to in order to better describe service accounts. - + ## User accounts versus service accounts Kubernetes distinguishes between the concept of a user account and a service account @@ -53,37 +54,51 @@ It is part of the API server. It acts synchronously to modify pods as they are created or updated. When this plugin is active (and it is by default on most distributions), then it does the following when a pod is created or modified: - 1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. - 1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it. - 1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod. - 1. It adds a `volume` to the pod which contains a token for API access. - 1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`. +1. If the pod does not have a `ServiceAccount` set, it sets the `ServiceAccount` to `default`. +1. It ensures that the `ServiceAccount` referenced by the pod exists, and otherwise rejects it. +1. 
It adds a `volume` to the pod which contains a token for API access if neither the ServiceAccount `automountServiceAccountToken` nor the Pod's `automountServiceAccountToken` is set to `false`. +1. It adds a `volumeSource` to each container of the pod mounted at `/var/run/secrets/kubernetes.io/serviceaccount`, if the previous step has created a volume for ServiceAccount token. +1. If the pod does not contain any `ImagePullSecrets`, then `ImagePullSecrets` of the `ServiceAccount` are added to the pod. #### Bound Service Account Token Volume + {{< feature-state for_k8s_version="v1.21" state="beta" >}} When the `BoundServiceAccountTokenVolume` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) is enabled, the service account admission controller will -add a projected service account token volume instead of a secret volume. The service account token will expire after 1 hour by default or the pod is deleted. See more details about [projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). - -This feature depends on the `RootCAConfigMap` feature gate enabled which publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. -1. If the pod does not have a `serviceAccountName` set, it sets the - `serviceAccountName` to `default`. -1. It ensures that the `serviceAccountName` referenced by the pod exists, and - otherwise rejects it. -1. If the pod does not contain any `imagePullSecrets`, then `imagePullSecrets` - of the ServiceAccount referenced by `serviceAccountName` are added to the pod. -1. It adds a `volume` to the pod which contains a token for API access - if neither the ServiceAccount `automountServiceAccountToken` nor the Pod's - `automountServiceAccountToken` is set to `false`. -1. It adds a `volumeSource` to each container of the pod mounted at - `/var/run/secrets/kubernetes.io/serviceaccount`, if the previous step has - created a volume for ServiceAccount token. - -You can migrate a service account volume to a projected volume when -the `BoundServiceAccountTokenVolume` feature gate is enabled. -The service account token will expire after 1 hour or the pod is deleted. See -more details about -[projected volume](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). +add the following projected volume instead of a Secret-based volume for the non-expiring service account token created by Token Controller. + +```yaml +- name: kube-api-access- + projected: + defaultMode: 420 # 0644 + sources: + - serviceAccountToken: + expirationSeconds: 3600 + path: token + - configMap: + items: + - key: ca.crt + path: ca.crt + name: kube-root-ca.crt + - downwardAPI: + items: + - fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + path: namespace +``` + +This projected volume consists of three sources: + +1. A ServiceAccountToken acquired from kube-apiserver via TokenRequest API. It will expire after 1 hour by default or when the pod is deleted. It is bound to the pod and has kube-apiserver as the audience. +1. A ConfigMap containing a CA bundle used for verifying connections to the kube-apiserver. This feature depends on the `RootCAConfigMap` feature gate being enabled, which publishes a "kube-root-ca.crt" ConfigMap to every namespace. `RootCAConfigMap` is enabled by default in 1.20, and always enabled in 1.21+. +1. A DownwardAPI that references the namespace of the pod. 
+ +See more details about [projected volumes](/docs/tasks/configure-pod-container/configure-projected-volume-storage/). + +You can manually migrate a secret-based service account volume to a projected volume when +the `BoundServiceAccountTokenVolume` feature gate is not enabled by adding the above +projected volume to the pod spec. However, `RootCAConfigMap` needs to be enabled. ### Token Controller diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 726207c9fe268..849af9f00bf29 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -53,7 +53,7 @@ different Kubernetes components. | `APIPriorityAndFairness` | `false` | Alpha | 1.17 | 1.19 | | `APIPriorityAndFairness` | `true` | Beta | 1.20 | | | `APIResponseCompression` | `false` | Alpha | 1.7 | 1.15 | -| `APIResponseCompression` | `false` | Beta | 1.16 | | +| `APIResponseCompression` | `true` | Beta | 1.16 | | | `APIServerIdentity` | `false` | Alpha | 1.20 | | | `AllowInsecureBackendProxy` | `true` | Beta | 1.17 | | | `AnyVolumeDataSource` | `false` | Alpha | 1.18 | | @@ -90,6 +90,7 @@ different Kubernetes components. | `CSIStorageCapacity` | `true` | Beta | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | | +| `CSIVolumeHealth` | `false` | Alpha | 1.21 | | | `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | | | `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | @@ -125,14 +126,13 @@ different Kubernetes components. | `HPAScaleToZero` | `false` | Alpha | 1.16 | | | `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 | | `HugePageStorageMediumSize` | `true` | Beta | 1.19 | | +| `IndexedJob` | `false` | Alpha | 1.21 | | | `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | | | `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 | | `IPv6DualStack` | `true` | Beta | 1.21 | | | `KubeletCredentialProviders` | `false` | Alpha | 1.20 | | -| `KubeletPodResources` | `true` | Alpha | 1.13 | 1.14 | -| `KubeletPodResources` | `true` | Beta | 1.15 | | | `LegacyNodeRoleBehavior` | `false` | Alpha | 1.16 | 1.18 | -| `LegacyNodeRoleBehavior` | `true` | True | 1.19 | | +| `LegacyNodeRoleBehavior` | `true` | Beta | 1.19 | | | `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | | | `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | | @@ -158,8 +158,6 @@ different Kubernetes components. | `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | | `RunAsGroup` | `true` | Beta | 1.14 | | -| `SCTPSupport` | `false` | Alpha | 1.12 | 1.18 | -| `SCTPSupport` | `true` | Beta | 1.19 | | | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | | `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | | @@ -181,12 +179,13 @@ different Kubernetes components. 
 | `TopologyManager` | `true` | Beta | 1.18 | |
 | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 |
 | `ValidateProxyRedirects` | `true` | Beta | 1.14 | |
+| `VolumeCapacityPriority` | `false` | Alpha | 1.21 | - |
 | `WarningHeaders` | `true` | Beta | 1.19 | |
 | `WinDSR` | `false` | Alpha | 1.14 | |
 | `WinOverlay` | `false` | Alpha | 1.14 | 1.19 |
 | `WinOverlay` | `true` | Beta | 1.20 | |
 | `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 |
-| `WindowsEndpointSliceProxying` | `true` | beta | 1.21 | |
+| `WindowsEndpointSliceProxying` | `true` | Beta | 1.21 | |
 {{< /table >}}
 
 ### Feature gates for graduated or deprecated features
@@ -225,7 +224,6 @@ different Kubernetes components.
 | `CSIPersistentVolume` | `false` | Alpha | 1.9 | 1.9 |
 | `CSIPersistentVolume` | `true` | Beta | 1.10 | 1.12 |
 | `CSIPersistentVolume` | `true` | GA | 1.13 | - |
-| `CSIVolumeHealth` | `false` | Alpha | 1.21 | - |
 | `CustomPodDNS` | `false` | Alpha | 1.9 | 1.9 |
 | `CustomPodDNS` | `true` | Beta| 1.10 | 1.13 |
 | `CustomPodDNS` | `true` | GA | 1.14 | - |
@@ -258,9 +256,9 @@ different Kubernetes components.
 | `EnableEquivalenceClassCache` | - | Deprecated | 1.15 | - |
 | `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 |
 | `EndpointSlice` | `false` | Beta | 1.17 | 1.17 |
-| `EndpointSlice` | `true` | Beta | 1.18 | 1.21 |
+| `EndpointSlice` | `true` | Beta | 1.18 | 1.20 |
 | `EndpointSlice` | `true` | GA | 1.21 | - |
-| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.21 |
+| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | 1.20 |
 | `EndpointSliceNodeName` | `true` | GA | 1.21 | - |
 | `ExperimentalCriticalPodAnnotation` | `false` | Alpha | 1.5 | 1.12 |
 | `ExperimentalCriticalPodAnnotation` | `false` | Deprecated | 1.13 | - |
@@ -278,7 +276,6 @@ different Kubernetes components.
 | `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 |
 | `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | 1.20 |
 | `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | |
-| `IndexedJob` | `false` | Alpha | 1.21 | |
 | `Initializers` | `false` | Alpha | 1.7 | 1.13 |
 | `Initializers` | - | Deprecated | 1.14 | - |
 | `KubeletConfigFile` | `false` | Alpha | 1.8 | 1.9 |
@@ -315,6 +312,7 @@ different Kubernetes components.
 | `PodShareProcessNamespace` | `true` | Beta | 1.12 | 1.16 |
 | `PodShareProcessNamespace` | `true` | GA | 1.17 | - |
 | `RequestManagement` | `false` | Alpha | 1.15 | 1.16 |
+| `RequestManagement` | - | Deprecated | 1.17 | - |
 | `ResourceLimitsPriorityFunction` | `false` | Alpha | 1.9 | 1.18 |
 | `ResourceLimitsPriorityFunction` | - | Deprecated | 1.19 | - |
 | `ResourceQuotaScopeSelectors` | `false` | Alpha | 1.11 | 1.11 |
@@ -338,7 +336,7 @@ different Kubernetes components.
 | `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | 1.20 |
 | `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | - |
 | `ServiceAppProtocol` | `false` | Alpha | 1.18 | 1.18 |
-| `ServiceAppProtocol` | `true` | Beta | 1.19 | |
+| `ServiceAppProtocol` | `true` | Beta | 1.19 | 1.19 |
 | `ServiceAppProtocol` | `true` | GA | 1.20 | - |
 | `ServiceLoadBalancerFinalizer` | `false` | Alpha | 1.15 | 1.15 |
 | `ServiceLoadBalancerFinalizer` | `true` | Beta | 1.16 | 1.16 |
@@ -350,7 +348,7 @@
| `StorageObjectInUseProtection` | `true` | GA | 1.11 | - | | `StreamingProxyRedirects` | `false` | Beta | 1.5 | 1.5 | | `StreamingProxyRedirects` | `true` | Beta | 1.6 | 1.18 | -| `StreamingProxyRedirects` | - | Deprecated| 1.19 | - | +| `StreamingProxyRedirects` | - | GA | 1.19 | - | | `SupportIPVSProxyMode` | `false` | Alpha | 1.8 | 1.8 | | `SupportIPVSProxyMode` | `false` | Beta | 1.9 | 1.9 | | `SupportIPVSProxyMode` | `true` | Beta | 1.10 | 1.10 | @@ -375,16 +373,15 @@ different Kubernetes components. | `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | Beta | 1.12 | 1.19 | | `TokenRequestProjection` | `true` | GA | 1.20 | - | -| `VolumeCapacityPriority` | `false` | Alpha | 1.21 | - | -| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | -| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 | -| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | | `VolumePVCDataSource` | `false` | Alpha | 1.15 | 1.15 | | `VolumePVCDataSource` | `true` | Beta | 1.16 | 1.17 | | `VolumePVCDataSource` | `true` | GA | 1.18 | - | | `VolumeScheduling` | `false` | Alpha | 1.9 | 1.9 | | `VolumeScheduling` | `true` | Beta | 1.10 | 1.12 | | `VolumeScheduling` | `true` | GA | 1.13 | - | +| `VolumeSnapshotDataSource` | `false` | Alpha | 1.12 | 1.16 | +| `VolumeSnapshotDataSource` | `true` | Beta | 1.17 | 1.19 | +| `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | | `VolumeSubpath` | `true` | GA | 1.10 | - | | `VolumeSubpathEnvExpansion` | `false` | Alpha | 1.14 | 1.14 | | `VolumeSubpathEnvExpansion` | `true` | Beta | 1.15 | 1.16 | @@ -451,7 +448,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `APIServerIdentity`: Assign each API server an ID in a cluster. - `Accelerators`: Enable Nvidia GPU support when using Docker - `AdvancedAuditing`: Enable [advanced auditing](/docs/tasks/debug-application-cluster/audit/#advanced-audit) -- `AffinityInAnnotations`(*deprecated*): Enable setting +- `AffinityInAnnotations`: Enable setting [Pod affinity or anti-affinity](/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity). - `AllowExtTrafficLocalEndpoints`: Enable a service to route external requests to node local endpoints. - `AllowInsecureBackendProxy`: Enable the users to skip TLS verification of @@ -592,18 +589,18 @@ Each feature gate is designed for enabling/disabling a specific feature: [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). - `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests so that validation, merging, and mutation can be tested without committing. -- `DynamicAuditing`(*deprecated*): Used to enable dynamic auditing before v1.19. +- `DynamicAuditing`: Used to enable dynamic auditing before v1.19. - `DynamicKubeletConfig`: Enable the dynamic configuration of kubelet. See [Reconfigure kubelet](/docs/tasks/administer-cluster/reconfigure-kubelet/). - `DynamicProvisioningScheduling`: Extend the default scheduler to be aware of volume topology and handle PV provisioning. This feature is superseded by the `VolumeScheduling` feature completely in v1.12. -- `DynamicVolumeProvisioning`(*deprecated*): Enable the +- `DynamicVolumeProvisioning`: Enable the [dynamic provisioning](/docs/concepts/storage/dynamic-provisioning/) of persistent volumes to Pods. - `EfficientWatchResumption`: Allows for storage-originated bookmark (progress notify) events to be delivered to the users. 
This is only applied to watch operations. -- `EnableAggregatedDiscoveryTimeout` (*deprecated*): Enable the five second +- `EnableAggregatedDiscoveryTimeout`: Enable the five second timeout on aggregated discovery calls. - `EnableEquivalenceClassCache`: Enable the scheduler to cache equivalence of nodes when scheduling Pods. @@ -664,11 +661,13 @@ Each feature gate is designed for enabling/disabling a specific feature: - `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/) controller to manage Pod completions per completion index. - `IngressClassNamespacedParams`: Allow namespace-scoped parameters reference in - `IngressClass` resouce. This feature adds two fields - `Scope` and `Namespace` + `IngressClass` resource. This feature adds two fields - `Scope` and `Namespace` to `IngressClass.spec.parameters`. +- `Initializers`: Allow asynchronous coordination of object creation using the + Initializers admission plugin. - `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) support for IPv6. -- `KubeletConfigFile` (*deprecated*): Enable loading kubelet configuration from +- `KubeletConfigFile`: Enable loading kubelet configuration from a file specified using a config file. See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. @@ -679,7 +678,7 @@ Each feature gate is designed for enabling/disabling a specific feature: [Support Device Monitoring](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md) for more details. - `KubeletPodResourcesGetAllocatable`: Enable the kubelet's pod resources `GetAllocatableResources` functionality. - This API augments the [resource allocation reporting](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) + This API augments the [resource allocation reporting](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) with informations about the allocatable resources, enabling clients to properly track the free compute resources on a node. - `LegacyNodeRoleBehavior`: When disabled, legacy behavior in service load balancers and node disruption will ignore the `node-role.kubernetes.io/master` label in favor of the @@ -699,8 +698,7 @@ Each feature gate is designed for enabling/disabling a specific feature: based on logarithmic bucketing of pod timestamps. - `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type Service instance. -- `MountContainers` (*deprecated*): Enable using utility containers on host as - the volume mounter. +- `MountContainers`: Enable using utility containers on host as the volume mounter. - `MountPropagation`: Enable sharing volume mounted by one container to other containers or pods. For more details, please see [mount propagation](/docs/concepts/storage/volumes/#mount-propagation). - `NamespaceDefaultLabelName`: Configure the API Server to set an immutable {{< glossary_tooltip text="label" term_id="label" >}} @@ -712,7 +710,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `NonPreemptingPriority`: Enable `preemptionPolicy` field for PriorityClass and Pod. - `PVCProtection`: Enable the prevention of a PersistentVolumeClaim (PVC) from being deleted when it is still used by any Pod. 
-- `PodDeletionCost`: Enable the [Pod Deletion Cost](/docs/content/en/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) +- `PodDeletionCost`: Enable the [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) feature which allows users to influence ReplicaSet downscaling order. - `PersistentLocalVolumes`: Enable the usage of `local` volume type in Pods. Pod affinity has to be specified if requesting a `local` volume. @@ -730,7 +728,7 @@ Each feature gate is designed for enabling/disabling a specific feature: a single process namespace between containers running in a pod. More details can be found in [Share Process Namespace between Containers in a Pod](/docs/tasks/configure-pod-container/share-process-namespace/). - `ProbeTerminationGracePeriod`: Enable [setting probe-level - `terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationGracePeriodSeconds) + `terminationGracePeriodSeconds`](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) on pods. See the [enhancement proposal](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period) for more details. - `ProcMountType`: Enables control over the type proc mounts for containers by setting the `procMount` field of a SecurityContext. @@ -742,7 +740,9 @@ Each feature gate is designed for enabling/disabling a specific feature: [chunking list request](/docs/reference/using-api/api-concepts/#retrieving-large-results-sets-in-chunks). - `RemoveSelfLink`: Deprecates and removes `selfLink` from ObjectMeta and ListMeta. -- `ResourceLimitsPriorityFunction` (*deprecated*): Enable a scheduler priority function that +- `RequestManagement`: Enables managing request concurrency with prioritization and fairness + at each API server. Deprecated by `APIPriorityAndFairness` since 1.17. +- `ResourceLimitsPriorityFunction`: Enable a scheduler priority function that assigns a lowest possible score of 1 to a node that satisfies at least one of the input Pod's cpu and memory limits. The intent is to break ties between nodes with same scores. @@ -772,11 +772,11 @@ Each feature gate is designed for enabling/disabling a specific feature: JWKS URLs) for the service account issuer in the API server. See [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) for more details. -- `ServiceAppProtocol`: Enables the `AppProtocol` field on Services and Endpoints. -- `ServiceInternalTrafficPolicy`: Enables the `InternalTrafficPolicy` field on Services -- `ServiceLBNodePortControl`: Enables the `spec.allocateLoadBalancerNodePorts` - field on Services. -- `ServiceLoadBalancerClass`: Enables the `LoadBalancerClass` field on Services. See [Specifying class of load balancer implementation](/docs/concepts/services-networking/service/#specifying-class-of-load-balancer-implementation-load-balancer-class) for more details. +- `ServiceAppProtocol`: Enables the `appProtocol` field on Services and Endpoints. +- `ServiceInternalTrafficPolicy`: Enables the `internalTrafficPolicy` field on Services +- `ServiceLBNodePortControl`: Enables the `allocateLoadBalancerNodePorts` field on Services. +- `ServiceLoadBalancerClass`: Enables the `loadBalancerClass` field on Services. 
See + [Specifying class of load balancer implementation](/docs/concepts/services-networking/service/#load-balancer-class) for more details. - `ServiceLoadBalancerFinalizer`: Enable finalizer protection for Service load balancers. - `ServiceNodeExclusion`: Enable the exclusion of nodes from load balancers created by a cloud provider. A node is eligible for exclusion if labelled with @@ -804,12 +804,12 @@ Each feature gate is designed for enabling/disabling a specific feature: Examples of streaming requests include the `exec`, `attach` and `port-forward` requests. - `SupportIPVSProxyMode`: Enable providing in-cluster service load balancing using IPVS. See [service proxies](/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies) for more details. -- `SupportPodPidsLimit`: Enable the support to limiting PIDs in Pods. - `SupportNodePidsLimit`: Enable the support to limiting PIDs on the Node. The parameter `pid=` in the `--system-reserved` and `--kube-reserved` options can be specified to ensure that the specified number of process IDs will be reserved for the system as a whole and for Kubernetes system daemons respectively. +- `SupportPodPidsLimit`: Enable the support to limiting PIDs in Pods. - `SuspendJob`: Enable support to suspend and resume Jobs. See [the Jobs docs](/docs/concepts/workloads/controllers/job/) for more details. @@ -835,6 +835,9 @@ Each feature gate is designed for enabling/disabling a specific feature: - `TopologyManager`: Enable a mechanism to coordinate fine-grained hardware resource assignments for different components in Kubernetes. See [Control Topology Management Policies on a node](/docs/tasks/administer-cluster/topology-manager/). +- `ValidateProxyRedirects`: This flag controls whether the API server should + validate that redirects are only followed to the same host. Only used if the + `StreamingProxyRedirects` flag is enabled. - 'VolumeCapacityPriority`: Enable support for prioritizing nodes in different topologies based on available PV capacity. - `VolumePVCDataSource`: Enable support for specifying an existing PVC as a DataSource. @@ -843,6 +846,7 @@ Each feature gate is designed for enabling/disabling a specific feature: enables the usage of [`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the `PersistentLocalVolumes` feature gate. - `VolumeSnapshotDataSource`: Enable volume snapshot data source support. +- `VolumeSubpath`: Allow mounting a subpath of a volume in a container. - `VolumeSubpathEnvExpansion`: Enable `subPathExpr` field for expanding environment variables into a `subPath`. - `WarningHeaders`: Allow sending warning headers in API responses. diff --git a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md index edf25ad8350f9..c502c77d16085 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-apiserver.md @@ -5,6 +5,7 @@ weight: 30 auto_generated: true --- + ,. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

+ + --allow-privileged @@ -469,7 +477,14 @@ kube-apiserver [flags] --disable-admission-plugins strings -

admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

+

admission plugins that should be disabled although they are in the default enabled plugins list (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

+ + + +--disabled-metrics strings + + +

This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

@@ -483,7 +498,7 @@ kube-apiserver [flags] --enable-admission-plugins strings -

admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

+

admission plugins that should be enabled in addition to default enabled ones (NamespaceLifecycle, LimitRanger, ServiceAccount, TaintNodesByCondition, Priority, DefaultTolerationSeconds, DefaultStorageClass, StorageObjectInUseProtection, PersistentVolumeClaimResize, RuntimeClass, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, MutatingAdmissionWebhook, ValidatingAdmissionWebhook, ResourceQuota). Comma-delimited list of admission plugins: AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyServiceExternalIPs, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook. The order of plugins in this flag does not matter.

@@ -595,7 +610,7 @@ kube-apiserver [flags] --etcd-servers-overrides strings -

Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated.

+

Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are URLs, semicolon separated. Note that this applies only to resources compiled into this server binary.

@@ -623,7 +638,7 @@ kube-apiserver [flags] --feature-gates <comma-separated 'key=True|False' pairs> -

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (ALPHA - default=false)
CSIStorageCapacity=true|false (ALPHA - default=false)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
CronJobControllerV2=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (ALPHA - default=false)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceNodeName=true|false (ALPHA - default=false)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (ALPHA - default=false)
GracefulNodeShutdown=true|false (ALPHA - default=false)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (BETA - default=true)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (BETA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (BETA - default=true)
NonPreemptingPriority=true|false (BETA - default=true)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RootCAConfigMap=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (BETA - default=true)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

@@ -703,6 +718,13 @@ kube-apiserver [flags]

If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be of type NodePort, using this as the value of the port. If zero, the Kubernetes master service will be of type ClusterIP.

+ +--lease-reuse-duration-seconds int     Default: 60 + + +

The time in seconds that each lease is reused. A lower value could avoid a large number of objects reusing the same lease. Note that too small a value may cause performance problems at the storage layer.

+ + --livez-grace-period duration @@ -749,7 +771,7 @@ kube-apiserver [flags] --logging-format string     Default: "text" -

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

+

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

@@ -861,7 +883,14 @@ kube-apiserver [flags] --one-output -

If true, only write logs to their native severity level (vs also writing to each lower severity level

+

If true, only write logs to their native severity level (vs also writing to each lower severity level)

+ + + +--permit-address-sharing + + +

If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

@@ -1001,7 +1030,7 @@ kube-apiserver [flags] --service-cluster-ip-range string -

A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods.

+

A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes or pods. Max of two dual-stack CIDRs is allowed.

@@ -1060,6 +1089,13 @@ kube-apiserver [flags]

The media type to use to store objects in storage. Some resources or storage backends may only support a specific media type and will ignore this setting.

+ +--strict-transport-security-directives strings + + +

List of directives for HSTS, comma separated. If this list is empty, then HSTS directives will not be added. Example: 'max-age=31536000,includeSubDomains,preload'

+ + --tls-cert-file string diff --git a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md index a7ad248e94d77..df8f76baede44 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md @@ -5,6 +5,7 @@ weight: 30 auto_generated: true --- + ,. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

+ + --alsologtostderr @@ -99,7 +107,7 @@ kube-controller-manager [flags] ---authorization-always-allow-paths strings     Default: "/healthz" +--authorization-always-allow-paths strings     Default: "/healthz,/readyz,/livez"

A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.

@@ -294,6 +302,13 @@ kube-controller-manager [flags]

The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load

+ +--concurrent-rc-syncs int32     Default: 5 + + +

The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load

+ + --concurrent-replicaset-syncs int32     Default: 5 @@ -343,13 +358,6 @@ kube-controller-manager [flags]

The number of TTL-after-finished controller workers that are allowed to sync concurrently.

- ---concurrent_rc_syncs int32     Default: 5 - - -

The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load

- - --configure-cloud-routes     Default: true @@ -392,6 +400,13 @@ kube-controller-manager [flags]

Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.

+ +--disabled-metrics strings + + +

This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

+ + --enable-dynamic-provisioning     Default: true @@ -413,6 +428,13 @@ kube-controller-manager [flags]

Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.

+ +--enable-leader-migration + + +

Whether to enable controller leader migration.

+ + --enable-taint-manager     Default: true @@ -452,7 +474,7 @@ kube-controller-manager [flags] --feature-gates <comma-separated 'key=True|False' pairs> -

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (ALPHA - default=false)
CSIStorageCapacity=true|false (ALPHA - default=false)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
CronJobControllerV2=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (ALPHA - default=false)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceNodeName=true|false (ALPHA - default=false)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (ALPHA - default=false)
GracefulNodeShutdown=true|false (ALPHA - default=false)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (BETA - default=true)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (BETA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (BETA - default=true)
NonPreemptingPriority=true|false (BETA - default=true)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RootCAConfigMap=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (BETA - default=true)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)

@@ -595,6 +617,13 @@ kube-controller-manager [flags]

The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.

+ +--leader-migration-config string + + +

Path to the config file for controller leader migration, or empty to use the value that reflects default configuration of the controller manager. The config file should be of type LeaderMigrationConfiguration, group controllermanager.config.k8s.io, version v1alpha1.

+ + --log-backtrace-at <a string in the form 'file:N'>     Default: :0 @@ -634,7 +663,7 @@ kube-controller-manager [flags] --logging-format string     Default: "text" -

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

+

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

@@ -746,7 +775,14 @@ kube-controller-manager [flags] --one-output -

If true, only write logs to their native severity level (vs also writing to each lower severity level

+

If true, only write logs to their native severity level (vs also writing to each lower severity level)

+ + + +--permit-address-sharing + + +

If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

diff --git a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md index ad9d8b022da01..8313be8ebb58b 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-proxy.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-proxy.md @@ -5,6 +5,7 @@ weight: 30 auto_generated: true --- + ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, rather than being surprised when they are permanently removed in the release after that.

+ +--skip-headers + + +

If true, avoid header prefixes in the log messages

+ + + +--skip-log-headers + + +

If true, avoid headers when opening log files

+ + + +--stderrthreshold int     Default: 2 + + +

logs at or above this threshold go to stderr

+ + --udp-timeout duration     Default: 250ms @@ -335,6 +455,13 @@ kube-proxy [flags]

How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace

+ +-v, --v int + + +

number for the log level verbosity

+ + --version version[=true] @@ -342,6 +469,13 @@ kube-proxy [flags]

Print version information and quit

+ +--vmodule <comma-separated 'pattern=N' settings> + + +

comma-separated list of pattern=N settings for file-filtered logging

+ + --write-config-to string diff --git a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md index 913d7eb925c09..885e997ed7f71 100644 --- a/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +++ b/content/en/docs/reference/command-line-tools-reference/kube-scheduler.md @@ -5,6 +5,7 @@ weight: 30 auto_generated: true --- + ,. The value's format is <allowed_value>,<allowed_value>...e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.

+ + --alsologtostderr @@ -99,7 +107,7 @@ kube-scheduler [flags] ---authorization-always-allow-paths strings     Default: "/healthz" +--authorization-always-allow-paths strings     Default: "/healthz,/readyz,/livez"

A list of HTTP paths to skip during authorization, i.e. these are authorized without contacting the 'core' kubernetes server.

@@ -158,14 +166,21 @@ kube-scheduler [flags] --config string -

The path to the configuration file. The following flags can overwrite fields in this file:
--address
--port
--use-legacy-policy-config
--policy-configmap
--policy-config-file
--algorithm-provider

+

The path to the configuration file. The following flags can overwrite fields in this file:
--algorithm-provider
--policy-config-file
--policy-configmap
--policy-configmap-namespace

--contention-profiling     Default: true -

DEPRECATED: enable lock contention profiling, if profiling is enabled

+

DEPRECATED: enable lock contention profiling, if profiling is enabled. This parameter is ignored if a config file is specified in --config.

+ + + +--disabled-metrics strings + + +

This flag provides an escape hatch for misbehaving metrics. You must provide the fully qualified metric name in order to disable it. Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.

@@ -179,14 +194,14 @@ kube-scheduler [flags] --feature-gates <comma-separated 'key=True|False' pairs> -

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AllowInsecureBackendProxy=true|false (BETA - default=true)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (ALPHA - default=false)
CPUManager=true|false (BETA - default=true)
CRIContainerLogRotation=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAWSComplete=true|false (ALPHA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureDiskComplete=true|false (ALPHA - default=false)
CSIMigrationAzureFile=true|false (ALPHA - default=false)
CSIMigrationAzureFileComplete=true|false (ALPHA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationGCEComplete=true|false (ALPHA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=false)
CSIMigrationOpenStackComplete=true|false (ALPHA - default=false)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (ALPHA - default=false)
CSIStorageCapacity=true|false (ALPHA - default=false)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
CronJobControllerV2=true|false (ALPHA - default=false)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (ALPHA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (ALPHA - default=false)
EndpointSlice=true|false (BETA - default=true)
EndpointSliceNodeName=true|false (ALPHA - default=false)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (ALPHA - default=false)
GracefulNodeShutdown=true|false (ALPHA - default=false)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (ALPHA - default=false)
ImmutableEphemeralVolumes=true|false (BETA - default=true)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
LegacyNodeRoleBehavior=true|false (BETA - default=true)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NodeDisruptionExclusion=true|false (BETA - default=true)
NonPreemptingPriority=true|false (BETA - default=true)
PodDisruptionBudget=true|false (BETA - default=true)
PodOverhead=true|false (BETA - default=true)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RootCAConfigMap=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
RunAsGroup=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceAccountIssuerDiscovery=true|false (BETA - default=true)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceNodeExclusion=true|false (BETA - default=true)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
Sysctls=true|false (BETA - default=true)
TTLAfterFinished=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (ALPHA - default=false)

+

A set of key=value pairs that describe feature gates for alpha/experimental features. Options are:
APIListChunking=true|false (BETA - default=true)
APIPriorityAndFairness=true|false (BETA - default=true)
APIResponseCompression=true|false (BETA - default=true)
APIServerIdentity=true|false (ALPHA - default=false)
AllAlpha=true|false (ALPHA - default=false)
AllBeta=true|false (BETA - default=false)
AnyVolumeDataSource=true|false (ALPHA - default=false)
AppArmor=true|false (BETA - default=true)
BalanceAttachedNodeVolumes=true|false (ALPHA - default=false)
BoundServiceAccountTokenVolume=true|false (BETA - default=true)
CPUManager=true|false (BETA - default=true)
CSIInlineVolume=true|false (BETA - default=true)
CSIMigration=true|false (BETA - default=true)
CSIMigrationAWS=true|false (BETA - default=false)
CSIMigrationAzureDisk=true|false (BETA - default=false)
CSIMigrationAzureFile=true|false (BETA - default=false)
CSIMigrationGCE=true|false (BETA - default=false)
CSIMigrationOpenStack=true|false (BETA - default=true)
CSIMigrationvSphere=true|false (BETA - default=false)
CSIMigrationvSphereComplete=true|false (BETA - default=false)
CSIServiceAccountToken=true|false (BETA - default=true)
CSIStorageCapacity=true|false (BETA - default=true)
CSIVolumeFSGroupPolicy=true|false (BETA - default=true)
CSIVolumeHealth=true|false (ALPHA - default=false)
ConfigurableFSGroupPolicy=true|false (BETA - default=true)
ControllerManagerLeaderMigration=true|false (ALPHA - default=false)
CronJobControllerV2=true|false (BETA - default=true)
CustomCPUCFSQuotaPeriod=true|false (ALPHA - default=false)
DaemonSetUpdateSurge=true|false (ALPHA - default=false)
DefaultPodTopologySpread=true|false (BETA - default=true)
DevicePlugins=true|false (BETA - default=true)
DisableAcceleratorUsageMetrics=true|false (BETA - default=true)
DownwardAPIHugePages=true|false (BETA - default=false)
DynamicKubeletConfig=true|false (BETA - default=true)
EfficientWatchResumption=true|false (BETA - default=true)
EndpointSliceProxying=true|false (BETA - default=true)
EndpointSliceTerminatingCondition=true|false (ALPHA - default=false)
EphemeralContainers=true|false (ALPHA - default=false)
ExpandCSIVolumes=true|false (BETA - default=true)
ExpandInUsePersistentVolumes=true|false (BETA - default=true)
ExpandPersistentVolumes=true|false (BETA - default=true)
ExperimentalHostUserNamespaceDefaulting=true|false (BETA - default=false)
GenericEphemeralVolume=true|false (BETA - default=true)
GracefulNodeShutdown=true|false (BETA - default=true)
HPAContainerMetrics=true|false (ALPHA - default=false)
HPAScaleToZero=true|false (ALPHA - default=false)
HugePageStorageMediumSize=true|false (BETA - default=true)
IPv6DualStack=true|false (BETA - default=true)
InTreePluginAWSUnregister=true|false (ALPHA - default=false)
InTreePluginAzureDiskUnregister=true|false (ALPHA - default=false)
InTreePluginAzureFileUnregister=true|false (ALPHA - default=false)
InTreePluginGCEUnregister=true|false (ALPHA - default=false)
InTreePluginOpenStackUnregister=true|false (ALPHA - default=false)
InTreePluginvSphereUnregister=true|false (ALPHA - default=false)
IndexedJob=true|false (ALPHA - default=false)
IngressClassNamespacedParams=true|false (ALPHA - default=false)
KubeletCredentialProviders=true|false (ALPHA - default=false)
KubeletPodResources=true|false (BETA - default=true)
KubeletPodResourcesGetAllocatable=true|false (ALPHA - default=false)
LocalStorageCapacityIsolation=true|false (BETA - default=true)
LocalStorageCapacityIsolationFSQuotaMonitoring=true|false (ALPHA - default=false)
LogarithmicScaleDown=true|false (ALPHA - default=false)
MemoryManager=true|false (ALPHA - default=false)
MixedProtocolLBService=true|false (ALPHA - default=false)
NamespaceDefaultLabelName=true|false (BETA - default=true)
NetworkPolicyEndPort=true|false (ALPHA - default=false)
NonPreemptingPriority=true|false (BETA - default=true)
PodAffinityNamespaceSelector=true|false (ALPHA - default=false)
PodDeletionCost=true|false (ALPHA - default=false)
PodOverhead=true|false (BETA - default=true)
PreferNominatedNode=true|false (ALPHA - default=false)
ProbeTerminationGracePeriod=true|false (ALPHA - default=false)
ProcMountType=true|false (ALPHA - default=false)
QOSReserved=true|false (ALPHA - default=false)
RemainingItemCount=true|false (BETA - default=true)
RemoveSelfLink=true|false (BETA - default=true)
RotateKubeletServerCertificate=true|false (BETA - default=true)
ServerSideApply=true|false (BETA - default=true)
ServiceInternalTrafficPolicy=true|false (ALPHA - default=false)
ServiceLBNodePortControl=true|false (ALPHA - default=false)
ServiceLoadBalancerClass=true|false (ALPHA - default=false)
ServiceTopology=true|false (ALPHA - default=false)
SetHostnameAsFQDN=true|false (BETA - default=true)
SizeMemoryBackedVolumes=true|false (ALPHA - default=false)
StorageVersionAPI=true|false (ALPHA - default=false)
StorageVersionHash=true|false (BETA - default=true)
SuspendJob=true|false (ALPHA - default=false)
TTLAfterFinished=true|false (BETA - default=true)
TopologyAwareHints=true|false (ALPHA - default=false)
TopologyManager=true|false (BETA - default=true)
ValidateProxyRedirects=true|false (BETA - default=true)
VolumeCapacityPriority=true|false (ALPHA - default=false)
WarningHeaders=true|false (BETA - default=true)
WinDSR=true|false (ALPHA - default=false)
WinOverlay=true|false (BETA - default=true)
WindowsEndpointSliceProxying=true|false (BETA - default=true)
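
As a usage sketch, feature gates are normally passed to the scheduler binary as a single comma-separated flag value. The static Pod manifest below assumes a kubeadm-style setup (image tag and file layout are illustrative); only the gate names are taken from the list above.

```yaml
# Illustrative static Pod manifest enabling feature gates on kube-scheduler;
# in a kubeadm cluster this would typically live at /etc/kubernetes/manifests/kube-scheduler.yaml.
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-scheduler
    image: k8s.gcr.io/kube-scheduler:v1.21.0   # assumed image tag
    command:
    - kube-scheduler
    - --config=/etc/kubernetes/scheduler-config.yaml
    - --feature-gates=GracefulNodeShutdown=true,DefaultPodTopologySpread=false
```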

--hard-pod-affinity-symmetric-weight int32     Default: 1 -

DEPRECATED: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule. Must be in the range 0-100.This option was moved to the policy configuration file

+

DEPRECATED: RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of the implicit PreferredDuringScheduling affinity rule. Must be in the range 0-100. This parameter is ignored if a config file is specified in --config.

@@ -207,28 +222,28 @@ kube-scheduler [flags] --kube-api-burst int32     Default: 100 -

DEPRECATED: burst to use while talking with kubernetes apiserver

+

DEPRECATED: burst to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-content-type string     Default: "application/vnd.kubernetes.protobuf" -

DEPRECATED: content type of requests sent to apiserver.

+

DEPRECATED: content type of requests sent to apiserver. This parameter is ignored if a config file is specified in --config.

--kube-api-qps float     Default: 50 -

DEPRECATED: QPS to use while talking with kubernetes apiserver

+

DEPRECATED: QPS to use while talking with kubernetes apiserver. This parameter is ignored if a config file is specified in --config.

--kubeconfig string -

DEPRECATED: path to kubeconfig file with authorization and master location information.

+

DEPRECATED: path to kubeconfig file with authorization and master location information. This parameter is ignored if a config file is specified in --config.
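
The deprecated --kube-api-burst, --kube-api-content-type, --kube-api-qps, and --kubeconfig flags all correspond to fields under `clientConnection` in the scheduler's configuration file. A sketch of that block, with illustrative values and assuming the v1beta1 component-config API, looks like this:

```yaml
# Sketch of the clientConnection block that replaces the deprecated flags above;
# assumes the kubescheduler.config.k8s.io/v1beta1 API, values are illustrative.
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: /etc/kubernetes/scheduler.conf         # --kubeconfig
  contentType: application/vnd.kubernetes.protobuf   # --kube-api-content-type
  qps: 50                                            # --kube-api-qps
  burst: 100                                         # --kube-api-burst
```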

@@ -284,14 +299,14 @@ kube-scheduler [flags] --lock-object-name string     Default: "kube-scheduler" -

DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name

+

DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name. This parameter is ignored if a config file is specified in --config.

--lock-object-namespace string     Default: "kube-system" -

DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace.

+

DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace. This parameter is ignored if a config file is specified in --config.
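
For orientation, the leader-election lock that --lock-object-name and --lock-object-namespace used to configure is expressed in the configuration file roughly as in the sketch below; this assumes the v1beta1 component-config API and is not text from this page.

```yaml
# Sketch of the leaderElection block; resourceName and resourceNamespace are the
# successors to --lock-object-name and --lock-object-namespace.
apiVersion: kubescheduler.config.k8s.io/v1beta1
kind: KubeSchedulerConfiguration
leaderElection:
  leaderElect: true
  resourceName: kube-scheduler
  resourceNamespace: kube-system
```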

@@ -333,7 +348,7 @@ kube-scheduler [flags] --logging-format string     Default: "text" -

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add_dir_header, --alsologtostderr, --log_backtrace_at, --log_dir, --log_file, --log_file_max_size, --logtostderr, --one_output, --skip_headers, --skip_log_headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

+

Sets the log format. Permitted formats: "json", "text".
Non-default formats don't honor these flags: --add-dir-header, --alsologtostderr, --log-backtrace-at, --log-dir, --log-file, --log-file-max-size, --logtostderr, --one-output, --skip-headers, --skip-log-headers, --stderrthreshold, --vmodule, --log-flush-frequency.
Non-default choices are currently alpha and subject to change without warning.

@@ -354,7 +369,14 @@ kube-scheduler [flags] --one-output -

If true, only write logs to their native severity level (vs also writing to each lower severity level

+

If true, only write logs to their native severity level (vs also writing to each lower severity level)

+ + + +--permit-address-sharing + + +

If true, SO_REUSEADDR will be used when binding the port. This allows binding to wildcard IPs like 0.0.0.0 and specific IPs in parallel, and it avoids waiting for the kernel to release sockets in TIME_WAIT state. [default=false]

@@ -389,14 +411,14 @@ kube-scheduler [flags] --port int     Default: 10251 -

DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead.

+

DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve plain HTTP at all. See --secure-port instead. This parameter is ignored if a config file is specified in --config.

--profiling     Default: true -

DEPRECATED: enable profiling via web interface host:port/debug/pprof/

+

DEPRECATED: enable profiling via web interface host:port/debug/pprof/. This parameter is ignored if a config file is specified in --config.

@@ -438,7 +460,7 @@ kube-scheduler [flags] --scheduler-name string     Default: "default-scheduler" -

DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's "spec.schedulerName".

+

DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's "spec.schedulerName". This parameter is ignored if a config file is specified in --config.
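
Since pods select a scheduler through `spec.schedulerName`, a pod aimed at a non-default scheduler could look like the following sketch (all names are illustrative):

```yaml
# Illustrative Pod that opts in to a scheduler named "my-scheduler";
# that scheduler picks it up by matching spec.schedulerName.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-custom-scheduler
spec:
  schedulerName: my-scheduler
  containers:
  - name: nginx
    image: nginx:1.21
```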

diff --git a/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md new file mode 100644 index 0000000000000..fb45ca7b1adec --- /dev/null +++ b/content/en/docs/reference/config-api/apiserver-webhookadmission.v1.md @@ -0,0 +1,46 @@ +--- +title: WebhookAdmission Configuration (v1) +content_type: tool-reference +package: apiserver.config.k8s.io/v1 +auto_generated: true +--- +Package v1 is the v1 version of the API. + +## Resource Types + + +- [WebhookAdmission](#apiserver-config-k8s-io-v1-WebhookAdmission) + + + + +## `WebhookAdmission` {#apiserver-config-k8s-io-v1-WebhookAdmission} + + + + + +WebhookAdmission provides configuration for the webhook admission controller. + + + + + + + + + + + + + + + + + +
FieldDescription
apiVersion
string
apiserver.config.k8s.io/v1
kind
string
WebhookAdmission
kubeConfigFile [Required]
+string +
+ KubeConfigFile is the path to the kubeconfig file.
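
Put together, a WebhookAdmission configuration file might look like the sketch below. The file path is illustrative, and the assumption that such a file is referenced from the API server's admission control configuration is not stated on this page.

```yaml
# Sketch of a WebhookAdmission configuration file (path illustrative).
apiVersion: apiserver.config.k8s.io/v1
kind: WebhookAdmission
kubeConfigFile: /etc/kubernetes/webhook-admission-kubeconfig.yaml
```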
+ + diff --git a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md index e66bcd7443999..86315856b2f87 100644 --- a/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md +++ b/content/en/docs/reference/config-api/kube-proxy-config.v1alpha1.md @@ -9,7 +9,6 @@ auto_generated: true ## Resource Types - - [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) @@ -535,3 +534,68 @@ this always falls back to the userspace proxy. + + + +## `ClientConnectionConfiguration` {#ClientConnectionConfiguration} + + + + +**Appears in:** + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + +ClientConnectionConfiguration contains details for constructing a client. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
kubeconfig [Required]
+string +
+ kubeconfig is the path to a KubeConfig file.
acceptContentTypes [Required]
+string +
+ acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the +default value of 'application/json'. This field will control all connections to the server used by a particular +client.
contentType [Required]
+string +
+ contentType is the content type used when sending data to the server from this client.
qps [Required]
+float32 +
+ qps controls the number of queries per second allowed for this connection.
burst [Required]
+int32 +
+ burst allows extra queries to accumulate when a client is exceeding its rate.
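
As a usage sketch, ClientConnectionConfiguration appears as the `clientConnection` block of a kube-proxy configuration file; the values below are illustrative, not defaults documented on this page.

```yaml
# Sketch of clientConnection inside a KubeProxyConfiguration file; values are illustrative.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
  acceptContentTypes: ""
  contentType: application/vnd.kubernetes.protobuf
  qps: 5
  burst: 10
```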
diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md b/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md index cca65e53022eb..fac624bf4b8e8 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Authentication Resources" weight: 4 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md index c6a0d34757f8e..5cf56dd6e6157 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/certificate-signing-request-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued." title: "CertificateSigningRequest" weight: 4 +auto_generated: true --- + + `apiVersion: certificates.k8s.io/v1` `import "k8s.io/api/certificates/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md index 8e202ad28d6f6..a83d44bbf9636 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/service-account-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets." title: "ServiceAccount" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md index c0ddd62af309f..b9ee5ab8588a3 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-request-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "TokenRequest requests a token for a given service account." title: "TokenRequest" weight: 2 +auto_generated: true --- + + `apiVersion: authentication.k8s.io/v1` `import "k8s.io/api/authentication/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md index 06e0ffd5cd19b..df71bf4e1a552 100644 --- a/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authentication-resources/token-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "TokenReview attempts to authenticate a token to a known user." 
title: "TokenReview" weight: 3 +auto_generated: true --- + + `apiVersion: authentication.k8s.io/v1` `import "k8s.io/api/authentication/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md b/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md index e5390914a1b15..5b58698bd8601 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Authorization Resources" weight: 5 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md index d7dfea2179920..ad6a0ff732507 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ClusterRoleBinding references a ClusterRole, but not contain it." title: "ClusterRoleBinding" weight: 6 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md index 34bb8099ef771..cc58c7804e2d4 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/cluster-role-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding." title: "ClusterRole" weight: 5 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md index 144dfe257b4a4..a163bfa743eb2 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/local-subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace." title: "LocalSubjectAccessReview" weight: 1 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md index 36b5ebc4d6a95..bb847f33704ad 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "RoleBinding references a role, but does not contain it." 
title: "RoleBinding" weight: 8 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md index f87d305247963..6c1f8b1fdecbc 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/role-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding." title: "Role" weight: 7 +auto_generated: true --- + + `apiVersion: rbac.authorization.k8s.io/v1` `import "k8s.io/api/rbac/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md index aacafd39b114d..430a4a953b0c3 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SelfSubjectAccessReview checks whether or the current user can perform an action." title: "SelfSubjectAccessReview" weight: 2 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md index a890598ddf54f..82ab54ec4fe6d 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/self-subject-rules-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace." title: "SelfSubjectRulesReview" weight: 3 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md index 5385b125e162d..5c8d23ea4db16 100644 --- a/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md +++ b/content/en/docs/reference/kubernetes-api/authorization-resources/subject-access-review-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "SubjectAccessReview checks whether or not a user or group can perform an action." 
title: "SubjectAccessReview" weight: 4 +auto_generated: true --- + + `apiVersion: authorization.k8s.io/v1` `import "k8s.io/api/authorization/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md b/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md index 40d8cdc68d070..c0fbcc0813149 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Cluster Resources" weight: 8 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md index 19ed5e0eb09fd..45f3629c397c6 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/api-service-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "APIService represents a server for a particular GroupVersion." title: "APIService" weight: 4 +auto_generated: true --- + + `apiVersion: apiregistration.k8s.io/v1` `import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md index a007116367908..4acdf07c98b56 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/binding-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Binding ties one object to another; for example, a pod is bound to a node by a scheduler." title: "Binding" weight: 9 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md index 3b526675502b9..0542fedfbd79a 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/component-status-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ComponentStatus (and ComponentStatusList) holds the cluster validation info." title: "ComponentStatus" weight: 10 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md index cf321c1f448a4..d01f3ee70910c 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/event-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Event is a report of an event somewhere in the cluster." 
title: "Event" weight: 3 +auto_generated: true --- + + `apiVersion: events.k8s.io/v1` `import "k8s.io/api/events/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md index 0c221dea87857..8329c6016b1ca 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/flow-schema-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "FlowSchema defines the schema of a group of flows." title: "FlowSchema v1beta1" weight: 7 +auto_generated: true --- + + `apiVersion: flowcontrol.apiserver.k8s.io/v1beta1` `import "k8s.io/api/flowcontrol/v1beta1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md index 478bc2a8e75aa..8f74401a59c40 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/lease-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Lease defines a lease concept." title: "Lease" weight: 5 +auto_generated: true --- + + `apiVersion: coordination.k8s.io/v1` `import "k8s.io/api/coordination/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md index caa39a5856efc..a05f7f4f26dbb 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/namespace-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Namespace provides a scope for Names." title: "Namespace" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md index d48dc829abfe0..23764d90337ba 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/node-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Node is a worker node in Kubernetes." title: "Node" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md index 073da2a28a100..e7bd624556f61 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/priority-level-configuration-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PriorityLevelConfiguration represents the configuration of a priority level." 
title: "PriorityLevelConfiguration v1beta1" weight: 8 +auto_generated: true --- + + `apiVersion: flowcontrol.apiserver.k8s.io/v1beta1` `import "k8s.io/api/flowcontrol/v1beta1"` diff --git a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md index 8b08e83dd39fc..b505277ccc207 100644 --- a/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/cluster-resources/runtime-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "RuntimeClass defines a class of container runtime supported in the cluster." title: "RuntimeClass" weight: 6 +auto_generated: true --- + + `apiVersion: node.k8s.io/v1` `import "k8s.io/api/node/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/_index.md b/content/en/docs/reference/kubernetes-api/common-definitions/_index.md index 580fa96ff2e5e..00b036aa6e87d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/_index.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/_index.md @@ -1,4 +1,17 @@ --- title: "Common Definitions" weight: 9 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md b/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md index 09fd2ce3d130a..4131bcb1ddda9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/delete-options.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "DeleteOptions may be provided when deleting an API object." title: "DeleteOptions" weight: 1 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md b/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md deleted file mode 100644 index 5aa9f29822286..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/downward-api-volume-file.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "DownwardAPIVolumeFile" -content_type: "api_reference" -description: "DownwardAPIVolumeFile represents information to create the file containing the pod field." -title: "DownwardAPIVolumeFile" -weight: 2 ---- - - - -`import "k8s.io/api/core/v1"` - - -DownwardAPIVolumeFile represents information to create the file containing the pod field - -
- -- **path** (string), required - - Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - -- **fieldRef** (}}">ObjectFieldSelector) - - Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - -- **mode** (int32) - - Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - -- **resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md deleted file mode 100644 index 0078859720175..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/exec-action.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "ExecAction" -content_type: "api_reference" -description: "ExecAction describes a \"run in container\" action." -title: "ExecAction" -weight: 3 ---- - - - -`import "k8s.io/api/core/v1"` - - -ExecAction describes a "run in container" action. - -
- -- **command** ([]string) - - Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md deleted file mode 100644 index 1eabe9ac2d579..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/http-get-action.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "HTTPGetAction" -content_type: "api_reference" -description: "HTTPGetAction describes an action based on HTTP Get requests." -title: "HTTPGetAction" -weight: 4 ---- - - - -`import "k8s.io/api/core/v1"` - - -HTTPGetAction describes an action based on HTTP Get requests. - -
- -- **port** (IntOrString), required - - Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **host** (string) - - Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. - -- **httpHeaders** ([]HTTPHeader) - - Custom headers to set in the request. HTTP allows repeated headers. - - - *HTTPHeader describes a custom header to be used in HTTP probes* - - - **httpHeaders.name** (string), required - - The header field name - - - **httpHeaders.value** (string), required - - The header field value - -- **path** (string) - - Path to access on the HTTP server. - -- **scheme** (string) - - Scheme to use for connecting to the host. Defaults to HTTP. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md b/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md deleted file mode 100644 index 622dba69a2712..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/json-schema-props.md +++ /dev/null @@ -1,226 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kind: "JSONSchemaProps" -content_type: "api_reference" -description: "JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema." -title: "JSONSchemaProps" -weight: 5 ---- - - - -`import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"` - - -JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). - -
- -- **$ref** (string) - - -- **$schema** (string) - - -- **additionalItems** (JSONSchemaPropsOrBool) - - - - *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* - -- **additionalProperties** (JSONSchemaPropsOrBool) - - - - *JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property.* - -- **allOf** ([]}}">JSONSchemaProps) - - -- **anyOf** ([]}}">JSONSchemaProps) - - -- **default** (JSON) - - default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false. - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **definitions** (map[string]}}">JSONSchemaProps) - - -- **dependencies** (map[string]JSONSchemaPropsOrStringArray) - - - - *JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array.* - -- **description** (string) - - -- **enum** ([]JSON) - - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **example** (JSON) - - - - *JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.* - -- **exclusiveMaximum** (boolean) - - -- **exclusiveMinimum** (boolean) - - -- **externalDocs** (ExternalDocumentation) - - - - *ExternalDocumentation allows referencing an external resource for extended documentation.* - - - **externalDocs.description** (string) - - - - **externalDocs.url** (string) - - -- **format** (string) - - format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated: - - - bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041" - isbn10: an ISBN10 number string like "0321751043" - isbn13: an ISBN13 number string like "978-0321751041" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\d{3})\d{11})$ with any non digit characters mixed in - ssn: a U.S. 
social security number following the regex ^\d{3}[- ]?\d{2}[- ]?\d{4}$ - hexcolor: an hexadecimal color code like "#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like "rgb(255,255,2559" - byte: base64 encoded binary data - password: any kind of string - date: a date string like "2006-01-02" as defined by full-date in RFC3339 - duration: a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339. - -- **id** (string) - - -- **items** (JSONSchemaPropsOrArray) - - - - *JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes.* - -- **maxItems** (int64) - - -- **maxLength** (int64) - - -- **maxProperties** (int64) - - -- **maximum** (double) - - -- **minItems** (int64) - - -- **minLength** (int64) - - -- **minProperties** (int64) - - -- **minimum** (double) - - -- **multipleOf** (double) - - -- **not** (}}">JSONSchemaProps) - - -- **nullable** (boolean) - - -- **oneOf** ([]}}">JSONSchemaProps) - - -- **pattern** (string) - - -- **patternProperties** (map[string]}}">JSONSchemaProps) - - -- **properties** (map[string]}}">JSONSchemaProps) - - -- **required** ([]string) - - -- **title** (string) - - -- **type** (string) - - -- **uniqueItems** (boolean) - - -- **x-kubernetes-embedded-resource** (boolean) - - x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata). - -- **x-kubernetes-int-or-string** (boolean) - - x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns: - - 1) anyOf: - - type: integer - - type: string - 2) allOf: - - anyOf: - - type: integer - - type: string - - ... zero or more - -- **x-kubernetes-list-map-keys** ([]string) - - x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map. - - This tag MUST only be used on lists that have the "x-kubernetes-list-type" extension set to "map". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported). - - The properties specified must either be required or have a default value, to ensure those properties are present for all list items. - -- **x-kubernetes-list-type** (string) - - x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values: - - 1) `atomic`: the list is treated as a single entity, like a scalar. - Atomic lists will be entirely replaced when updated. This extension - may be used on any type of list (struct, scalar, ...). - 2) `set`: - Sets are lists that must not have multiple items with the same value. Each - value must be a scalar, an object with x-kubernetes-map-type `atomic` or an - array with x-kubernetes-list-type `atomic`. 
- 3) `map`: - These lists are like maps in that their elements have a non-index key - used to identify them. Order is preserved upon merge. The map tag - must only be used on a list with elements of type object. - Defaults to atomic for arrays. - -- **x-kubernetes-map-type** (string) - - x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values: - - 1) `granular`: - These maps are actual maps (key-value pairs) and each fields are independent - from each other (they can each be manipulated by separate actors). This is - the default behaviour for all maps. - 2) `atomic`: the list is treated as a single entity, like a scalar. - Atomic maps will be entirely replaced when updated. - -- **x-kubernetes-preserve-unknown-fields** (boolean) - - x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md b/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md deleted file mode 100644 index 64068fe1a2710..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/key-to-path.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "KeyToPath" -content_type: "api_reference" -description: "Maps a string key to a path within a volume." -title: "KeyToPath" -weight: 6 ---- - - - -`import "k8s.io/api/core/v1"` - - -Maps a string key to a path within a volume. - -
- -- **key** (string), required - - The key to project. - -- **path** (string), required - - The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. - -- **mode** (int32) - - Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md index 58d94dd5b87cf..d81c26d17a13e 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/label-selector.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "A label selector is a label query over a set of resources." title: "LabelSelector" weight: 2 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md b/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md index af9749a02cab6..96f2dafd3017d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/list-meta.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects." title: "ListMeta" weight: 3 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md index 83d4d046b171c..ce56803bacbd9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/local-object-reference.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace." title: "LocalObjectReference" weight: 4 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md deleted file mode 100644 index 57b0b627159af..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/node-affinity.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "NodeAffinity" -content_type: "api_reference" -description: "Node affinity is a group of node affinity scheduling rules." -title: "NodeAffinity" -weight: 10 ---- - - - -`import "k8s.io/api/core/v1"` - - -Node affinity is a group of node affinity scheduling rules. - -
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]PreferredSchedulingTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - - - *An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).* - - - **preferredDuringSchedulingIgnoredDuringExecution.preference** (NodeSelectorTerm), required - - A node selector term, associated with the corresponding weight. - - - *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* - - - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchExpressions** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's labels. - - - **preferredDuringSchedulingIgnoredDuringExecution.preference.matchFields** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's fields. - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** (NodeSelector) - - If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - - - *A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.* - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms** ([]NodeSelectorTerm), required - - Required. A list of node selector terms. The terms are ORed. - - - *A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.* - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's labels. - - - **requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchFields** ([]}}">NodeSelectorRequirement) - - A list of node selector requirements by node's fields. 
- - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md b/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md index ad5c95bf4cce6..33af2e88e30bb 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/node-selector-requirement.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values." title: "NodeSelectorRequirement" weight: 5 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md index ba4cf37ef07cb..9d1fc9e9c8ce4 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-field-selector.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ObjectFieldSelector selects an APIVersioned field of an object." title: "ObjectFieldSelector" weight: 6 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md index 46361d861b1a1..81d66b38c7d2f 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-meta.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create." title: "ObjectMeta" weight: 7 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md index b1ae24f61235d..fa999b11f4f4a 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/object-reference.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ObjectReference contains enough information to let you inspect or modify the referred object." title: "ObjectReference" weight: 8 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/patch.md b/content/en/docs/reference/kubernetes-api/common-definitions/patch.md index eaf37c8d150b8..a32a88b309165 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/patch.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/patch.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body." 
title: "Patch" weight: 9 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md deleted file mode 100644 index 06291de393133..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/pod-affinity.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "PodAffinity" -content_type: "api_reference" -description: "Pod affinity is a group of inter pod affinity scheduling rules." -title: "PodAffinity" -weight: 16 ---- - - - -`import "k8s.io/api/core/v1"` - - -Pod affinity is a group of inter pod affinity scheduling rules. - -
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - - - *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required - - Required. A pod affinity term, associated with the corresponding weight. - - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) - - If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md b/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md deleted file mode 100644 index baebcc3c0c17d..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/pod-anti-affinity.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "PodAntiAffinity" -content_type: "api_reference" -description: "Pod anti affinity is a group of inter pod anti affinity scheduling rules." -title: "PodAntiAffinity" -weight: 17 ---- - - - -`import "k8s.io/api/core/v1"` - - -Pod anti affinity is a group of inter pod anti affinity scheduling rules. - -
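As a comparable sketch for the fields below, a preferred (soft) anti-affinity rule that spreads replicas of an assumed `app: web` workload across zones; the weight, labels, and image are illustrative.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web-replica                    # illustrative name
  labels:
    app: web
spec:
  affinity:
    podAntiAffinity:
      # Prefer, but do not require, avoiding zones that already run app=web pods.
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app: web
          topologyKey: topology.kubernetes.io/zone
  containers:
  - name: web
    image: nginx                       # placeholder image
```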
- -- **preferredDuringSchedulingIgnoredDuringExecution** ([]WeightedPodAffinityTerm) - - The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - - - *The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm** (PodAffinityTerm), required - - Required. A pod affinity term, associated with the corresponding weight. - - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - **preferredDuringSchedulingIgnoredDuringExecution.weight** (int32), required - - weight associated with matching the corresponding podAffinityTerm, in the range 1-100. - -- **requiredDuringSchedulingIgnoredDuringExecution** ([]PodAffinityTerm) - - If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- - - *Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running* - - - **requiredDuringSchedulingIgnoredDuringExecution.topologyKey** (string), required - - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. - - - **requiredDuringSchedulingIgnoredDuringExecution.labelSelector** (}}">LabelSelector) - - A label query over a set of resources, in this case pods. - - - **requiredDuringSchedulingIgnoredDuringExecution.namespaces** ([]string) - - namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md b/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md index f0386bb66fab1..45f909c5e783c 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/quantity.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Quantity is a fixed-point representation of a number." title: "Quantity" weight: 10 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/api/resource"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md b/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md index 3d3b8d68a3977..76576bdb5d3a3 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/resource-field-selector.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ResourceFieldSelector represents container resources (cpu, memory) and their output format." title: "ResourceFieldSelector" weight: 11 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/status.md b/content/en/docs/reference/kubernetes-api/common-definitions/status.md index a134ed1407795..d40a22d6daeb9 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/status.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/status.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Status is a return value for calls that don't return other objects." 
title: "Status" weight: 12 +auto_generated: true --- + + `import "k8s.io/apimachinery/pkg/apis/meta/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md b/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md deleted file mode 100644 index 991518a57a741..0000000000000 --- a/content/en/docs/reference/kubernetes-api/common-definitions/tcp-socket-action.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "TCPSocketAction" -content_type: "api_reference" -description: "TCPSocketAction describes an action based on opening a socket." -title: "TCPSocketAction" -weight: 21 ---- - - - -`import "k8s.io/api/core/v1"` - - -TCPSocketAction describes an action based on opening a socket - -
- -- **port** (IntOrString), required - - Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **host** (string) - - Optional: Host name to connect to, defaults to the pod IP. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md b/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md index 2ec9fd517696b..a6d75abf25d2d 100644 --- a/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md +++ b/content/en/docs/reference/kubernetes-api/common-definitions/typed-local-object-reference.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace." title: "TypedLocalObjectReference" weight: 13 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md index 12792cbd08a60..deb8164881822 100644 --- a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md +++ b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "" title: "Common Parameters" weight: 10 +auto_generated: true --- + + diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md index 4f29e8ec4fe5a..aac30882a90a2 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Config and Storage Resources" weight: 3 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md index 7d7dac6bf0289..be3ef5c8e4193 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/config-map-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ConfigMap holds configuration data for pods to consume." title: "ConfigMap" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md index ddcef415e8b8d..22fcf194ee0d1 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-driver-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster." 
title: "CSIDriver" weight: 8 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md index d918d4568feff..343ab011350c7 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-node-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CSINode holds information about all CSI drivers installed on a node." title: "CSINode" weight: 9 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md index 1b619a0ef2fc3..cc915b8b15fc6 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/csi-storage-capacity-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CSIStorageCapacity stores the result of one CSI GetCapacity call." title: "CSIStorageCapacity v1beta1" weight: 10 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1beta1` `import "k8s.io/api/storage/v1beta1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md index b05083deb41c9..f73ded9ff56b5 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume." title: "PersistentVolumeClaim" weight: 4 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md index e02238f3ada7b..c5c68b19d7f27 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PersistentVolume (PV) is a storage resource provisioned by an administrator." title: "PersistentVolume" weight: 5 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md index 8dd7eb4a48b31..5e75c90b7904a 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/secret-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Secret holds secret data of a certain type." 
title: "Secret" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md index 24d9636d49ca4..c00f36797de3d 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/storage-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned." title: "StorageClass" weight: 6 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md index 8e83398b69ef6..b9bc3ee83124b 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume-attachment-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node." title: "VolumeAttachment" weight: 7 +auto_generated: true --- + + `apiVersion: storage.k8s.io/v1` `import "k8s.io/api/storage/v1"` diff --git a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md index ff72bda7f0e11..60badecc43024 100644 --- a/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md +++ b/content/en/docs/reference/kubernetes-api/config-and-storage-resources/volume.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Volume represents a named volume in a pod that may be accessed by any container in the pod." title: "Volume" weight: 3 +auto_generated: true --- + + `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/_index.md b/content/en/docs/reference/kubernetes-api/extend-resources/_index.md index ce6d3d3e39cb8..1886f6e26c816 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Extend Resources" weight: 7 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md index 5597325fac176..ce88ef945de49 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/custom-resource-definition-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CustomResourceDefinition represents a resource that should be exposed on the API server." 
title: "CustomResourceDefinition" weight: 1 +auto_generated: true --- + + `apiVersion: apiextensions.k8s.io/v1` `import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"` diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md index 59f3b7c457d15..e335c9fc2e402 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/mutating-webhook-configuration-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object." title: "MutatingWebhookConfiguration" weight: 2 +auto_generated: true --- + + `apiVersion: admissionregistration.k8s.io/v1` `import "k8s.io/api/admissionregistration/v1"` diff --git a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md index 41bf5b2905a9d..a985763cb3d4a 100644 --- a/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md +++ b/content/en/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it." title: "ValidatingWebhookConfiguration" weight: 3 +auto_generated: true --- + + `apiVersion: admissionregistration.k8s.io/v1` `import "k8s.io/api/admissionregistration/v1"` diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/_index.md b/content/en/docs/reference/kubernetes-api/policy-resources/_index.md index 06a9e27fee703..2b614bb534ab5 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Policy Resources" weight: 6 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md index d1f45d555752c..5d84379b6e889 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/limit-range-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "LimitRange sets resource usage limits for each kind of resource in a Namespace." title: "LimitRange" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md index 2288204257981..6643b81beeed9 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/network-policy-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "NetworkPolicy describes what network traffic is allowed for a set of Pods." 
title: "NetworkPolicy" weight: 3 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md index 841f1bb82a423..3b21024aeb40f 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods." title: "PodDisruptionBudget" weight: 4 +auto_generated: true --- + + `apiVersion: policy/v1` `import "k8s.io/api/policy/v1"` diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1beta1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1beta1.md deleted file mode 100644 index 9120b19468f24..0000000000000 --- a/content/en/docs/reference/kubernetes-api/policy-resources/pod-disruption-budget-v1beta1.md +++ /dev/null @@ -1,738 +0,0 @@ ---- -api_metadata: - apiVersion: "policy/v1beta1" - import: "k8s.io/api/policy/v1beta1" - kind: "PodDisruptionBudget" -content_type: "api_reference" -description: "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods." -title: "PodDisruptionBudget v1beta1" -weight: 4 ---- - -`apiVersion: policy/v1beta1` - -`import "k8s.io/api/policy/v1beta1"` - - -## PodDisruptionBudget {#PodDisruptionBudget} - -PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods - -
- -- **apiVersion**: policy/v1beta1 - - -- **kind**: PodDisruptionBudget - - -- **metadata** (}}">ObjectMeta) - - -- **spec** (}}">PodDisruptionBudgetSpec) - - Specification of the desired behavior of the PodDisruptionBudget. - -- **status** (}}">PodDisruptionBudgetStatus) - - Most recently observed status of the PodDisruptionBudget. - - - - - -## PodDisruptionBudgetSpec {#PodDisruptionBudgetSpec} - -PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. - -
- -- **maxUnavailable** (IntOrString) - - An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable". - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **minAvailable** (IntOrString) - - An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%". - - - *IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.* - -- **selector** (}}">LabelSelector) - - Label query over pods whose evictions are managed by the disruption budget. - - - - - -## PodDisruptionBudgetStatus {#PodDisruptionBudgetStatus} - -PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system. - -
- -- **currentHealthy** (int32), required - - current number of healthy pods - -- **desiredHealthy** (int32), required - - minimum desired number of healthy pods - -- **disruptionsAllowed** (int32), required - - Number of pod disruptions that are currently allowed. - -- **expectedPods** (int32), required - - total number of pods counted by this disruption budget - -- **disruptedPods** (map[string]Time) - - DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - -- **observedGeneration** (int64) - - Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation. - - - - - -## PodDisruptionBudgetList {#PodDisruptionBudgetList} - -PodDisruptionBudgetList is a collection of PodDisruptionBudgets. - -
- -- **apiVersion**: policy/v1beta1 - - -- **kind**: PodDisruptionBudgetList - - -- **metadata** (}}">ListMeta) - - -- **items** ([]}}">PodDisruptionBudget), required - - - - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read the specified PodDisruptionBudget - -#### HTTP Request - -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -401: Unauthorized - - -### `get` read status of the specified PodDisruptionBudget - -#### HTTP Request - -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -401: Unauthorized - - -### `list` list or watch objects of kind PodDisruptionBudget - -#### HTTP Request - -GET /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">PodDisruptionBudgetList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind PodDisruptionBudget - -#### HTTP Request - -GET /apis/policy/v1beta1/poddisruptionbudgets - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">PodDisruptionBudgetList): OK - -401: Unauthorized - - -### `create` create a PodDisruptionBudget - -#### HTTP Request - -POST /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">PodDisruptionBudget, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -201 (}}">PodDisruptionBudget): Created - -202 (}}">PodDisruptionBudget): Accepted - -401: Unauthorized - - -### `update` replace the specified PodDisruptionBudget - -#### HTTP Request - -PUT 
/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">PodDisruptionBudget, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -201 (}}">PodDisruptionBudget): Created - -401: Unauthorized - - -### `update` replace status of the specified PodDisruptionBudget - -#### HTTP Request - -PUT /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">PodDisruptionBudget, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -201 (}}">PodDisruptionBudget): Created - -401: Unauthorized - - -### `patch` partially update the specified PodDisruptionBudget - -#### HTTP Request - -PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -401: Unauthorized - - -### `patch` partially update status of the specified PodDisruptionBudget - -#### HTTP Request - -PATCH /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">PodDisruptionBudget): OK - -401: Unauthorized - - -### `delete` delete a PodDisruptionBudget - -#### HTTP Request - -DELETE /apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the PodDisruptionBudget - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Status): OK - -202 (}}">Status): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of PodDisruptionBudget - -#### HTTP Request - -DELETE 
/apis/policy/v1beta1/namespaces/{namespace}/poddisruptionbudgets - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md index a499f1dc6d18d..b6050390e60de 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/pod-security-policy-v1beta1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container." title: "PodSecurityPolicy v1beta1" weight: 5 +auto_generated: true --- + + `apiVersion: policy/v1beta1` `import "k8s.io/api/policy/v1beta1"` diff --git a/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md index 00818ec921a0f..2f66235c0f562 100644 --- a/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md +++ b/content/en/docs/reference/kubernetes-api/policy-resources/resource-quota-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ResourceQuota sets aggregate quota restrictions enforced per namespace." title: "ResourceQuota" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/service-resources/_index.md b/content/en/docs/reference/kubernetes-api/service-resources/_index.md index 0ab511b5a4422..4f2d9118d9602 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Service Resources" weight: 2 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md index d29613093751e..a405d5066077b 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "EndpointSlice represents a subset of the endpoints that implement a service." 
title: "EndpointSlice" weight: 3 +auto_generated: true --- + + `apiVersion: discovery.k8s.io/v1` `import "k8s.io/api/discovery/v1"` diff --git a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1beta1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1beta1.md deleted file mode 100644 index 1ad467b022e85..0000000000000 --- a/content/en/docs/reference/kubernetes-api/service-resources/endpoint-slice-v1beta1.md +++ /dev/null @@ -1,618 +0,0 @@ ---- -api_metadata: - apiVersion: "discovery.k8s.io/v1beta1" - import: "k8s.io/api/discovery/v1beta1" - kind: "EndpointSlice" -content_type: "api_reference" -description: "EndpointSlice represents a subset of the endpoints that implement a service." -title: "EndpointSlice v1beta1" -weight: 3 ---- - -`apiVersion: discovery.k8s.io/v1beta1` - -`import "k8s.io/api/discovery/v1beta1"` - - -## EndpointSlice {#EndpointSlice} - -EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints. - -
- -- **apiVersion**: discovery.k8s.io/v1beta1 - - -- **kind**: EndpointSlice - - -- **metadata** (}}">ObjectMeta) - - Standard object's metadata. - -- **addressType** (string), required - - addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name. - -- **endpoints** ([]Endpoint), required - - *Atomic: will be replaced during a merge* - - endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints. - - - *Endpoint represents a single logical "backend" implementing a service.* - - - **endpoints.addresses** ([]string), required - - *Set: unique values will be kept during a merge* - - addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. - - - **endpoints.conditions** (EndpointConditions) - - conditions contains information about the current status of the endpoint. - - - *EndpointConditions represents the current condition of an endpoint.* - - - **endpoints.conditions.ready** (boolean) - - ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be "true" for terminating endpoints. - - - **endpoints.conditions.serving** (boolean) - - serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. - - - **endpoints.conditions.terminating** (boolean) - - terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating. This field can be enabled with the EndpointSliceTerminatingCondition feature gate. - - - **endpoints.hostname** (string) - - hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation. - - - **endpoints.nodeName** (string) - - nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node. This field can be enabled with the EndpointSliceNodeName feature gate. - - - **endpoints.targetRef** (}}">ObjectReference) - - targetRef is a reference to a Kubernetes object that represents this endpoint. - - - **endpoints.topology** (map[string]string) - - topology contains arbitrary topology information associated with the endpoint. These key/value pairs must conform with the label format. https://kubernetes.io/docs/concepts/overview/working-with-objects/labels Topology may include a maximum of 16 key/value pairs. 
This includes, but is not limited to the following well known keys: * kubernetes.io/hostname: the value indicates the hostname of the node - where the endpoint is located. This should match the corresponding - node label. - * topology.kubernetes.io/zone: the value indicates the zone where the - endpoint is located. This should match the corresponding node label. - * topology.kubernetes.io/region: the value indicates the region where the - endpoint is located. This should match the corresponding node label. - This field is deprecated and will be removed in future api versions. - -- **ports** ([]EndpointPort) - - *Atomic: will be replaced during a merge* - - ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates "all ports". Each slice may include a maximum of 100 ports. - - - *EndpointPort represents a Port used by an EndpointSlice* - - - **ports.port** (int32) - - The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer. - - - **ports.protocol** (string) - - The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP. - - - **ports.name** (string) - - The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string. - - - **ports.appProtocol** (string) - - The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. - - - - - -## EndpointSliceList {#EndpointSliceList} - -EndpointSliceList represents a list of endpoint slices - -
- -- **apiVersion**: discovery.k8s.io/v1beta1 - - -- **kind**: EndpointSliceList - - -- **metadata** (}}">ListMeta) - - Standard list metadata. - -- **items** ([]}}">EndpointSlice), required - - List of endpoint slices - - - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read the specified EndpointSlice - -#### HTTP Request - -GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EndpointSlice - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EndpointSlice): OK - -401: Unauthorized - - -### `list` list or watch objects of kind EndpointSlice - -#### HTTP Request - -GET /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">EndpointSliceList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind EndpointSlice - -#### HTTP Request - -GET /apis/discovery.k8s.io/v1beta1/endpointslices - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">EndpointSliceList): OK - -401: Unauthorized - - -### `create` create an EndpointSlice - -#### HTTP Request - -POST /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">EndpointSlice, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EndpointSlice): OK - -201 (}}">EndpointSlice): Created - -202 (}}">EndpointSlice): Accepted - -401: Unauthorized - - -### `update` replace the specified EndpointSlice - -#### HTTP Request - -PUT /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EndpointSlice - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">EndpointSlice, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EndpointSlice): OK - -201 (}}">EndpointSlice): 
Created - -401: Unauthorized - - -### `patch` partially update the specified EndpointSlice - -#### HTTP Request - -PATCH /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EndpointSlice - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">EndpointSlice): OK - -401: Unauthorized - - -### `delete` delete an EndpointSlice - -#### HTTP Request - -DELETE /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the EndpointSlice - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Status): OK - -202 (}}">Status): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of EndpointSlice - -#### HTTP Request - -DELETE /apis/discovery.k8s.io/v1beta1/namespaces/{namespace}/endpointslices - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md index 223f9d2d54077..acc7d938f9139 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/endpoints-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Endpoints is a collection of endpoints that implement the actual service." 
title: "Endpoints" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md index a6dd55e7c786e..c549ac2f83d7c 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "IngressClass represents the class of the Ingress, referenced by the Ingress Spec." title: "IngressClass" weight: 5 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` diff --git a/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md index 6f8ebf1aad5b6..00fe1bb61797a 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/ingress-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend." title: "Ingress" weight: 4 +auto_generated: true --- + + `apiVersion: networking.k8s.io/v1` `import "k8s.io/api/networking/v1"` diff --git a/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md index ab17bf3f0ed65..8d643c688aac9 100644 --- a/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md +++ b/content/en/docs/reference/kubernetes-api/service-resources/service-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy." title: "Service" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/_index.md b/content/en/docs/reference/kubernetes-api/workload-resources/_index.md index 85d1bfa44f2b3..7c4e44d99a005 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/_index.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/_index.md @@ -1,4 +1,17 @@ --- title: "Workload Resources" weight: 1 +auto_generated: true --- + + + diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/container.md b/content/en/docs/reference/kubernetes-api/workload-resources/container.md deleted file mode 100644 index d87cd422c84d2..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workload-resources/container.md +++ /dev/null @@ -1,774 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "Container" -content_type: "api_reference" -description: "A single application container that you want to run within a pod." -title: "Container" -weight: 2 ---- - - - -`import "k8s.io/api/core/v1"` - - -## Container {#Container} - -A single application container that you want to run within a pod. - -
- -- **name** (string), required - - Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. - - - -### Image - - -- **image** (string) - - Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. - -- **imagePullPolicy** (string) - - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - -### Entrypoint - - -- **command** ([]string) - - Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **args** ([]string) - - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **workingDir** (string) - - Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. - -### Ports - - -- **ports** ([]ContainerPort) - - *Patch strategy: merge on key `containerPort`* - - *Map: unique values on keys `containerPort, protocol` will be kept during a merge* - - List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. - - - *ContainerPort represents a network port in a single container.* - - - **ports.containerPort** (int32), required - - Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. - - - **ports.hostIP** (string) - - What host IP to bind the external port to. - - - **ports.hostPort** (int32) - - Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. - - - **ports.name** (string) - - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. 
- - - **ports.protocol** (string) - - Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". - -### Environment variables - - -- **env** ([]EnvVar) - - *Patch strategy: merge on key `name`* - - List of environment variables to set in the container. Cannot be updated. - - - *EnvVar represents an environment variable present in a Container.* - - - **env.name** (string), required - - Name of the environment variable. Must be a C_IDENTIFIER. - - - **env.value** (string) - - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". - - - **env.valueFrom** (EnvVarSource) - - Source for the environment variable's value. Cannot be used if value is not empty. - - - *EnvVarSource represents a source for the value of an EnvVar.* - - - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) - - Selects a key of a ConfigMap. - - - *Selects a key from a ConfigMap.* - - - **env.valueFrom.configMapKeyRef.key** (string), required - - The key to select. - - - **env.valueFrom.configMapKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.configMapKeyRef.optional** (boolean) - - Specify whether the ConfigMap or its key must be defined - - - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) - - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\']`, `metadata.annotations['\']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - - - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - - - **env.valueFrom.secretKeyRef** (SecretKeySelector) - - Selects a key of a secret in the pod's namespace - - - *SecretKeySelector selects a key of a Secret.* - - - **env.valueFrom.secretKeyRef.key** (string), required - - The key of the secret to select from. Must be a valid secret key. - - - **env.valueFrom.secretKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.secretKeyRef.optional** (boolean) - - Specify whether the Secret or its key must be defined - -- **envFrom** ([]EnvFromSource) - - List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. - - - *EnvFromSource represents the source of a set of ConfigMaps* - - - **envFrom.configMapRef** (ConfigMapEnvSource) - - The ConfigMap to select from - - - *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. 
- - The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.configMapRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.configMapRef.optional** (boolean) - - Specify whether the ConfigMap must be defined - - - **envFrom.prefix** (string) - - An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - - - **envFrom.secretRef** (SecretEnvSource) - - The Secret to select from - - - *SecretEnvSource selects a Secret to populate the environment variables with. - - The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.secretRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.secretRef.optional** (boolean) - - Specify whether the Secret must be defined - -### Volumes - - -- **volumeMounts** ([]VolumeMount) - - *Patch strategy: merge on key `mountPath`* - - Pod volumes to mount into the container's filesystem. Cannot be updated. - - - *VolumeMount describes a mounting of a Volume within a container.* - - - **volumeMounts.mountPath** (string), required - - Path within the container at which the volume should be mounted. Must not contain ':'. - - - **volumeMounts.name** (string), required - - This must match the Name of a Volume. - - - **volumeMounts.mountPropagation** (string) - - mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - - - **volumeMounts.readOnly** (boolean) - - Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - - - **volumeMounts.subPath** (string) - - Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - - - **volumeMounts.subPathExpr** (string) - - Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - -- **volumeDevices** ([]VolumeDevice) - - *Patch strategy: merge on key `devicePath`* - - volumeDevices is the list of block devices to be used by the container. - - - *volumeDevice describes a mapping of a raw block device within a container.* - - - **volumeDevices.devicePath** (string), required - - devicePath is the path inside of the container that the device will be mapped to. - - - **volumeDevices.name** (string), required - - name must match the name of a persistentVolumeClaim in the pod - -### Resources - - -- **resources** (ResourceRequirements) - - Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - - - *ResourceRequirements describes the compute resource requirements.* - - - **resources.limits** (map[string]}}">Quantity) - - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - - - **resources.requests** (map[string]}}">Quantity) - - Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - -### Lifecycle - - -- **lifecycle** (Lifecycle) - - Actions that the management system should take in response to container lifecycle events. Cannot be updated. - - - *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* - - - **lifecycle.postStart** (Handler) - - PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.postStart.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.postStart.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.postStart.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **lifecycle.preStop** (Handler) - - PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.preStop.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.preStop.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.preStop.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - -- **terminationMessagePath** (string) - - Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. - -- **terminationMessagePolicy** (string) - - Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. - -- **livenessProbe** (Probe) - - Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **livenessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **livenessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **livenessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **livenessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **livenessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **livenessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -- **readinessProbe** (Probe) - - Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **readinessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **readinessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **readinessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **readinessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **readinessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **readinessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -### Security Context - - -- **securityContext** (SecurityContext) - - Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - - - *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* - - - **securityContext.runAsUser** (int64) - - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsNonRoot** (boolean) - - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsGroup** (int64) - - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.readOnlyRootFilesystem** (boolean) - - Whether this container has a read-only root filesystem. Default is false. - - - **securityContext.procMount** (string) - - procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. - - - **securityContext.privileged** (boolean) - - Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. - - - **securityContext.allowPrivilegeEscalation** (boolean) - - AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN - - - **securityContext.capabilities** (Capabilities) - - The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. 
- - - *Adds and removes POSIX capabilities from running containers.* - - - **securityContext.capabilities.add** ([]string) - - Added capabilities - - - **securityContext.capabilities.drop** ([]string) - - Removed capabilities - - - **securityContext.seccompProfile** (SeccompProfile) - - The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. - - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* - - - **securityContext.seccompProfile.type** (string), required - - type indicates which kind of seccomp profile will be applied. Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. - - - **securityContext.seccompProfile.localhostProfile** (string) - - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - - **securityContext.seLinuxOptions** (SELinuxOptions) - - The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *SELinuxOptions are the labels to be applied to the container* - - - **securityContext.seLinuxOptions.level** (string) - - Level is SELinux level label that applies to the container. - - - **securityContext.seLinuxOptions.role** (string) - - Role is a SELinux role label that applies to the container. - - - **securityContext.seLinuxOptions.type** (string) - - Type is a SELinux type label that applies to the container. - - - **securityContext.seLinuxOptions.user** (string) - - User is a SELinux user label that applies to the container. - - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - - The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) - - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - - **securityContext.windowsOptions.runAsUserName** (string) - - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - -### Debugging - - -- **stdin** (boolean) - - Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. 
- -- **stdinOnce** (boolean) - - Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false - -- **tty** (boolean) - - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. - -### Beta level - - -- **startupProbe** (Probe) - - StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **startupProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **startupProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **startupProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **startupProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **startupProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **startupProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - - - -## ContainerStatus {#ContainerStatus} - -ContainerStatus contains details for the current status of this container. - -
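These status fields are reported by the kubelet rather than set by the user. A trimmed, hypothetical example of how they surface under `status.containerStatuses` when a Pod is read back from the API:

```yaml
status:
  containerStatuses:
  - name: app
    image: nginx:1.20
    imageID: docker-pullable://nginx@sha256:<digest>   # placeholder
    containerID: docker://<id>                         # placeholder
    ready: true
    started: true
    restartCount: 1
    state:
      running:
        startedAt: "2021-04-01T10:00:00Z"
    lastState:
      terminated:
        exitCode: 1
        reason: Error
        startedAt: "2021-04-01T09:55:00Z"
        finishedAt: "2021-04-01T09:59:30Z"
```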
- -- **name** (string), required - - This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated. - -- **image** (string), required - - The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images - -- **imageID** (string), required - - ImageID of the container's image. - -- **containerID** (string) - - Container's ID in the format 'docker://\'. - -- **state** (ContainerState) - - Details about the container's current condition. - - - *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* - - - **state.running** (ContainerStateRunning) - - Details about a running container - - - *ContainerStateRunning is a running state of a container.* - - - **state.running.startedAt** (Time) - - Time at which the container was last (re-)started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated** (ContainerStateTerminated) - - Details about a terminated container - - - *ContainerStateTerminated is a terminated state of a container.* - - - **state.terminated.containerID** (string) - - Container's ID in the format 'docker://\' - - - **state.terminated.exitCode** (int32), required - - Exit status from the last termination of the container - - - **state.terminated.startedAt** (Time) - - Time at which previous execution of the container started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated.finishedAt** (Time) - - Time at which the container last terminated - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **state.terminated.message** (string) - - Message regarding the last termination of the container - - - **state.terminated.reason** (string) - - (brief) reason from the last termination of the container - - - **state.terminated.signal** (int32) - - Signal from the last termination of the container - - - **state.waiting** (ContainerStateWaiting) - - Details about a waiting container - - - *ContainerStateWaiting is a waiting state of a container.* - - - **state.waiting.message** (string) - - Message regarding why the container is not yet running. - - - **state.waiting.reason** (string) - - (brief) reason the container is not yet running. - -- **lastState** (ContainerState) - - Details about the container's last termination condition. - - - *ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.* - - - **lastState.running** (ContainerStateRunning) - - Details about a running container - - - *ContainerStateRunning is a running state of a container.* - - - **lastState.running.startedAt** (Time) - - Time at which the container was last (re-)started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated** (ContainerStateTerminated) - - Details about a terminated container - - - *ContainerStateTerminated is a terminated state of a container.* - - - **lastState.terminated.containerID** (string) - - Container's ID in the format 'docker://\' - - - **lastState.terminated.exitCode** (int32), required - - Exit status from the last termination of the container - - - **lastState.terminated.startedAt** (Time) - - Time at which previous execution of the container started - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated.finishedAt** (Time) - - Time at which the container last terminated - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - **lastState.terminated.message** (string) - - Message regarding the last termination of the container - - - **lastState.terminated.reason** (string) - - (brief) reason from the last termination of the container - - - **lastState.terminated.signal** (int32) - - Signal from the last termination of the container - - - **lastState.waiting** (ContainerStateWaiting) - - Details about a waiting container - - - *ContainerStateWaiting is a waiting state of a container.* - - - **lastState.waiting.message** (string) - - Message regarding why the container is not yet running. - - - **lastState.waiting.reason** (string) - - (brief) reason the container is not yet running. - -- **ready** (boolean), required - - Specifies whether the container has passed its readiness probe. - -- **restartCount** (int32), required - - The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC. - -- **started** (boolean) - - Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined. - - - - - diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md index d13ae1b404536..23b324fb6e198 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/controller-revision-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ControllerRevision implements an immutable snapshot of state data." 
title: "ControllerRevision" weight: 8 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md index f6196ef14d4b5..a518d1f72a76c 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "CronJob represents the configuration of a single cron job." title: "CronJob" weight: 11 +auto_generated: true --- + + `apiVersion: batch/v1` `import "k8s.io/api/batch/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1beta1.md b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1beta1.md deleted file mode 100644 index c99aa57998771..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v1beta1.md +++ /dev/null @@ -1,746 +0,0 @@ ---- -api_metadata: - apiVersion: "batch/v1beta1" - import: "k8s.io/api/batch/v1beta1" - kind: "CronJob" -content_type: "api_reference" -description: "CronJob represents the configuration of a single cron job." -title: "CronJob v1beta1" -weight: 12 ---- - -`apiVersion: batch/v1beta1` - -`import "k8s.io/api/batch/v1beta1"` - - -## CronJob {#CronJob} - -CronJob represents the configuration of a single cron job. - -
- -- **apiVersion**: batch/v1beta1 - - -- **kind**: CronJob - - -- **metadata** (}}">ObjectMeta) - - Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **spec** (}}">CronJobSpec) - - Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **status** (}}">CronJobStatus) - - Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - -## CronJobSpec {#CronJobSpec} - -CronJobSpec describes how the job execution will look like and when it will actually run. - -
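A sketch of how the optional spec fields documented below fit together; the jobTemplate is trimmed (see the manifest sketch above) and the values are illustrative, not recommendations:

```yaml
spec:
  schedule: "0 2 * * *"
  concurrencyPolicy: Forbid          # Allow (default) | Forbid | Replace
  startingDeadlineSeconds: 300
  suspend: false
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  # jobTemplate omitted for brevity; it is required in a real object
```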
- -- **jobTemplate** (JobTemplateSpec), required - - Specifies the job that will be created when executing a CronJob. - - - *JobTemplateSpec describes the data a Job should have when created from a template* - - - **jobTemplate.metadata** (}}">ObjectMeta) - - Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - - - **jobTemplate.spec** (}}">JobSpec) - - Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **schedule** (string), required - - The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - -- **concurrencyPolicy** (string) - - Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one - -- **startingDeadlineSeconds** (int64) - - Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. - -- **suspend** (boolean) - - This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. - -- **successfulJobsHistoryLimit** (int32) - - The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. - -- **failedJobsHistoryLimit** (int32) - - The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. - - - - - -## CronJobStatus {#CronJobStatus} - -CronJobStatus represents the current state of a cron job. - -
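The status is maintained by the controller; a hypothetical example of how it can look while one Job created by this CronJob is still running:

```yaml
status:
  active:
  - apiVersion: batch/v1
    kind: Job
    name: example-cron-27030240    # placeholder child Job name
    namespace: default
  lastScheduleTime: "2021-04-01T02:00:00Z"
```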
- -- **active** ([]}}">ObjectReference) - - A list of pointers to currently running jobs. - -- **lastScheduleTime** (Time) - - Information when was the last time the job was successfully scheduled. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - - - -## CronJobList {#CronJobList} - -CronJobList is a collection of cron jobs. - -
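A list object is what the collection endpoints below return; a trimmed, hypothetical example:

```yaml
apiVersion: batch/v1beta1
kind: CronJobList
metadata:
  resourceVersion: "123456"        # placeholder
items:
- metadata:
    name: example-cron
  spec:
    schedule: "*/5 * * * *"
    # jobTemplate trimmed for brevity
```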
- -- **apiVersion**: batch/v1beta1 - - -- **kind**: CronJobList - - -- **metadata** (}}">ListMeta) - - Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **items** ([]}}">CronJob), required - - items is the list of CronJobs. - - - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read the specified CronJob - -#### HTTP Request - -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `get` read status of the specified CronJob - -#### HTTP Request - -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v1beta1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v1beta1/cronjobs - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `create` create a CronJob - -#### HTTP Request - -POST /apis/batch/v1beta1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -202 (}}">CronJob): Accepted - -401: Unauthorized - - -### `update` replace the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - 
-- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `update` replace status of the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `patch` partially update the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `patch` partially update status of the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `delete` delete a CronJob - -#### HTTP Request - -DELETE /apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Status): OK - -202 (}}">Status): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of CronJob - -#### HTTP Request - -DELETE /apis/batch/v1beta1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): 
string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v2alpha1.md b/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v2alpha1.md deleted file mode 100644 index fc6a933131e32..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workload-resources/cron-job-v2alpha1.md +++ /dev/null @@ -1,746 +0,0 @@ ---- -api_metadata: - apiVersion: "batch/v2alpha1" - import: "k8s.io/api/batch/v2alpha1" - kind: "CronJob" -content_type: "api_reference" -description: "CronJob represents the configuration of a single cron job." -title: "CronJob v2alpha1" -weight: 13 ---- - -`apiVersion: batch/v2alpha1` - -`import "k8s.io/api/batch/v2alpha1"` - - -## CronJob {#CronJob} - -CronJob represents the configuration of a single cron job. - -
- -- **apiVersion**: batch/v2alpha1 - - -- **kind**: CronJob - - -- **metadata** (}}">ObjectMeta) - - Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **spec** (}}">CronJobSpec) - - Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **status** (}}">CronJobStatus) - - Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - - - - - -## CronJobSpec {#CronJobSpec} - -CronJobSpec describes how the job execution will look like and when it will actually run. - -
- -- **jobTemplate** (JobTemplateSpec), required - - Specifies the job that will be created when executing a CronJob. - - - *JobTemplateSpec describes the data a Job should have when created from a template* - - - **jobTemplate.metadata** (}}">ObjectMeta) - - Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - - - **jobTemplate.spec** (}}">JobSpec) - - Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - -- **schedule** (string), required - - The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - -- **concurrencyPolicy** (string) - - Specifies how to treat concurrent executions of a Job. Valid values are: - "Allow" (default): allows CronJobs to run concurrently; - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - "Replace": cancels currently running job and replaces it with a new one - -- **failedJobsHistoryLimit** (int32) - - The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. - -- **startingDeadlineSeconds** (int64) - - Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. - -- **successfulJobsHistoryLimit** (int32) - - The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. - -- **suspend** (boolean) - - This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. - - - - - -## CronJobStatus {#CronJobStatus} - -CronJobStatus represents the current state of a cron job. - -
- -- **active** ([]}}">ObjectReference) - - A list of pointers to currently running jobs. - -- **lastScheduleTime** (Time) - - Information when was the last time the job was successfully scheduled. - - - *Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.* - - - - - -## CronJobList {#CronJobList} - -CronJobList is a collection of cron jobs. - -
- -- **apiVersion**: batch/v2alpha1 - - -- **kind**: CronJobList - - -- **metadata** (}}">ListMeta) - - Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - -- **items** ([]}}">CronJob), required - - items is the list of CronJobs. - - - - - -## Operations {#Operations} - - - -
- - - - - - -### `get` read the specified CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `get` read status of the specified CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `list` list or watch objects of kind CronJob - -#### HTTP Request - -GET /apis/batch/v2alpha1/cronjobs - -#### Parameters - - -- **allowWatchBookmarks** (*in query*): boolean - - }}">allowWatchBookmarks - - -- **continue** (*in query*): string - - }}">continue - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** (*in query*): string - - }}">pretty - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - -- **watch** (*in query*): boolean - - }}">watch - - - -#### Response - - -200 (}}">CronJobList): OK - -401: Unauthorized - - -### `create` create a CronJob - -#### HTTP Request - -POST /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -202 (}}">CronJob): Accepted - -401: Unauthorized - - -### `update` replace the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - 
- - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `update` replace status of the specified CronJob - -#### HTTP Request - -PUT /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">CronJob, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -201 (}}">CronJob): Created - -401: Unauthorized - - -### `patch` partially update the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `patch` partially update status of the specified CronJob - -#### HTTP Request - -PATCH /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name}/status - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">Patch, required - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldManager** (*in query*): string - - }}">fieldManager - - -- **force** (*in query*): boolean - - }}">force - - -- **pretty** (*in query*): string - - }}">pretty - - - -#### Response - - -200 (}}">CronJob): OK - -401: Unauthorized - - -### `delete` delete a CronJob - -#### HTTP Request - -DELETE /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs/{name} - -#### Parameters - - -- **name** (*in path*): string, required - - name of the CronJob - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **pretty** (*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - - -#### Response - - -200 (}}">Status): OK - -202 (}}">Status): Accepted - -401: Unauthorized - - -### `deletecollection` delete collection of CronJob - -#### HTTP Request - -DELETE /apis/batch/v2alpha1/namespaces/{namespace}/cronjobs - -#### Parameters - - -- **namespace** (*in path*): string, required - - }}">namespace - - -- **body**: }}">DeleteOptions - - - - -- **continue** (*in query*): string - - }}">continue - - -- **dryRun** (*in query*): string - - }}">dryRun - - -- **fieldSelector** (*in query*): string - - }}">fieldSelector - - -- **gracePeriodSeconds** (*in query*): integer - - }}">gracePeriodSeconds - - -- **labelSelector** (*in query*): string - - }}">labelSelector - - -- **limit** (*in query*): integer - - }}">limit - - -- **pretty** 
(*in query*): string - - }}">pretty - - -- **propagationPolicy** (*in query*): string - - }}">propagationPolicy - - -- **resourceVersion** (*in query*): string - - }}">resourceVersion - - -- **resourceVersionMatch** (*in query*): string - - }}">resourceVersionMatch - - -- **timeoutSeconds** (*in query*): integer - - }}">timeoutSeconds - - - -#### Response - - -200 (}}">Status): OK - -401: Unauthorized - diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md index a40aa4c5920b6..2a313f533fa6a 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/daemon-set-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "DaemonSet represents the configuration of a daemon set." title: "DaemonSet" weight: 9 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md index b455760a4c57d..f304fd92391e8 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/deployment-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Deployment enables declarative updates for Pods and ReplicaSets." title: "Deployment" weight: 6 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-container.md b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-container.md deleted file mode 100644 index 7355af9df88d5..0000000000000 --- a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-container.md +++ /dev/null @@ -1,571 +0,0 @@ ---- -api_metadata: - apiVersion: "" - import: "k8s.io/api/core/v1" - kind: "EphemeralContainer" -content_type: "api_reference" -description: "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging." -title: "EphemeralContainer" -weight: 3 ---- - - - -`import "k8s.io/api/core/v1"` - - -An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag. - -
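Because ephemeral containers can only be added through the Pod's `ephemeralcontainers` subresource, the quickest way to try the behaviour described above is `kubectl debug`, which drives that subresource for you. A minimal sketch, assuming a running Pod named `mypod` with a container named `mycontainer` (both names are illustrative) and a cluster with the `EphemeralContainers` feature gate enabled:

```
# Attach an ephemeral debugging container to an existing Pod.
kubectl debug -it mypod --image=busybox --target=mycontainer

# The ephemeral container then appears in the Pod spec, as the page notes.
kubectl get pod mypod -o jsonpath='{.spec.ephemeralContainers[*].name}'
```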
- -- **name** (string), required - - Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers. - -- **targetContainerName** (string) - - If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature. - - - -### Image {#Image} - - -- **image** (string) - - Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images - -- **imagePullPolicy** (string) - - Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - -### Entrypoint {#Entrypoint} - - -- **command** ([]string) - - Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **args** ([]string) - - Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - -- **workingDir** (string) - - Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. - -### Environment variables {#Environment-variables} - - -- **env** ([]EnvVar) - - *Patch strategy: merge on key `name`* - - List of environment variables to set in the container. Cannot be updated. - - - *EnvVar represents an environment variable present in a Container.* - - - **env.name** (string), required - - Name of the environment variable. Must be a C_IDENTIFIER. - - - **env.value** (string) - - Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "". - - - **env.valueFrom** (EnvVarSource) - - Source for the environment variable's value. Cannot be used if value is not empty. - - - *EnvVarSource represents a source for the value of an EnvVar.* - - - **env.valueFrom.configMapKeyRef** (ConfigMapKeySelector) - - Selects a key of a ConfigMap. 
- - - *Selects a key from a ConfigMap.* - - - **env.valueFrom.configMapKeyRef.key** (string), required - - The key to select. - - - **env.valueFrom.configMapKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.configMapKeyRef.optional** (boolean) - - Specify whether the ConfigMap or its key must be defined - - - **env.valueFrom.fieldRef** (}}">ObjectFieldSelector) - - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\']`, `metadata.annotations['\']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. - - - **env.valueFrom.resourceFieldRef** (}}">ResourceFieldSelector) - - Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - - - **env.valueFrom.secretKeyRef** (SecretKeySelector) - - Selects a key of a secret in the pod's namespace - - - *SecretKeySelector selects a key of a Secret.* - - - **env.valueFrom.secretKeyRef.key** (string), required - - The key of the secret to select from. Must be a valid secret key. - - - **env.valueFrom.secretKeyRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **env.valueFrom.secretKeyRef.optional** (boolean) - - Specify whether the Secret or its key must be defined - -- **envFrom** ([]EnvFromSource) - - List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. - - - *EnvFromSource represents the source of a set of ConfigMaps* - - - **envFrom.configMapRef** (ConfigMapEnvSource) - - The ConfigMap to select from - - - *ConfigMapEnvSource selects a ConfigMap to populate the environment variables with. - - The contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.configMapRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.configMapRef.optional** (boolean) - - Specify whether the ConfigMap must be defined - - - **envFrom.prefix** (string) - - An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. - - - **envFrom.secretRef** (SecretEnvSource) - - The Secret to select from - - - *SecretEnvSource selects a Secret to populate the environment variables with. - - The contents of the target Secret's Data field will represent the key-value pairs as environment variables.* - - - **envFrom.secretRef.name** (string) - - Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - - - **envFrom.secretRef.optional** (boolean) - - Specify whether the Secret must be defined - -### Volumes {#Volumes} - - -- **volumeMounts** ([]VolumeMount) - - *Patch strategy: merge on key `mountPath`* - - Pod volumes to mount into the container's filesystem. Cannot be updated. 
- - - *VolumeMount describes a mounting of a Volume within a container.* - - - **volumeMounts.mountPath** (string), required - - Path within the container at which the volume should be mounted. Must not contain ':'. - - - **volumeMounts.name** (string), required - - This must match the Name of a Volume. - - - **volumeMounts.mountPropagation** (string) - - mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. - - - **volumeMounts.readOnly** (boolean) - - Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. - - - **volumeMounts.subPath** (string) - - Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). - - - **volumeMounts.subPathExpr** (string) - - Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. - -- **volumeDevices** ([]VolumeDevice) - - *Patch strategy: merge on key `devicePath`* - - volumeDevices is the list of block devices to be used by the container. - - - *volumeDevice describes a mapping of a raw block device within a container.* - - - **volumeDevices.devicePath** (string), required - - devicePath is the path inside of the container that the device will be mapped to. - - - **volumeDevices.name** (string), required - - name must match the name of a persistentVolumeClaim in the pod - -### Lifecycle {#Lifecycle} - - -- **terminationMessagePath** (string) - - Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. - -- **terminationMessagePolicy** (string) - - Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. - -### Debugging {#Debugging} - - -- **stdin** (boolean) - - Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. - -- **stdinOnce** (boolean) - - Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false - -- **tty** (boolean) - - Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. - -### Not allowed {#Not-allowed} - - -- **ports** ([]ContainerPort) - - Ports are not allowed for ephemeral containers. - - - *ContainerPort represents a network port in a single container.* - - - **ports.containerPort** (int32), required - - Number of port to expose on the pod's IP address. This must be a valid port number, 0 \< x \< 65536. - - - **ports.hostIP** (string) - - What host IP to bind the external port to. - - - **ports.hostPort** (int32) - - Number of port to expose on the host. If specified, this must be a valid port number, 0 \< x \< 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. - - - **ports.name** (string) - - If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. - - - **ports.protocol** (string) - - Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". - -- **resources** (ResourceRequirements) - - Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod. - - - *ResourceRequirements describes the compute resource requirements.* - - - **resources.limits** (map[string]}}">Quantity) - - Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - - - **resources.requests** (map[string]}}">Quantity) - - Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - -- **lifecycle** (Lifecycle) - - Lifecycle is not allowed for ephemeral containers. - - - *Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.* - - - **lifecycle.postStart** (Handler) - - PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.postStart.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.postStart.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.postStart.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **lifecycle.preStop** (Handler) - - PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. 
The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - - - *Handler defines a specific action that should be taken* - - - **lifecycle.preStop.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **lifecycle.preStop.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **lifecycle.preStop.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - -- **livenessProbe** (Probe) - - Probes are not allowed for ephemeral containers. - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **livenessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **livenessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **livenessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **livenessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **livenessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **livenessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **livenessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -- **readinessProbe** (Probe) - - Probes are not allowed for ephemeral containers. - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **readinessProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **readinessProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. - - - **readinessProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **readinessProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. 
- - - **readinessProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **readinessProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **readinessProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - -- **securityContext** (SecurityContext) - - SecurityContext is not allowed for ephemeral containers. - - - *SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.* - - - **securityContext.runAsUser** (int64) - - The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsNonRoot** (boolean) - - Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.runAsGroup** (int64) - - The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - **securityContext.readOnlyRootFilesystem** (boolean) - - Whether this container has a read-only root filesystem. Default is false. - - - **securityContext.procMount** (string) - - procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. - - - **securityContext.privileged** (boolean) - - Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. - - - **securityContext.allowPrivilegeEscalation** (boolean) - - AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN - - - **securityContext.capabilities** (Capabilities) - - The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. - - - *Adds and removes POSIX capabilities from running containers.* - - - **securityContext.capabilities.add** ([]string) - - Added capabilities - - - **securityContext.capabilities.drop** ([]string) - - Removed capabilities - - - **securityContext.seccompProfile** (SeccompProfile) - - The seccomp options to use by this container. 
If seccomp options are provided at both the pod & container level, the container options override the pod options. - - - *SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.* - - - **securityContext.seccompProfile.type** (string), required - - type indicates which kind of seccomp profile will be applied. Valid options are: - - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. - - - **securityContext.seccompProfile.localhostProfile** (string) - - localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is "Localhost". - - - **securityContext.seLinuxOptions** (SELinuxOptions) - - The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *SELinuxOptions are the labels to be applied to the container* - - - **securityContext.seLinuxOptions.level** (string) - - Level is SELinux level label that applies to the container. - - - **securityContext.seLinuxOptions.role** (string) - - Role is a SELinux role label that applies to the container. - - - **securityContext.seLinuxOptions.type** (string) - - Type is a SELinux type label that applies to the container. - - - **securityContext.seLinuxOptions.user** (string) - - User is a SELinux user label that applies to the container. - - - **securityContext.windowsOptions** (WindowsSecurityContextOptions) - - The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - - - *WindowsSecurityContextOptions contain Windows-specific options and credentials.* - - - **securityContext.windowsOptions.gmsaCredentialSpec** (string) - - GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. - - - **securityContext.windowsOptions.gmsaCredentialSpecName** (string) - - GMSACredentialSpecName is the name of the GMSA credential spec to use. - - - **securityContext.windowsOptions.runAsUserName** (string) - - The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - -- **startupProbe** (Probe) - - Probes are not allowed for ephemeral containers. - - - *Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.* - - - **startupProbe.exec** (}}">ExecAction) - - One and only one of the following should be specified. Exec specifies the action to take. - - - **startupProbe.httpGet** (}}">HTTPGetAction) - - HTTPGet specifies the http request to perform. 
- - - **startupProbe.tcpSocket** (}}">TCPSocketAction) - - TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported - - - **startupProbe.initialDelaySeconds** (int32) - - Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.periodSeconds** (int32) - - How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. - - - **startupProbe.timeoutSeconds** (int32) - - Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - - - **startupProbe.failureThreshold** (int32) - - Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. - - - **startupProbe.successThreshold** (int32) - - Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. - - - diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md index 05957e573a970..960fc9c8c4a17 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/ephemeral-containers-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "A list of ephemeral containers used with the Pod ephemeralcontainers subresource." title: "EphemeralContainers" weight: 2 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md index 42d61cb3b28de..a62d79e4f7507 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "configuration of a horizontal pod autoscaler." title: "HorizontalPodAutoscaler" weight: 12 +auto_generated: true --- + + `apiVersion: autoscaling/v1` `import "k8s.io/api/autoscaling/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md index 0ed4942e1b679..9d326e21315fa 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/horizontal-pod-autoscaler-v2beta2.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified." 
title: "HorizontalPodAutoscaler v2beta2" weight: 13 +auto_generated: true --- + + `apiVersion: autoscaling/v2beta2` `import "k8s.io/api/autoscaling/v2beta2"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md index da8c6422e1ac5..4848a36d4d67d 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/job-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Job represents the configuration of a single job." title: "Job" weight: 10 +auto_generated: true --- + + `apiVersion: batch/v1` `import "k8s.io/api/batch/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md index 5cbe3e4894997..9a0bbecab1468 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-template-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PodTemplate describes a template for creating copies of a predefined pod." title: "PodTemplate" weight: 3 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md index 6553afa2389f2..d16d9b0a8505e 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/pod-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "Pod is a collection of containers that can run on a host." title: "Pod" weight: 1 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md index df6c08603f873..020c25c05af3d 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/priority-class-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "PriorityClass defines mapping from a priority class name to the priority integer value." title: "PriorityClass" weight: 14 +auto_generated: true --- + + `apiVersion: scheduling.k8s.io/v1` `import "k8s.io/api/scheduling/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md index 223832c3826a5..7a344128c8d11 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replica-set-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ReplicaSet ensures that a specified number of pod replicas are running at any given time." 
title: "ReplicaSet" weight: 5 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md index 29bac77169c14..c14db8ece901f 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/replication-controller-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "ReplicationController represents the configuration of a replication controller." title: "ReplicationController" weight: 4 +auto_generated: true --- + + `apiVersion: v1` `import "k8s.io/api/core/v1"` diff --git a/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md index 7e69ec17bb669..ec097d7cced00 100644 --- a/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md +++ b/content/en/docs/reference/kubernetes-api/workload-resources/stateful-set-v1.md @@ -7,8 +7,20 @@ content_type: "api_reference" description: "StatefulSet represents a set of pods with consistent identities." title: "StatefulSet" weight: 7 +auto_generated: true --- + + `apiVersion: apps/v1` `import "k8s.io/api/apps/v1"` diff --git a/content/en/docs/reference/labels-annotations-taints.md b/content/en/docs/reference/labels-annotations-taints.md index 08861e58112a5..2d74362913310 100644 --- a/content/en/docs/reference/labels-annotations-taints.md +++ b/content/en/docs/reference/labels-annotations-taints.md @@ -69,7 +69,7 @@ Example: `controller.kubernetes.io/pod-deletion-cost=10` Used on: Pod -This annotation is used to set [Pod Deletion Cost](/docs/content/en/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) +This annotation is used to set [Pod Deletion Cost](/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost) which allows users to influence ReplicaSet downscaling order. The annotation parses into an `int32` type. ## beta.kubernetes.io/instance-type (deprecated) @@ -99,6 +99,18 @@ See [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< note >}} Starting in v1.17, this label is deprecated in favor of [topology.kubernetes.io/zone](#topologykubernetesiozone). {{< /note >}} +## statefulset.kubernetes.io/pod-name {#statefulsetkubernetesiopod-name} + +Example: + +`statefulset.kubernetes.io/pod-name=mystatefulset-7` + +When a StatefulSet controller creates a Pod for the StatefulSet, the control plane +sets this label on that Pod. The value of the label is the name of the Pod being created. + +See [Pod Name Label](/docs/concepts/workloads/controllers/statefulset/#pod-name-label) in the +StatefulSet topic for more details. + ## topology.kubernetes.io/region {#topologykubernetesioregion} Example: @@ -212,6 +224,14 @@ When a single IngressClass resource has this annotation set to `"true"`, new Ing {{< note >}} Starting in v1.18, this annotation is deprecated in favor of `spec.ingressClassName`. {{< /note >}} +## storageclass.kubernetes.io/is-default-class + +Example: `storageclass.kubernetes.io/is-default-class=true` + +Used on: StorageClass + +When a single StorageClass resource has this annotation set to `"true"`, new PersistentVolumeClaim resources without a class specified will be assigned this default class.
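Both additions above are easiest to see from the command line. A hedged sketch, assuming a StatefulSet named `mystatefulset` and a StorageClass named `standard` (the names are placeholders):

```
# Select the Pod that a StatefulSet controller created, using the
# statefulset.kubernetes.io/pod-name label the control plane sets.
kubectl get pods -l statefulset.kubernetes.io/pod-name=mystatefulset-7

# Mark an existing StorageClass as the cluster default; new
# PersistentVolumeClaims without a class are then assigned this class.
kubectl annotate storageclass standard storageclass.kubernetes.io/is-default-class="true"
```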
+ ## alpha.kubernetes.io/provided-node-ip Example: `alpha.kubernetes.io/provided-node-ip: "10.0.0.1"` diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md index ed03bf49c45d2..ff545b4042357 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md @@ -1,3 +1,16 @@ + + + +kubeadm: easily bootstrap a secure Kubernetes cluster ### Synopsis @@ -47,14 +60,14 @@ Example usage: -h, --help -help for kubeadm +

help for kubeadm

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md index 95b034be1bb44..af458320a5c90 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha.md @@ -1,3 +1,16 @@ + + + +Kubeadm experimental sub-commands ### Synopsis @@ -17,7 +30,7 @@ Kubeadm experimental sub-commands -h, --help -help for alpha +

help for alpha

@@ -38,7 +51,7 @@ Kubeadm experimental sub-commands --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md new file mode 100644 index 0000000000000..b678061bb0ea6 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig.md @@ -0,0 +1,63 @@ + + + +Kubeconfig file utilities + +### Synopsis + + +Kubeconfig file utilities. + +Alpha Disclaimer: this command is currently alpha. + +### Options + + ++++ + + + + + + + + + + +
-h, --help

help for kubeconfig

+ + + +### Options inherited from parent commands + + ++++ + + + + + + + + + + +
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.

+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md new file mode 100644 index 0000000000000..de07cd0f7d741 --- /dev/null +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_alpha_kubeconfig_user.md @@ -0,0 +1,102 @@ + + + +Output a kubeconfig file for an additional user + +### Synopsis + + +Output a kubeconfig file for an additional user. + +Alpha Disclaimer: this command is currently alpha. + +``` +kubeadm alpha kubeconfig user [flags] +``` + +### Examples + +``` + # Output a kubeconfig file for an additional user named foo using a kubeadm config file bar + kubeadm alpha kubeconfig user --client-name=foo --config=bar +``` + +### Options + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
--client-name string

The name of the user. It will be used as the CN if client certificates are created

--config string

Path to a kubeadm configuration file.

-h, --help

help for user

--org strings

The organizations of the client certificate. It will be used as the O if client certificates are created

--token string

The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates

+ + + +### Options inherited from parent commands + + ++++ + + + + + + + + + + +
--rootfs string

[EXPERIMENTAL] The path to the 'real' host root filesystem.
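Putting the documented flags together, a fuller invocation than the example above might look like this sketch (the user name, organization, and config file are placeholders):

```
# Write a kubeconfig for an extra user "developer" in organization "app-team",
# using the cluster settings from a kubeadm configuration file.
kubeadm alpha kubeconfig user \
  --client-name=developer \
  --org=app-team \
  --config=kubeadm-config.yaml > developer.kubeconfig
```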

+ + + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md index fef772e702650..9458702330d38 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs.md @@ -1,3 +1,16 @@ + + + +Commands related to handling kubernetes certificates ### Synopsis @@ -17,7 +30,7 @@ Commands related to handling kubernetes certificates -h, --help -help for certs +

help for certs

@@ -38,7 +51,7 @@ Commands related to handling kubernetes certificates --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md index 2de0366641d70..3f978e50fd383 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_certificate-key.md @@ -1,3 +1,16 @@ + + + +Generate certificate keys ### Synopsis @@ -27,7 +40,7 @@ kubeadm certs certificate-key [flags] -h, --help -help for certificate-key +

help for certificate-key

@@ -48,7 +61,7 @@ kubeadm certs certificate-key [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
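The generated key is normally passed straight back into other kubeadm commands. A minimal sketch of one common flow:

```
# Generate a random certificate key and keep it in a shell variable.
KEY=$(kubeadm certs certificate-key)

# Use the key so kubeadm init encrypts and uploads the control-plane
# certificates as a Secret that other control-plane nodes can fetch.
kubeadm init --upload-certs --certificate-key "$KEY"
```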

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md index 50a3cb8bf29c0..e321a5a0294dc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_check-expiration.md @@ -1,3 +1,16 @@ + + + +Check certificates expiration for a Kubernetes cluster ### Synopsis @@ -21,28 +34,28 @@ kubeadm certs check-expiration [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for check-expiration +

help for check-expiration

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -63,7 +76,7 @@ kubeadm certs check-expiration [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
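In practice the command is usually run as-is on a control-plane node; a short sketch, with the non-default path purely illustrative:

```
# Report the expiry of the certificates kubeadm manages, using the
# default certificate directory and admin kubeconfig listed above.
kubeadm certs check-expiration

# The same check against a non-default PKI directory.
kubeadm certs check-expiration --cert-dir=/custom/pki
```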

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md index 81b248e4f0a6c..52d21a2cff107 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_generate-csr.md @@ -1,3 +1,16 @@ + + + +Generate keys and certificate signing requests ### Synopsis @@ -32,28 +45,28 @@ kubeadm certs generate-csr [flags] --cert-dir string -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for generate-csr +

help for generate-csr

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

@@ -74,7 +87,7 @@ kubeadm certs generate-csr [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
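A sketch of how the flags above combine when preparing keys and CSRs for an external CA (the directories and config file are placeholders):

```
# Write private keys and CSRs for the kubeadm-managed certificates and
# kubeconfig files into custom locations, to be signed by an external CA.
kubeadm certs generate-csr \
  --cert-dir=/custom/pki \
  --kubeconfig-dir=/custom/kubernetes \
  --config=kubeadm-config.yaml
```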

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md index 8b627a595d28f..e728f9f06015c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew.md @@ -1,3 +1,16 @@ + + + +Renew certificates for a Kubernetes cluster ### Synopsis @@ -21,7 +34,7 @@ kubeadm certs renew [flags] -h, --help -help for renew +

help for renew

@@ -42,7 +55,7 @@ kubeadm certs renew [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md index 536164c45a7d7..2a81cee1d4072 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_admin.conf.md @@ -1,3 +1,16 @@ + + + +Renew the certificate embedded in the kubeconfig file for the admin to use and for kubeadm itself ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew admin.conf [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for admin.conf +

help for admin.conf

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew admin.conf [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md index 13c12ed0d0071..b948adb65cde5 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_all.md @@ -1,3 +1,16 @@ + + + +Renew all available certificates ### Synopsis @@ -21,42 +34,42 @@ kubeadm certs renew all [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for all +

help for all

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -77,7 +90,7 @@ kubeadm certs renew all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
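A sketch of a typical renewal pass on a control-plane node, assuming the defaults listed above:

```
# Renew every certificate kubeadm manages, then confirm the new expiry dates.
kubeadm certs renew all
kubeadm certs check-expiration

# Note: the control-plane static Pods must be restarted afterwards so the
# components pick up the renewed certificates.
```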

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md index fac6861a7c9dd..cb8fe0d5f7b88 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-etcd-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate the apiserver uses to access etcd ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver-etcd-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver-etcd-client +

help for apiserver-etcd-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver-etcd-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md index 030fb1425aeee..475e8c9f22e6e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver-kubelet-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for the API server to connect to kubelet ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver-kubelet-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver-kubelet-client +

help for apiserver-kubelet-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver-kubelet-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md index 8ab01efd89c7b..750df89d834d3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_apiserver.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for serving the Kubernetes API ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew apiserver [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for apiserver +

help for apiserver

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew apiserver [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md index 10b44f7c3e8eb..b052fb3e543a0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_controller-manager.conf.md @@ -1,3 +1,16 @@ + + + +Renew the certificate embedded in the kubeconfig file for the controller manager to use ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew controller-manager.conf [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for controller-manager.conf +

help for controller-manager.conf

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew controller-manager.conf [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md index b9ddadd6f14af..252296e3950c4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-healthcheck-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for liveness probes to healthcheck etcd ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew etcd-healthcheck-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for etcd-healthcheck-client +

help for etcd-healthcheck-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew etcd-healthcheck-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md index 3b15fa02f0533..f25b86fa15f57 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-peer.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for etcd nodes to communicate with each other ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew etcd-peer [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for etcd-peer +

help for etcd-peer

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew etcd-peer [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md index 82b9e43e34bc7..059d0d9bbb233 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_etcd-server.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for serving etcd ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew etcd-server [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for etcd-server +

help for etcd-server

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew etcd-server [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md index b1f3bc0c840fd..d93fca8d468b0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_front-proxy-client.md @@ -1,3 +1,16 @@ + + + +Renew the certificate for the front proxy client ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew front-proxy-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for front-proxy-client +

help for front-proxy-client

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew front-proxy-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md index f26fbc22b16f1..5d7ade453b5c1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_certs_renew_scheduler.conf.md @@ -1,3 +1,16 @@ + + + +Renew the certificate embedded in the kubeconfig file for the scheduler manager to use ### Synopsis @@ -27,42 +40,42 @@ kubeadm certs renew scheduler.conf [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save the certificates +

The path where to save the certificates

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--csr-dir string -The path to output the CSRs and private keys to +

The path to output the CSRs and private keys to

--csr-only -Create CSRs instead of generating certificates +

Create CSRs instead of generating certificates

-h, --help -help for scheduler.conf +

help for scheduler.conf

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -83,7 +96,7 @@ kubeadm certs renew scheduler.conf [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
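Because every `kubeadm certs renew <component>` subcommand documented above shares the same flags, renewing everything at once is often more practical; a minimal sketch:

```
# Renew all certificates managed by kubeadm on this node; the control plane
# static Pods must be restarted afterwards to pick up the renewed files
kubeadm certs renew all
```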

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md index f5a69d79fdacf..5fe7d65b403c9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_completion.md @@ -1,3 +1,16 @@ + + + +Output shell completion code for the specified shell (bash or zsh) ### Synopsis @@ -59,7 +72,7 @@ source <(kubeadm completion zsh) -h, --help -help for completion +

help for completion

@@ -80,7 +93,7 @@ source <(kubeadm completion zsh) --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
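As a usage note, loading completion for bash mirrors the zsh snippet shown in the synopsis; the persistence path below is illustrative and depends on the distribution.

```
# Load kubeadm completion into the current bash session
source <(kubeadm completion bash)

# Optionally persist it for new sessions (path is an assumption)
kubeadm completion bash > /etc/bash_completion.d/kubeadm
```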

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md index b39cdd7a0d24a..50cb9f63b9e5d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config.md @@ -1,3 +1,16 @@ + + + +Manage configuration for a kubeadm cluster persisted in a ConfigMap in the cluster ### Synopsis @@ -26,14 +39,14 @@ kubeadm config [flags] -h, --help -help for config +

help for config

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -54,7 +67,7 @@ kubeadm config [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md index 436f3c3c7e303..0f85b4fbc2183 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images.md @@ -1,3 +1,16 @@ + + + +Interact with container images used by kubeadm ### Synopsis @@ -21,7 +34,7 @@ kubeadm config images [flags] -h, --help -help for images +

help for images

@@ -42,14 +55,14 @@ kubeadm config images [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md index 842fb2fe9280a..4634bd0a27c45 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_list.md @@ -1,3 +1,16 @@ + + + +Print a list of images kubeadm will use. The configuration file is used in case any images or image repositories are customized ### Synopsis @@ -21,49 +34,56 @@ kubeadm config images list [flags] --allow-missing-template-keys     Default: true -If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. +

If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-o, --experimental-output string     Default: "text" -Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file. +

Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for list +

help for list

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

+ + + +--show-managed-fields + + +

If true, keep the managedFields when printing objects in JSON or YAML format.

@@ -84,14 +104,14 @@ kubeadm config images list [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
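To show the options above in use, including the `-o/--experimental-output` flag added by this change, a hedged example (the version string is illustrative):

```
# Plain-text list of the control plane images for a specific release
kubeadm config images list --kubernetes-version v1.21.0

# The same list rendered as JSON
kubeadm config images list --kubernetes-version v1.21.0 -o json
```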

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md index d2f5961f85946..840072d167e03 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_images_pull.md @@ -1,3 +1,16 @@ + + + +Pull images used by kubeadm ### Synopsis @@ -21,42 +34,42 @@ kubeadm config images pull [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for pull +

help for pull

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -77,14 +90,14 @@ kubeadm config images pull [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
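A typical use of this command is pre-pulling images before an offline or air-gapped `kubeadm init`; the registry and version below are placeholders, not recommendations.

```
# Pre-pull control plane images from a private mirror
kubeadm config images pull \
  --image-repository registry.example.com/kubernetes \
  --kubernetes-version v1.21.0
```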

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md index d07ffe8677493..5858bdb307cdc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_migrate.md @@ -1,3 +1,16 @@ + + + +Read an older version of the kubeadm configuration API types from a file, and output the similar config object for the newer version ### Synopsis @@ -34,21 +47,21 @@ kubeadm config migrate [flags] -h, --help -help for migrate +

help for migrate

--new-config string -Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT. +

Path to the resulting equivalent kubeadm config file using the new API version. Optional, if not specified output will be sent to STDOUT.

--old-config string -Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory. +

Path to the kubeadm config file that is using an old API version and should be converted. This flag is mandatory.

@@ -69,14 +82,14 @@ kubeadm config migrate [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
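Putting the two flags above together, a minimal sketch of a migration (file names are placeholders):

```
# Convert a config written against an older kubeadm API version; --old-config
# is mandatory, and omitting --new-config would print the result to STDOUT
kubeadm config migrate --old-config old-kubeadm.yaml --new-config new-kubeadm.yaml
```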

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md index c6e1ea2173ed7..2f20d9d1ce4b1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print.md @@ -1,3 +1,16 @@ + + + +Print configuration ### Synopsis @@ -23,7 +36,7 @@ kubeadm config print [flags] -h, --help -help for print +

help for print

@@ -44,14 +57,14 @@ kubeadm config print [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md index adc76ee41cb7c..f8200dfd52836 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_init-defaults.md @@ -1,3 +1,16 @@ + + + +Print default init configuration, that can be used for 'kubeadm init' ### Synopsis @@ -5,7 +18,7 @@ This command prints objects such as the default init configuration that is used for 'kubeadm init'. -Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" <nil> [] []} in order to pass validation but +Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like "abcdef.0123456789abcdef" in order to pass validation but not perform the real computation for creating a token. @@ -23,17 +36,17 @@ kubeadm config print init-defaults [flags] ---component-configs stringSlice +--component-configs strings -A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. +

A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.

-h, --help -help for init-defaults +

help for init-defaults

@@ -54,14 +67,14 @@ kubeadm config print init-defaults [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
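A common workflow is to dump these defaults to a file, edit them, and feed the file back to `kubeadm init --config`; the output file name below is a placeholder.

```
# Print the default init configuration plus the kubelet component config
kubeadm config print init-defaults --component-configs KubeletConfiguration > init.yaml
```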

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md index b1c976c663fa6..1c634871eb24a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_print_join-defaults.md @@ -1,3 +1,16 @@ + + + +Print default join configuration, that can be used for 'kubeadm join' ### Synopsis @@ -5,7 +18,7 @@ This command prints objects such as the default join configuration that is used for 'kubeadm join'. -Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like {"abcdef.0123456789abcdef" "" "nil" <nil> [] []} in order to pass validation but +Note that sensitive values like the Bootstrap Token fields are replaced with placeholder values like "abcdef.0123456789abcdef" in order to pass validation but not perform the real computation for creating a token. @@ -23,17 +36,17 @@ kubeadm config print join-defaults [flags] ---component-configs stringSlice +--component-configs strings -A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed. +

A comma-separated list for component config API objects to print the default values for. Available values: [KubeProxyConfiguration KubeletConfiguration]. If this flag is not set, no component configs will be printed.

-h, --help -help for join-defaults +

help for join-defaults

@@ -54,14 +67,14 @@ kubeadm config print join-defaults [flags] --kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md deleted file mode 100644 index c3a3137105dfc..0000000000000 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_config_view.md +++ /dev/null @@ -1,63 +0,0 @@ - -### Synopsis - - - -Using this command, you can view the ConfigMap in the cluster where the configuration for kubeadm is located. - -The configuration is located in the "kube-system" namespace in the "kubeadm-config" ConfigMap. - - -``` -kubeadm config view [flags] -``` - -### Options - - ---- - - - - - - - - - - -
-h, --help
help for view
- - - -### Options inherited from parent commands - - ---- - - - - - - - - - - - - - - - - - -
--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.
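With this page and the `kubeadm config view` subcommand removed, the same information remains readable directly from the ConfigMap named in the deleted synopsis:

```
# Equivalent of the removed 'kubeadm config view'
kubectl get configmap kubeadm-config -n kube-system -o yaml
```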
- - - diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md index 49c7fd112ac86..4294cffe8b340 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init.md @@ -1,3 +1,16 @@ + + + +Run this command in order to set up the Kubernetes control plane ### Synopsis @@ -39,7 +52,7 @@ mark-control-plane Mark a node as a control-plane bootstrap-token Generates bootstrap tokens used to join a node to a cluster kubelet-finalize Updates settings relevant to the kubelet after TLS bootstrap /experimental-cert-rotation Enable kubelet client certificate rotation -addon Install required addons for passing Conformance tests +addon Install required addons for passing conformance tests /coredns Install the CoreDNS addon to a Kubernetes cluster /kube-proxy Install the kube-proxy addon to a Kubernetes cluster ``` @@ -62,175 +75,175 @@ kubeadm init [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

---apiserver-cert-extra-sans stringSlice +--apiserver-cert-extra-sans strings -Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names. +

Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--certificate-key string -Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. +

Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--dry-run -Don't apply any changes; just output what would be done. +

Don't apply any changes; just output what would be done.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for init +

help for init

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--node-name string -Specify the node name. +

Specify the node name.

--pod-network-cidr string -Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +

Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

--service-dns-domain string     Default: "cluster.local" -Use alternative domain for services, e.g. "myorg.internal". +

Use alternative domain for services, e.g. "myorg.internal".

--skip-certificate-key-print -Don't print the key used to encrypt the control-plane certificates. +

Don't print the key used to encrypt the control-plane certificates.

---skip-phases stringSlice +--skip-phases strings -List of phases to be skipped +

List of phases to be skipped

--skip-token-print -Skip printing of the default bootstrap token generated by 'kubeadm init'. +

Skip printing of the default bootstrap token generated by 'kubeadm init'.

--token string -The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}\.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef +

The token to use for establishing bidirectional trust between nodes and control-plane nodes. The format is [a-z0-9]{6}.[a-z0-9]{16} - e.g. abcdef.0123456789abcdef

--token-ttl duration     Default: 24h0m0s -The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire +

The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire

--upload-certs -Upload control-plane certificates to the kubeadm-certs Secret. +

Upload control-plane certificates to the kubeadm-certs Secret.

@@ -251,7 +264,7 @@ kubeadm init [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
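Tying several of the flags above together, a sketch of bootstrapping the first control-plane node of a cluster that will later gain additional control-plane members; the endpoint and CIDR values are illustrative, not recommendations.

```
kubeadm init \
  --control-plane-endpoint "cp.example.com:6443" \
  --pod-network-cidr "10.244.0.0/16" \
  --upload-certs
```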

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md index 2db3ea5e54aee..48ccd99cd552a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the init workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the init workflow -h, --help -help for phase +

help for phase

@@ -38,7 +51,7 @@ Use this command to invoke single phase of the init workflow --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md index 67b9c3af7598a..64777661d03ae 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon.md @@ -1,3 +1,16 @@ + + + +Install required addons for passing conformance tests ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase addon [flags] -h, --help -help for addon +

help for addon

@@ -42,7 +55,7 @@ kubeadm init phase addon [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md index 103dd7e7c5e74..48ae42ca48495 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_all.md @@ -1,3 +1,16 @@ + + + +Install all the addons ### Synopsis @@ -21,84 +34,84 @@ kubeadm init phase addon all [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for all +

help for all

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string -Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +

Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

--service-dns-domain string     Default: "cluster.local" -Use alternative domain for services, e.g. "myorg.internal". +

Use alternative domain for services, e.g. "myorg.internal".

@@ -119,7 +132,7 @@ kubeadm init phase addon all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
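For illustration, this phase can be driven from a kubeadm configuration file rather than individual flags (the file name is a placeholder):

```
# Install (or re-apply) the CoreDNS and kube-proxy addons
kubeadm init phase addon all --config kubeadm-config.yaml
```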

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md index 3eebcb828bf60..68f0d0d0259b0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_coredns.md @@ -1,3 +1,16 @@ + + + +Install the CoreDNS addon to a Kubernetes cluster ### Synopsis @@ -21,56 +34,56 @@ kubeadm init phase addon coredns [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for coredns +

help for coredns

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

--service-dns-domain string     Default: "cluster.local" -Use alternative domain for services, e.g. "myorg.internal". +

Use alternative domain for services, e.g. "myorg.internal".

@@ -91,7 +104,7 @@ kubeadm init phase addon coredns [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md index 78140e94e80ec..4dc9a18339ccc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_addon_kube-proxy.md @@ -1,3 +1,16 @@ + + + +Install the kube-proxy addon to a Kubernetes cluster ### Synopsis @@ -21,63 +34,63 @@ kubeadm init phase addon kube-proxy [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for kube-proxy +

help for kube-proxy

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string -Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +

Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

@@ -98,7 +111,7 @@ kubeadm init phase addon kube-proxy [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md index 123ab38fdc843..652399d5cfd7d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_bootstrap-token.md @@ -1,3 +1,16 @@ + + + +Generates bootstrap tokens used to join a node to a cluster ### Synopsis @@ -31,28 +44,28 @@ kubeadm init phase bootstrap-token [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for bootstrap-token +

help for bootstrap-token

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--skip-token-print -Skip printing of the default bootstrap token generated by 'kubeadm init'. +

Skip printing of the default bootstrap token generated by 'kubeadm init'.

@@ -73,7 +86,7 @@ kubeadm init phase bootstrap-token [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
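A hedged example of running this phase on its own, using only flags documented above:

```
# Configure bootstrap-token RBAC and create an initial token without
# echoing the token value to the terminal
kubeadm init phase bootstrap-token --skip-token-print
```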

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md index 28f5acc3e3427..c779b920e1d0a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs.md @@ -1,3 +1,16 @@ + + + +Certificate generation ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase certs [flags] -h, --help -help for certs +

help for certs

@@ -42,7 +55,7 @@ kubeadm init phase certs [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md index 7ac391c0784ff..7485310462329 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_all.md @@ -1,3 +1,16 @@ + + + +Generate all certificates ### Synopsis @@ -21,63 +34,63 @@ kubeadm init phase certs all [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

---apiserver-cert-extra-sans stringSlice +--apiserver-cert-extra-sans strings -Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names. +

Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for all +

help for all

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

--service-dns-domain string     Default: "cluster.local" -Use alternative domain for services, e.g. "myorg.internal". +

Use alternative domain for services, e.g. "myorg.internal".

@@ -98,7 +111,7 @@ kubeadm init phase certs all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
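As a worked example of the flags above, generating the full certificate set ahead of `kubeadm init`, with an extra SAN for a load balancer in front of the API servers (the hostname is illustrative):

```
kubeadm init phase certs all \
  --apiserver-cert-extra-sans "lb.example.com" \
  --cert-dir /etc/kubernetes/pki
```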

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md index eef07b2afaa27..4c8bed971a32e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-etcd-client.md @@ -1,3 +1,16 @@ + + + +Generate the certificate the apiserver uses to access etcd ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs apiserver-etcd-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for apiserver-etcd-client +

help for apiserver-etcd-client

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs apiserver-etcd-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md index 1d03b13ef10cb..814a9c15ff4ad 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver-kubelet-client.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for the API server to connect to kubelet ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs apiserver-kubelet-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for apiserver-kubelet-client +

help for apiserver-kubelet-client

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs apiserver-kubelet-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md index 28f6cc6244b9f..fa2d46ab8eaba 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_apiserver.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for serving the Kubernetes API ### Synopsis @@ -27,63 +40,63 @@ kubeadm init phase certs apiserver [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

---apiserver-cert-extra-sans stringSlice +--apiserver-cert-extra-sans strings -Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names. +

Optional extra Subject Alternative Names (SANs) to use for the API Server serving certificate. Can be both IP addresses and DNS names.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for apiserver +

help for apiserver

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

--service-dns-domain string     Default: "cluster.local" -Use alternative domain for services, e.g. "myorg.internal". +

Use alternative domain for services, e.g. "myorg.internal".

@@ -104,7 +117,7 @@ kubeadm init phase certs apiserver [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md index 81ccc2cbc2e36..d12b74f19f8f1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_ca.md @@ -1,3 +1,16 @@ + + + +Generate the self-signed Kubernetes CA to provision identities for other Kubernetes components ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs ca [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for ca +

help for ca

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs ca [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md index 17066413ddd10..2cddb77aded75 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-ca.md @@ -1,3 +1,16 @@ + + + +Generate the self-signed CA to provision identities for etcd ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs etcd-ca [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for etcd-ca +

help for etcd-ca

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs etcd-ca [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md index 6ee2e7ea112a6..9876d5bce793b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-healthcheck-client.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for liveness probes to healthcheck etcd ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs etcd-healthcheck-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for etcd-healthcheck-client +

help for etcd-healthcheck-client

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs etcd-healthcheck-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md index a127d4095ab47..d86991f8f85d3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-peer.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for etcd nodes to communicate with each other ### Synopsis @@ -27,28 +40,28 @@ kubeadm init phase certs etcd-peer [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for etcd-peer +

help for etcd-peer

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -69,7 +82,7 @@ kubeadm init phase certs etcd-peer [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md index 07a58373c151f..213cf22d2fe1a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_etcd-server.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for serving etcd ### Synopsis @@ -27,28 +40,28 @@ kubeadm init phase certs etcd-server [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for etcd-server +

help for etcd-server

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -69,7 +82,7 @@ kubeadm init phase certs etcd-server [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md index 4a05b78d776b6..c2d37be74fc91 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-ca.md @@ -1,3 +1,16 @@ + + + +Generate the self-signed CA to provision identities for front proxy ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs front-proxy-ca [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for front-proxy-ca +

help for front-proxy-ca

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs front-proxy-ca [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md index 8e2d76f4512fd..58a81fa7a286a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_front-proxy-client.md @@ -1,3 +1,16 @@ + + + +Generate the certificate for the front proxy client ### Synopsis @@ -25,28 +38,28 @@ kubeadm init phase certs front-proxy-client [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for front-proxy-client +

help for front-proxy-client

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -67,7 +80,7 @@ kubeadm init phase certs front-proxy-client [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md index 8d36df6c52f0d..a3df321d886fc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_certs_sa.md @@ -1,3 +1,16 @@ + + + +Generate a private key for signing service account tokens along with its public key ### Synopsis @@ -23,14 +36,14 @@ kubeadm init phase certs sa [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

-h, --help -help for sa +

help for sa

@@ -51,7 +64,7 @@ kubeadm init phase certs sa [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md index 2bed8442d3364..86ef35d14d131 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane.md @@ -1,3 +1,16 @@ + + + +Generate all static Pod manifest files necessary to establish the control plane ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase control-plane [flags] -h, --help -help for control-plane +

help for control-plane

@@ -42,7 +55,7 @@ kubeadm init phase control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md index e03cdb6274d80..daad2e9a39f4d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_all.md @@ -1,3 +1,16 @@ + + + +Generate all static Pod manifest files ### Synopsis @@ -32,105 +45,105 @@ kubeadm init phase control-plane all [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

---apiserver-extra-args mapStringString +--apiserver-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>
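As a sketch of the comma-separated 'key=value' form (the kube-apiserver flag names below are ordinary upstream flags, used purely as illustration):

```
kubeadm init phase control-plane all \
  --apiserver-extra-args "audit-log-path=/var/log/kube-apiserver-audit.log,audit-log-maxage=30"
```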

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

---controller-manager-extra-args mapStringString +--controller-manager-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.
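A sketch of the naming convention described above, assuming a hypothetical patch directory at /etc/kubeadm/patches:

```
# /etc/kubeadm/patches/kube-apiserver0+merge.yaml  -> merge patch applied to the kube-apiserver manifest
# /etc/kubeadm/patches/etcd.json                   -> strategic patch (the default patchtype) for etcd
kubeadm init phase control-plane all --experimental-patches /etc/kubeadm/patches
```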

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)
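For example, one of the gates listed above could be enabled as in the sketch below; any comma-separated set of the listed gates should follow the same key=value pattern:

```
kubeadm init phase control-plane all --feature-gates "PublicKeysECDSA=true"
```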

-h, --help -help for all +

help for all

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string -Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +

Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

---scheduler-extra-args mapStringString +--scheduler-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.
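Taken together, the addressing flags above might be combined as in the following sketch; the CIDRs and the endpoint name are illustrative placeholders:

```
kubeadm init phase control-plane all \
  --control-plane-endpoint cp.example.internal \
  --pod-network-cidr 10.244.0.0/16 \
  --service-cidr 10.96.0.0/12
```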

@@ -151,7 +164,7 @@ kubeadm init phase control-plane all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md index 50aef041fd535..f95da1c6d2b1b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_apiserver.md @@ -1,3 +1,16 @@ + + + +Generates the kube-apiserver static Pod manifest ### Synopsis @@ -21,84 +34,84 @@ kubeadm init phase control-plane apiserver [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

---apiserver-extra-args mapStringString +--apiserver-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the API Server or override default ones in form of <flagname>=<value>

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string -A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false) +

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help -help for apiserver +

help for apiserver

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--service-cidr string     Default: "10.96.0.0/12" -Use alternative range of IP address for service VIPs. +

Use alternative range of IP address for service VIPs.

@@ -119,7 +132,7 @@ kubeadm init phase control-plane apiserver [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md index c1f0989cb6606..0931956c54021 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_controller-manager.md @@ -1,3 +1,16 @@ + + + +Generates the kube-controller-manager static Pod manifest ### Synopsis @@ -21,56 +34,56 @@ kubeadm init phase control-plane controller-manager [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

---controller-manager-extra-args mapStringString +--controller-manager-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the Controller Manager or override default ones in form of <flagname>=<value>

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for controller-manager +

help for controller-manager

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--pod-network-cidr string -Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node. +

Specify range of IP addresses for the pod network. If set, the control plane will automatically allocate CIDRs for every node.

@@ -91,7 +104,7 @@ kubeadm init phase control-plane controller-manager [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md index ce2f366b13345..5fe483282a26a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_control-plane_scheduler.md @@ -1,3 +1,16 @@ + + + +Generates the kube-scheduler static Pod manifest ### Synopsis @@ -21,49 +34,49 @@ kubeadm init phase control-plane scheduler [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for scheduler +

help for scheduler

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

---scheduler-extra-args mapStringString +--scheduler-extra-args <comma-separated 'key=value' pairs> -A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value> +

A set of extra flags to pass to the Scheduler or override default ones in form of <flagname>=<value>

@@ -84,7 +97,7 @@ kubeadm init phase control-plane scheduler [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md index dc5227a34ca5f..be2aef8c3bb94 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd.md @@ -1,3 +1,16 @@ + + + +Generate static Pod manifest file for local etcd ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase etcd [flags] -h, --help -help for etcd +

help for etcd

@@ -42,7 +55,7 @@ kubeadm init phase etcd [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md index 0e4cb7181eb11..1e4e8fa22f164 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_etcd_local.md @@ -1,3 +1,16 @@ + + + +Generate the static Pod manifest file for a local, single-node local etcd instance ### Synopsis @@ -33,35 +46,35 @@ kubeadm init phase etcd local [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for local +

help for local

--image-repository string     Default: "k8s.gcr.io" -Choose a container registry to pull control plane images from +

Choose a container registry to pull control plane images from

@@ -82,7 +95,7 @@ kubeadm init phase etcd local [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md index b3a200a22877b..da4fde7ebcf1d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig.md @@ -1,3 +1,16 @@ + + + +Generate all kubeconfig files necessary to establish the control plane and the admin kubeconfig file ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase kubeconfig [flags] -h, --help -help for kubeconfig +

help for kubeconfig

@@ -42,7 +55,7 @@ kubeadm init phase kubeconfig [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md index 85885559f761b..a664e126ffedb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_admin.md @@ -1,3 +1,16 @@ + + + +Generate a kubeconfig file for the admin to use and for kubeadm itself ### Synopsis @@ -21,56 +34,56 @@ kubeadm init phase kubeconfig admin [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for admin +

help for admin

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig admin [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.
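A minimal sketch of this phase, assuming the default kubeconfig directory; the resulting admin.conf can then be used directly with kubectl:

```
kubeadm init phase kubeconfig admin --kubeconfig-dir /etc/kubernetes
export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get nodes
```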

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md index 9296e84a199cd..f1ebdbcf12afd 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_all.md @@ -1,3 +1,16 @@ + + + +Generate all kubeconfig files ### Synopsis @@ -21,63 +34,63 @@ kubeadm init phase kubeconfig all [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for all +

help for all

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--node-name string -Specify the node name. +

Specify the node name.

@@ -98,7 +111,7 @@ kubeadm init phase kubeconfig all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md index 295d7e57dc819..c49ab4b6c4be7 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_controller-manager.md @@ -1,3 +1,16 @@ + + + +Generate a kubeconfig file for the controller manager to use ### Synopsis @@ -21,56 +34,56 @@ kubeadm init phase kubeconfig controller-manager [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for controller-manager +

help for controller-manager

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig controller-manager [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md index 9fd3145290273..fd141ea0fc833 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_kubelet.md @@ -1,3 +1,16 @@ + + + +Generate a kubeconfig file for the kubelet to use *only* for cluster bootstrapping purposes ### Synopsis @@ -23,63 +36,63 @@ kubeadm init phase kubeconfig kubelet [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for kubelet +

help for kubelet

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

--node-name string -Specify the node name. +

Specify the node name.

@@ -100,7 +113,7 @@ kubeadm init phase kubeconfig kubelet [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md index c608732717d23..9618c2d874b45 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubeconfig_scheduler.md @@ -1,3 +1,16 @@ + + + +Generate a kubeconfig file for the scheduler to use ### Synopsis @@ -21,56 +34,56 @@ kubeadm init phase kubeconfig scheduler [flags] --apiserver-advertise-address string -The IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

The IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -Port for the API Server to bind to. +

Port for the API Server to bind to.

--cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--control-plane-endpoint string -Specify a stable IP address or DNS name for the control plane. +

Specify a stable IP address or DNS name for the control plane.

-h, --help -help for scheduler +

help for scheduler

--kubeconfig-dir string     Default: "/etc/kubernetes" -The path where to save the kubeconfig file. +

The path where to save the kubeconfig file.

--kubernetes-version string     Default: "stable-1" -Choose a specific Kubernetes version for the control plane. +

Choose a specific Kubernetes version for the control plane.

@@ -91,7 +104,7 @@ kubeadm init phase kubeconfig scheduler [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md index 4e5febf638fdf..d2eb7f01257d4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize.md @@ -1,3 +1,16 @@ + + + +Updates settings relevant to the kubelet after TLS bootstrap ### Synopsis @@ -28,7 +41,7 @@ kubeadm init phase kubelet-finalize [flags] -h, --help -help for kubelet-finalize +

help for kubelet-finalize

@@ -49,7 +62,7 @@ kubeadm init phase kubelet-finalize [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md index fce712fc45cf9..70e4c634b027c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_all.md @@ -1,3 +1,16 @@ + + + +Run all kubelet-finalize phases ### Synopsis @@ -28,21 +41,21 @@ kubeadm init phase kubelet-finalize all [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for all +

help for all

@@ -63,7 +76,7 @@ kubeadm init phase kubelet-finalize all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md index 2ace62929bb8e..6ce904cc2ba90 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-finalize_experimental-cert-rotation.md @@ -1,3 +1,16 @@ + + + +Enable kubelet client certificate rotation ### Synopsis @@ -21,21 +34,21 @@ kubeadm init phase kubelet-finalize experimental-cert-rotation [flags] --cert-dir string     Default: "/etc/kubernetes/pki" -The path where to save and store the certificates. +

The path where to save and store the certificates.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for experimental-cert-rotation +

help for experimental-cert-rotation

@@ -56,7 +69,7 @@ kubeadm init phase kubelet-finalize experimental-cert-rotation [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md index f9898b58e0efa..11d2407499e86 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_kubelet-start.md @@ -1,3 +1,16 @@ + + + +Write kubelet settings and (re)start the kubelet ### Synopsis @@ -28,28 +41,28 @@ kubeadm init phase kubelet-start [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

-h, --help -help for kubelet-start +

help for kubelet-start

--node-name string -Specify the node name. +

Specify the node name.
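A sketch of this phase with an explicit CRI socket and node name; the containerd socket path and file names are assumptions, and --cri-socket can be omitted to let kubeadm auto-detect it as described above:

```
kubeadm init phase kubelet-start \
  --config kubeadm-config.yaml \
  --cri-socket /var/run/containerd/containerd.sock \
  --node-name node-01
```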

@@ -70,7 +83,7 @@ kubeadm init phase kubelet-start [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md index 453783db52c13..6ba7e9047932c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_mark-control-plane.md @@ -1,3 +1,16 @@ + + + +Mark a node as a control-plane ### Synopsis @@ -12,7 +25,7 @@ kubeadm init phase mark-control-plane [flags] ``` # Applies control-plane label and taint to the current node, functionally equivalent to what executed by kubeadm init. - kubeadm init phase mark-control-plane --config config.yml + kubeadm init phase mark-control-plane --config config.yaml # Applies control-plane label and taint to a specific node kubeadm init phase mark-control-plane --node-name myNode @@ -31,21 +44,21 @@ kubeadm init phase mark-control-plane [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for mark-control-plane +

help for mark-control-plane

--node-name string -Specify the node name. +

Specify the node name.

@@ -66,7 +79,7 @@ kubeadm init phase mark-control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md index 06d47e861c323..345621f7030a9 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run pre-flight checks ### Synopsis @@ -12,7 +25,7 @@ kubeadm init phase preflight [flags] ``` # Run pre-flight checks for kubeadm init using a config file. - kubeadm init phase preflight --config kubeadm-config.yml + kubeadm init phase preflight --config kubeadm-config.yaml ``` ### Options @@ -28,21 +41,21 @@ kubeadm init phase preflight [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for preflight +

help for preflight

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

@@ -63,7 +76,7 @@ kubeadm init phase preflight [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md index 404f62d725778..515060a76c7bb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-certs.md @@ -1,3 +1,16 @@ + + + +Upload certificates to kubeadm-certs ### Synopsis @@ -21,42 +34,42 @@ kubeadm init phase upload-certs [flags] --certificate-key string -Key used to encrypt the control-plane certificates in the kubeadm-certs Secret. +

Key used to encrypt the control-plane certificates in the kubeadm-certs Secret.

--config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for upload-certs +

help for upload-certs

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--skip-certificate-key-print -Don't print the key used to encrypt the control-plane certificates. +

Don't print the key used to encrypt the control-plane certificates.

--upload-certs -Upload control-plane certificates to the kubeadm-certs Secret. +

Upload control-plane certificates to the kubeadm-certs Secret.
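A sketch of the typical flow: re-upload the control-plane certificates and capture the printed key, which a joining control-plane node can later consume via --certificate-key:

```
kubeadm init phase upload-certs --upload-certs
# The command prints a certificate key; on a new control-plane node it would be used roughly as:
#   kubeadm join <endpoint> --control-plane --certificate-key <key-printed-above>
```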

@@ -77,7 +90,7 @@ kubeadm init phase upload-certs [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md index c1b5c960921ae..7d007e7b5640e 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config.md @@ -1,3 +1,16 @@ + + + +Upload the kubeadm and kubelet configuration to a ConfigMap ### Synopsis @@ -21,7 +34,7 @@ kubeadm init phase upload-config [flags] -h, --help -help for upload-config +

help for upload-config

@@ -42,7 +55,7 @@ kubeadm init phase upload-config [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md index 6370094df9b04..3c087368a77e3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_all.md @@ -1,3 +1,16 @@ + + + +Upload all configuration to a config map ### Synopsis @@ -21,21 +34,21 @@ kubeadm init phase upload-config all [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for all +

help for all

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -56,7 +69,7 @@ kubeadm init phase upload-config all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md index 030595466be3e..13e561f486287 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubeadm.md @@ -1,3 +1,16 @@ + + + +Upload the kubeadm ClusterConfiguration to a ConfigMap ### Synopsis @@ -30,21 +43,21 @@ kubeadm init phase upload-config kubeadm [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for kubeadm +

help for kubeadm

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.
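As a sketch, upload a local configuration and then inspect the result; the ClusterConfiguration is conventionally stored in the kubeadm-config ConfigMap in the kube-system namespace:

```
kubeadm init phase upload-config kubeadm --config kubeadm-config.yaml
kubectl -n kube-system get configmap kubeadm-config -o yaml
```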

@@ -65,7 +78,7 @@ kubeadm init phase upload-config kubeadm [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md index bd334e091c35c..ba27f728cbf80 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_init_phase_upload-config_kubelet.md @@ -1,3 +1,16 @@ + + + +Upload the kubelet component config to a ConfigMap ### Synopsis @@ -28,21 +41,21 @@ kubeadm init phase upload-config kubelet [flags] --config string -Path to a kubeadm configuration file. +

Path to a kubeadm configuration file.

-h, --help -help for kubelet +

help for kubelet

--kubeconfig string     Default: "/etc/kubernetes/admin.conf" -The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. +

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

@@ -63,7 +76,7 @@ kubeadm init phase upload-config kubelet [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md index 3a6f7299d6bea..ae528a44df34f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join.md @@ -1,3 +1,16 @@ + + + +Run this on any machine you wish to join an existing cluster ### Synopsis @@ -78,119 +91,119 @@ kubeadm join [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").
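One common way to compute a sha256 value in that format from the cluster CA, assuming an RSA CA key at the default location, is sketched below; the resulting hash is then passed to kubeadm join (endpoint, token, and hash are placeholders):

```
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'
# kubeadm join 192.0.2.10:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash-from-above>
```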

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for join +

help for join

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--node-name string -Specify the node name. +

Specify the node name.

---skip-phases stringSlice +--skip-phases strings -List of phases to be skipped +

List of phases to be skipped

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -211,7 +224,7 @@ kubeadm join [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md index 873f64aa163f6..b780de18ccdce 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the join workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the join workflow -h, --help -help for phase +

help for phase

@@ -38,7 +51,7 @@ Use this command to invoke single phase of the join workflow --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md index 20170c783c7ae..07768a16c6efb 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join.md @@ -1,3 +1,16 @@ + + + +Join a machine as a control plane instance ### Synopsis @@ -28,7 +41,7 @@ kubeadm join phase control-plane-join [flags] -h, --help -help for control-plane-join +

help for control-plane-join

@@ -49,7 +62,7 @@ kubeadm join phase control-plane-join [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md index 9515d0dfe7bbc..ed1753457a57c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_all.md @@ -1,3 +1,16 @@ + + + +Join a machine as a control plane instance ### Synopsis @@ -21,35 +34,35 @@ kubeadm join phase control-plane-join all [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

-h, --help -help for all +

help for all

--node-name string -Specify the node name. +

Specify the node name.

@@ -70,7 +83,7 @@ kubeadm join phase control-plane-join all [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md index 4618107dd2457..9990ce3dc115c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_etcd.md @@ -1,3 +1,16 @@ + + + +Add a new local etcd member ### Synopsis @@ -21,42 +34,42 @@ kubeadm join phase control-plane-join etcd [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for etcd +

help for etcd

--node-name string -Specify the node name. +

Specify the node name.

@@ -77,7 +90,7 @@ kubeadm join phase control-plane-join etcd [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md index 37bd9675b8802..9e2d117ed9e27 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_mark-control-plane.md @@ -1,3 +1,16 @@ + + + +Mark a node as a control-plane ### Synopsis @@ -21,28 +34,28 @@ kubeadm join phase control-plane-join mark-control-plane [flags] --config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

-h, --help -help for mark-control-plane +

help for mark-control-plane

--node-name string -Specify the node name. +

Specify the node name.

@@ -63,7 +76,7 @@ kubeadm join phase control-plane-join mark-control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md index 258210f3032a7..10127f967f8da 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-join_update-status.md @@ -1,3 +1,16 @@ + + + +Register the new control-plane node into the ClusterStatus maintained in the kubeadm-config ConfigMap ### Synopsis @@ -21,35 +34,35 @@ kubeadm join phase control-plane-join update-status [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

-h, --help -help for update-status +

help for update-status

--node-name string -Specify the node name. +

Specify the node name.

@@ -70,7 +83,7 @@ kubeadm join phase control-plane-join update-status [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md index 81a88bdaa5a5a..6952dbca80ca4 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare.md @@ -1,3 +1,16 @@ + + + +Prepare the machine for serving a control plane ### Synopsis @@ -28,7 +41,7 @@ kubeadm join phase control-plane-prepare [flags] -h, --help -help for control-plane-prepare +

help for control-plane-prepare

@@ -49,7 +62,7 @@ kubeadm join phase control-plane-prepare [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md index 932ef5f27b911..cfc54c9bb4e98 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_all.md @@ -1,3 +1,16 @@ + + + +Prepare the machine for serving a control plane ### Synopsis @@ -21,98 +34,98 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for all +

help for all

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -133,7 +146,7 @@ kubeadm join phase control-plane-prepare all [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md index c8d59d58eb032..d26c5e1adb6e8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_certs.md @@ -1,3 +1,16 @@ + + + +Generate the certificates for the new control plane components ### Synopsis @@ -21,77 +34,77 @@ kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for certs +

help for certs

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -112,7 +125,7 @@ kubeadm join phase control-plane-prepare certs [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md index 3e9a120c000ad..820f499c41968 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_control-plane.md @@ -1,3 +1,16 @@ + + + +Generate the manifests for the new control plane components ### Synopsis @@ -21,42 +34,42 @@ kubeadm join phase control-plane-prepare control-plane [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--experimental-patches string -Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically. +

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help -help for control-plane +

help for control-plane

@@ -77,7 +90,7 @@ kubeadm join phase control-plane-prepare control-plane [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md index 26e65cce87db4..e45e23cf7d674 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_download-certs.md @@ -1,3 +1,16 @@ + + + +[EXPERIMENTAL] Download certificates shared among control-plane nodes from the kubeadm-certs Secret ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f --certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for download-certs +

help for download-certs

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -105,7 +118,7 @@ kubeadm join phase control-plane-prepare download-certs [api-server-endpoint] [f --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md index 722ec2263d9e5..995c6290c4de0 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_control-plane-prepare_kubeconfig.md @@ -1,3 +1,16 @@ + + + +Generate the kubeconfig for the new control plane components ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags --certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for kubeconfig +

help for kubeconfig

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -105,7 +118,7 @@ kubeadm join phase control-plane-prepare kubeconfig [api-server-endpoint] [flags --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md index 719700b9a04ba..9c1cef31c0f1b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_kubelet-start.md @@ -1,3 +1,16 @@ + + + +Write kubelet settings, certificates and (re)start the kubelet ### Synopsis @@ -21,70 +34,70 @@ kubeadm join phase kubelet-start [api-server-endpoint] [flags] --config string -Path to kubeadm config file. +

Path to kubeadm config file.

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for kubelet-start +

help for kubelet-start

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -105,7 +118,7 @@ kubeadm join phase kubelet-start [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md index ca975f9d9204b..5d8e10522bf28 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_join_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run join pre-flight checks ### Synopsis @@ -12,7 +25,7 @@ kubeadm join phase preflight [api-server-endpoint] [flags] ``` # Run join pre-flight checks using a config file. - kubeadm join phase preflight --config kubeadm-config.yml + kubeadm join phase preflight --config kubeadm-config.yaml ``` ### Options @@ -28,105 +41,105 @@ kubeadm join phase preflight [api-server-endpoint] [flags] --apiserver-advertise-address string -If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used. +

If the node should host a new control plane instance, the IP address the API Server will advertise it's listening on. If not set the default network interface will be used.

--apiserver-bind-port int32     Default: 6443 -If the node should host a new control plane instance, the port for the API Server to bind to. +

If the node should host a new control plane instance, the port for the API Server to bind to.

--certificate-key string -Use this key to decrypt the certificate secrets uploaded by init. +

Use this key to decrypt the certificate secrets uploaded by init.

--config string -Path to kubeadm config file. +

Path to kubeadm config file.

--control-plane -Create a new control plane instance on this node +

Create a new control plane instance on this node

--cri-socket string -Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. +

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

--discovery-file string -For file-based discovery, a file or URL from which to load cluster information. +

For file-based discovery, a file or URL from which to load cluster information.

--discovery-token string -For token-based discovery, the token used to validate cluster information fetched from the API server. +

For token-based discovery, the token used to validate cluster information fetched from the API server.

---discovery-token-ca-cert-hash stringSlice +--discovery-token-ca-cert-hash strings -For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>"). +

For token-based discovery, validate that the root CA public key matches this hash (format: "<type>:<value>").

--discovery-token-unsafe-skip-ca-verification -For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning. +

For token-based discovery, allow joining without --discovery-token-ca-cert-hash pinning.

-h, --help -help for preflight +

help for preflight

---ignore-preflight-errors stringSlice +--ignore-preflight-errors strings -A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. +

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--node-name string -Specify the node name. +

Specify the node name.

--tls-bootstrap-token string -Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node. +

Specify the token used to temporarily authenticate with the Kubernetes Control Plane while joining the node.

--token string -Use this token for both discovery-token and tls-bootstrap-token when those values are not provided. +

Use this token for both discovery-token and tls-bootstrap-token when those values are not provided.

@@ -147,7 +160,7 @@ kubeadm join phase preflight [api-server-endpoint] [flags] --rootfs string -[EXPERIMENTAL] The path to the 'real' host root filesystem. +

[EXPERIMENTAL] The path to the 'real' host root filesystem.

diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md index 67ee58fdd7c14..b678061bb0ea6 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig.md @@ -1,9 +1,24 @@ + + + +Kubeconfig file utilities ### Synopsis Kubeconfig file utilities. +Alpha Disclaimer: this command is currently alpha. + ### Options @@ -17,7 +32,7 @@ Kubeconfig file utilities. - + @@ -38,7 +53,7 @@ Kubeconfig file utilities. - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md index 8ff987503c64f..8293ee2f27ec2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_kubeconfig_user.md @@ -1,9 +1,23 @@ + + + +Output a kubeconfig file for an additional user ### Synopsis Output a kubeconfig file for an additional user. +Alpha Disclaimer: this command is currently alpha. ``` kubeadm kubeconfig user [flags] @@ -13,7 +27,7 @@ kubeadm kubeconfig user [flags] ``` # Output a kubeconfig file for an additional user named foo using a kubeadm config file bar - kubeadm kubeconfig user --client-name=foo --config=bar + kubeadm alpha kubeconfig user --client-name=foo --config=bar ``` ### Options @@ -29,35 +43,35 @@ kubeadm kubeconfig user [flags] - + - + - + - + - + - + @@ -78,7 +92,7 @@ kubeadm kubeconfig user [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md index 4cfa48be372d6..a745cb8c9e31c 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset.md @@ -1,3 +1,16 @@ + + + +Performs a best effort revert of changes made to this host by 'kubeadm init' or 'kubeadm join' ### Synopsis @@ -30,49 +43,49 @@ kubeadm reset [flags] - + - + - + - + - + - + - + - + - + @@ -93,7 +106,7 @@ kubeadm reset [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md index 498621b95d8f6..e526dafa1fb35 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the reset workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the reset workflow - + @@ -38,7 +51,7 @@ Use this command to invoke single phase of the reset workflow - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md index 84376e67b2d1c..ceabd2045e96a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_cleanup-node.md @@ -1,3 +1,16 @@ + + + +Run cleanup node. 
### Synopsis @@ -21,21 +34,21 @@ kubeadm reset phase cleanup-node [flags] - + - + - + @@ -56,7 +69,7 @@ kubeadm reset phase cleanup-node [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md index 8f3537bc7c347..b3d1502184650 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run reset pre-flight checks ### Synopsis @@ -21,21 +34,21 @@ kubeadm reset phase preflight [flags] - + - + - + - + @@ -56,7 +69,7 @@ kubeadm reset phase preflight [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md index c7350d27ca463..d2c1060ff4ac2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_remove-etcd-member.md @@ -1,3 +1,16 @@ + + + +Remove a local etcd member. ### Synopsis @@ -21,14 +34,14 @@ kubeadm reset phase remove-etcd-member [flags] - + - + @@ -49,7 +62,7 @@ kubeadm reset phase remove-etcd-member [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md index de4700032bf84..b73f736958b1a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_reset_phase_update-cluster-status.md @@ -1,3 +1,16 @@ + + + +Remove this node from the ClusterStatus object. 
### Synopsis @@ -21,7 +34,7 @@ kubeadm reset phase update-cluster-status [flags] - + @@ -42,7 +55,7 @@ kubeadm reset phase update-cluster-status [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md index 2662497699d8c..5384fc4d6cce2 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token.md @@ -1,3 +1,16 @@ + + + +Manage bootstrap tokens ### Synopsis @@ -38,21 +51,21 @@ kubeadm token [flags] - + - + - + @@ -73,7 +86,7 @@ kubeadm token [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md index b2212bba44dc5..a2a217033c88b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_create.md @@ -1,3 +1,16 @@ + + + +Create bootstrap tokens on the server ### Synopsis @@ -28,56 +41,56 @@ kubeadm token create [token] - + - + - + - + - + - + - + - + - + - + @@ -98,21 +111,21 @@ kubeadm token create [token] - + - + - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md index d1ddd8bd2c542..2040bd3f94ac1 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_delete.md @@ -1,3 +1,16 @@ + + + +Delete bootstrap tokens on the server ### Synopsis @@ -26,7 +39,7 @@ kubeadm token delete [token-value] ... - + @@ -47,21 +60,21 @@ kubeadm token delete [token-value] ... 
- + - + - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md index 72ca0220ee46d..60de389d6c07f 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_generate.md @@ -1,3 +1,16 @@ + + + +Generate and print a bootstrap token, but do not create it on the server ### Synopsis @@ -31,7 +44,7 @@ kubeadm token generate [flags] - + @@ -52,21 +65,21 @@ kubeadm token generate [flags] - + - + - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md index 20ba81f63fe07..089424492e90d 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_token_list.md @@ -1,3 +1,16 @@ + + + +List bootstrap tokens on the server ### Synopsis @@ -23,21 +36,28 @@ kubeadm token list [flags] - + - + - + + + + + + + + @@ -58,21 +78,21 @@ kubeadm token list [flags] - + - + - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md index b3fe44532beba..0c2a46519454b 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade.md @@ -1,3 +1,16 @@ + + + +Upgrade your cluster smoothly to a newer version with this command ### Synopsis @@ -21,7 +34,7 @@ kubeadm upgrade [flags] - + @@ -42,7 +55,7 @@ kubeadm upgrade [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md index ed6896b0a779e..d34e01da47e54 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_apply.md @@ -1,3 +1,16 @@ + + + +Upgrade your Kubernetes cluster to the specified version ### Synopsis @@ -21,98 +34,98 @@ kubeadm upgrade apply [version] - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -133,7 +146,7 @@ kubeadm upgrade apply [version] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md index c15b1180752d5..eb5e3c4cace98 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_diff.md @@ -1,3 +1,16 @@ + + + +Show what differences would be applied to existing static pod manifests. 
See also: kubeadm upgrade apply --dry-run ### Synopsis @@ -21,49 +34,49 @@ kubeadm upgrade diff [version] [flags] - + - + - + - + - + - + - + @@ -84,7 +97,7 @@ kubeadm upgrade diff [version] [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md index b9198f77d79da..5bd05a9822e99 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node.md @@ -1,3 +1,16 @@ + + + +Upgrade commands for a node in the cluster ### Synopsis @@ -29,56 +42,56 @@ kubeadm upgrade node [flags] - + - + - + - + - + - + - + - + - + - + @@ -99,7 +112,7 @@ kubeadm upgrade node [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md index 39a2e05ab0aef..6b86c950548ec 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase.md @@ -1,3 +1,16 @@ + + + +Use this command to invoke single phase of the node workflow ### Synopsis @@ -17,7 +30,7 @@ Use this command to invoke single phase of the node workflow - + @@ -38,7 +51,7 @@ Use this command to invoke single phase of the node workflow - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md index 1ca65f50a70d2..835eba68426fc 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_control-plane.md @@ -1,3 +1,16 @@ + + + +Upgrade the control plane instance deployed on this node, if any ### Synopsis @@ -21,42 +34,42 @@ kubeadm upgrade node phase control-plane [flags] - + - + - + - + - + - + @@ -77,7 +90,7 @@ kubeadm upgrade node phase control-plane [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md index a4f5ceeafb7ca..d2b03974c2042 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_kubelet-config.md @@ -1,3 +1,16 @@ + + + +Upgrade the kubelet configuration for this node ### Synopsis @@ -21,21 +34,21 @@ kubeadm upgrade node phase kubelet-config [flags] - + - + - + @@ -56,7 +69,7 @@ kubeadm upgrade node phase kubelet-config [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md index 943e05cb6e498..d82a193898a21 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_node_phase_preflight.md @@ -1,3 +1,16 @@ + + + +Run upgrade node pre-flight checks ### Synopsis @@ -21,14 +34,14 @@ kubeadm upgrade node phase preflight [flags] - + - + - + @@ -49,7 +62,7 
@@ kubeadm upgrade node phase preflight [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md index eaa58b588f9b8..7d16866b9af27 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_upgrade_plan.md @@ -1,3 +1,16 @@ + + + +Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter ### Synopsis @@ -21,56 +34,56 @@ kubeadm upgrade plan [version] [flags] - + - + - + - + - + - + - + - + - + @@ -91,7 +104,7 @@ kubeadm upgrade plan [version] [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md index 658075c4eacbe..b86c7259774d3 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md +++ b/content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm_version.md @@ -1,3 +1,16 @@ + + + +Print the version of kubeadm ### Synopsis @@ -21,14 +34,14 @@ kubeadm version [flags] - + - + @@ -49,7 +62,7 @@ kubeadm version [flags] - + diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index 93f89c594b830..0b373ee42341a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -20,10 +20,17 @@ For more information navigate to [Using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) or [Using kubeadm join with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-join/#config-file). +You can also configure several kubelet-configuration options with `kubeadm init`. These options will be the same on any node in your cluster. +See [Configuring each kubelet in your cluster using kubeadm](/docs/setup/production-environment/tools/kubeadm/kubelet-integration/) for details. + +In Kubernetes v1.13.0 and later to list/pull kube-dns images instead of the CoreDNS image +the `--config` method described [here](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) +has to be used. 
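The kubelet-configuration note added above can be made concrete with a short sketch (not part of this change set): a kubeadm configuration file may carry a `KubeletConfiguration` document next to the `ClusterConfiguration`, and the kubelet settings in it apply to every node joined with that file. The file name, version, and `cgroupDriver` value below are assumptions for illustration only.

```yaml
# Hypothetical kubeadm-config.yaml, used as: kubeadm init --config kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.21.0          # assumed target version for this sketch
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# kubelet settings declared here are propagated to every node in the cluster
cgroupDriver: systemd
```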
+ -## kubeadm config view {#cmd-config-view} +## kubeadm config print {#cmd-config-view} -{{< include "generated/kubeadm_config_view.md" >}} +{{< include "generated/kubeadm_config_print.md" >}} ## kubeadm config print init-defaults {#cmd-config-print-init-defaults} diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index a484c8e74ffe6..9ec9f84c5dcd2 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -30,13 +30,12 @@ The following client libraries are officially maintained by | Language | Client Library | Sample Programs | |----------|----------------|-----------------| +| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) | Go | [github.com/kubernetes/client-go/](https://github.com/kubernetes/client-go/) | [browse](https://github.com/kubernetes/client-go/tree/master/examples) -| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [browse](https://github.com/kubernetes-client/python/tree/master/examples) +| Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [browse](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) | Java | [github.com/kubernetes-client/java](https://github.com/kubernetes-client/java/) | [browse](https://github.com/kubernetes-client/java#installation) -| dotnet | [github.com/kubernetes-client/csharp](https://github.com/kubernetes-client/csharp) | [browse](https://github.com/kubernetes-client/csharp/tree/master/examples/simple) | JavaScript | [github.com/kubernetes-client/javascript](https://github.com/kubernetes-client/javascript) | [browse](https://github.com/kubernetes-client/javascript/tree/master/examples) -| Haskell | [github.com/kubernetes-client/haskell](https://github.com/kubernetes-client/haskell) | [browse](https://github.com/kubernetes-client/haskell/tree/master/kubernetes-client/example) - +| Python | [github.com/kubernetes-client/python/](https://github.com/kubernetes-client/python/) | [browse](https://github.com/kubernetes-client/python/tree/master/examples) ## Community-maintained client libraries @@ -48,6 +47,10 @@ their authors, not the Kubernetes team. | Language | Client Library | | -------------------- | ---------------------------------------- | | Clojure | [github.com/yanatan16/clj-kubernetes-api](https://github.com/yanatan16/clj-kubernetes-api) | +| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | +| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | +| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | +| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | | Go | [github.com/ericchiang/k8s](https://github.com/ericchiang/k8s) | | Java (OSGi) | [bitbucket.org/amdatulabs/amdatu-kubernetes](https://bitbucket.org/amdatulabs/amdatu-kubernetes) | | Java (Fabric8, OSGi) | [github.com/fabric8io/kubernetes-client](https://github.com/fabric8io/kubernetes-client) | @@ -70,16 +73,10 @@ their authors, not the Kubernetes team. 
| Python | [github.com/Frankkkkk/pykorm](https://github.com/Frankkkkk/pykorm) | | Ruby | [github.com/abonas/kubeclient](https://github.com/abonas/kubeclient) | | Ruby | [github.com/Ch00k/kuber](https://github.com/Ch00k/kuber) | +| Ruby | [github.com/k8s-ruby/k8s-ruby](https://github.com/k8s-ruby/k8s-ruby) | | Ruby | [github.com/kontena/k8s-client](https://github.com/kontena/k8s-client) | | Rust | [github.com/clux/kube-rs](https://github.com/clux/kube-rs) | | Rust | [github.com/ynqa/kubernetes-rust](https://github.com/ynqa/kubernetes-rust) | | Scala | [github.com/hagay3/skuber](https://github.com/hagay3/skuber) | | Scala | [github.com/joan38/kubernetes-client](https://github.com/joan38/kubernetes-client) | | Swift | [github.com/swiftkube/client](https://github.com/swiftkube/client) | -| DotNet | [github.com/tonnyeremin/kubernetes_gen](https://github.com/tonnyeremin/kubernetes_gen) | -| DotNet (RestSharp) | [github.com/masroorhasan/Kubernetes.DotNet](https://github.com/masroorhasan/Kubernetes.DotNet) | -| Elixir | [github.com/obmarg/kazan](https://github.com/obmarg/kazan/) | -| Elixir | [github.com/coryodaniel/k8s](https://github.com/coryodaniel/k8s) | - - - diff --git a/content/en/docs/reference/using-api/deprecation-guide.md b/content/en/docs/reference/using-api/deprecation-guide.md index 92fbfe58394db..9f518143b32e2 100755 --- a/content/en/docs/reference/using-api/deprecation-guide.md +++ b/content/en/docs/reference/using-api/deprecation-guide.md @@ -25,6 +25,14 @@ deprecated API versions to newer and more stable API versions. The **v1.25** release will stop serving the following deprecated API versions: +#### CronJob {#cronjob-v125} + +The **batch/v1beta1** API version of CronJob will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **batch/v1** API version, available since v1.21. +* All existing persisted objects are accessible via the new API +* No notable changes + #### EndpointSlice {#endpointslice-v125} The **discovery.k8s.io/v1beta1** API version of EndpointSlice will no longer be served in v1.25. @@ -52,13 +60,14 @@ The **events.k8s.io/v1beta1** API version of Event will no longer be served in v * use `reportingComponent` instead of the deprecated `source.component` field (which is renamed to `deprecatedSource.component` and not permitted in new **events.k8s.io/v1** Events) * use `reportingInstance` instead of the deprecated `source.host` field (which is renamed to `deprecatedSource.host` and not permitted in new **events.k8s.io/v1** Events) -#### RuntimeClass {#runtimeclass-v125} +#### PodDisruptionBudget {#poddisruptionbudget-v125} -RuntimeClass in the **node.k8s.io/v1beta1** API version will no longer be served in v1.25. +The **policy/v1beta1** API version of PodDisruptionBudget will no longer be served in v1.25. -* Migrate manifests and API clients to use the **node.k8s.io/v1** API version, available since v1.20. +* Migrate manifests and API clients to use the **policy/v1** API version, available since v1.21. * All existing persisted objects are accessible via the new API -* No notable changes +* Notable changes in **policy/v1**: + * an empty `spec.selector` (`{}`) written to a `policy/v1` PodDisruptionBudget selects all pods in the namespace (in `policy/v1beta1` an empty `spec.selector` selected no pods). An unset `spec.selector` selects no pods in either API version. 
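To make the PodDisruptionBudget migration above concrete, here is an illustrative manifest that already uses the **policy/v1** API version; the name, label, and `minAvailable` value are placeholders, not taken from this change. Under `policy/v1` an explicit empty selector (`{}`) selects every Pod in the namespace, while omitting `spec.selector` selects none.

```yaml
# Illustrative policy/v1 PodDisruptionBudget (names and values are placeholders)
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: example-pdb
spec:
  minAvailable: 1            # keep at least one matching Pod available during disruptions
  selector:                  # an empty selector ({}) would select every Pod in the namespace
    matchLabels:
      app: example
```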
#### PodSecurityPolicy {#psp-v125} @@ -67,6 +76,14 @@ PodSecurityPolicy in the **policy/v1beta1** API version will no longer be served PodSecurityPolicy replacements are still under discussion, but current use can be migrated to [3rd-party admission webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) now. +#### RuntimeClass {#runtimeclass-v125} + +RuntimeClass in the **node.k8s.io/v1beta1** API version will no longer be served in v1.25. + +* Migrate manifests and API clients to use the **node.k8s.io/v1** API version, available since v1.20. +* All existing persisted objects are accessible via the new API +* No notable changes + ### v1.22 The **v1.22** release will stop serving the following deprecated API versions: diff --git a/content/en/docs/reference/using-api/server-side-apply.md b/content/en/docs/reference/using-api/server-side-apply.md index 81afa5b567171..15026843256ea 100644 --- a/content/en/docs/reference/using-api/server-side-apply.md +++ b/content/en/docs/reference/using-api/server-side-apply.md @@ -224,17 +224,75 @@ merging, see A number of markers were added in Kubernetes 1.16 and 1.17, to allow API developers to describe the merge strategy supported by lists, maps, and structs. These markers can be applied to objects of the respective type, -in Go files or in the OpenAPI schema definition of the +in Go files or in the OpenAPI schema definition of the [CRD](/docs/reference/generated/kubernetes-api/{{< param "version" >}}#jsonschemaprops-v1-apiextensions-k8s-io): | Golang marker | OpenAPI extension | Accepted values | Description | Introduced in | |---|---|---|---|---| -| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `atomic` and `set` apply to lists with scalar elements only. `map` applies to lists of nested types only. If configured as `atomic`, the entire list is replaced during merge; a single manager manages the list as a whole at any one time. If `set` or `map`, different managers can manage entries separately. | 1.16 | -| `//+listMapKey` | `x-kubernetes-list-map-keys` | Slice of map keys that uniquely identify entries for example `["port", "protocol"]` | Only applicable when `+listType=map`. A slice of strings whose values in combination must uniquely identify list entries. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. | 1.16 | +| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `set` applies to lists that include only scalar elements. These elements must be unique. `map` applies to lists of nested types only. The key values (see `listMapKey`) must be unique in the list. `atomic` can apply to any list. If configured as `atomic`, the entire list is replaced during merge. At any point in time, a single manager owns the list. If `set` or `map`, different managers can manage entries separately. | 1.16 | +| `//+listMapKey` | `x-kubernetes-list-map-keys` | List of field names, e.g. `["port", "protocol"]` | Only applicable when `+listType=map`. A list of field names whose values uniquely identify entries in the list. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. The key fields must be scalars. | 1.16 | | `//+mapType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to maps. `atomic` means that the map can only be entirely replaced by a single manager. 
`granular` means that the map supports separate managers updating individual fields. | 1.17 | | `//+structType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to structs; otherwise same usage and OpenAPI annotation as `//+mapType`.| 1.17 | -### Custom Resources +If `listType` is missing, the API server interprets a +`patchMergeStrategy=merge` marker as a `listType=map` and the +corresponding `patchMergeKey` marker as a `listMapKey`. + +The `atomic` list type is recursive. + +These markers are specified as comments and don't have to be repeated as +field tags. + +### Compatibility across topology changes + +On rare occurences, a CRD or built-in type author may want to change the +specific topology of a field in their resource without incrementing its +version. Changing the topology of types, by upgrading the cluster or +updating the CRD, has different consequences when updating existing +objects. There are two categories of changes: when a field goes from +`map`/`set`/`granular` to `atomic` and the other way around. + +When the `listType`, `mapType`, or `structType` changes from +`map`/`set`/`granular` to `atomic`, the whole list, map or struct of +existing objects will end-up being owned by actors who owned an element +of these types. This means that any further change to these objects +would cause a conflict. + +When a list, map, or struct changes from `atomic` to +`map`/`set`/`granular`, the API server won't be able to infer the new +ownership of these fields. Because of that, no conflict will be produced +when objects have these fields updated. For that reason, it is not +recommended to change a type from `atomic` to `map`/`set`/`granular`. + +Take for example, the custom resource: + +```yaml +apiVersion: example.com/v1 +kind: Foo +metadata: + name: foo-sample + managedFields: + - manager: manager-one + operation: Apply + apiVersion: example.com/v1 + fields: + f:spec: + f:data: {} +spec: + data: + key1: val1 + key2: val2 +``` + +Before `spec.data` gets changed from `atomic` to `granular`, +`manager-one` owns the field `spec.data`, and all the fields within it +(`key1` and `key2`). When the CRD gets changed to make `spec.data` +`granular`, `manager-one` continues to own the top-level field +`spec.data` (meaning no other managers can delete the map called `data` +without a conflict), but it no longer owns `key1` and `key2`, so another +manager can then modify or delete those fields without conflict. + +## Custom Resources By default, Server Side Apply treats custom resources as unstructured data. All keys are treated the same as struct fields, and all lists are considered atomic. @@ -245,7 +303,7 @@ that contains annotations as defined in the previous "Merge Strategy" section, these annotations will be used when merging objects of this type. -### Using Server-Side Apply in a controller +## Using Server-Side Apply in a controller As a developer of a controller, you can use server-side apply as a way to simplify the update logic of your controller. The main differences with a @@ -260,7 +318,7 @@ read-modify-write and/or patch are the following: It is strongly recommended for controllers to always "force" conflicts, since they might not be able to resolve or act on these conflicts. 
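As a sketch of how the markers from the table above surface in a CRD's OpenAPI schema (the field names here are invented for illustration), a list whose entries should be owned by different field managers can be declared with `x-kubernetes-list-type: map` plus the key fields that uniquely identify each entry:

```yaml
# Hypothetical excerpt of a CustomResourceDefinition structural schema
openAPIV3Schema:
  type: object
  properties:
    spec:
      type: object
      properties:
        ports:
          type: array
          x-kubernetes-list-type: map      # entries can be managed by different field managers
          x-kubernetes-list-map-keys:      # port + protocol together identify an entry
            - port
            - protocol
          items:
            type: object
            properties:
              port:
                type: integer
              protocol:
                type: string
```

Declaring the same list as `x-kubernetes-list-type: atomic` would instead hand ownership of the whole list to a single manager, which is why changing a type between `atomic` and `map`/`set`/`granular` has the ownership consequences described above.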
-### Transferring Ownership +## Transferring Ownership In addition to the concurrency controls provided by [conflict resolution](#conflicts), Server Side Apply provides ways to perform coordinated @@ -329,7 +387,7 @@ Note that whenever the HPA controller sets the `replicas` field to a new value, the temporary field manager will no longer own any fields and will be automatically deleted. No clean up is required. -## Transferring Ownership Between Users +### Transferring Ownership Between Users Users can transfer ownership of a field between each other by setting the field to the same value in both of their applied configs, causing them to share @@ -458,4 +516,3 @@ Server Side Apply is a beta feature, so it is enabled by default. To turn this you need to include the `--feature-gates ServerSideApply=false` flag when starting `kube-apiserver`. If you have multiple `kube-apiserver` replicas, all should have the same flag setting. - diff --git a/content/en/docs/setup/production-environment/_index.md b/content/en/docs/setup/production-environment/_index.md index 346280893eb8e..27308495becd2 100644 --- a/content/en/docs/setup/production-environment/_index.md +++ b/content/en/docs/setup/production-environment/_index.md @@ -1,4 +1,293 @@ --- -title: Production environment +title: "Production environment" +description: Create a production-quality Kubernetes cluster weight: 30 +no_list: true --- + + +A production-quality Kubernetes cluster requires planning and preparation. +If your Kubernetes cluster is to run critical workloads, it must be configured to be resilient. +This page explains steps you can take to set up a production-ready cluster, +or to uprate an existing cluster for production use. +If you're already familiar with production setup and want the links, skip to +[What's next](#what-s-next). + + + +## Production considerations + +Typically, a production Kubernetes cluster environment has more requirements than a +personal learning, development, or test environment Kubernetes. A production environment may require +secure access by many users, consistent availability, and the resources to adapt +to changing demands. + +As you decide where you want your production Kubernetes environment to live +(on premises or in a cloud) and the amount of management you want to take +on or hand to others, consider how your requirements for a Kubernetes cluster +are influenced by the following issues: + +- *Availability*: A single-machine Kubernetes [learning environment](/docs/setup/#learning-environment) +has a single point of failure. Creating a highly available cluster means considering: + - Separating the control plane from the worker nodes. + - Replicating the control plane components on multiple nodes. + - Load balancing traffic to the cluster’s {{< glossary_tooltip term_id="kube-apiserver" text="API server" >}}. + - Having enough worker nodes available, or able to quickly become available, as changing workloads warrant it. + +- *Scale*: If you expect your production Kubernetes environment to receive a stable amount of +demand, you might be able to set up for the capacity you need and be done. However, +if you expect demand to grow over time or change dramatically based on things like +season or special events, you need to plan how to scale to relieve increased +pressure from more requests to the control plane and worker nodes or scale down to reduce unused +resources. + +- *Security and access management*: You have full admin privileges on your own +Kubernetes learning cluster. 
But shared clusters with important workloads, and +more than one or two users, require a more refined approach to who and what can +access cluster resources. You can use role-based access control +([RBAC](/docs/reference/access-authn-authz/rbac/)) and other +security mechanisms to make sure that users and workloads can get access to the +resources they need, while keeping workloads, and the cluster itself, secure. +You can set limits on the resources that users and workloads can access +by managing [policies](https://kubernetes.io/docs/concepts/policy/) and +[container resources](/docs/concepts/configuration/manage-resources-containers/). + +Before building a Kubernetes production environment on your own, consider +handing off some or all of this job to +[Turnkey Cloud Solutions](/docs/setup/production-environment/turnkey-solutions/) +providers or other [Kubernetes Partners](https://kubernetes.io/partners/). +Options include: + +- *Serverless*: Just run workloads on third-party equipment without managing +a cluster at all. You will be charged for things like CPU usage, memory, and +disk requests. +- *Managed control plane*: Let the provider manage the scale and availability +of the cluster's control plane, as well as handle patches and upgrades. +- *Managed worker nodes*: Configure pools of nodes to meet your needs, +then the provider makes sure those nodes are available and ready to implement +upgrades when needed. +- *Integration*: There are providers that integrate Kubernetes with other +services you may need, such as storage, container registries, authentication +methods, and development tools. + +Whether you build a production Kubernetes cluster yourself or work with +partners, review the following sections to evaluate your needs as they relate +to your cluster’s *control plane*, *worker nodes*, *user access*, and +*workload resources*. + +## Production cluster setup + +In a production-quality Kubernetes cluster, the control plane manages the +cluster from services that can be spread across multiple computers +in different ways. Each worker node, however, represents a single entity that +is configured to run Kubernetes pods. + +### Production control plane + +The simplest Kubernetes cluster has the entire control plane and worker node +services running on the same machine. You can grow that environment by adding +worker nodes, as reflected in the diagram illustrated in +[Kubernetes Components](/docs/concepts/overview/components/). +If the cluster is meant to be available for a short period of time, or can be +discarded if something goes seriously wrong, this might meet your needs. + +If you need a more permanent, highly available cluster, however, you should +consider ways of extending the control plane. By design, one-machine control +plane services running on a single machine are not highly available. +If keeping the cluster up and running +and ensuring that it can be repaired if something goes wrong is important, +consider these steps: + +- *Choose deployment tools*: You can deploy a control plane using tools such +as kubeadm, kops, and kubespray. See +[Installing Kubernetes with deployment tools](/docs/setup/production-environment/tools/) +to learn tips for production-quality deployments using each of those deployment +methods. Different [Container Runtimes](/docs/setup/production-environment/container-runtimes/) +are available to use with your deployments. +- *Manage certificates*: Secure communications between control plane services +are implemented using certificates. 
Certificates are automatically generated +during deployment, or you can generate them using your own certificate authority. +See [PKI certificates and requirements](/docs/setup/best-practices/certificates/) for details. +- *Configure load balancer for apiserver*: Configure a load balancer +to distribute external API requests to the apiserver service instances running on different nodes. See +[Create an External Load Balancer](/docs/tasks/access-application-cluster/create-external-load-balancer/) +for details. +- *Separate and back up the etcd service*: The etcd services can either run on the +same machines as other control plane services or run on separate machines, for +extra security and availability. Because etcd stores cluster configuration data, +backing up the etcd database should be done regularly to ensure that you can +repair that database if needed. +See the [etcd FAQ](https://etcd.io/docs/v3.4/faq/) for details on configuring and using etcd. +See [Operating etcd clusters for Kubernetes](/docs/tasks/administer-cluster/configure-upgrade-etcd/) +and [Set up a High Availability etcd cluster with kubeadm](/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) +for details. +- *Create multiple control plane systems*: For high availability, the +control plane should not be limited to a single machine. If the control plane +services are run by an init service (such as systemd), each service should run on at +least three machines. However, running control plane services as pods in +Kubernetes ensures that the replicated number of services that you request +will always be available. +The scheduler should be fault tolerant, +but not highly available. Some deployment tools set up the [Raft](https://raft.github.io/) +consensus algorithm to do leader election of Kubernetes services. If the +primary goes away, another service elects itself and takes over. +- *Span multiple zones*: If keeping your cluster available at all times is +critical, consider creating a cluster that runs across multiple data centers, +referred to as zones in cloud environments. Groups of zones are referred to as regions. +Spreading a cluster across +multiple zones in the same region improves the chances that your +cluster will continue to function even if one zone becomes unavailable. +See [Running in multiple zones](/docs/setup/best-practices/multiple-zones/) for details. +- *Manage ongoing features*: If you plan to keep your cluster over time, +there are tasks you need to do to maintain its health and security. For example, +if you installed with kubeadm, there are instructions to help you with +[Certificate Management](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/) +and [Upgrading kubeadm clusters](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/). +See [Administer a Cluster](/docs/tasks/administer-cluster/) +for a longer list of Kubernetes administrative tasks. + +To learn about available options when you run control plane services, see +[kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/), +[kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/), +and [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) +component pages.
For highly available control plane examples, see +[Options for Highly Available topology](/docs/setup/production-environment/tools/kubeadm/ha-topology/), +[Creating Highly Available clusters with kubeadm](/docs/setup/production-environment/tools/kubeadm/high-availability/), +and [Operating etcd clusters for Kubernetes](/docs/tasks/administer-cluster/configure-upgrade-etcd/). +See [Backing up an etcd cluster](/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster) +for information on making an etcd backup plan. + +### Production worker nodes + +Production-quality workloads need to be resilient and anything they rely +on needs to be resilient (such as CoreDNS). Whether you manage your own +control plane or have a cloud provider do it for you, you still need to +consider how you want to manage your worker nodes (also referred to +simply as *nodes*). + +- *Configure nodes*: Nodes can be physical or virtual machines. If you want to +create and manage your own nodes, you can install a supported operating system, +then add and run the appropriate +[Node services](/docs/concepts/overview/components/#node-components). Consider: + - The demands of your workloads: when you set up nodes, make sure appropriate memory, CPU, disk speed, and storage capacity are available. + - Whether generic computer systems will do, or whether you have workloads that need GPU processors, Windows nodes, or VM isolation. +- *Validate nodes*: See [Validate node setup](/docs/setup/best-practices/node-conformance/) +for information on how to ensure that a node meets the requirements to join +a Kubernetes cluster. +- *Add nodes to the cluster*: If you are managing your own cluster, you can +add nodes by setting up your own machines and either adding them manually or +having them register themselves to the cluster’s apiserver. See the +[Nodes](/docs/concepts/architecture/nodes/) section for information on how to set up Kubernetes to add nodes in these ways; a kubeadm join configuration is sketched after this list. +- *Add Windows nodes to the cluster*: Kubernetes offers support for Windows +worker nodes, allowing you to run workloads implemented in Windows containers. See +[Windows in Kubernetes](/docs/setup/production-environment/windows/) for details. +- *Scale nodes*: Have a plan for expanding the capacity your cluster will +eventually need. See [Considerations for large clusters](/docs/setup/best-practices/cluster-large/) +to help determine how many nodes you need, based on the number of pods and +containers you need to run. If you are managing nodes yourself, this can mean +purchasing and installing your own physical equipment. +- *Autoscale nodes*: Most cloud providers support +[Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#readme) +to replace unhealthy nodes or grow and shrink the number of nodes as demand requires. See the +[Frequently Asked Questions](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md) +for how the autoscaler works and +[Deployment](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler#deployment) +for how it is implemented by different cloud providers. For on-premises clusters, there +are some virtualization platforms that can be scripted to spin up new nodes +based on demand. +- *Set up node health checks*: For important workloads, you want to make sure +that the nodes and pods running on those nodes are healthy. Using the +[Node Problem Detector](/docs/tasks/debug-application-cluster/monitor-node-health/) +daemon, you can ensure your nodes are healthy.
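The following is a minimal, hypothetical sketch of the kubeadm route for registering a worker node mentioned in *Add nodes to the cluster* above; the endpoint, token, hash, node name, and label are placeholders, not values from this page:

```yaml
# Hypothetical values: replace the endpoint, token, CA hash, and node name
# with the ones from your own cluster before using this file.
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: "10.0.0.10:6443"        # control plane or load balancer address
    token: "abcdef.0123456789abcdef"           # bootstrap token created on a control plane node
    caCertHashes:
      - "sha256:<hash-of-the-cluster-ca-certificate>"
nodeRegistration:
  name: "worker-01"                            # defaults to the host name if omitted
  kubeletExtraArgs:
    node-labels: "example.com/pool=general"    # optional labels applied at registration
```

Running `kubeadm join --config <file>` on the new machine registers it with the cluster's apiserver, which is one concrete form of the self-registration described above.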
+ +## Production user management + +In production, you may be moving from a model where you or a small group of +people are accessing the cluster to where there may be dozens or +hundreds of people. In a learning environment or platform prototype, you might have a single +administrative account for everything you do. In production, you will want +more accounts with different levels of access to different namespaces. + +Taking on a production-quality cluster means deciding how you +want to selectively allow access by other users. In particular, you need to +select strategies for validating the identities of those who try to access your +cluster (authentication) and deciding if they have permissions to do what they +are asking (authorization): + +- *Authentication*: The apiserver can authenticate users using client +certificates, bearer tokens, an authenticating proxy, or HTTP basic auth. +You can choose which authentication methods you want to use. +Using plugins, the apiserver can leverage your organization’s existing +authentication methods, such as LDAP or Kerberos. See +[Authentication](/docs/reference/access-authn-authz/authentication/) +for a description of these different methods of authenticating Kubernetes users. +- *Authorization*: When you set out to authorize your regular users, you will probably choose between RBAC and ABAC authorization. See [Authorization Overview](/docs/reference/access-authn-authz/authorization/) to review different modes for authorizing user accounts (as well as service account access to your cluster): + - *Role-based access control* ([RBAC](/docs/reference/access-authn-authz/rbac/)): Lets you assign access to your cluster by allowing specific sets of permissions to authenticated users. Permissions can be assigned for a specific namespace (Role) or across the entire cluster (ClusterRole). Then using RoleBindings and ClusterRoleBindings, those permissions can be attached to particular users. + - *Attribute-based access control* ([ABAC](/docs/reference/access-authn-authz/abac/)): Lets you create policies based on resource attributes in the cluster and will allow or deny access based on those attributes. Each line of a policy file identifies versioning properties (apiVersion and kind) and a map of spec properties to match the subject (user or group), resource property, non-resource property (/version or /apis), and readonly. See [Examples](/docs/reference/access-authn-authz/abac/#examples) for details. + +As someone setting up authentication and authorization on your production Kubernetes cluster, here are some things to consider: + +- *Set the authorization mode*: When the Kubernetes API server +([kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/)) +starts, the supported authorization modes must be set using the *--authorization-mode* +flag. For example, that flag in the *kube-apiserver.yaml* file (in */etc/kubernetes/manifests*) +could be set to Node,RBAC. This would allow Node and RBAC authorization for authenticated requests. +- *Create user certificates and role bindings (RBAC)*: If you are using RBAC +authorization, users can create a CertificateSigningRequest (CSR) that can be +signed by the cluster CA. Then you can bind Roles and ClusterRoles to each user. +See [Certificate Signing Requests](/docs/reference/access-authn-authz/certificate-signing-requests/) +for details.
+- *Create policies that combine attributes (ABAC)*: If you are using ABAC +authorization, you can assign combinations of attributes to form policies to +authorize selected users or groups to access particular resources (such as a +pod), namespace, or apiGroup. For more information, see +[Examples](/docs/reference/access-authn-authz/abac/#examples). +- *Consider Admission Controllers*: Additional forms of authorization for +requests that can come in through the API server include +[Webhook Token Authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication). +Webhooks and other special authorization types need to be enabled by adding +[Admission Controllers](/docs/reference/access-authn-authz/admission-controllers/) +to the API server. + +## Set limits on workload resources + +Demands from production workloads can cause pressure both inside and outside +of the Kubernetes control plane. Consider these items when setting up for the +needs of your cluster's workloads: + +- *Set namespace limits*: Set per-namespace quotas on things like memory and CPU. See +[Manage Memory, CPU, and API Resources](/docs/tasks/administer-cluster/manage-resources/) +for details. You can also set +[Hierarchical Namespaces](/blog/2020/08/14/introducing-hierarchical-namespaces/) +for inheriting limits. +- *Prepare for DNS demand*: If you expect workloads to massively scale up, +your DNS service must be ready to scale up as well. See +[Autoscale the DNS service in a Cluster](/docs/tasks/administer-cluster/dns-horizontal-autoscaling/). +- *Create additional service accounts*: User accounts determine what users can +do on a cluster, while a service account defines pod access within a particular +namespace. By default, a pod takes on the default service account from its namespace. +See [Managing Service Accounts](/docs/reference/access-authn-authz/service-accounts-admin/) +for information on creating a new service account. For example, you might want to: + - Add secrets that a pod could use to pull images from a particular container registry. See [Configure Service Accounts for Pods](/docs/tasks/configure-pod-container/configure-service-account/) for an example. + - Assign RBAC permissions to a service account. See [ServiceAccount permissions](/docs/reference/access-authn-authz/rbac/#service-account-permissions) for details. + +## What's next {#what-s-next} + +- Decide if you want to build your own production Kubernetes or obtain one from +available [Turnkey Cloud Solutions](/docs/setup/production-environment/turnkey-solutions/) +or [Kubernetes Partners](https://kubernetes.io/partners/). +- If you choose to build your own cluster, plan how you want to +handle [certificates](/docs/setup/best-practices/certificates/) +and set up high availability for features such as +[etcd](/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) +and the +[API server](/docs/setup/production-environment/tools/kubeadm/ha-topology/). +- Choose from [kubeadm](/docs/setup/production-environment/tools/kubeadm/), [kops](/docs/setup/production-environment/tools/kops/) or [Kubespray](/docs/setup/production-environment/tools/kubespray/) +deployment methods. +- Configure user management by determining your +[Authentication](/docs/reference/access-authn-authz/authentication/) and +[Authorization](docs/reference/access-authn-authz/authorization/) methods. 
+- Prepare for application workloads by setting up +[resource limits](docs/tasks/administer-cluster/manage-resources/), +[DNS autoscaling](/docs/tasks/administer-cluster/dns-horizontal-autoscaling/) +and [service accounts](/docs/reference/access-authn-authz/service-accounts-admin/). diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index b29663621239e..ebbd11f0810f7 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -59,7 +59,7 @@ configuration, or reinstall it using automation. ### Migrating to the `systemd` driver in kubeadm managed clusters -Follow this [Migration guide](/tasks/administer-cluster/kubeadm/configure-cgroup-driver) +Follow this [Migration guide](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/) if you wish to migrate to the `systemd` cgroup driver in existing kubeadm managed clusters. ## Container runtimes diff --git a/content/en/docs/setup/production-environment/tools/kops.md b/content/en/docs/setup/production-environment/tools/kops.md index 4afab697e420f..cf5333a92d078 100644 --- a/content/en/docs/setup/production-environment/tools/kops.md +++ b/content/en/docs/setup/production-environment/tools/kops.md @@ -56,10 +56,10 @@ To download a specific version, replace the following portion of the command wit $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-darwin-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-darwin-amd64 ``` Make the kops binary executable. @@ -94,10 +94,10 @@ To download a specific version of kops, replace the following portion of the com $(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) ``` -For example, to download kops version v1.15.0 type: +For example, to download kops version v1.20.0 type: ```shell -curl -LO https://github.com/kubernetes/kops/releases/download/1.15.0/kops-linux-amd64 +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-linux-amd64 ``` Make the kops binary executable diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index fc532286855d5..5de8afd20bca5 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -408,7 +408,7 @@ be advised that this is modifying a design principle of the Linux distribution. ## `kubeadm upgrade plan` prints out `context deadline exceeded` error message -This error message is shown when upgrading a Kubernetes cluster with `kubeadm` in the case of running an external etcd. This is not a critical bug and happens because older versions of kubeadm perform a version check on the external etcd cluster. You can proceed with `kubeadm upgrade apply ...`. +This error message is shown when upgrading a Kubernetes cluster with `kubeadm` in the case of running an external etcd. 
This is not a critical bug and happens because older versions of kubeadm perform a version check on the external etcd cluster. You can proceed with `kubeadm upgrade apply ...`. This issue is fixed as of version 1.19. @@ -420,3 +420,20 @@ To workaround the issue, re-mount the `/var/lib/kubelet` directory after perform This is a regression introduced in kubeadm 1.15. The issue is fixed in 1.20. +## Cannot use the metrics-server securely in a kubeadm cluster + +In a kubeadm cluster, the [metrics-server](https://github.com/kubernetes-sigs/metrics-server) +can be used insecurely by passing the `--kubelet-insecure-tls` flag to it. This is not recommended for production clusters. + +If you want to use TLS between the metrics-server and the kubelet, there is a problem, +since kubeadm deploys a self-signed serving certificate for the kubelet. This can cause the following errors +on the side of the metrics-server: +``` +x509: certificate signed by unknown authority +x509: certificate is valid for IP-foo not IP-bar +``` + +See [Enabling signed kubelet serving certificates](/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs) +to understand how to configure the kubelets in a kubeadm cluster to have properly signed serving certificates. + +Also see [How to run the metrics-server securely](https://github.com/kubernetes-sigs/metrics-server/blob/master/FAQ.md#how-to-run-metrics-server-securely). diff --git a/content/en/docs/setup/production-environment/tools/kubespray.md b/content/en/docs/setup/production-environment/tools/kubespray.md index c4300b53a2b9c..08893370cf004 100644 --- a/content/en/docs/setup/production-environment/tools/kubespray.md +++ b/content/en/docs/setup/production-environment/tools/kubespray.md @@ -50,7 +50,7 @@ Kubespray provides the following utilities to help provision your environment: ### (2/5) Compose an inventory file -After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/intro_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". +After you provision your servers, create an [inventory file for Ansible](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html). You can do this manually or via a dynamic inventory script. For more information, see "[Building your own inventory](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)". ### (3/5) Plan your cluster deployment diff --git a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index b8d5cd3c9b4cc..a2055ce4257ed 100644 --- a/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/en/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -38,7 +38,7 @@ In this document, when we talk about Windows containers we mean Windows containe Refer to the following table for Windows operating system support in Kubernetes. A single heterogeneous Kubernetes cluster can have both Windows and Linux worker nodes. Windows containers have to be scheduled on Windows nodes and Linux containers on Linux nodes.
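As a hedged illustration (an addition, not from the upstream page): workloads are typically steered to the right operating system with the built-in `kubernetes.io/os` node label, for example:

```yaml
# Illustrative only: the Pod name and container image are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: iis-example
spec:
  nodeSelector:
    kubernetes.io/os: windows        # schedule onto Windows nodes only
  containers:
    - name: iis
      image: mcr.microsoft.com/windows/servercore/iis:windowsservercore-ltsc2019
      ports:
        - containerPort: 80
```

Linux workloads can use `kubernetes.io/os: linux` in the same way. The supported Windows Server versions are listed in the table below.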
| Kubernetes version | Windows Server LTSC releases | Windows Server SAC releases | -| --- | --- | --- | --- | +| --- | --- | --- | | *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 | | *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 | | *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | @@ -215,10 +215,11 @@ On Windows, you can use the following settings to configure Services and load ba {{< table caption="Windows Service Settings" >}} | Feature | Description | Supported Kubernetes version | Supported Windows OS build | How to enable | | ------- | ----------- | ----------------------------- | -------------------------- | ------------- | -| Session affinity | Ensures that connections from a particular client are passed to the same Pod each time. | v1.19+ | [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) (or higher) | Set `service.spec.sessionAffinity` to "ClientIP" | -| Direct Server Return | Load balancing mode where the IP address fixups and the LBNAT occurs at the container vSwitch port directly; service traffic arrives with the source IP set as the originating pod IP. Promises lower latency and scalability. | v1.15+ | Windows Server, version 2004 | Set the following flags in kube-proxy: `--feature-gates="WinDSR=true" --enable-dsr=true` | -| Preserve-Destination | Skips DNAT of service traffic, thereby preserving the virtual IP of the target service in packets reaching the backend Pod. This setting will also ensure that the client IP of incoming packets get preserved. | v1.15+ | Windows Server, version 1903 (or higher) | Set `"preserve-destination": "true"` in service annotations and enable DSR flags in kube-proxy. | -| IPv4/IPv6 dual-stack networking | Native IPv4-to-IPv4 in parallel with IPv6-to-IPv6 communications to, from, and within a cluster | v1.19+ | Windows Server vNext Insider Preview Build 19603 (or higher) | See [IPv4/IPv6 dual-stack](#ipv4ipv6-dual-stack) | +| Session affinity | Ensures that connections from a particular client are passed to the same Pod each time. | v1.20+ | [Windows Server vNext Insider Preview Build 19551](https://blogs.windows.com/windowsexperience/2020/01/28/announcing-windows-server-vnext-insider-preview-build-19551/) (or higher) | Set `service.spec.sessionAffinity` to "ClientIP" | +| Direct Server Return (DSR) | Load balancing mode where the IP address fixups and the LBNAT occurs at the container vSwitch port directly; service traffic arrives with the source IP set as the originating pod IP. | v1.20+ | Windows Server 2019 | Set the following flags in kube-proxy: `--feature-gates="WinDSR=true" --enable-dsr=true` | +| Preserve-Destination | Skips DNAT of service traffic, thereby preserving the virtual IP of the target service in packets reaching the backend Pod. Also disables node-node forwarding. | v1.20+ | Windows Server, version 1903 (or higher) | Set `"preserve-destination": "true"` in service annotations and enable DSR in kube-proxy. | +| IPv4/IPv6 dual-stack networking | Native IPv4-to-IPv4 in parallel with IPv6-to-IPv6 communications to, from, and within a cluster | v1.19+ | Windows Server, version 2004 (or higher) | See [IPv4/IPv6 dual-stack](#ipv4ipv6-dual-stack) | +| Client IP preservation | Ensures that source IP of incoming ingress traffic gets preserved. Also disables node-node forwarding. 
| v1.20+ | Windows Server, version 2019 (or higher) | Set `service.spec.externalTrafficPolicy` to "Local" and enable DSR in kube-proxy | {{< /table >}} #### IPv4/IPv6 dual-stack @@ -314,9 +315,9 @@ The following networking functionality is not supported on Windows nodes * Local NodePort access from the node itself fails (works for other nodes or external clients) * Accessing service VIPs from nodes will be available with a future release of Windows Server * A single service can only support up to 64 backend pods / unique destination IPs -* Overlay networking support in kube-proxy is an alpha release. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 -* Local Traffic Policy and DSR mode -* Windows containers connected to l2bridge, l2tunnel, or overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable these network drivers to consume IPv6 addresses and subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. +* Overlay networking support in kube-proxy is a beta feature. In addition, it requires [KB4482887](https://support.microsoft.com/en-us/help/4482887/windows-10-update-kb4482887) to be installed on Windows Server 2019 +* Local Traffic Policy in non-DSR mode +* Windows containers connected to overlay networks do not support communicating over the IPv6 stack. There is outstanding Windows platform work required to enable this network driver to consume IPv6 addresses and subsequent Kubernetes work in kubelet, kube-proxy, and CNI plugins. * Outbound communication using the ICMP protocol via the win-overlay, win-bridge, and Azure-CNI plugin. Specifically, the Windows data plane ([VFP](https://www.microsoft.com/en-us/research/project/azure-virtual-filtering-platform/)) doesn't support ICMP packet transpositions. This means: * ICMP packets directed to destinations within the same network (e.g. 
pod to pod communication via ping) work as expected and without any limitations * TCP/UDP packets work as expected and without any limitations diff --git a/content/en/docs/setup/release/notes.md b/content/en/docs/setup/release/notes.md index 54146007a0dd6..2741de7e50c06 100644 --- a/content/en/docs/setup/release/notes.md +++ b/content/en/docs/setup/release/notes.md @@ -1,5 +1,5 @@ --- -title: v1.20 Release Notes +title: v1.21 Release Notes weight: 10 card: name: release-notes @@ -13,953 +13,671 @@ card: -# v1.20.0 +# v1.21.0 [Documentation](https://docs.k8s.io) -## Downloads for v1.20.0 +## Downloads for v1.21.0 + +### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes.tar.gz) | `ebfe49552bbda02807034488967b3b62bf9e3e507d56245e298c4c19090387136572c1fca789e772a5e8a19535531d01dcedb61980e42ca7b0461d3864df2c14` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-src.tar.gz) | `bcbd67ed0bb77840828c08c6118ad0c9bf2bcda16763afaafd8731fd6ce735be654feef61e554bcc34c77c65b02a25dae565adc5e1dc49a2daaa0d115bf1efe6` +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes.tar.gz) | `19bb76a3fa5ce4b9f043b2a3a77c32365ab1fcb902d8dd6678427fb8be8f49f64a5a03dc46aaef9c7dadee05501cf83412eda46f0edacbb8fc1ed0bf5fb79142` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-src.tar.gz) | `f942e6d6c10007a6e9ce21e94df597015ae646a7bc3e515caf1a3b79f1354efb9aff59c40f2553a8e3d43fe4a01742241f5af18b69666244906ed11a22e3bc49` ### Client Binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-darwin-amd64.tar.gz) | `3609f6483f4244676162232b3294d7a2dc40ae5bdd86a842a05aa768f5223b8f50e1d6420fd8afb2d0ce19de06e1d38e5e5b10154ba0cb71a74233e6dc94d5a0` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-386.tar.gz) | `e06c08016a08137d39804383fdc33a40bb2567aa77d88a5c3fd5b9d93f5b581c635b2c4faaa718ed3bb2d120cb14fe91649ed4469ba72c3a3dda1e343db545ed` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-amd64.tar.gz) | `081472833601aa4fa78e79239f67833aa4efcb4efe714426cd01d4ddf6f36fbf304ef7e1f5373bff0fdff44a845f7560165c093c108bd359b5ab4189f36b1f2f` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm.tar.gz) | `037f84a2f29fe62d266cab38ac5600d058cce12cbc4851bcf062fafba796c1fbe23a0c2939cd15784854ca7cd92383e5b96a11474fc71fb614b47dbf98a477d9` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm64.tar.gz) | `275727e1796791ca3cbe52aaa713a2660404eab6209466fdc1cfa8559c9b361fe55c64c6bcecbdeba536b6d56213ddf726e58adc60f959b6f77e4017834c5622` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-ppc64le.tar.gz) | `7a9965293029e9fcdb2b7387467f022d2026953b8461e6c84182abf35c28b7822d2389a6d8e4d8e532d2ea5d5d67c6fee5fb6c351363cb44c599dc8800649b04` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-s390x.tar.gz) | `85fc449ce1980f5f030cc32e8c8e2198c1cc91a448e04b15d27debc3ca56aa85d283f44b4f4e5fed26ac96904cc12808fa3e9af3d8bf823fc928befb9950d6f5` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-386.tar.gz) | `4c0a27dba1077aaee943e0eb7a787239dd697e1d968e78d1933c1e60b02d5d233d58541d5beec59807a4ffe3351d5152359e11da120bf64cacb3ee29fbc242e6` 
-[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-amd64.tar.gz) | `29336faf7c596539b8329afbbdceeddc843162501de4afee44a40616278fa1f284d8fc48c241fc7d52c65dab70f76280cc33cec419c8c5dbc2625d9175534af8` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-amd64.tar.gz) | `be9d1440e418e5253fb8a3d8aba705ca8160746a9bd17325ad626a986b6da9f733af864155a651a32b7bca94b533b8d596005ddbe5248bdeea85db47a1b957ed` +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-arm64.tar.gz) | `eed0ddc81d104bb2d41ace13f737c490423d5df4ebddc7376e45c18ed66af35933c9376b912c1c3da105945b04056f6ca0870c156bee8a307cf4189ca5eb1dd1` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-386.tar.gz) | `8a2f30c4434199762f2a96141dab4241c1cce2711bea9ea39cc63c2c5e7d31719ed7f076efac1931604e3a94578d3bbf0cfa454965708c96f3cfb91789868746` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-amd64.tar.gz) | `cd3cfa645fa31de3716f1f63506e31b73d2aa8d37bb558bb3b3e8c151f35b3d74d44e03cbd05be67e380f9a5d015aba460222afdac6677815cd99a85c2325cf0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm.tar.gz) | `936042aa11cea0f6dfd2c30fc5dbe655420b34799bede036b1299a92d6831f589ca10290b73b9c9741560b603ae31e450ad024e273f2b4df5354bfac272691d8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm64.tar.gz) | `42beb75364d7bf4bf526804b8a35bd0ab3e124b712e9d1f45c1b914e6be0166619b30695feb24b3eecef134991dacb9ab3597e788bd9e45cf35addddf20dd7f6` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-ppc64le.tar.gz) | `4baba2ed7046b28370eccc22e2378ae79e3ce58220d6f4f1b6791e8233bec8379e30200bb20b971456b83f2b791ea166fdfcf1ea56908bc1eea03590c0eda468` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-s390x.tar.gz) | `37fa0c4d703aef09ce68c10ef3e7362b0313c8f251ce38eea579cd18fae4023d3d2b70e0f31577cabe6958ab9cfc30e98d25a7c64e69048b423057c3cf728339` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-386.tar.gz) | `6900db36c1e3340edfd6dfd8d720575a904c932d39a8a7fa36401595e971a0235bd42111dbcc1cbb77e7374e47f1380a68c637997c18f96a0d9cdc9f3714c4c9` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-amd64.tar.gz) | `90de67f6f79fc63bcfdf35066e3d84501cc85433265ffad36fd1a7a428a31b446249f0644a1e97495ea8b2a08e6944df6ef30363003750339edaa2aceffe937c` ### Server Binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-amd64.tar.gz) | `fb56486a55dbf7dbacb53b1aaa690bae18d33d244c72a1e2dc95fb0fcce45108c44ba79f8fa04f12383801c46813dc33d2d0eb2203035cdce1078871595e446e` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm.tar.gz) | `735ed9993071fe35b292bf06930ee3c0f889e3c7edb983195b1c8e4d7113047c12c0f8281fe71879fc2fcd871e1ee587f03b695a03c8512c873abad444997a19` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm64.tar.gz) | `ffab155531d5a9b82487ee1abf4f6ef49626ea58b2de340656a762e46cf3e0f470bdbe7821210901fe1114224957c44c1d9cc1e32efb5ee24e51fe63990785b2` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-ppc64le.tar.gz) | 
`9d5730d35c4ddfb4c5483173629fe55df35d1e535d96f02459468220ac2c97dc01b995f577432a6e4d1548b6edbfdc90828dc9c1f7cf7464481af6ae10aaf118` -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-s390x.tar.gz) | `6e4c165306940e8b99dd6e590f8542e31aed23d2c7a6808af0357fa425cec1a57016dd66169cf2a95f8eb8ef70e1f29e2d500533aae889e2e3d9290d04ab8721` +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-amd64.tar.gz) | `3941dcc2309ac19ec185603a79f5a086d8a198f98c04efa23f15a177e5e1f34946ea9392ba9f5d24d0d727839438f067fef1001fc6e88b27b8b01e35bbd962ca` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm.tar.gz) | `6507abf6c2ec2b336901dc23269f6c577ec0049b8bad3c9dd6ad63f21aa10f09bfbbfa6e064c2466d250411d3e10f8672791a9e10942e38de7bfbaf7a8bcc9da` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm64.tar.gz) | `5abe76f867ca6865344e957bf166b81766c049ec4eb183a8a5580c22a7f8474db1edf90fd901a5833e56128b6825811653a1d27f72fd34ce5b1287a8c10da05c` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-ppc64le.tar.gz) | `62507b182ca25396a285d91241536860e58f54fac937e97cbdf91948c83bb41be97d33277400489bf50e85164d560205540b76e94e5d519892312bdc63df1067` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-s390x.tar.gz) | `04f2a1f7d1388e4a7d7d9f597f872a3da36f26839cfed16aad6df07021c03f4dca1df06b19cfda56df09d1c2d9a13ebd0af40ca1b9b6aecfaf427ab7712d88f3` ### Node Binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-amd64.tar.gz) | `3e6c90561dd1c27fa1dff6953c503251c36001f7e0f8eff3ec918c74ae2d9aa25917d8ac87d5b4224b8229f620b1830442e6dce3b2a497043f8497eee3705696` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm.tar.gz) | `26db385d9ae9a97a1051a638e7e3de22c4bbff389d5a419fe40d5893f9e4fa85c8b60a2bd1d370fd381b60c3ca33c5d72d4767c90898caa9dbd4df6bd116a247` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm64.tar.gz) | `5b8b63f617e248432b7eb913285a8ef8ba028255216332c05db949666c3f9e9cb9f4c393bbd68d00369bda77abf9bfa2da254a5c9fe0d79ffdad855a77a9d8ed` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-ppc64le.tar.gz) | `60da7715996b4865e390640525d6e98593ba3cd45c6caeea763aa5355a7f989926da54f58cc5f657f614c8134f97cd3894b899f8b467d100dca48bc22dd4ff63` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-s390x.tar.gz) | `9407dc55412bd04633f84fcefe3a1074f3eaa772a7cb9302242b8768d6189b75d37677a959f91130e8ad9dc590f9ba8408ba6700a0ceff6827315226dd5ee1e6` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-windows-amd64.tar.gz) | `9d4261af343cc330e6359582f80dbd6efb57d41f882747a94bbf47b4f93292d43dd19a86214d4944d268941622dfbc96847585e6fec15fddc4dbd93d17015fa8` +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-amd64.tar.gz) | `c1831c708109c31b3878e5a9327ea4b9e546504d0b6b00f3d43db78b5dd7d5114d32ac24a9a505f9cadbe61521f0419933348d2cd309ed8cfe3987d9ca8a7e2c` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm.tar.gz) | `b68dd5bcfc7f9ce2781952df40c8c3a64c29701beff6ac22f042d6f31d4de220e9200b7e8272ddf608114327770acdaf3cb9a34a0a5206e784bda717ea080e0f` 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm64.tar.gz) | `7fa84fc500c28774ed25ca34b6f7b208a2bea29d6e8379f84b9f57bd024aa8fe574418cee7ee26edd55310716d43d65ae7b9cbe11e40c995fe2eac7f66bdb423` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-ppc64le.tar.gz) | `a4278b3f8e458e9581e01f0c5ba8443303c987988ee136075a8f2f25515d70ca549fbd2e4d10eefca816c75c381d62d71494bd70c47034ab47f8315bbef4ae37` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-s390x.tar.gz) | `8de2bc6f22f232ff534b45012986eac23893581ccb6c45bd637e40dbe808ce31d5a92375c00dc578bdbadec342b6e5b70c1b9f3d3a7bb26ccfde97d71f9bf84a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-windows-amd64.tar.gz) | `b82e94663d330cff7a117f99a7544f27d0bc92b36b5a283b3c23725d5b33e6f15e0ebf784627638f22f2d58c58c0c2b618ddfd226a64ae779693a0861475d355` -## Changelog since v1.19.0 +## Changelog since v1.20.0 ## What's New (Major Themes) -### Dockershim deprecation - -Docker as an underlying runtime is being deprecated. Docker-produced images will continue to work in your cluster with all runtimes, as they always have. -The Kubernetes community [has written a blog post about this in detail](https://blog.k8s.io/2020/12/02/dont-panic-kubernetes-and-docker/) with [a dedicated FAQ page for it](https://blog.k8s.io/2020/12/02/dockershim-faq/). - -### External credential provider for client-go - -The client-go credential plugins can now be passed in the current cluster information via the `KUBERNETES_EXEC_INFO` environment variable. Learn more about this on [client-go credential plugins documentation](https://docs.k8s.io/reference/access-authn-authz/authentication/#client-go-credential-plugins/). - -### CronJob controller v2 is available through feature gate - -An alternative implementation of `CronJob` controller is now available as an alpha feature in this release, which has experimental performance improvement by using informers instead of polling. While this will be the default behavior in the future, you can [try them in this release through a feature gate](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/). - -### PID Limits graduates to General Availability - -PID Limits features are now generally available on both `SupportNodePidsLimit` (node-to-pod PID isolation) and `SupportPodPidsLimit` (ability to limit PIDs per pod), after being enabled-by-default in beta stage for a year. - -### API Priority and Fairness graduates to Beta - -Initially introduced in 1.18, Kubernetes 1.20 now enables API Priority and Fairness (APF) by default. This allows `kube-apiserver` to [categorize incoming requests by priority levels](https://docs.k8s.io/concepts/cluster-administration/flow-control/). - -### IPv4/IPv6 run - -IPv4/IPv6 dual-stack has been reimplemented for 1.20 to support dual-stack Services, based on user and community feedback. If your cluster has dual-stack enabled, you can create Services which can use IPv4, IPv6, or both, and you can change this setting for existing Services. Details are available in updated [IPv4/IPv6 dual-stack docs](https://docs.k8s.io/concepts/services-networking/dual-stack/), which cover the nuanced array of options. 
- -We expect this implementation to progress from alpha to beta and GA in coming releases, so we’re eager to have you comment about your dual-stack experiences in [#k8s-dual-stack](https://kubernetes.slack.com/messages/k8s-dual-stack) or in [enhancements #563](https://features.k8s.io/563). - -### go1.15.5 - -go1.15.5 has been integrated to Kubernetes project as of this release, [including other infrastructure related updates on this effort](https://github.com/kubernetes/kubernetes/pull/95776). - -### CSI Volume Snapshot graduates to General Availability - -CSI Volume Snapshot moves to GA in the 1.20 release. This feature provides a standard way to trigger volume snapshot operations in Kubernetes and allows Kubernetes users to incorporate snapshot operations in a portable manner on any Kubernetes environment regardless of supporting underlying storage providers. -Additionally, these Kubernetes snapshot primitives act as basic building blocks that unlock the ability to develop advanced, enterprise grade, storage administration features for Kubernetes: including application or cluster level backup solutions. -Note that snapshot support will require Kubernetes distributors to bundle the Snapshot controller, Snapshot CRDs, and validation webhook. In addition, a CSI driver supporting the snapshot functionality must also be deployed on the cluster. - -### Non-recursive Volume Ownership (FSGroup) graduates to Beta - -By default, the `fsgroup` setting, if specified, recursively updates permissions for every file in a volume on every mount. This can make mount, and pod startup, very slow if the volume has many files. -This setting enables a pod to specify a `PodFSGroupChangePolicy` that indicates that volume ownership and permissions will be changed only when permission and ownership of the root directory does not match with expected permissions on the volume. - -### CSIDriver policy for FSGroup graduates to Beta - -The FSGroup's CSIDriver Policy is now beta in 1.20. This allows CSIDrivers to explicitly indicate if they want Kubernetes to manage permissions and ownership for their volumes via `fsgroup`. - -### Security Improvements for CSI Drivers (Alpha) +### Deprecation of PodSecurityPolicy -In 1.20, we introduce a new alpha feature `CSIServiceAccountToken`. This feature allows CSI drivers to impersonate the pods that they mount the volumes for. This improves the security posture in the mounting process where the volumes are ACL’ed on the pods’ service account without handing out unnecessary permissions to the CSI drivers’ service account. This feature is especially important for secret-handling CSI drivers, such as the secrets-store-csi-driver. Since these tokens can be rotated and short-lived, this feature also provides a knob for CSI drivers to receive `NodePublishVolume` RPC calls periodically with the new token. This knob is also useful when volumes are short-lived, e.g. certificates. +PSP as an admission controller resource is being deprecated. Deployed PodSecurityPolicy's will keep working until version 1.25, their target removal from the codebase. A new feature, with a working title of "PSP replacement policy", is being developed in [KEP-2579](https://features.k8s.io/2579). To learn more, read [PodSecurityPolicy Deprecation: Past, Present, and Future](https://blog.k8s.io/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). -### Introducing Graceful Node Shutdown (Alpha) +### Kubernetes API Reference Documentation -The `GracefulNodeShutdown` feature is now in Alpha. 
This allows kubelet to be aware of node system shutdowns, enabling graceful termination of pods during a system shutdown. This feature can be [enabled through feature gate](https://docs.k8s.io/concepts/architecture/nodes/#graceful-node-shutdown). +The API reference is now generated with [`gen-resourcesdocs`](https://github.com/kubernetes-sigs/reference-docs/tree/c96658d89fb21037b7d00d27e6dbbe6b32375837/gen-resourcesdocs) and it is moving to [Kubernetes API](https://docs.k8s.io/reference/kubernetes-api/) -### Runtime log sanitation +### Kustomize Updates in Kubectl -Logs can now be configured to use runtime protection from leaking sensitive data. [Details for this experimental feature is available in documentation](https://docs.k8s.io/concepts/cluster-administration/system-logs/#log-sanitization). +[Kustomize](https://github.com/kubernetes-sigs/kustomize) version in kubectl had a jump from v2.0.3 to [v4.0.5](https://github.com/kubernetes/kubernetes/pull/98946). Kustomize is now treated as a library and future updates will be less sporadic. -### Pod resource metrics +### Default Container Labels -On-demand metrics calculation is now available through `/metrics/resources`. [When enabled]( -https://docs.k8s.io/concepts/cluster-administration/system-metrics#kube-scheduler-metrics), the endpoint will report the requested resources and the desired limits of all running pods. +Pod with multiple containers can use `kubectl.kubernetes.io/default-container` label to have a container preselected for kubectl commands. More can be read in [KEP-2227](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/2227-kubectl-default-container/README.md). -### Introducing `RootCAConfigMap` +### Immutable Secrets and ConfigMaps -`RootCAConfigMap` graduates to Beta, seperating from `BoundServiceAccountTokenVolume`. The `kube-root-ca.crt` ConfigMap is now available to every namespace, by default. It contains the Certificate Authority bundle for verify kube-apiserver connections. +Immutable Secrets and ConfigMaps graduates to GA. This feature allows users to specify that the contents of a particular Secret or ConfigMap is immutable for its object lifetime. For such instances, Kubelet will not watch/poll for changes and therefore reducing apiserver load. -### `kubectl debug` graduates to Beta +### Structured Logging in Kubelet -`kubectl alpha debug` graduates from alpha to beta in 1.20, becoming `kubectl debug`. -`kubectl debug` provides support for common debugging workflows directly from kubectl. Troubleshooting scenarios supported in this release of `kubectl` include: -Troubleshoot workloads that crash on startup by creating a copy of the pod that uses a different container image or command. -Troubleshoot distroless containers by adding a new container with debugging tools, either in a new copy of the pod or using an ephemeral container. (Ephemeral containers are an alpha feature that are not enabled by default.) -Troubleshoot on a node by creating a container running in the host namespaces and with access to the host’s filesystem. -Note that as a new builtin command, `kubectl debug` takes priority over any `kubectl` plugin named “debug”. You will need to rename the affected plugin. -Invocations using `kubectl alpha debug` are now deprecated and will be removed in a subsequent release. Update your scripts to use `kubectl debug` instead of `kubectl alpha debug`! 
-For more information about kubectl debug, see Debugging Running Pods on the Kubernetes website, kubectl help debug, or reach out to SIG CLI by visiting #sig-cli or commenting on [enhancement #1441](https://features.k8s.io/1441). +Kubelet has adopted structured logging, thanks to community effort in accomplishing this within the release timeline. Structured logging in the project remains an ongoing effort -- for folks interested in participating, [keep an eye / chime in to the mailing list discussion](https://groups.google.com/g/kubernetes-dev/c/y4WIw-ntUR8). -### Removing deprecated flags in kubeadm +### Storage Capacity Tracking -`kubeadm` applies a number of deprecations and removals of deprecated features in this release. More details are available in the Urgent Upgrade Notes and Kind / Deprecation sections. +Traditionally, the Kubernetes scheduler was based on the assumptions that additional persistent storage is available everywhere in the cluster and has infinite capacity. Topology constraints addressed the first point, but up to now pod scheduling was still done without considering that the remaining storage capacity may not be enough to start a new pod. [Storage capacity tracking](https://docs.k8s.io/concepts/storage/storage-capacity/) addresses that by adding an API for a CSI driver to report storage capacity and uses that information in the Kubernetes scheduler when choosing a node for a pod. This feature serves as a stepping stone for supporting dynamic provisioning for local volumes and other volume types that are more capacity constrained. -### Pod Hostname as FQDN graduates to Beta +### Generic Ephemeral Volumes -Previously introduced in 1.19 behind a feature gate, `SetHostnameAsFQDN` is now enabled by default. More details on this behavior is available in [documentation for DNS for Services and Pods](https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field) +[Generic ephermeral volumes](https://docs.k8s.io/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) feature allows any existing storage driver that supports dynamic provisioning to be used as an ephemeral volume with the volume’s lifecycle bound to the Pod. It can be used to provide scratch storage that is different from the root disk, for example persistent memory, or a separate local disk on that node. All StorageClass parameters for volume provisioning are supported. All features supported with PersistentVolumeClaims are supported, such as storage capacity tracking, snapshots and restore, and volume resizing. -### `TokenRequest` / `TokenRequestProjection` graduates to General Availability +### CSI Service Account Token -Service account tokens bound to pod is now a stable feature. The feature gates will be removed in 1.21 release. For more information, refer to notes below on the changelogs. +CSI Service Account Token feature moves to Beta in 1.21. This feature improves the security posture and allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed. -### RuntimeClass feature graduates to General Availability. +### CSI Health Monitoring -The `node.k8s.io` API groups are promoted from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. 
-### `TokenRequest` / `TokenRequestProjection` graduates to General Availability

+### CSI Service Account Token

-Service account tokens bound to pod is now a stable feature. The feature gates will be removed in 1.21 release. For more information, refer to notes below on the changelogs.

+The CSI Service Account Token feature moves to Beta in 1.21. This feature improves the security posture and allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed.

-### RuntimeClass feature graduates to General Availability.

+### CSI Health Monitoring

-The `node.k8s.io` API groups are promoted from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing]

-### Cloud Controller Manager now exclusively shipped by Cloud Provider

-Kubernetes will no longer ship an instance of the Cloud Controller Manager binary. Each Cloud Provider is expected to ship their own instance of this binary. Details for a Cloud Provider to create an instance of such a binary can be found under [here](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/cloud-provider/sample). Anyone with questions on building a Cloud Controller Manager should reach out to SIG Cloud Provider. Questions about the Cloud Controller Manager on a Managed Kubernetes solution should go to the relevant Cloud Provider. Questions about the Cloud Controller Manager on a non managed solution can be brought up with SIG Cloud Provider.

+The CSI health monitoring feature is being released as a second Alpha in Kubernetes 1.21. This feature enables CSI Drivers to share abnormal volume conditions from the underlying storage systems with Kubernetes so that they can be reported as events on PVCs or Pods. This feature serves as a stepping stone towards programmatic detection and resolution of individual volume health issues by Kubernetes.

## Known Issues

-### Summary API in kubelet doesn't have accelerator metrics

-Currently, cadvisor_stats_provider provides AcceleratorStats but cri_stats_provider does not. As a result, when using cri_stats_provider, kubelet's Summary API does not have accelerator metrics. [There is an open work in progress to fix this](https://github.com/kubernetes/kubernetes/pull/96873).

+### `TopologyAwareHints` feature falls back to default behavior
+
+The feature gate currently falls back to the default behavior in most cases. Enabling the feature gate will add hints to `EndpointSlices`, but functional differences are only observed in non-dual-stack kube-proxy implementations. [The fix will be available in coming releases](https://github.com/kubernetes/kubernetes/pull/100804).

## Urgent Upgrade Notes

### (No, really, you MUST read this before you upgrade)

-- A bug was fixed in kubelet where exec probe timeouts were not respected. This may result in unexpected behavior since the default timeout (if not specified) is `1s` which may be too small for some exec probes. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. See [configure probe](https://docs.k8s.io/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) section of the documentation for more details.
-  - This change in behavior may be unexpected for some clusters and can be disabled by turning off the `ExecProbeTimeout` feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing]
-- RuntimeClass feature graduates to General Availability. Promote `node.k8s.io` API groups from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing]
-- API priority and fairness graduated to beta.
1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. ([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Kubeadm: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - -- Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- The deprecated flag --experimental-kustomize is now removed from kubeadm commands. Use --experimental-patches instead, which was introduced in 1.19. Migration information available in --help description for --experimental-patches. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) -- Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. 
The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42), [SIG API Machinery, Node, Testing]) -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- `TokenRequest` and `TokenRequestProjection` are now GA features. The following flags are required by the API server: - - `--service-account-issuer`, should be set to a URL identifying the API server that will be stable over the cluster lifetime. - - `--service-account-key-file`, set to one or more files containing one or more public keys used to verify tokens. - - `--service-account-signing-key-file`, set to a file containing a private key to use to sign service account tokens. Can be the same file given to `kube-controller-manager` with `--service-account-private-key-file`. ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle] -- kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] - - +- Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. 
Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1 but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release]
+ - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
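+   As a sketch (values are illustrative, not taken from the release notes), the relevant kubelet configuration document passed to `kubeadm init --config` would typically contain:
+   ```yaml
+   apiVersion: kubelet.config.k8s.io/v1beta1
+   kind: KubeletConfiguration
+   cgroupDriver: systemd   # an empty value is now defaulted to "systemd" during "kubeadm init"
+   ```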
+ - Newly provisioned PVs by EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing]
+ - Newly provisioned PVs by OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage]
+ - Newly provisioned PVs by gce-pd will no longer have the beta FailureDomain label. gce-pd volume plugin will start to have GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing]
+ - OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage]
+ - Remove alpha `CSIMigrationXXComplete` flag and add alpha `InTreePluginXXUnregister` flag. Deprecate `CSIMigrationvSphereComplete` flag and it will be removed in v1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227))
+ - Remove storage metrics `storage_operation_errors_total`, since we already have `storage_operation_status_count`. Also add a new `status` field to `storage_operation_duration_seconds`, so that storage operation latency can be broken down by status. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage]
+ - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operation_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary))
+ - `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior` features have been promoted to GA. `ServiceNodeExclusion` and `NodeDisruptionExclusion` are now unconditionally enabled, while `LegacyNodeRoleBehavior` is unconditionally disabled. To prevent control plane nodes from being added to load balancers automatically, upgrading users need to add the "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu))

## Changes by Kind

### Deprecation

-- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node]
-- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability]
-- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed, please adopt `metrics/resource`.
([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node]
-- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling]
-- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling]
-- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery]
-- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing]
+- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more of them fails during the drain. For now, users can opt in to this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan))
+- Delete deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps]
+- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network]
+- Kubeadm: the deprecated command "alpha selfhosting pivot" is now removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42))
+- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is now deprecated. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: the "kubeadm alpha certs" command is now removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: the deprecated kube-dns is no longer supported as an option.
If "ClusterConfiguration.dns.type" is set to "kube-dns" kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Remove deprecated `--generator, --replicas, --service-generator, --service-overrides, --schedule` from `kubectl run` + Deprecate `--serviceaccount, --hostport, --requests, --limits` in `kubectl run` ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds", suggest to use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- Removing experimental windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metrics name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- The GA TokenRequest and TokenRequestProjection feature gates have been removed and are unconditionally enabled. Remove explicit use of those feature gates in CLI invocations. ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. 
([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `discovery.k8s.io/v1beta1` EndpointSlices are deprecated in favor of `discovery.k8s.io/v1`, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) +- `diskformat` storage class parameter for in-tree vSphere volume plugin is deprecated as of v1.21 release. Please consider updating storageclass and remove `diskformat` parameter. vSphere CSI Driver does not support diskformat storageclass parameter. + + vSphere releases less than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM Hardware version less than 15 is deprecated as of v1.21. Please consider upgrading the Node VM Hardware version to 15 or above. vSphere CSI Driver recommends Node VM's Hardware version set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployment spanning across multiple vCenter servers. + + Support for these deprecations will be available till Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) ### API Change -- `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - - kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a `serving` and `terminating` condition to the EndpointSlice API. - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. 
- It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- External facing API podresources is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fewer candidates are enumerated for preemption to improve performance in large clusters. ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) -- Fix conversions for custom metrics. 
([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- GPU metrics provided by kubelet are now disabled by default. ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. --topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- NodeAffinity plugin can be configured with AddedAffinity. 
([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The status of v1beta1 CRDs without "preserveUnknownFields:false" now shows a violation, "spec.preserveUnknownFields: Invalid value: true: must be false". ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBService is enabled. The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. ([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] -- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] +- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels. + 2. 
A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing]
+- Add Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing]
+- Added the `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed`. This is an alpha field and is only honored by servers with the `IndexedJob` feature gate enabled. ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
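+  A minimal Indexed Job sketch (illustrative only; the name, image and command are placeholders) looks like:
+  ```yaml
+  apiVersion: batch/v1
+  kind: Job
+  metadata:
+    name: indexed-demo        # hypothetical name
+  spec:
+    completions: 5
+    parallelism: 2
+    completionMode: Indexed   # requires the IndexedJob feature gate in v1.21
+    template:
+      spec:
+        restartPolicy: Never
+        containers:
+          - name: worker
+            image: busybox    # placeholder image
+            command: ["sh", "-c", "echo hello from one completion index"]
+  ```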
+- Adds support for endPort field in NetworkPolicy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network]
+- CSIServiceAccountToken graduates to Beta and is enabled by default. ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang))
+- Cluster admins can now turn off the `/debug/pprof` and `/debug/flags/v` endpoints in kubelet by setting `enableProfilingHandler` and `enableDebugFlagsHandler` to `false` in the Kubelet configuration file. Options `enableProfilingHandler` and `enableDebugFlagsHandler` can be set to `true` only when `enableDebuggingHandlers` is also set to `true`. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90))
+- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing]
+- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl portforward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI]
+- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing]
+- Fixes server-side apply for APIService resources. ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado))
+- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing]
+- Hugepages request values are limited to integer multiples of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps]
+- Implement the GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing]
+- IngressClass resource can now reference a resource in a specific namespace
+  for implementation-specific configuration (previously only Cluster-level resources were allowed).
+  This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi))
+- Jobs API has a new `.spec.suspend` field that can be used to suspend and resume Jobs. This is an alpha field which is only honored by servers with the `SuspendJob` feature gate enabled. ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac))
+- Kubelet Graceful Node Shutdown feature graduates to Beta and is enabled by default. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage))
+- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing]
+- Namespace API objects now have a `kubernetes.io/metadata.name` label matching their metadata.name field to allow selecting any namespace by its name using a label selector. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing]
+- One new field "InternalTrafficPolicy" in Service is added.
+  It specifies whether cluster-internal traffic should be routed to all endpoints or node-local endpoints only.
+  "Cluster" routes internal traffic to a Service to all endpoints.
+  "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready.
+  The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network]
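+  A hedged sketch of the new field on a Service (the name, selector and ports are made up; in v1.21 this is alpha and sits behind the ServiceInternalTrafficPolicy feature gate):
+  ```yaml
+  apiVersion: v1
+  kind: Service
+  metadata:
+    name: node-local-demo     # hypothetical name
+  spec:
+    selector:
+      app: demo
+    ports:
+      - port: 80
+        targetPort: 8080
+    internalTrafficPolicy: Local   # "Cluster" (default) routes to all endpoints; "Local" only to node-local endpoints
+  ```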
+- PodDisruptionBudget API objects can now contain conditions in status. ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation]
+- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security]
+- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing]
+- Promote Immutable Secrets/ConfigMaps feature to Stable. This allows setting the `immutable` field in a Secret or ConfigMap object to mark their contents as immutable. ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]
+- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows]
+- Scheduler extender filter interface now can report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of the `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling]
+- Services can specify loadBalancerClass to use a custom load balancer ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold))
+- Storage capacity tracking (= the CSIStorageCapacity feature) graduates to Beta and is enabled by default; storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly))
+- Support for Indexed Job: a Job that is considered completed when Pods associated with indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- The BoundServiceAccountTokenVolume feature has been promoted to beta, and is enabled by default.
+  - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted.
+  - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` versions v11.0.0+ and v0.15.0+ reload tokens automatically.
+  - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing]
+- The EndpointSlice Controllers are now GA. The `EndpointSliceController` will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula))
+- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support a significantly larger number of addresses.
([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- The `EndpointSlice` API is now GA. The `EndpointSlice` topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The `discovery.k8s.io/v1alpha1` API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) +- The `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a `Pod` compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) +- The kube-apiserver now resets `managedFields` that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) +- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect container for kubectl commands. ([#97099](https://github.com/kubernetes/kubernetes/pull/97099), [@pacoxu](https://github.com/pacoxu)) [SIG CLI] ### Feature -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. 
- This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add foreground cascading deletion to kubectl with the new `kubectl delete foreground|background|orphan` option. ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) -- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation. ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Adds a headless service on node-local-cache addon. 
([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. ([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- Configure AWS LoadBalancer health check protocol via service annotations. ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. 
([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. - - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] -- Kubeadm now prints warnings instead of throwing errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl create now supports creating ingress objects. 
([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. - - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: [@mikedanese](https://github.com/mikedanese): - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. - - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. - - - make kube::util::find-binary not dependent on bazel-out/ structure - - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node-to-pod PID isolation. 
- Promote SupportPodPidsLimit to GA to provide ability to limit PIDs per pod. ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Send GCE node startup scripts logs to console and journal. ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) -- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. 
([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric buckets were set to 0.005 0.0125 0.03125 0.078125 0.1953125 0.48828125 1.220703125 3.0517578125 7.62939453125 19.073486328125 47.6837158203125 119.20928955078125 298.0232238769531 and 745.0580596923828 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- A new histogram metric to track the time taken by the `TTLAfterFinished` controller to delete a job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) +- AWS cloud provider supports auto-discovering subnets without any `kubernetes.io/cluster/` tags. It also supports the additional service annotation `service.beta.kubernetes.io/aws-load-balancer-subnets` to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) +- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more nodes fail during the drain. For now, users can opt in to this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) +- Add --permit-address-sharing flag to `kube-apiserver` to listen with `SO_REUSEADDR`. While allowing listening on wildcard IPs like 0.0.0.0 and specific IPs in parallel, it avoids waiting for the kernel to release the socket in the `TIME_WAIT` state, and hence considerably reduces `kube-apiserver` restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. 
([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add flag --lease-reuse-duration-seconds for kube-apiserver to config etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Add metric etcd_lease_object_counts for kube-apiserver to observe max objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Added support for installing `arm64` node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Adds two new metrics to cronjobs, a histogram to track the time difference when a job is created and the expected time when it should be created, as well as a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859 enabled when KUBECTL_COMMAND_HEADERS environment variable set on the client command line. ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) +- Base-images: Update to debian-iptables:buster-v1.4.0 + - Uses iptables 1.8.5 + - base-images: Update to debian-base:buster-v1.3.0 + - cluster/images/etcd: Build etcd:3.4.13-2 image + - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing] +- CRIContainerLogRotation graduates to GA and unconditionally enabled. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) +- Component owner can configure the allowlist of metric label with flag '--allow-metric-labels'. 
([#99385](https://github.com/kubernetes/kubernetes/pull/99385), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Release] +- Component owner can configure the allowlist of metric label with flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] +- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage] +- EndpointSliceNodeName graduates to GA and thus will be unconditionally enabled -- NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) +- Export `NewDebuggingRoundTripper` function and `DebugLevel` options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: IPv6DualStack feature gate graduates to Beta and is enabled by default ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) +- Kubeadm: warn the user that ipv6 site-local is deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle] +- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. 
([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: perform pre-flight validation on host/node name upon `kubeadm init` and `kubeadm join`, showing warnings on non-compliant names ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) +- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubectl: Add `--use-protocol-buffers` flag to kubectl top pods and nodes. ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius)) +- Kubectl: `kubectl get` will omit managed fields by default now. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Kubectl: a default container can be preselected in a Pod using the `kubectl.kubernetes.io/default-container` annotation ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) +- Kubectl: add bash-completion for comma separated list on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Metrics can now be disabled explicitly via a command line flag (e.g. '--disabled-metrics=metric1,metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) +- New admission controller `DenyServiceExternalIPs` is available. Clusters which do not *need* the Service `externalIPs` feature should enable this controller and be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) +- Overall, enabling the `PreferNominatedNode` feature will improve scheduling performance where preemption happens frequently, but in theory, with `PreferNominatedNode` enabled, the pod might not be scheduled to the best candidate node in the cluster. 
([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Port the devicemanager to Windows node to allow device plugins like directx ([#93285](https://github.com/kubernetes/kubernetes/pull/93285), [@aarnaud](https://github.com/aarnaud)) [SIG Node, Testing and Windows] +- Removes cAdvisor JSON metrics (/stats/container, /stats//, /stats////) from the kubelet. ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metrics name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- Sysctls graduates to General Availability and thus unconditionally enabled. ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The `CronJobControllerV2` feature flag graduates to Beta and set to be enabled by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) +- The `EndpointSlice` mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, it also ensures that updates on any of these fields are mirrored. + The well-known annotation `endpoints.kubernetes.io/last-change-trigger-time` is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. 
([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing] +- The `TTLAfterFinished` feature flag is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) +- The apimachinery util/net function used to detect the bind address `ResolveBindAddress()` takes into consideration global IP addresses on loopback interfaces when 1) the host has default routes, or 2) there are no global IPs on those interfaces, in order to support more complex network scenarios like BGP Unnumbered RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network] +- The feature gate `RootCAConfigMap` graduated to GA in v1.21 and therefore will be unconditionally enabled. This flag will be removed in the v1.22 release. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) +- The pause image was upgraded to `v3.4.1` in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) +- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New clusters or existing clusters are not affected until an actor starts adding secondary Pod and service CIDR CLI flags as described here: [IPv4/IPv6 Dual-stack](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack) ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) +- Users might specify the `kubectl.kubernetes.io/default-container` annotation in a Pod to preselect a container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared in a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. 
([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] +- `kubectl wait` ensures that observedGeneration >= generation to prevent stale state reporting. An example scenario can be found on CRD updates. ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] -- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node] -- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - removes experimental warning for InstancesV2 - document that implementation of InstancesV2 will disable calls to Zones - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- Azure file migration graduates to beta, with CSIMigrationAzureFile flag off by default + as it requires installation of AzureFile CSI Driver. Users should enable CSIMigration and + CSIMigrationAzureFile features and install the [AzureFile CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver) + to avoid disruption to existing Pod and PVC objects at that time. Azure File CSI driver does not support using the same persistent + volume with different fsgroups. When CSI migration is enabled for the azurefile driver, such a case is not supported. + (there is a case we support where the volume is mounted with 0777 and then it is readable/writable by everyone) ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set kubelet option `--volume-stats-agg-period` to a negative value to disable volume calculations. ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### Failing Test -- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing] -- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. 
([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing] +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP `NodePort` Services where stale connection tracking entries may blackhole the traffic directed to the `NodePort` ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) +- Kubelet: fixes a bug in the HostPort dockershim implementation that caused the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### Bug or Regression -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure armclient backoff step defaults to 1 (no retry). ([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) -- Azure: fix a bug that kube-controller-manager would panic if wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Both apiserver_request_duration_seconds metrics and RequestReceivedTimestamp fields of an audit event now take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) -- Build/lib/release: Explicitly use '--platform' in building server images - - When we switched to go-runner for building the apiserver, - controller-manager, and scheduler server components, we no longer - reference the individual architectures in the image names, specifically - in the 'FROM' directive of the server image Dockerfiles. - - As a result, server images for non-amd64 images copy in the go-runner - amd64 binary instead of the go-runner that matches that architecture. - - This commit explicitly sets the '--platform=linux/${arch}' to ensure - we're pulling the correct go-runner arch from the manifest list. 
- - Before: - `FROM ${base_image}` - - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- Ceph RBD volume expansion now works even when ceph.conf was not provided. ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Change the calculation of pod UIDs so that static pods get a unique value - will cause all containers to be killed and recreated after in-place upgrade. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node] -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when dual-stack feature gate become enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. 
([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Endpoint slice controller now mirrors parent's service label to its corresponding endpoint slices. ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) -- Ensure getPrimaryInterfaceID not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix a bug where loadbalancer deletion gets stuck because of missing resource group. ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a panic in `kubectl debug` when a pod has multiple init or ephemeral containers. ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. 
([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix race condition on timeCache locks. ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) -- Fix regression on `kubectl portforward` when TCP and UCP services were configured on the same port. ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. 
([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. 
([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed bug where kubectl top pod output is not sorted when --sort-by and --containers flags are used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. 
([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Increase maximum IOPS of AWS EBS io1 volume to current maximum (64,000). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kubeadm ensures "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user. ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) -- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Kubeadm now warns (instead of error out) on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. 
([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle] -- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CR ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more reseliant to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). ([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- New Azure instance types do now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Port mapping now allows the same `containerPort` of different containers to different `hostPort` without naming the mapping explicitly. ([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Recreate EndpointSlices on rapid Service creation. 
([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- StatefulSet Controller now waits for PersistentVolumeClaim deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) -- StreamWatcher now calls HandleCrash at appropriate sequence. 
([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. ([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] -- Users will see increase in time for deletion of pods and also guarantee that removal of pod from api server would mean deletion of all the resources from container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node] -- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. 
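As a hedged illustration of the `--patch-file` entry above (the `kubectl patch` subcommand and the `--patch-file` flag come from that entry; the deployment name, patch type, and file path are placeholders):

```shell
# Apply a large patch from a file instead of passing it inline with --patch.
kubectl patch deployment example-app --type merge --patch-file ./large-patch.yaml
```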
([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- All data is no longer automatically deleted when a failure is detected during creation of the volume data file on a CSI volume. Now only the data file and volume path are removed. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) +- Clean ReplicaSet by revision instead of creation timestamp in deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Client-go exec credential plugins will pass stdin only when an interactive terminal is detected on stdin. This fixes a bug where previously it was checking if **stdout** is an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) +- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing] +- Cluster Autoscaler version bump to v1.20.0 ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Ensure all vSphere nodes are tracked by volume attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) +- Ensure empty string annotations are copied over in rollbacks. ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) +- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery] +- Explicitly pass `KUBE_BUILD_CONFORMANCE=y` in `package-tarballs` to reenable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) +- Fix Azure file migration e2e test failure when CSIMigration is turned on. ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) +- Fix CSI-migrated inline EBS volumes failing to mount if their volumeID is prefixed by aws:// ([#96821](https://github.com/kubernetes/kubernetes/pull/96821), [@wongma7](https://github.com/wongma7)) [SIG Storage] +- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fix PermissionDenied issue on SMB mount for Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fix kube-proxy container image architecture for non-amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) +- Fix missing cadvisor machine metrics. ([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node] +- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix privileged config of Pod Sandbox which was previously ignored. 
([#96877](https://github.com/kubernetes/kubernetes/pull/96877), [@xeniumlee](https://github.com/xeniumlee)) +- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node] +- Fix the regression that slowed pod termination. Before this fix, pods could take additional time to terminate - up to one minute. This reverses the change that ensured CNI resources are cleaned up when the pod is removed from the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node] +- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage] +- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider] +- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed a bug in kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node] +- Fixed a bug that caused a smaller conntrack-max value to be used under the CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed a bug where, on nodes whose INPUT chain policy in the filter table is not ACCEPT, the healthcheck nodeport would not work. + Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network] +- Fixed a bug where the kubelet could not start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] +- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixed authentication_duration_seconds metric scope. Previously, it included the whole apiserver request duration, which yielded inaccurate results. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) +- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. 
([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed the namespace having no effect when exposing a deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI] +- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixes a bug in identifying the correct containerd process. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) +- Fixes add-on manager leader election to use leases instead of endpoints, similar to what kube-controller-manager does in 1.20 ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) +- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage] +- Fixes problem where invalid selector on `PodDisruptionBudget` leads to a nil pointer dereference that causes the Controller manager to crash loop. ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) +- Fixes spurious errors about IPv6 in `kube-proxy` logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) +- Fixing a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node] +- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network] +- Ignore pod updates with no new images in the alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node] +- Improve speed of vSphere PV provisioning and reduce number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- KUBECTL_EXTERNAL_DIFF now accepts an equal sign in additional parameters. 
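A hedged sketch of the `KUBECTL_EXTERNAL_DIFF` entry above; the external diff program and its arguments are illustrative placeholders, and only the handling of `=` in extra parameters comes from the entry:

```shell
# Pass extra parameters containing '=' to the external diff program used by `kubectl diff`.
KUBECTL_EXTERNAL_DIFF="diff --unified=3" kubectl diff -f deployment.yaml
```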
([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kube-proxy: remove the deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- Kubeadm installs etcd v3.4.13 when creating a v1.19 cluster ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) +- Kubeadm: Fixes a kubeadm upgrade bug that could cause a custom CoreDNS configuration to be replaced with the default. ([#97016](https://github.com/kubernetes/kubernetes/pull/97016), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug in the host memory detection code on 32-bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written to the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle] +- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl logs: `--ignore-errors` is now honored by all containers, maintaining consistency with parallelConsumeRequest behavior. 
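A hedged illustration of the `kubectl logs --ignore-errors` entry above; the pod name is a placeholder:

```shell
# Follow logs from every container in the pod, treating per-container errors as non-fatal.
kubectl logs example-pod --all-containers --ignore-errors --follow
```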
([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) +- Kubectl-convert: Fix `no kind "Ingress" is registered for version` error ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) +- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is the equivalent of checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows] +- Kubelet: Fix the kubelet panicking after getting the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Kubelet: Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Kubelet: Fixed a bug in getting the CPU count on Windows when there are more than 64 logical processors ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows] +- Limits a lease to a maximum of 1000 attached objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) +- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Performance regression #97685 has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] +- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows] +- Pod status updates faster when reacting to probe results. The first readiness probe is called sooner after startup probes succeed, which marks the Pod as ready sooner. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) +- Readjust `kubelet_containers_per_pod_count` buckets to only show metrics greater than 1. ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) +- Remove CSI topology from migrated in-tree gcepd volume. 
([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage] +- Requests with invalid timeout parameters in the request URL now appear in the audit log correctly. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing] +- Resolve a "concurrent map read and map write" crashing error in the kubelet ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node] +- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node] +- ResourceQuota of an entity now inclusively calculates Pod overhead ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42)) +- Return zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting startedAt and finishedAt of a not-yet-started or running Pod when using `dockershim` as a runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber)) +- Reverts breaking change to inline AzureFile volumes; referenced secrets are now searched for in the same namespace as the pod as in previous releases. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42)) +- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling] +- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Sync node status during kubelet node shutdown. + Adds a pod admission handler that rejects new pods while the node is shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- The calculation of pod UIDs for static pods has changed to ensure each static pod gets a unique value - this will cause all static pod containers to be recreated/restarted if an in-place kubelet upgrade from 1.20 to 1.21 is performed. Note that draining pods before upgrading the kubelet across minor versions is the supported upgrade path. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node] +- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network] +- Truncates a message if it hits the `NoteLengthLimit` when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory)) +- Updated k8s.gcr.io/ingress-gce-404-server-with-metrics-amd64 to a version that serves the /metrics endpoint on a non-default port. 
([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider] +- Updates the commands ` + - kubectl kustomize {arg} + - kubectl apply -k {arg} + ` to use the same code as kustomize CLI [v4.0.5](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5) ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) +- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage] +- Use network.Interface.VirtualMachine.ID to get the bound VM + Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] +- When using `Containerd` on Windows, the `C:\Windows\System32\drivers\etc\hosts` file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) +- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero, where zero indicates no waiting for the volume binding check. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, print inline with exec and attach the list of alternate containers when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### Other (Cleanup or Flake) -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: - - ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. 
The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`. ([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Add fine grained debugging to intra-pod conformance test to troubleshoot networking issues for potentially unhealthy nodes when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) -- Add the following metrics: - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Decrease warning message frequency on setting volume ownership for configmap/secret. 
([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) -- Enhance log information of verifyRunAsNonRoot, add pod, container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Fix kubelet to properly log when a container is started. Previously, kubelet may log that container is dead and was restarted when it was actually started for the first time. This behavior only happened on pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- Lock ExternalPolicyForExternalIP to default, this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Mask ceph RBD adminSecrets in logs when logLevel >= 4. ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) -- Remove offensive words from kubectl cluster-info command. ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) -- Remove support for "ci/k8s-master" version label in kubeadm, use "ci/latest" instead. See [kubernetes/test-infra#18517](https://github.com/kubernetes/test-infra/pull/18517). 
([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) -- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To ensure the code be strong, add unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug. - Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). 
The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- APIs for kubelet annotations and labels from `k8s.io/kubernetes/pkg/kubelet/apis` are now moved under `k8s.io/kubelet/pkg/apis/` ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) +- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Clients are required to use go1.15.8+ or go1.16+ if the kube-apiserver has the goaway feature enabled, to avoid an unexpected data race condition. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) +- Delete deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- EndpointSlice generation is now incremented when labels change. ([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Feature gate AllowInsecureBackendProxy graduates to GA and is unconditionally enabled. ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) +- Increase timeout for pod lifecycle test to reach pod status=ready ([#96691](https://github.com/kubernetes/kubernetes/pull/96691), [@hh](https://github.com/hh)) +- Increased `CSINodeIDMaxLength` from 128 bytes to 192 bytes. ([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) +- Kube-apiserver: The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. 
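As a hedged illustration of the `kubectl debug` entry above (the command replaces the removed `kubectl alpha debug`; the pod and image names are placeholders):

```shell
# Attach an interactive ephemeral debug container to an existing pod.
kubectl debug -it example-pod --image=busybox:1.28
```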
([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Kubelet command line flags related to dockershim now show a deprecation message, as they will be removed along with dockershim in a future release. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) +- Official support for building Kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The `AttachVolumeLimit` feature gate (GA since v1.17) has been removed and the feature is now unconditionally enabled. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) +- The `CSINodeInfo` feature gate that is GA since v1.17 is unconditionally enabled, and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The `apiserver_request_total` metric is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this will fail. This is, however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. ([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. 
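A hedged sketch of invoking the `e2e.test` binary as described in the entry above; only `--allowed-not-ready-nodes=-1` comes from the entry, while the kubeconfig path, focus regex, and the other flag spellings are assumptions:

```shell
# Run a subset of the e2e suite without waiting for all nodes or kube-system pods to be ready.
./e2e.test --kubeconfig="$HOME/.kube/config" --allowed-not-ready-nodes=-1 -ginkgo.focus='\[sig-cli\]'
```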
([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that have been GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi-zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) + +### Uncategorized + +- GCE L4 Loadbalancers now handle > 5 ports in service spec correctly. ([#99595](https://github.com/kubernetes/kubernetes/pull/99595), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are at least version 1.20. The feature will be enabled by default in all installations in 1.22. 
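A hedged sketch of opting in to the beta DownwardAPIHugePages feature described in the entry above before it is enabled by default; the gate name comes from the entry, while the kubelet config path and the use of the `--feature-gates` flag to enable it are assumptions:

```shell
# Enable the gate explicitly on a kubelet; it is beta but not yet on by default per the entry above.
kubelet --config=/var/lib/kubelet/config.yaml --feature-gates=DownwardAPIHugePages=true
```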
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: 
[v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) -- 
github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/container-storage-interface/spec: [v1.2.0 → 
v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/creack/pty: [v1.1.7 → v1.1.11](https://github.com/creack/pty/compare/v1.1.7...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.5...v0.39.0) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) 
+- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: c1934b7 → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/gengo: 83324d8 → b6c5ce2 +- k8s.io/klog/v2: v2.4.0 → v2.8.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.2.0 → v1.4.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.1.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 - - -## Dependencies - -### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: 
[v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) - rsc.io/quote/v3: v3.1.0 - rsc.io/sampler: v1.3.0 - -### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) 
-- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: 
v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 - -### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 +- sigs.k8s.io/kustomize: v2.0.3+incompatible -# v1.20.0-rc.0 +# v1.21.0-rc.0 -## Downloads for v1.20.0-rc.0 +## Downloads for v1.21.0-rc.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes.tar.gz) | acfee8658831f9503fccda0904798405434f17be7064a361a9f34c6ed04f1c0f685e79ca40cef5fcf34e3193bacbf467665e8dc277e0562ebdc929170034b5ae -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-src.tar.gz) | 9d962f8845e1fa221649cf0c0e178f0f03808486c49ea15ab5ec67861ec5aa948cf18bc0ee9b2067643c8332227973dd592e6a4457456a9d9d80e8ef28d5f7c3 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes.tar.gz) | ef53a41955d6f8a8d2a94636af98b55d633fb8a5081517559039e019b3dd65c9d10d4e7fa297ab88a7865d772f3eecf72e7b0eeba5e87accb4000c91da33e148 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-src.tar.gz) | 9335a01b50d351776d3b8d00c07a5233844c51d307e361fa7e55a0620c1cb8b699e43eacf45ae9cafd8cbc44752e6987450c528a5bede8204706b7673000b5fc ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 062b57f1a450fe01d6184f104d81d376bdf5720010412821e315fd9b1b622a400ac91f996540daa66cee172006f3efade4eccc19265494f1a1d7cc9450f0b50a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-386.tar.gz) | 86e96d2c2046c5e62e02bef30a6643f25e01f1b3eba256cab7dd61252908540c26cb058490e9cecc5a9bad97d2b577f5968884e9f1a90237e302419f39e068bc -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 619d3afb9ce902368390e71633396010e88e87c5fd848e3adc71571d1d4a25be002588415e5f83afee82460f8a7c9e0bd968335277cb8f8cb51e58d4bb43e64e -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 60965150a60ab3d05a248339786e0c7da4b89a04539c3719737b13d71302bac1dd9bcaa427d8a1f84a7b42d0c67801dce2de0005e9e47d21122868b32ac3d40f -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | 688e064f4ef6a17189dbb5af468c279b9de35e215c40500fb97b1d46692d222747023f9e07a7f7ba006400f9532a8912e69d7c5143f956b1dadca144c67ee711 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | 47b8abc02b42b3b1de67da184921b5801d7e3cb09befac840c85913193fc5ac4e5e3ecfcb57da6b686ff21af9a3bd42ae6949d4744dbe6ad976794340e328b83 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 971b41d3169f30e6c412e0254c180636abb7ccc8dcee6641b0e9877b69752fc61aa30b76c19c108969df654fe385da3cb3a44dd59d3c28dc45561392d7e08874 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-386.tar.gz) | 
2d34e8387e31531d9aca5655f2f0d18e75b01825dc1c39b7beb73a7b7b610e2ba429e5ca97d5c41a71b67e75e7096c86ab63fda9baab4c0878c1ccb3a1aefac8 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | f909640f4140693bb871936f10a40e79b43502105d0adb318b35bb7a64a770ad9d05a3a732368ccd3d15d496d75454789165bd1f5c2571da9a00569b3e6c007c +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 964135e43234cee275c452f5f06fb6d2bcd3cff3211a0d50fa35fff1cc4446bc5a0ac5125405dadcfb6596cb152afe29fabf7aad5b35b100e1288db890b70f8e +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-arm64.tar.gz) | 50d782abaa4ded5e706b3192d87effa953ceabbd7d91e3d48b0c1fa2206a1963a909c14b923560f5d09cac2c7392edc5f38a13fbf1e9a40bc94e3afe8de10622 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-386.tar.gz) | 72af5562f24184a2d7c27f95fa260470da979fbdcacce39a372f8f3add2991d7af8bc78f4e1dbe7a0f97e3f559b149b72a51491d3b13008da81872ee50f02f37 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 1eddb8f6b51e005bc6f7b519d036cbe3d2f6d97dbf7d212dd933fb56354c29f222d050519115a9bcf94555aef095db7cf763469e47bb4ae3c6c07f97edf437cb +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 670f8ca60ea3cf0bb3262a772715e0ea735fccda6a92f3186299361dc455b304ae177d4017e0b67bbfa4a95e36f4cc3f7eb335e2a5130c93ac3fba2aff4519bf +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | a69a47907cff138ba393d8c87044fd95d97f3ca8f35d301b50742e2801ad7c229d99d6667971091f65825eb51854d585be0dd7421670110b1aa567e67e7ab4b3 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | b929feade94b71c81908abdcd4343b1e1e20098fd65e10d4d02585ad649d292d06f52c7ddc349efa188ce5b093e703c7aa9582c6ae5a69699adb87bbf5350243 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 899d1470e412282cf289d8e24806d1a08c62ec0151f345ae3c9e497cc7bc0feab76498de4dd897d6adcdfa0c422e6b1a37e25d928669030f53457fd69d6e7df7 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-386.tar.gz) | 9f0bc90a269eabd06fe4f637b5172a3a6a7d3de26de0d66504c2e1f2093083c584ea39031db6075a7da7a86b98c48bed25aa88d4ac09060b38692c6a5b637078 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | 05c8cc10188a1294b0d51d052942742a9b26411a08ec73494bf0e728a8a167e0a7863bdfc8864e76a371b584380098381805341e18b4b283b5d0cf298d5f7c7c ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 0ea4458ae34108c633b4d48f1f128c6274dbc82b613492e78b3e0a2f656ac0df0bb9a75124e15d67c8e81850adcecf19f4ab0234c17247ee7ddf84f2df3e5eaa -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm.tar.gz) | aef6a4d457faa29936603370f29a8523bb274211c3cb5101bd31aaf469c91ba6bd149ea99a4ccdd83352cf37e4d6508c5ee475ec10292bccd2f77ceea31e1c28 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | 
4829f473e9d60f9929ad17c70fdc2b6b6509ed75418be0b23a75b28580949736cb5b0bd6382070f93aa0a2a8863f0b1596daf965186ca749996c29d03ef7d8b8 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 9ab0790d382a3e28df1c013762c09da0085449cfd09d176d80be932806c24a715ea829be0075c3e221a2ad9cf06e726b4c39ab41987c1fb0fee2563e48206763 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 98670b587e299856dd9821b7517a35f9a65835b915b153de08b66c54d82160438b66f774bf5306c07bc956d70ff709860bc23162225da5e89f995d3fdc1f0122 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 355f278728ef7ac7eb2f5568c99c1429543c6302bbd0ed3bd0378c08116075e56ae850a49241313f078e2392702672ec6c9b70c8d97b4f2f5f4bee36828a63ba +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm.tar.gz) | 9ac02c2825e2fd4e92f0c0f67180c67c24e32841ccbabc82284bf6293727ffecfae65e8a42b527c2a7ca482752384928eb65c2a1706144ae7819a6b3a1ab291c +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | eb412453da03c82a9248412c8ccf4d4baa1fbfa81edd8d4f81d28969b40a3727e18934accc68f643d253446c58ffd2623292402495480b3d4b2a837b5318b957 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 07da2812c35bbc427ee5b4a0b601c3ae271e0d50ab0dd4c5c25399f43506fa2a187642eb9d4d2085df7b90264d48ea2f31088af87d9efa7eb2e87f91e1fdbde4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 3b79442a3d6e389c4ff105922a8e49994c0b6c088d2c501bd8c78d9f9e814902f5bb72c8f9c89380b750fda9b3a336759b9b68f11d70bef4f0e984564a95c29e ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | 699e9c8d1837198312eade8eb6fec390f6a2fea9e08207d2f58e8bb6e3e799028aca69e4670aac0a4ba7cf0af683aee2c158bf78cc520c80edc876c8d94d521a -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm.tar.gz) | f3b5eab0669490e3cd7e802693daf3555d08323dfff6e73a881fce00fed4690e8bdaf1610278d9de74036ca37631016075e5695a02158b7d3e7582b20ef7fa35 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | e5012f77363561a609aaf791baaa17d09009819c4085a57132e5feb5366275a54640094e6ed1cba527f42b586c6d62999c2a5435edf5665ff0e114db4423c2ae -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 2a6d6501620b1a9838dff05c66a40260cc22154a28027813346eb16e18c386bc3865298a46a0f08da71cd55149c5e7d07c4c4c431b4fd231486dd9d716548adb -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 5eca02777519e31428a1e5842fe540b813fb8c929c341bbc71dcfd60d98deb89060f8f37352e8977020e21e053379eead6478eb2d54ced66fb9d38d5f3142bf0 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | 8ace02e7623dff894e863a2e0fa7dfb916368431d1723170713fe82e334c0ae0481b370855b71e2561de0fb64fed124281be604761ec08607230b66fb9ed1c03 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | f12edf1faf5f07de1ebc5a8626601c12927902e10aca3f11e398637382fdf55365dbd9a0ef38858553fb7569495ae2cf68f155dd2e49b85b27d76fb599bb92e4 
+[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm.tar.gz) | 4fba8fc4e2102f07fb778aab597ec7231ea65c35e1aa618fe98b707b64a931237bd842c173e9120326e4d9deb983bb3917176762bba2212612bbc09d6e2105c4 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | a2e1be5459a8346839970faf4e7ebdb8ab9f3273e02babf1f3199b06bdb67434a2d18fcd1628cf1b989756e99d8dad6624a455b9db11d50f51f509f4df5c27da +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 16d2c1cc295474fc49fe9a827ddd73e81bdd6b76af7074987b90250023f99b6d70bf474e204c7d556802111984fcb3a330740b150bdc7970d0e3634eb94a1665 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 9dc6faa6cd007b13dfce703f3e271f80adcc4e029c90a4a9b4f2f143b9756f2893f8af3d7c2cf813f2bd6731cffd87d15d4229456c1685939f65bf467820ec6e +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | f8bac2974c9142bfb80cd5eadeda79f79f27b78899a4e6e71809b795c708824ba442be83fdbadb98e01c3823dd8350776358258a205e851ed045572923cacba7 -## Changelog since v1.20.0-beta.2 +## Changelog since v1.21.0-beta.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + + - Migrated pkg/kubelet/cm/cpuset/cpuset.go to structured logging. Exit code changed from 255 to 1. ([#100007](https://github.com/kubernetes/kubernetes/pull/100007), [@utsavoza](https://github.com/utsavoza)) [SIG Instrumentation and Node] + ## Changes by Kind -### Feature +### API Change -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Add Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing] +- CSIServiceAccountToken is Beta now ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang)) [SIG Auth, Storage and Testing] +- Discovery.k8s.io/v1beta1 EndpointSlices are deprecated in favor of discovery.k8s.io/v1, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) [SIG Network] +- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing] +- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing] +- Implement the GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing] +- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. 
The EndpointSlice API can be used to support a significantly larger number of addresses. ([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing]
-### Failing Test
+### Feature
-- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing]
-- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing]
+- Add e2e test to validate performance metrics of volume lifecycle operations ([#94334](https://github.com/kubernetes/kubernetes/pull/94334), [@RaunakShah](https://github.com/RaunakShah)) [SIG Storage and Testing]
+- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]
+- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage]
+- Introduce a churn operator to scheduler perf testing framework. 
([#98900](https://github.com/kubernetes/kubernetes/pull/98900), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing]
+- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing]
+- Migrated pkg/kubelet/cm/devicemanager to structured logging ([#99976](https://github.com/kubernetes/kubernetes/pull/99976), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Migrated pkg/kubelet/cm/memorymanager to structured logging ([#99974](https://github.com/kubernetes/kubernetes/pull/99974), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Migrated pkg/kubelet/cm/topologymanager to structured logging ([#99969](https://github.com/kubernetes/kubernetes/pull/99969), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Rename the metric `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metric name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing]
+- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing]
+- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect a container for kubectl commands. ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI]
### Bug or Regression
-- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing]
-- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing]
-- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing]
-- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation]
-- Fixes code generation for non-namespaced create subresources fake client test. 
([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery]
-- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node]
-- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
-- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage]
-- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node]
+- Add ability to skip OpenAPI handler installation to the GenericAPIServer ([#100341](https://github.com/kubernetes/kubernetes/pull/100341), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery]
+- Count pod overhead against an entity's ResourceQuota ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42)) [SIG API Machinery and Node]
+- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
+- Fixed authentication_duration_seconds metric. Previously it included the whole apiserver request duration. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) [SIG API Machinery, Instrumentation and Scalability]
+- Fixes an issue where inline AzureFile secrets could not be accessed from the pod's namespace. 
([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42)) [SIG Storage]
+- Improve speed of vSphere PV provisioning and reduce the number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
+- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI]
+- Kubectl: fix case of age column in describe node (#96963, @bl-ue) ([#96963](https://github.com/kubernetes/kubernetes/pull/96963), [@bl-ue](https://github.com/bl-ue)) [SIG CLI]
+- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is equivalent to checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows]
+- Kubelet: Fixed a bug in getting the number of CPUs when the number of logical processors is more than 64 on Windows ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows]
+- Pass `KUBE_BUILD_CONFORMANCE=y` to the package-tarballs to reenable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) [SIG Release]
+- Pod log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows]
+
+### Other (Cleanup or Flake)
+
+- A new storage E2E test suite covers CSIStorageCapacity publishing if a driver opts into the test. ([#100537](https://github.com/kubernetes/kubernetes/pull/100537), [@pohly](https://github.com/pohly)) [SIG Storage and Testing]
+- Convert cmd/kubelet/app/server.go to structured logging ([#98334](https://github.com/kubernetes/kubernetes/pull/98334), [@wawa0210](https://github.com/wawa0210)) [SIG Node]
+- If the kube-apiserver has the goaway feature enabled, clients require golang 1.15.8 or 1.16+ to avoid an unexpected data race issue. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) [SIG API Machinery]
+- Increased CSINodeIDMaxLength from 128 bytes to 192 bytes. 
([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps and Storage]
+- Migrate `pkg/kubelet/pluginmanager` to structured logging ([#99885](https://github.com/kubernetes/kubernetes/pull/99885), [@qingwave](https://github.com/qingwave)) [SIG Node]
+- Migrate `pkg/kubelet/preemption/preemption.go` and `pkg/kubelet/logs/container_log_manager.go` to structured logging ([#99848](https://github.com/kubernetes/kubernetes/pull/99848), [@qingwave](https://github.com/qingwave)) [SIG Node]
+- Migrate `pkg/kubelet/(cri)` to structured logging ([#99006](https://github.com/kubernetes/kubernetes/pull/99006), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/(node, pod)` to structured logging ([#98847](https://github.com/kubernetes/kubernetes/pull/98847), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/(volume,container)` to structured logging ([#98850](https://github.com/kubernetes/kubernetes/pull/98850), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/kubelet_node_status.go` to structured logging ([#98154](https://github.com/kubernetes/kubernetes/pull/98154), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node and Release]
+- Migrate `pkg/kubelet/lifecycle,oom` to structured logging ([#99479](https://github.com/kubernetes/kubernetes/pull/99479), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation and Node]
+- Migrate cmd/kubelet/+ pkg/kubelet/cadvisor/cadvisor_linux.go + pkg/kubelet/cri/remote/util/util_unix.go + pkg/kubelet/images/image_manager.go to structured logging ([#99994](https://github.com/kubernetes/kubernetes/pull/99994), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/container_manager_linux.go and pkg/kubelet/cm/container_manager_stub.go to structured logging ([#100001](https://github.com/kubernetes/kubernetes/pull/100001), [@shiyajuan123](https://github.com/shiyajuan123)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/cpumanager/{topology/topology.go, policy_none.go, cpu_assignment.go} to structured logging ([#100163](https://github.com/kubernetes/kubernetes/pull/100163), [@lala123912](https://github.com/lala123912)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/cpumanager/state to structured logging ([#99563](https://github.com/kubernetes/kubernetes/pull/99563), [@jmguzik](https://github.com/jmguzik)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/config to structured logging ([#100002](https://github.com/kubernetes/kubernetes/pull/100002), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/kubelet.go to structured logging ([#99861](https://github.com/kubernetes/kubernetes/pull/99861), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/kubeletconfig to structured logging ([#100265](https://github.com/kubernetes/kubernetes/pull/100265), [@ehashman](https://github.com/ehashman)) [SIG Node]
+- Migrate pkg/kubelet/kuberuntime to structured logging ([#99970](https://github.com/kubernetes/kubernetes/pull/99970), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/prober to structured logging ([#99830](https://github.com/kubernetes/kubernetes/pull/99830), [@krzysiekg](https://github.com/krzysiekg)) [SIG 
Instrumentation and Node] +- Migrate pkg/kubelet/winstats to structured logging ([#99855](https://github.com/kubernetes/kubernetes/pull/99855), [@hexxdump](https://github.com/hexxdump)) [SIG Instrumentation and Node] +- Migrate probe log messages to structured logging ([#97093](https://github.com/kubernetes/kubernetes/pull/97093), [@aldudko](https://github.com/aldudko)) [SIG Instrumentation and Node] +- Migrate remaining kubelet files to structured logging ([#100196](https://github.com/kubernetes/kubernetes/pull/100196), [@ehashman](https://github.com/ehashman)) [SIG Instrumentation and Node] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] ## Dependencies @@ -967,411 +685,411 @@ filename | sha512 hash _Nothing has changed._ ### Changed -- github.com/google/cadvisor: [v0.38.4 → v0.38.5](https://github.com/google/cadvisor/compare/v0.38.4...v0.38.5) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/creack/pty: [v1.1.9 → v1.1.11](https://github.com/creack/pty/compare/v1.1.9...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/google/cadvisor: [v0.38.8 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.8...v0.39.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/klog/v2: v2.5.0 → v2.8.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.3 → v4.1.0 ### Removed _Nothing has changed._ -# v1.20.0-beta.2 +# v1.21.0-beta.1 -## Downloads for v1.20.0-beta.2 +## Downloads for v1.21.0-beta.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes.tar.gz) | fe769280aa623802a949b6a35fbddadbba1d6f9933a54132a35625683719595ecf58096a9aa0f7456f8d4931774df21bfa98e148bc3d85913f1da915134f77bd 
-[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-src.tar.gz) | ce1c8d97c52e5189af335d673bd7e99c564816f6adebf249838f7e3f0e920f323b4e398a5d163ea767091497012ec38843c59ff14e6fdd07683b682135eed645 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes.tar.gz) | c9f4f25242e319e5d90f49d26f239a930aad69677c0f3c2387c56bb13482648a26ed234be2bfe2352508f35010e3eb6d3b127c31a9f24fa1e53ac99c38520fe4 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-src.tar.gz) | 255357db8fa160cab2187658906b674a8b0d9b9a5b5f688cc7b69dc124f5da00362c6cc18ae9b80f7ddb3da6f64c2ab2f12fb9b63a4e063c7366a5375b175cda ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | d6c14bd0f6702f4bbdf14a6abdfa4e5936de5b4efee38aa86c2bd7272967ec6d7868b88fc00ad4a7c3a20717a35e6be2b84e56dec04154fd702315f641409f7c -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-386.tar.gz) | b923c44cb0acb91a8f6fd442c2168aa6166c848f5d037ce50a7cb11502be3698db65836b373c916f75b648d6ac8d9158807a050eecc4e1c77cffa25b386c8cdb -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | 8cae14146a9034dcd4e9d69d5d700f195a77aac35f629a148960ae028ed8b4fe12213993fe3e6e464b4b3e111adebe6f3dd7ca0accc70c738ed5cfd8993edd7c -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm.tar.gz) | 1f54e5262a0432945ead57fcb924e6bfedd9ea76db1dd9ebd946787a2923c247cf16e10505307b47e365905a1b398678dac5af0f433c439c158a33e08362d97b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | 31cf79c01e4878a231b4881fe3ed5ef790bd5fb5419388438d3f8c6a2129e655aba9e00b8e1d77e0bc5d05ecc75cf4ae02cf8266788822d0306c49c85ee584ed -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | 2527948c40be2e16724d939316ad5363f15aa22ebf42d59359d8b6f757d30cfef6447434cc93bc5caa5a23a6a00a2da8d8191b6441e06bba469d9d4375989a97 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | b777ad764b3a46651ecb0846e5b7f860bb2c1c4bd4d0fcc468c6ccffb7d3b8dcb6dcdd73b13c16ded7219f91bba9f1e92f9258527fd3bb162b54d7901ac303ff -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-386.tar.gz) | 8a2f58aaab01be9fe298e4d01456536047cbdd39a37d3e325c1f69ceab3a0504998be41a9f41a894735dfc4ed22bed02591eea5f3c75ce12d9e95ba134e72ec5 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | 2f69cda177a178df149f5de66b7dba7f5ce14c1ffeb7c8d7dc4130c701b47d89bb2fbe74e7a262f573e4d21dee2c92414d050d7829e7c6fc3637a9d6b0b9c5c1 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | 02efd389c8126456416fd2c7ea25c3cc30f612649ad91f631f068d6c0e5e539484d3763cb9a8645ad6b8077e4fcd1552a659d7516ebc4ce6828cf823b65c3016 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-arm64.tar.gz) | ac90dcd1699d1d7ff9c8342d481f6d0d97ccdc3ec501a56dc7c9e1898a8f77f712bf66942d304bfe581b5494f13e3efa211865de88f89749780e9e26e673dbdb +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-386.tar.gz) | 
cce5fb84cc7a1ee664f89d8ad3064307c51c044e9ddd2ae5a004939b69d3b3ef6f29acc5782e27d0c8f0d6d3d9c96e922f5d1b99d210ca3e754666d775df9f0c +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 2e93bbd2e60ad7cd8fe495115e96c55b1dc8facd100a827ef9c197a732679b60cceb9ea7bf92a1f5e328c3b8adfa8d3922cbc5d8370e374f3381b83f5b877b4f +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm.tar.gz) | 23f03b6a8fa9decce9b89a2c1bd3dae6d0b2f9e533e35a79e2c5a29326a165259677594ae83c877219a21bdb95557a284e55f4eec12954742794579c89a7d7e5 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3acf3101b46568b0ded6b90f13df0e918870d6812dc1a584903ddb8ba146484a204b9e442f863df47c7d4dab043fd9f7294c5510d3eb09004993d6d3b1e9e13c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | f749198df69577f62872d3096138a1b8969ec6b1636eb68eb56640bf33cf5f97a11df4363462749a1c0dc3ccbb8ae76c5d66864bf1c5cf7e52599caaf498e504 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | 3f6c0189d59fca22cdded3a02c672ef703d17e6ab0831e173a870e14ccec436c142600e9fc35b403571b6906f2be8d18d38d33330f7caada971bbe1187b388f6 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-386.tar.gz) | 03d92371c425cf331c80807c0ac56f953be304fc6719057258a363d527d186d610e1d4b4d401b34128062983265c2e21f2d2389231aa66a6f5787eee78142cf6 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | 489ece0c886a025ca3a25d28518637a5a824ea6544e7ef8778321036f13c8909a978ad4ceca966cec1e1cda99f25ca78bfd37460d1231c77436d216d43c872ad ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | 3ecaac0213d369eab691ac55376821a80df5013cb12e1263f18d1c236a9e49d42b3cea422175556d8f929cdf3109b22c0b6212ac0f2e80cc7a5f4afa3aba5f24 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm.tar.gz) | 580030b57ff207e177208fec0801a43389cae10cc2c9306327d354e7be6a055390184531d54b6742e0983550b7a76693cc4a705c2d2f4ac30495cf63cef26b9b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | 3e3286bd54671549fbef0dfdaaf1da99bc5c3efb32cc8d1e1985d9926520cea0c43bcf7cbcbbc8b1c1a95eab961255693008af3bb1ba743362998b5f0017d6d7 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | 9fa051e7e97648e97e26b09ab6d26be247b41b1a5938d2189204c9e6688e455afe76612bbcdd994ed5692935d0d960bd96dc222bce4b83f61d62557752b9d75b -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | fa85d432eff586f30975c95664ac130b9f5ae02dc52b97613ed7a41324496631ea11d1a267daba564cf2485a9e49707814d86bbd3175486c7efc8b58a9314af5 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 2e95cb31d5afcb6842c41d25b7d0c18dd7e65693b2d93c8aa44e5275f9c6201e1a67685c7a8ddefa334babb04cb559d26e39b6a18497695a07dc270568cae108 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 
2927e82b98404c077196ce3968f3afd51a7576aa56d516019bd3976771c0213ba01e78da5b77478528e770da0d334e9457995fafb98820ed68b2ee34beb68856 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | e0f7aea3ea598214a9817bc04949389cb7e4e7b9503141a590ef48c0b681fe44a4243ebc6280752fa41aa1093149b3ee1bcef7664edb746097a342281825430b +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | c011f7eb01294e9ba5d5ced719068466f88ed595dcb8d554a36a4dd5118fb6b3d6bafe8bf89aa2d42988e69793ed777ba77b8876c6ec74f898a43cfce1f61bf4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 15f6683e7f16caab7eebead2b7c15799460abbf035a43de0b75f96b0be19908f58add98a777a0cca916230d60cf6bfe3fee92b9dcff50274b1e37c243c157969 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | 86e631f95fe670b467ead2b88d34e0364eaa275935af433d27cc378d82dcaa22041ccce40f5fa9561b9656dadaa578dc018ad458a59b1690d35f86dca4776b5c -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm.tar.gz) | a8754ff58a0e902397056b8615ab49af07aca347ba7cc4a812c238e3812234862270f25106b6a94753b157bb153b8eae8b39a01ed67384774d798598c243583b -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | 28d727d7d08e2c856c9b4a574ef2dbf9e37236a0555f7ec5258b4284fa0582fb94b06783aaf50bf661f7503d101fbd70808aba6de02a2f0af94db7d065d25947 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | a1283449f1a0b155c11449275e9371add544d0bdd4609d6dc737ed5f7dd228e84e24ff249613a2a153691627368dd894ad64f4e6c0010eecc6efd2c13d4fb133 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | 5806028ba15a6a9c54a34f90117bc3181428dbb0e7ced30874c9f4a953ea5a0e9b2c73e6b1e2545e1b4e5253e9c7691588538b44cdfa666ce6865964b92d2fa8 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | d5327e3b7916c78777b9b69ba0f3758c3a8645c67af80114a0ae52babd7af27bb504febbaf51b1bfe5bd2d74c8c5c573471e1cb449f2429453f4b1be9d5e682a +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | ed58679561197110f366b9109f7afd62c227bfc271918ccf3eea203bb2ab6428eb5db4dd6c965f202a8a636f66da199470269b863815809b99d53d2fa47af2ea +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 7e6c7f1957fcdecec8fef689c5019edbc0d0c11d22dafbfef0a07121d10d8f6273644f73511bd06a9a88b04d81a940bd6645ffb5711422af64af547a45c76273 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | a3618f29967e7a1574917a67f0296e65780321eda484b99aa32bfd4dc9b35acdefce33da952ac52dfb509fbac5bf700cf177431fad2ab4adcab0544538939faa +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 326d3eb521b41bdf489912177f70b8cdd7cd828bb9b3d847ed3694eb27e457f24e0a88b8e51b726eee39800a3c5a40c1b30e3a8ec4a34d8041b3d8ef05d1b749 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 022d05ebaa66a0332c4fe18cdaf23d14c2c7e4d1f2af7f27baaf1eb042e6890dc3434b4ac8ba58c35d590717956f8c3458112685aff4938b94b18e263c3f4256 
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | fa691ed93f07af6bc1cf57e20a30580d6c528f88e5fea3c14f39c1820969dc5a0eb476c5b87b288593d0c086c4dd93aff6165082393283c3f46c210f9bb66d61 -## Changelog since v1.20.0-beta.1 +## Changelog since v1.21.0-beta.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - A bug was fixed in kubelet where exec probe timeouts were not respected. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the ExecProbeTimeout feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] - - For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] - - Kubeadm: - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - - For more details see: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. 
([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Migrate `pkg/kubelet/(dockershim, network)` to structured logging + Exit code changed from 255 to 1 ([#98939](https://github.com/kubernetes/kubernetes/pull/98939), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Network and Node] + - Migrate `pkg/kubelet/certificate` to structured logging + Exit code changed from 255 to 1 ([#98993](https://github.com/kubernetes/kubernetes/pull/98993), [@SataQiu](https://github.com/SataQiu)) [SIG Auth and Node] + - Newly provisioned PVs by EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing] + - Newly provisioned PVs by OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] + - OpenStack Cinder CSI migration is on by default; the Cinder CSI driver must be installed on OpenStack clusters for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage] + - Package pkg/kubelet/server migrated to structured logging + Exit code changed from 255 to 1 ([#99838](https://github.com/kubernetes/kubernetes/pull/99838), [@adisky](https://github.com/adisky)) [SIG Node] + - Pkg/kubelet/kuberuntime/kuberuntime_manager.go migrated to structured logging + Exit code changed from 255 to 1 ([#99841](https://github.com/kubernetes/kubernetes/pull/99841), [@adisky](https://github.com/adisky)) [SIG Instrumentation and Node] ## Changes by Kind ### Deprecation -- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node] -- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability] +- Kubeadm: the deprecated kube-dns is no longer supported as an option. If "ClusterConfiguration.dns.type" is set to "kube-dns", kubeadm will now throw an error.
([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Remove deprecated --generator --replicas --service-generator --service-overrides --schedule from kubectl run + Deprecate --serviceaccount --hostport --requests --limits in kubectl run ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `diskformat` storage class parameter for in-tree vSphere volume plugin is deprecated as of v1.21 release. Please consider updating the storageclass and removing the `diskformat` parameter. vSphere CSI Driver does not support diskformat storageclass parameter. + + vSphere releases less than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM Hardware version less than 15 is deprecated as of v1.21. Please consider upgrading the Node VM Hardware version to 15 or above. vSphere CSI Driver recommends Node VM's Hardware version set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployment spanning across multiple vCenter servers. + + Support for these deprecations will be available till Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] ### API Change -- API priority and fairness graduated to beta - 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. ([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- Add LoadBalancerIPMode feature gate ([#92312](https://github.com/kubernetes/kubernetes/pull/92312), [@Sh4d1](https://github.com/Sh4d1)) [SIG Apps, CLI, Cloud Provider and Network] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a 'serving' and `terminating` condition to the EndpointSlice API. - - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp.
([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- Fewer candidates are enumerated for preemption to improve performance in large clusters ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) [SIG Scheduling] -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. 
--topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBSVC is enabled. - "action required" - The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. ([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] +- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels. + 2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace reference via namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing] +- Add a default metadata name label for selecting any namespace by its name.
([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing] +- Added `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed` ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- Clarified NetworkPolicy policyTypes documentation ([#97216](https://github.com/kubernetes/kubernetes/pull/97216), [@joejulian](https://github.com/joejulian)) [SIG Network] +- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing] +- EndpointSlice API is now GA. The EndpointSlice topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The discovery.k8s.io/v1alpha1 API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network and Testing] +- EndpointSlice Controllers are now GA. The EndpointSlice Controller will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, Apps, Auth, Network and Testing] +- IngressClass resource can now reference a resource in a specific namespace + for implementation-specific configuration (previously only Cluster-level resources were allowed). + This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi)) [SIG API Machinery, CLI and Network] +- Introduce conditions for PodDisruptionBudget ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- Jobs API has a new .spec.suspend field that can be used to suspend and resume Jobs ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac)) [SIG API Machinery, Apps, Node, Scheduling and Testing] +- Kubelet Graceful Node Shutdown feature is now beta. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage)) [SIG Node] +- Limit the request value of hugepages to an integer multiple of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps] +- One new field "InternalTrafficPolicy" in Service is added. + It specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. + "Cluster" routes internal traffic to a Service to all endpoints.
+ "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. + The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network] +- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security] +- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows] +- Setting loadBalancerClass in load balancer type of service is available with this PR. + Users who want to use a custom load balancer can specify loadBalancerClass to achieve it. ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold)) [SIG API Machinery, Apps, Cloud Provider and Network] +- Storage capacity tracking (= the CSIStorageCapacity feature) is beta, storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, Scheduling, Storage and Testing] +- Support for Indexed Job: a Job that is considered completed when Pods associated to indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- The apiserver now resets managedFields that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) [SIG API Machinery and Testing] +- `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a pod compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) [SIG Apps] ### Feature -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Cluster Lifecycle] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. 
([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. - This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Adds a headless service on node-local-cache addon. ([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. 
- - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. ([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric got additional buckets of 60, 300, 600, 900 and 1200 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. 
([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - - removes experimental warning for InstancesV2 - - document that implementation of InstancesV2 will disable calls to Zones - - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- Add --use-protocol-buffers flag to kubectl top pods and nodes ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius)) [SIG CLI] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds two new metrics to cronjobs, a histogram to track the time difference when a job is created and the expected time when it should be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps and Instrumentation] +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859 enabled when KUBECTL_COMMAND_HEADERS environment variable set on the client command line. + - To enable: export KUBECTL_COMMAND_HEADERS=1; kubectl ... ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) [SIG API Machinery and CLI] +- Component owner can configure the allowlist of metric label with flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Disruption controller only sends one event per PodDisruptionBudget if scale can't be computed ([#98128](https://github.com/kubernetes/kubernetes/pull/98128), [@mortent](https://github.com/mortent)) [SIG Apps] +- EndpointSliceNodeName will always be enabled, so NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] +- Graduate CRIContainerLogRotation feature gate to GA. 
([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) [SIG Node and Testing] +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm: warn the user that ipv6 site-local is deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: promote IPv6DualStack feature gate to Beta ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Remove cAdvisor json metrics api collected by Kubelet ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Sysctls is now GA and locked to default ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) [SIG Node] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The endpointslice mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices; it also ensures that updates on any of these fields are mirrored. + The well-known annotation endpoints.kubernetes.io/last-change-trigger-time is skipped and not mirrored.
([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) [SIG Apps, Network and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared in a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] ### Bug or Regression -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. 
([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. 
- - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Fix --ignore-errors does not take effect if multiple logs are printed and unfollowed ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) [SIG CLI] +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix smb mount PermissionDenied issue on Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider, Storage and Windows] +- Fixed a bug that causes smaller number of conntrack-max being used under CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixes add-on manager leader election ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider] +- Improved update time of pod statuses following new probe results. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) [SIG Node and Testing] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to be false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node] +- Return zero time (midnight on Jan. 1, 1970) instead of negative number when reporting startedAt and finishedAt of the not started or a running Pod when using dockershim as a runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber)) [SIG Node] +- Stdin is now only passed to client-go exec credential plugins when it is detected to be an interactive terminal. 
Previously, it was passed to client-go exec plugins when **stdout** was detected to be an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] +- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network] +- Updates the commands + - kubectl kustomize {arg} + - kubectl apply -k {arg} + to use the same code as kustomize CLI v4.0.5 + - [v4.0.5]: https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5 ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) [SIG API Machinery, Architecture, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Storage] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When using Containerd on Windows, the "C:\Windows\System32\drivers\etc\hosts" file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node and Windows] +- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set as zero, while the value zero indicates no waiting for the checking of volume binding operation. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, print inline with exec and attach the list of alternate containers when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### Other (Cleanup or Flake) -- Handle slow cronjob lister in cronjob controller v2 and improve memory footprint. ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`.
([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- NONE ([#96179](https://github.com/kubernetes/kubernetes/pull/96179), [@bbyrne5](https://github.com/bbyrne5)) [SIG Network] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Apiserver_request_total is promoted to stable status and no longer has a content-type dimensions, so any alerts/charts which presume the existence of this will fail. This is however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- EndpointSlice generation is now incremented when labels change. 
([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Feature gate AllowInsecureBackendProxy is promoted to GA ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] +- Migrate `pkg/kubelet/(eviction)` to structured logging ([#99032](https://github.com/kubernetes/kubernetes/pull/99032), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate deployment controller log messages to structured logging ([#97507](https://github.com/kubernetes/kubernetes/pull/97507), [@aldudko](https://github.com/aldudko)) [SIG Apps] +- Migrate pkg/kubelet/cloudresource to structured logging ([#98999](https://github.com/kubernetes/kubernetes/pull/98999), [@sladyn98](https://github.com/sladyn98)) [SIG Node] +- Migrate pkg/kubelet/cri/remote logs to structured logging ([#98589](https://github.com/kubernetes/kubernetes/pull/98589), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime/kuberuntime_container.go logs to structured logging ([#96973](https://github.com/kubernetes/kubernetes/pull/96973), [@chenyw1990](https://github.com/chenyw1990)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/status to structured logging ([#99836](https://github.com/kubernetes/kubernetes/pull/99836), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/token to structured logging ([#99264](https://github.com/kubernetes/kubernetes/pull/99264), [@palnabarun](https://github.com/palnabarun)) [SIG Auth, Instrumentation and Node] +- Migrate pkg/kubelet/util to structured logging ([#99823](https://github.com/kubernetes/kubernetes/pull/99823), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate proxy/userspace/proxier.go logs to structured logging ([#97837](https://github.com/kubernetes/kubernetes/pull/97837), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some kubelet/metrics log messages to structured logging ([#98627](https://github.com/kubernetes/kubernetes/pull/98627), [@jialaijun](https://github.com/jialaijun)) [SIG Instrumentation and Node] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] + +### Uncategorized + +- Migrate pkg/kubelet/stats to structured logging ([#99607](https://github.com/kubernetes/kubernetes/pull/99607), [@krzysiekg](https://github.com/krzysiekg)) [SIG Node] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are at version 1.20 or newer. The feature will be enabled by default in all installations in 1.22. 
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: 
[a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.35.5 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.35.5...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.4](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.4) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: 
[v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 8b50664 → d219536 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.12 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: b3cf1e8 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/creack/pty: [v1.1.7 → v1.1.9](https://github.com/creack/pty/compare/v1.1.7...v1.1.9) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/google/cadvisor: [v0.38.7 → v0.38.8](https://github.com/google/cadvisor/compare/v0.38.7...v0.38.8) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: 113979e → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.3.0 → v1.4.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- sigs.k8s.io/kustomize: 
v2.0.3+incompatible -# v1.20.0-beta.1 +# v1.21.0-beta.0 -## Downloads for v1.20.0-beta.1 +## Downloads for v1.21.0-beta.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes.tar.gz) | 4eddf4850c2d57751696f352d0667309339090aeb30ff93e8db8a22c6cdebf74cb2d5dc78d4ae384c4e25491efc39413e2e420a804b76b421a9ad934e56b0667 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-src.tar.gz) | 59de5221162e9b6d88f5abbdb99765cb2b2e501498ea853fb65f2abe390211e28d9f21e0d87be3ade550a5ea6395d04552cf093d2ce2f99fd45ad46545dd13cb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes.tar.gz) | 69b73a03b70b0ed006e9fef3f5b9bc68f0eb8dc40db6cc04777c03a2cb83a008c783012ca186b1c48357fb192403dbcf6960f120924785e2076e215b9012d546 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-src.tar.gz) | 9620fb6d37634271bdd423c09f33f3bd29e74298aa82c47dffc8cb6bd2ff44fa8987a53c53bc529db4ca96ec41503aa81cc8d0c3ac106f3b06c4720de933a8e6 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | d69ffed19b034a4221fc084e43ac293cf392e98febf5bf580f8d92307a8421d8b3aab18f9ca70608937e836b42c7a34e829f88eba6e040218a4486986e2fca21 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-386.tar.gz) | 1b542e165860c4adcd4550adc19b86c3db8cd75d2a1b8db17becc752da78b730ee48f1b0aaf8068d7bfbb1d8e023741ec293543bc3dd0f4037172a6917db8169 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 90ad52785eecb43a6f9035b92b6ba39fc84e67f8bc91cf098e70f8cfdd405c4b9d5c02dccb21022f21bb5b6ce92fdef304def1da0a7255c308e2c5fb3a9cdaab -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm.tar.gz) | d0cb3322b056e1821679afa70728ffc0d3375e8f3326dabbe8185be2e60f665ab8985b13a1a432e10281b84a929e0f036960253ac0dd6e0b44677d539e98e61b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3aecc8197e0aa368408624add28a2dd5e73f0d8a48e5e33c19edf91d5323071d16a27353a6f3e22df4f66ed7bfbae8e56e0a9050f7bbdf927ce6aeb29bba6374 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | 6ff145058f62d478b98f1e418e272555bfb5c7861834fbbf10a8fb334cc7ff09b32f2666a54b230932ba71d2fc7d3b1c1f5e99e6fe6d6ec83926a9b931cd2474 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | ff7b8bb894076e05a3524f6327a4a6353b990466f3292e84c92826cb64b5c82b3855f48b8e297ccadc8bcc15552bc056419ff6ff8725fc4e640828af9cc1331b -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-386.tar.gz) | 6c6dcac9c725605763a130b5a975f2b560aa976a5c809d4e0887900701b707baccb9ca1aebc10a03cfa7338a6f42922bbf838ccf6800fc2a3e231686a72568b6 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | d12e3a29c960f0ddd1b9aabf5426ac1259863ac6c8f2be1736ebeb57ddca6b1c747ee2c363be19e059e38cf71488c5ea3509ad4d0e67fd5087282a5ad0ae9a48 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | 2a6f3fcd6b571f5ccde56b91e6e179a01899244be496dae16a2a16e0405c9437b75c6dc853b56f9a4876a7c0a60ec624ccd28400bf8fb960258263172f6860ba 
+[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-386.tar.gz) | 78fe9ad9f9a9bc043293327223f0038a2c087ca65e87187a6dcae7a24aef9565fe498d295a4639b0b90524469a04930022fcecd815d0afc742eb87ddd8eb7ef5 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | c025f5e5bd132355e7dd1296cf2ec752264e7f754c4d95fc34b076bd75bef2f571d30872bcb3d138ce95c592111353d275a80eb31f82c07000874b4c56282dbd +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm.tar.gz) | 9975cd2f08fbc202575fb15ba6fc51dab23155ca4d294ebb48516a81efa51f58bab3a87d41c865103756189b554c020371d729ad42880ba788f25047ffc46910 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 56a6836e24471e42e9d9a8488453f2d55598d70c8aca0a307d5116139c930c25c469fd0d1ab5060fbe88dad75a9b5209a08dc11d644af5f3ebebfbcb6c16266c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | b6a6cc9baad0ad85ed079ee80e6d6acc905095cfb440998bbc0f553b94fa80077bd58b8692754de477517663d51161705e6e89a1b6d04aa74819800db3517722 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 7b743481b340f510bf9ae28ea8ea91150aa1e8c37fe104b66d7b3aff62f5e6db3c590d2c13d14dbb5c928de31c7613372def2496075853611d10d6b5fa5b60bd +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-386.tar.gz) | df06c7a524ce84c1f8d7836aa960c550c88dbca0ec4854df4dd0a85b3c84b8ecbc41b54e8c4669ce28ac670659ff0fad795deb1bc539f3c3b3aa885381265f5a +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 4568497b684564f2a94fbea6cbfd778b891231470d9a6956c3b7a3268643d13b855c0fc5ebea5f769300cc0c7719c2c331c387f468816f182f63e515adeaa7a0 ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 904e8c049179e071c6caa65f525f465260bb4d4318a6dd9cc05be2172f39f7cfc69d1672736e01d926045764fe8872e806444e3af77ffef823ede769537b7d20 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 5934959374868aed8d4294de84411972660bca7b2e952201a9403f37e40c60a5c53eaea8001344d0bf4a00c8cd27de6324d88161388de27f263a5761357cb82b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | 4c884585970f80dc5462d9a734d7d5be9558b36c6e326a8a3139423efbd7284fa9f53fb077983647e17e19f03f5cb9bf26201450c78daecf10afa5a1ab5f9efc -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | 235b78b08440350dcb9f13b63f7722bd090c672d8e724ca5d409256e5a5d4f46d431652a1aa908c3affc5b1e162318471de443d38b93286113e79e7f90501a9b -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 220fc9351702b3ecdcf79089892ceb26753a8a1deaf46922ffb3d3b62b999c93fef89440e779ca6043372b963081891b3a966d1a5df0cf261bdd44395fd28dce +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 42883cca2d312153baf693fc6024a295359a421e74fd70eefc927413be4e0353debe634e7cca6b9a8f7d8a0cee3717e03ba5d29a306e93139b1c2f3027535a6d 
+[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm.tar.gz) | e0042215e84c769ba4fc4d159ccf67b2c4a26206bfffb0ec5152723dc813ff9c1426aa0e9b963d7bfa2efb266ca43561b596b459152882ebb42102ccf60bd8eb +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | bfad29d43e14152cb9bc7c4df6aa77929c6eca64a294bb832215bdba9fa0ee2195a2b709c0267dc7426bb371b547ee80bb8461a8c678c9bffa0819aa7db96289 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | ca67674c01c6cebdc8160c85b449eab1a23bb0557418665246e0208543fa2eaaf97679685c7b49bee3a4300904c0399c3d762ae34dc3e279fd69ce792c4b07ff +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 285352b628ec754b01b8ad4ef1427223a142d58ebcb46f6861df14d68643133b32330460b213b1ba5bc5362ff2b6dacd8e0c2d20cce6e760fa1954af8a60df8b ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | fe59d3a1f21c47bab126f689687657f77fbcb46a2caeef48eecd073b2b22879f997a466911b5c5c829e9cf27e68a36ecdf18686d42714839d4b97d6c7281578d -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 93e545aa963cfd11e0b2c6d47669b5ef70c5a86ef80c3353c1a074396bff1e8e7371dda25c39d78c7a9e761f2607b8b5ab843fa0c10b8ff9663098fae8d25725 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 5e0f177f9bec406a668d4b37e69b191208551fdf289c82b5ec898959da4f8a00a2b0695cbf1d2de5acb809321c6e5604f5483d33556543d92b96dcf80e814dd3 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 574412059e4d257eb904cd4892a075b6a2cde27adfa4976ee64c46d6768facece338475f1b652ad94c8df7cfcbb70ebdf0113be109c7099ab76ffdb6f023eefd -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | b1ffaa6d7f77d89885c642663cb14a86f3e2ec2afd223e3bb2000962758cf0f15320969ffc4be93b5826ff22d54fdbae0dbea09f9d8228eda6da50b6fdc88758 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | 388983765213cf3bdc1f8b27103ed79e39028767e5f1571e35ed1f91ed100e49f3027f7b7ff19b53fab7fbb6d723c0439f21fc6ed62be64532c25f5bfa7ee265 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | d92d9b30e7e44134a0cd9db4c01924d365991ea16b3131200b02a82cff89c8701f618cd90e7f1c65427bd4bb5f78b10d540b2262de2c143b401fa44e5b25627b +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 551092f23c27fdea4bb2d0547f6075892534892a96fc2be7786f82b58c93bffdb5e1c20f8f11beb8bed46c24f36d4c18ec5ac9755435489efa28e6ae775739bd +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 26ae7f4163e527349b8818ee38b9ee062314ab417f307afa49c146df8f5a2bd689509b128bd4a1efd3896fd89571149a9955ada91f8ca0c2f599cd863d613c86 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 821fa953f6cebc69d2d481e489f3e90899813d20e2eefbabbcadd019d004108e7540f741fabe60e8e7c6adbb1053ac97898bbdddec3ca19f34a71aa3312e0d4e +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | 
22197d4f66205d5aa9de83dfddcc4f2bb3195fd7067cdb5c21e61dbeae217bc112fb7ecff8a539579b60ad92298c2b4c87b9b7c7e6ec1ee1ffa0c6e4bc4412c1 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | 7e22e0d9603562a04dee16a513579f06b1ff6354d97d669bd68f8777ec7f89f6ef027fb23ab0445d7bba0bb689352f0cc748ce90e3f597c6ebe495464a96b860 -## Changelog since v1.20.0-beta.0 +## Changelog since v1.21.0-alpha.3 + +## Urgent Upgrade Notes +### (No, really, you MUST read this before you upgrade) + + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases the storage_operation_duration_seconds metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operations_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- ACTION REQUIRED: The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42)) [SIG API Machinery, Node and Testing] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] ### API Change -- + `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - + kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. 
([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] +- Cluster admins can now turn off /debug/pprof and /debug/flags/v endpoint in kubelet by setting enableProfilingHandler and enableDebugFlagsHandler to false in their kubelet configuration file. enableProfilingHandler and enableDebugFlagsHandler can be set to true only when enableDebuggingHandlers is also set to true. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90)) [SIG Node] +- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default. + - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted. + - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` version v11.0.0+ and v0.15.0+ reload tokens automatically. + - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] ### Feature -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. 
([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] +- A new histogram metric tracks the time it takes the ttl-after-finished controller to delete a job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Instrumentation] +- AWS cloudprovider supports auto-discovering subnets without any kubernetes.io/cluster/ tags. It also supports additional service annotation service.beta.kubernetes.io/aws-load-balancer-subnets to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Add --permit-address-sharing flag to kube-apiserver to listen with SO_REUSEADDR. While allowing the server to listen on wildcard IPs like 0.0.0.0 and on specific IPs in parallel, it avoids waiting for the kernel to release sockets in the TIME_WAIT state and hence considerably reduces kube-apiserver restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add bash-completion for comma separated list on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) [SIG CLI] +- Added support for installing arm64 node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) [SIG Cloud Provider] +- Feature gate RootCAConfigMap is graduated to GA in 1.21 and will be removed in 1.22. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] +- Kubeadm: during "init" and "join" perform preflight validation on the host / node name and throw warnings if a name is not compliant ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl: `kubectl get` will omit managed fields by default now. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Metrics can now be disabled explicitly via a command line flag (i.e. 
'--disabled-metrics=bad_metric1,bad_metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- TTLAfterFinished is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Auth] +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- Turn CronJobControllerV2 on by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) [SIG Apps] +- UDP protocol support for Agnhost connect subcommand ([#98639](https://github.com/kubernetes/kubernetes/pull/98639), [@knabben](https://github.com/knabben)) [SIG Testing] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New and existing clusters will not be affected until users start adding secondary pod and service CIDR CLI flags as described here: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, Cloud Provider, Network and Node] ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Fix ALPHA stability level reference link ([#98641](https://github.com/kubernetes/kubernetes/pull/98641), [@Jeffwan](https://github.com/Jeffwan)) [SIG Auth, Cloud Provider, Instrumentation and Storage] + +### Failing Test + +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP NodePort Services where stale conntrack entries may blackhole the traffic directed to the NodePort. ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. 
([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] +- Add missing --kube-api-content-type in kubemark hollow template ([#98911](https://github.com/kubernetes/kubernetes/pull/98911), [@Jeffwan](https://github.com/Jeffwan)) [SIG Scalability and Testing] +- Avoid duplicate error messages when running kubectl edit quota ([#98201](https://github.com/kubernetes/kubernetes/pull/98201), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery and Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fixes spurious errors about IPv6 in kube-proxy logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- In the method that ensures that docker and containerd are in the correct containers with the proper OOM score set up, fixed a bug in identifying the containerd process. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. 
([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] ### Other (Cleanup or Flake) -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] +- APIs for kubelet annotations and labels from k8s.io/kubernetes/pkg/kubelet/apis are now available under k8s.io/kubelet/pkg/apis/ ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) [SIG Apps, Auth and Node] +- Migrate `pkg/kubelet/(pod, pleg)` to structured logging ([#98990](https://github.com/kubernetes/kubernetes/pull/98990), [@gjkim42](https://github.com/gjkim42)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/nodestatus to structured logging ([#99001](https://github.com/kubernetes/kubernetes/pull/99001), [@QiWang19](https://github.com/QiWang19)) [SIG Node] +- Migrate pkg/kubelet/server logs to structured logging ([#98643](https://github.com/kubernetes/kubernetes/pull/98643), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate proxy/winkernel/proxier.go logs to structured logging ([#98001](https://github.com/kubernetes/kubernetes/pull/98001), [@JornShen](https://github.com/JornShen)) [SIG Network and Windows] +- Migrate scheduling_queue.go to structured logging ([#98358](https://github.com/kubernetes/kubernetes/pull/98358), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Several flags related to the deprecated dockershim which are present in the kubelet command line are now deprecated. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) [SIG Node] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. 
([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] ## Dependencies @@ -1379,763 +1097,530 @@ filename | sha512 hash _Nothing has changed._ ### Changed -_Nothing has changed._ +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.0.3 ### Removed _Nothing has changed._ -# v1.20.0-beta.0 +# v1.21.0-alpha.3 -## Downloads for v1.20.0-beta.0 +## Downloads for v1.21.0-alpha.3 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes.tar.gz) | 385e49e32bbd6996f07bcadbf42285755b8a8ef9826ee1ba42bd82c65827cf13f63e5634b834451b263a93b708299cbb4b4b0b8ddbc688433deaf6bec240aa67 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-src.tar.gz) | 842e80f6dcad461426fb699de8a55fde8621d76a94e54288fe9939cc1a3bbd0f4799abadac2c59bcf3f91d743726dbd17e1755312ae7fec482ef560f336dbcbb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes.tar.gz) | 704ec916a1dbd134c54184d2652671f80ae09274f9d23dbbed312944ebeccbc173e2e6b6949b38bdbbfdaf8aa032844deead5efeda1b3150f9751386d9184bc8 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-src.tar.gz) | 57db9e7560cfc9c10e7059cb5faf9c4bd5eb8f9b7964f44f000a417021cf80873184b774e7c66c80d4aba84c14080c6bc335618db3d2e5f276436ae065e25408 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | bde5e7d9ee3e79d1e69465a3ddb4bb36819a4f281b5c01a7976816d7c784410812dde133cdf941c47e5434e9520701b9c5e8b94d61dca77c172f87488dfaeb26 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-386.tar.gz) | 721bb8444c9e0d7a9f8461e3f5428882d76fcb3def6eb11b8e8e08fae7f7383630699248660d69d4f6a774124d6437888666e1fa81298d5b5518bc4a6a6b2c92 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | 71e4edc41afbd65f813e7ecbc22b27c95f248446f005e288d758138dc4cc708735be7218af51bcf15e8b9893a3598c45d6a685f605b46f50af3762b02c32ed76 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm.tar.gz) | bbefc749156f63898973f2f7c7a6f1467481329fb430d641fe659b497e64d679886482d557ebdddb95932b93de8d1e3e365c91d4bf9f110b68bd94b0ba702ded -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 9803190685058b4b64d002c2fbfb313308bcea4734ed53a8c340cfdae4894d8cb13b3e819ae64051bafe0fbf8b6ecab53a6c1dcf661c57640c75b0eb60041113 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | bcdceea64cba1ae38ea2bab50d8fd77c53f6d673de12566050b0e3c204334610e6c19e4ace763e68b5e48ab9e811521208b852b1741627be30a2b17324fc1daf -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 41e36d00867e90012d5d5adfabfaae8d9f5a9fd32f290811e3c368e11822916b973afaaf43961081197f2cbab234090d97d89774e674aeadc1da61f7a64708a9 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-386.tar.gz) | c50fec5aec2d0e742f851f25c236cb73e76f8fc73b0908049a10ae736c0205b8fff83eb3d29b1748412edd942da00dd738195d9003f25b577d6af8359d84fb2f -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 
0fd6777c349908b6d627e849ea2d34c048b8de41f7df8a19898623f597e6debd35b7bcbf8e1d43a1be3a9abb45e4810bc498a0963cf780b109e93211659e9c7e +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | e2706efda92d5cf4f8b69503bb2f7703a8754407eff7f199bb77847838070e720e5f572126c14daa4c0c03b59bb1a63c1dfdeb6e936a40eff1d5497e871e3409 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 007bb23c576356ed0890bdfd25a0f98d552599e0ffec19fb982591183c7c1f216d8a3ffa3abf15216be12ae5c4b91fdcd48a7306a2d26b007b86a6abd553fc61 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | 39504b0c610348beba60e8866fff265bad58034f74504951cd894c151a248db718d10f77ebc83f2c38b2d517f8513a46325b38889eefa261ca6dbffeceba50ff +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | 30bc2c40d0c759365422ad1651a6fb35909be771f463c5b971caf401f9209525d05256ab70c807e88628dd357c2896745eecf13eda0b748464da97d0a5ef2066 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 085cdf574dc8fd33ece667130b8c45830b522a07860e03a2384283b1adea73a9652ef3dfaa566e69ee00aea1a6461608814b3ce7a3f703e4a934304f7ae12f97 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b34b845037d83ea7b3e2d80a9ede4f889b71b17b93b1445f0d936a36e98c13ed6ada125630a68d9243a5fcd311ee37cdcc0c05da484da8488ea5060bc529dbfc +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | c4758adc7a404b776556efaa79655db2a70777c562145d6ea6887f3335988367a0c2fcd4383e469340f2a768b22e786951de212805ca1cb91104d41c21e0c9ce +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-386.tar.gz) | f51edc79702bbd1d9cb3a672852a405e11b20feeab64c5411a7e85c9af304960663eb6b23ef96e0f8c44a722fecf58cb6d700ea2c42c05b3269d8efd5ad803f2 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 6a3507ce4ac40a0dc7e4720538863fa15f8faf025085a032f34b8fa0f6fa4e8c26849baf649b5b32829b9182e04f82721b13950d31cf218c35be6bf1c05d6abf ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 30d982424ca64bf0923503ae8195b2e2a59497096b2d9e58dfd491cd6639633027acfa9750bc7bccf34e1dc116d29d2f87cbd7ae713db4210ce9ac16182f0576 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm.tar.gz) | f08b62be9bc6f0745f820b0083c7a31eedb2ce370a037c768459a59192107b944c8f4345d0bb88fc975f2e7a803ac692c9ac3e16d4a659249d4600e84ff75d9e -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | e3472b5b3dfae0a56e5363d52062b1e4a9fc227a05e0cf5ece38233b2c442f427970aab94a52377fb87e583663c120760d154bc1c4ac22dca1f4d0d1ebb96088 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | 06c254e0a62f755d31bc40093d86c44974f0a60308716cc3214a6b3c249a4d74534d909b82f8a3dd3a3c9720e61465b45d2bb3a327ef85d3caba865750020dfb -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 
2edeb4411c26a0de057a66787091ab1044f71774a464aed898ffee26634a40127181c2edddb38e786b6757cca878fd0c3a885880eec6c3448b93c645770abb12 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 19181d162dfb0b30236e2bf1111000e037eece87c037ca2b24622ca94cb88db86aa4da4ca533522518b209bc9983bbfd6b880a7898e0da96b33f3f6c4690539b +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 42a02f9e08a78ad5da6e5fa1ab12bf1e3c967c472fdbdadbd8746586da74dc8093682ba8513ff2a5301393c47ee9021b860e88ada56b13da386ef485708e46ca +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 3c8ba8eb02f70061689bd7fab7813542005efe2edc6cfc6b7aecd03ffedf0b81819ad91d69fff588e83023d595eefbfe636aa55e1856add8733bf42fff3c748f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | cd9e6537450411c39a06fd0b5819db3d16b668d403fb3627ec32c0e32dd1c4860e942934578ca0e1d1b8e6f21f450ff81e37e0cd46ff5c5faf7847ab074aefc5 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | ada3f65e53bc0e0c0229694dd48c425388089d6d77111a62476d1b08f6ad1d8ab3d60b9ed7d95ac1b42c2c6be8dc0618f40679717160769743c43583d8452362 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | cc1d5b94b86070b5e7746d7aaeaeac3b3a5e5ebbff1ec33885f7eeab270a6177d593cb1975b2e56f4430b7859ad42da76f266629f9313e0f688571691ac448ed -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 75e82c7c9122add3b24695b94dcb0723c52420c3956abf47511e37785aa48a1fa8257db090c6601010c4475a325ccfff13eb3352b65e3aa1774f104b09b766b0 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 16ef27c40bf4d678a55fcd3d3f7d09f1597eec2cc58f9950946f0901e52b82287be397ad7f65e8d162d8a9cdb4a34a610b6db8b5d0462be8e27c4b6eb5d6e5e7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 939865f2c4cb6a8934f22a06223e416dec5f768ffc1010314586149470420a1d62aef97527c34d8a636621c9669d6489908ce1caf96f109e8d073cee1c030b50 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | bbfdd844075fb816079af7b73d99bc1a78f41717cdbadb043f6f5872b4dc47bc619f7f95e2680d4b516146db492c630c17424e36879edb45e40c91bc2ae4493c -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | a2b3ea40086fd71aed71a4858fd3fc79fd1907bc9ea8048ff3c82ec56477b0a791b724e5a52d79b3b36338c7fbd93dfd3d03b00ccea9042bda0d270fc891e4ec +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | ae0fec6aa59e49624b55d9a11c12fdf717ddfe04bdfd4f69965d03004a34e52ee4a3e83f7b61d0c6a86f43b72c99f3decb195b39ae529ef30526d18ec5f58f83 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | 9a48c140ab53b7ed8ecec6903988a1a474efc16d2538e5974bc9a12f0c9190be78c4f9e326bf4e982d0b7045a80b99dd0fda7e9b650663be5b89bfd991596746 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | 6912adbc9300344bea470d6435f7b387bfce59767078c11728ce59faf47cd3f72b41b9604fcc5cda45e9816fe939fbe2fb33e52a773e6ff2dfa9a615b4df6141 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | d66dccfe3e6ed6d81567c70703f15375a53992b3a5e2814b98c32e581b861ad95912e03ed2562415d087624c008038bb4a816611fa255442ae752968ea15856b +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | ad8c69a28f1fbafa3f1cb54909bfd3fc22b104bed63d7ca2b296208c9d43eb5f2943a0ff267da4c185186cdd9f7f77b315cd7f5f1bf9858c0bf42eceb9ac3c58 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 91d723aa848a9cb028f5bcb41090ca346fb973961521d025c4399164de2c8029b57ca2c4daca560d3c782c05265d2eb0edb0abcce6f23d3efbecf2316a54d650 -## Changelog since v1.20.0-alpha.3 +## Changelog since v1.21.0-alpha.2 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] - - Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] + - Newly provisioned PVs by gce-pd will no longer have the beta FailureDomain label. gce-pd volume plugin will start to have GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - Remove alpha CSIMigrationXXComplete flag and add alpha InTreePluginXXUnregister flag. Deprecate CSIMigrationvSphereComplete flag and it will be removed in 1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Node and Storage] ## Changes by Kind -### Deprecation - -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] - ### API Change -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. 
([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Adds support for portRange / EndPort in Network Policy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network] +- Fixes using server-side apply with APIService resources ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Apps and Testing] +- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing] +- Scheduler extender filter interface now can report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling] ### Feature -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] +- A lease can only attach up to 10k objects. 
([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery]
+- Add ignore-errors flag for drain, support non-breaking drain in group ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) [SIG CLI]
+- Base-images: Update to debian-iptables:buster-v1.4.0
+  - Uses iptables 1.8.5
+  - base-images: Update to debian-base:buster-v1.3.0
+  - cluster/images/etcd: Build etcd:3.4.13-2 image
+    - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing]
+- Export NewDebuggingRoundTripper function and DebugLevel options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) [SIG API Machinery]
+- Kubectl wait ensures that observedGeneration >= generation if applicable ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) [SIG CLI]
+- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing]
+- New admission controller "denyserviceexternalips" is available. Clusters which do not *need* the Service "externalIPs" feature should enable this controller to be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) [SIG API Machinery]
+- Overall, enabling the `PreferNominatedNode` feature will improve scheduling performance in clusters where preemption happens frequently, but in theory, with `PreferNominatedNode` enabled, the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing]
+- Pause image upgraded to 3.4.1 in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Testing and Windows]
+- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing]

### Documentation

-- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery]
-
-### Bug or Regression
-
-- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions. 
([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. ([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] - -### Other (Cleanup or Flake) - -- NO ([#95690](https://github.com/kubernetes/kubernetes/pull/95690), [@nikhita](https://github.com/nikhita)) [SIG Release] - -## Dependencies - -### Added -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) - -### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.9.0 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.9.0...v0.9.5) -- github.com/Azure/go-autorest/autorest/mocks: [v0.4.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.4.0...v0.4.1) -- golang.org/x/crypto: 75b2880 → 7f63de1 - -### Removed -_Nothing has changed._ - - - -# v1.20.0-alpha.3 - - -## Downloads for v1.20.0-alpha.3 - -### Source Code - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes.tar.gz) | 542cc9e0cd97732020491456402b6e2b4f54f2714007ee1374a7d363663a1b41e82b50886176a5313aaccfbfd4df2bc611d6b32d19961cdc98b5821b75d6b17c -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-src.tar.gz) | 5e5d725294e552fd1d14fd6716d013222827ac2d4e2d11a7a1fdefb77b3459bbeb69931f38e1597de205dd32a1c9763ab524c2af1551faef4f502ef0890f7fbf - -### Client binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | 
60004939727c75d0f06adc4449e16b43303941937c0e9ea9aca7d947e93a5aed5d11e53d1fc94caeb988be66d39acab118d406dc2d6cead61181e1ced6d2be1a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 7edba9c4f1bf38fdf1fa5bff2856c05c0e127333ce19b17edf3119dc9b80462c027404a1f58a5eabf1de73a8f2f20aced043dda1fafd893619db1a188cda550c -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | db1818aa82d072cb3e32a2a988e66d76ecf7cebc6b8a29845fa2d6ec27f14a36e4b9839b1b7ed8c43d2da9cde00215eb672a7e8ee235d2e3107bc93c22e58d38 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | d2922e70d22364b1f5a1e94a0c115f849fe2575b231b1ba268f73a9d86fc0a9fbb78dc713446839a2593acf1341cb5a115992f350870f13c1a472bb107b75af7 -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 2e3ae20e554c7d4fc3a8afdfcafe6bbc81d4c5e9aea036357baac7a3fdc2e8098aa8a8c3dded3951667d57f667ce3fbf37ec5ae5ceb2009a569dc9002d3a92f9 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b54a34e572e6a86221577de376e6f7f9fcd82327f7fe94f2fc8d21f35d302db8a0f3d51e60dc89693999f5df37c96d0c3649a29f07f095efcdd59923ae285c95 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | 5be1b70dc437d3ba88cb0b89cd1bc555f79896c3f5b5f4fa0fb046a0d09d758b994d622ebe5cef8e65bba938c5ae945b81dc297f9dfa0d98f82ea75f344a3a0d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-386.tar.gz) | 88cf3f66168ef3bf9a5d3d2275b7f33799406e8205f2c202997ebec23d449aa4bb48b010356ab1cf52ff7b527b8df7c8b9947a43a82ebe060df83c3d21b7223a -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 87d2d4ea1829da8cfa1a705a03ea26c759a03bd1c4d8b96f2c93264c4d172bb63a91d9ddda65cdc5478b627c30ae8993db5baf8be262c157d83bffcebe85474e - -### Server binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 7af691fc0b13a937797912374e3b3eeb88d5262e4eb7d4ebe92a3b64b3c226cb049aedfd7e39f639f6990444f7bcf2fe58699cf0c29039daebe100d7eebf60de -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 557c47870ecf5c2090b2694c8f0c8e3b4ca23df5455a37945bd037bc6fb5b8f417bf737bb66e6336b285112cb52de0345240fdb2f3ce1c4fb335ca7ef1197f99 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 981de6cf7679d743cdeef1e894314357b68090133814801870504ef30564e32b5675e270db20961e9a731e35241ad9b037bdaf749da87b6c4ce8889eeb1c5855 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | 506578a21601ccff609ae757a55e68634c15cbfecbf13de972c96b32a155ded29bd71aee069c77f5f721416672c7a7ac0b8274de22bfd28e1ecae306313d96c5 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | af0cdcd4a77a7cc8060a076641615730a802f1f02dab084e41926023489efec6102d37681c70ab0dbe7440cd3e72ea0443719a365467985360152b9aae657375 - -### Node binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | 
2d92c61596296279de1efae23b2b707415565d9d50cd61a7231b8d10325732b059bcb90f3afb36bef2575d203938c265572721e38df408e8792d3949523bd5d9 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | c298de9b5ac1b8778729a2d8e2793ff86743033254fbc27014333880b03c519de81691caf03aa418c729297ee8942ce9ec89d11b0e34a80576b9936015dc1519 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | daa3c65afda6d7aff206c1494390bbcc205c2c6f8db04c10ca967a690578a01c49d49c6902b85e7158f79fd4d2a87c5d397d56524a75991c9d7db85ac53059a7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | 05661908bb73bfcaf9c2eae96e9a6a793db5a7a100bce6df9e057985dd53a7a5248d72e81b6d13496bd38b9326c17cdb2edaf0e982b6437507245fb846e1efc6 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | 845e518e2c4ef0cef2c3b58f0b9ea5b5fe9b8a249717f789607752484c424c26ae854b263b7c0a004a8426feb9aa3683c177a9ed2567e6c3521f4835ea08c24a -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 530e536574ed2c3e5973d3c0f0fdd2b4d48ef681a7a7c02db13e605001669eeb4f4b8a856fc08fc21436658c27b377f5d04dbcb3aae438098abc953b6eaf5712 - -## Changelog since v1.20.0-alpha.2 - -## Changes by Kind - -### API Change - -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] +- Feat: azure file migration go beta in 1.21. Feature gates CSIMigration to Beta (on by default) and CSIMigrationAzureFile to Beta (off by default since it requires installation of the AzureFile CSI Driver) + The in-tree AzureFile plugin "kubernetes.io/azure-file" is now deprecated and will be removed in 1.23. Users should enable CSIMigration + CSIMigrationAzureFile features and install the AzureFile CSI Driver (https://github.com/kubernetes-sigs/azurefile-csi-driver) to avoid disruption to existing Pod and PVC objects at that time. + Users should start using the AzureFile CSI Driver directly for any new volumes. ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -### Feature +### Failing Test -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. 
([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network]
-- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing]
-- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling]
-- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node]
+- Kubelet: the HostPort implementation in dockershim was not taking the HostIP field into consideration, so the same HostPort could not be used with different IP addresses.
+  This bug causes the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node]

### Bug or Regression

-- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage]
-- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing]
-- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider]
-- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
-- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing]
-- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
-- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage]
-- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network]
-- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. 
([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
-- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI]
-- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Fixed a bug where, on nodes whose INPUT chain policy in the filter table is not ACCEPT, the healthcheck nodeport would not work.
+  Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network]
+- Fixed kube-proxy container image architecture for non-amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) [SIG API Machinery, Release and Testing]
+- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixes a panic in the disruption budget controller for PDB objects with invalid selectors ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) [SIG Apps]
+- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage]
+- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400.
+  - In cases where the client specifies a timeout in the request URL, the overall request deadline is now shortened, since the deadline is set up as soon as the request is received by the apiserver. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing]
+- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written to the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle]
+- Kubeadm: fix bad token placeholder text in "config print *-defaults --help" ([#98839](https://github.com/kubernetes/kubernetes/pull/98839), [@Mattias-](https://github.com/Mattias-)) [SIG Cluster Lifecycle]
+- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release]
+- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. 
([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage]
+- Remove CSI topology from migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage]
+- Sync node status during kubelet node shutdown.
+  Adds a pod admission handler that rejects new pods while the node is in the process of shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Truncates a message if it hits the NoteLengthLimit when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory)) [SIG Scheduling]
+- We will no longer automatically delete all data when a failure is detected during creation of the volume data file on a CSI volume. Now we will only remove the data file and volume path. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) [SIG Storage]

### Other (Cleanup or Flake)

-- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI]
-- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows]
-- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider]
-- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release]
-- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing]
-- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug.
-  Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. 
([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network]
+- Fix the description of command line flags that can override --config ([#98254](https://github.com/kubernetes/kubernetes/pull/98254), [@changshuchao](https://github.com/changshuchao)) [SIG Scheduling]
+- Migrate scheduler/taint_manager.go to structured logging ([#98259](https://github.com/kubernetes/kubernetes/pull/98259), [@tanjing2020](https://github.com/tanjing2020)) [SIG Apps]
+- Migrate staging/src/k8s.io/apiserver/pkg/admission logs to structured logging ([#98138](https://github.com/kubernetes/kubernetes/pull/98138), [@lala123912](https://github.com/lala123912)) [SIG API Machinery]
+- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing]
+- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling]
+- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. ([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that have been GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows]
+- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi-zone clusters is preserved. 
([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] ## Dependencies ### Added -_Nothing has changed._ +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) ### Changed -_Nothing has changed._ +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/tools: c1934b7 → 113979e +- k8s.io/klog/v2: v2.4.0 → v2.5.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 ### Removed -_Nothing has changed._ +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) -# v1.20.0-alpha.2 +# v1.21.0-alpha.2 -## Downloads for v1.20.0-alpha.2 +## Downloads for v1.21.0-alpha.2 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes.tar.gz) | 45089a4d26d56a5d613ecbea64e356869ac738eca3cc71d16b74ea8ae1b4527bcc32f1dc35ff7aa8927e138083c7936603faf063121d965a2f0f8ba28fa128d8 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-src.tar.gz) | 646edd890d6df5858b90aaf68cc6e1b4589b8db09396ae921b5c400f2188234999e6c9633906692add08c6e8b4b09f12b2099132b0a7533443fb2a01cfc2bf81 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes.tar.gz) | 6836f6c8514253fe0831fd171fc4ed92eb6d9a773491c8dc82b90d171a1b10076bd6bfaea56ec1e199c5f46c273265bdb9f174f0b2d99c5af1de4c99b862329e +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-src.tar.gz) | d137694804741a05ab09e5f9a418448b66aba0146c028eafce61bcd9d7c276521e345ce9223ffbc703e8172041d58dfc56a3242a4df3686f24905a4541fcd306 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | c136273883e24a2a50b5093b9654f01cdfe57b97461d34885af4a68c2c4d108c07583c02b1cdf7f57f82e91306e542ce8f3bddb12fcce72b744458bc4796f8eb -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 6ec59f1ed30569fa64ddb2d0de32b1ae04cda4ffe13f339050a7c9d7c63d425ee6f6d963dcf82c17281c4474da3eaf32c08117669052872a8c81bdce2c8a5415 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | 7b40a4c087e2ea7f8d055f297fcd39a3f1cb6c866e7a3981a9408c3c3eb5363c648613491aad11bc7d44d5530b20832f8f96f6ceff43deede911fb74aafad35f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | cda9955feebea5acb8f2b5b87895d24894bbbbde47041453b1f926ebdf47a258ce0496aa27d06bcbf365b5615ce68a20d659b64410c54227216726e2ee432fca -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 
f65bd9241c7eb88a4886a285330f732448570aea4ededaebeabcf70d17ea185f51bf8a7218f146ee09fb1adceca7ee71fb3c3683834f2c415163add820fba96e -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 1e377599af100a81d027d9199365fb8208d443a8e0a97affff1a79dc18796e14b78cb53d6e245c1c1e8defd0e050e37bf5f2a23c8a3ff45a6d18d03619709bf5 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 1cdee81478246aa7e7b80ae4efc7f070a5b058083ae278f59fad088b75a8052761b0e15ab261a6e667ddafd6a69fb424fc307072ed47941cad89a85af7aee93d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-386.tar.gz) | d8774167c87b6844c348aa15e92d5033c528d6ab9e95d08a7cb22da68bafd8e46d442cf57a5f6affad62f674c10ae6947d524b94108b5e450ca78f92656d63c0 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | f664b47d8daa6036f8154c1dc1f881bfe683bf57c39d9b491de3848c03d051c50c6644d681baf7f9685eae45f9ce62e4c6dfea2853763cfe8256a61bdd59d894 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | 9478b047a97717953f365c13a098feb7e3cb30a3df22e1b82aa945f2208dcc5cb90afc441ba059a3ae7aafb4ee000ec3a52dc65a8c043a5ac7255a391c875330 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 44c8dd4b1ddfc256d35786c8abf45b0eb5f0794f5e310d2efc865748adddc50e8bf38aa71295ae8a82884cb65f2e0b9b0737b000f96fd8f2d5c19971d7c4d8e8 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | e1291989892769de6b978c17b8612b94da6f3b735a4d895100af622ca9ebb968c75548afea7ab00445869625dd0da3afec979e333afbb445805f5d31c1c13cc7 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | 3c4bcb8cbe73822d68a2f62553a364e20bec56b638c71d0f58679b4f4b277d809142346f18506914e694f6122a3e0f767eab20b7b1c4dbb79e4c5089981ae0f1 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 9389974a790268522e187f5ba5237f3ee4684118c7db76bc3d4164de71d8208702747ec333b204c7a78073ab42553cbbce13a1883fab4fec617e093b05fab332 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 63399e53a083b5af3816c28ff162c9de6b64c75da4647f0d6bbaf97afdf896823cb1e556f2abac75c6516072293026d3ff9f30676fd75143ac6ca3f4d21f4327 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 50898f197a9d923971ff9046c9f02779b57f7b3cea7da02f3ea9bab8c08d65a9c4a7531a2470fa14783460f52111a52b96ebf916c0a1d8215b4070e4e861c1b0 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-386.tar.gz) | a7743e839e1aa19f5ee20b6ee5000ac8ef9e624ac5be63bb574fad6992e4b9167193ed07e03c9bc524e88bfeed66c95341a38a03bff1b10bc9910345f33019f0 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 5f1d19c230bd3542866d16051808d184e9dd3e2f8c001ed4cee7b5df91f872380c2bf56a3add8c9413ead9d8c369efce2bcab4412174df9b823d3592677bf74e ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | 
d6fcb4600be0beb9de222a8da64c35fe22798a0da82d41401d34d0f0fc7e2817512169524c281423d8f4a007cd77452d966317d5a1b67d2717a05ff346e8aa7d -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | 022a76cf10801f8afbabb509572479b68fdb4e683526fa0799cdbd9bab4d3f6ecb76d1d63d0eafee93e3edf6c12892d84b9c771ef2325663b95347728fa3d6c0 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 0679aadd60bbf6f607e5befad74b5267eb2d4c1b55985cc25a97e0f4c5efb7acbb3ede91bfa6a5a5713dae4d7a302f6faaf678fd6b359284c33d9a6aca2a08bb -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 9f2cfeed543b515eafb60d9765a3afff4f3d323c0a5c8a0d75e3de25985b2627817bfcbe59a9a61d969e026e2b861adb974a09eae75b58372ed736ceaaed2a82 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 937258704d7b9dcd91f35f2d34ee9dd38c18d9d4e867408c05281bfbbb919ad012c95880bee84d2674761aa44cc617fb2fae1124cf63b689289286d6eac1c407 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | ef2cac10febde231aeb6f131e589450c560eeaab8046b49504127a091cddc17bc518c2ad56894a6a033033ab6fc6e121b1cc23691683bc36f45fe6b1dd8e0510 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | d11c9730307f08e80b2b8a7c64c3e9a9e43c622002e377dfe3a386f4541e24adc79a199a6f280f40298bb36793194fd44ed45defe8a3ee54a9cb1386bc26e905 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 28f8c32bf98ee1add7edf5d341c3bac1afc0085f90dcbbfb8b27a92087f13e2b53c327c8935ee29bf1dc3160655b32bbe3e29d5741a8124a3848a777e7d42933 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 99ae8d44b0de3518c27fa8bbddd2ecf053dfb789fb9d65f8a4ecf4c8331cf63d2f09a41c2bcd5573247d5f66a1b2e51944379df1715017d920d521b98589508a +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | f8c0e954a2dfc6845614488dadeed069cc7f3f08e33c351d7a77c6ef97867af590932e8576d12998a820a0e4d35d2eee797c764e2810f09ab1e90a5acaeaad33 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | 076165d745d47879de68f4404eaf432920884be48277eb409e84bf2c61759633bf3575f46b0995f1fc693023d76c0921ed22a01432e756d7f8d9e246a243b126 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 1ff2e2e3e43af41118cdfb70c778e15035bbb1aca833ffd2db83c4bcd44f55693e956deb9e65017ebf3c553f2820ad5cd05f5baa33f3d63f3e00ed980ea4dfed -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | b232c7359b8c635126899beee76998078eec7a1ef6758d92bcdebe8013b0b1e4d7b33ecbf35e3f82824fe29493400845257e70ed63c1635bfa36c8b3b4969f6f -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 51d415a068f554840f4c78d11a4fedebd7cb03c686b0ec864509b24f7a8667ebf54bb0a25debcf2b70f38be1e345e743f520695b11806539a55a3620ce21946f -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 
b51c082d8af358233a088b632cf2f6c8cfe5421471c27f5dc9ba4839ae6ea75df25d84298f2042770097554c01742bb7686694b331ad9bafc93c86317b867728
-[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | 91b9d26620a2dde67a0edead0039814efccbdfd54594dda3597aaced6d89140dc92612ed0727bc21d63468efeef77c845e640153b09e39d8b736062e6eee0c76
+[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | c5456d50bfbe0d75fb150b3662ed7468a0abd3970792c447824f326894382c47bbd3a2cc5a290f691c8c09585ff6fe505ab86b4aff2b7e5ccee11b5e6354ae6c
+[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 335b5cd8672e053302fd94d932fb2fa2e48eeeb1799650b3f93acdfa635e03a8453637569ab710c46885c8317759f4c60aaaf24dca9817d9fa47500fe4a3ca53
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | 3ee87dbeed8ace9351ac89bdaf7274dd10b4faec3ceba0825f690ec7a2bb7eb7c634274a1065a0939eec8ff3e43f72385f058f4ec141841550109e775bc5eff9
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 6956f965b8d719b164214ec9195fdb2c776b907fe6d2c524082f00c27872a73475927fd7d2a994045ce78f6ad2aa5aeaf1eb5514df1810d2cfe342fd4e5ce4a1
+[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 3b643aa905c709c57083c28dd9e8ffd88cb64466cda1499da7fc54176b775003e08b9c7a07b0964064df67c8142f6f1e6c13bfc261bd65fb064049920bfa57d0
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | b2e6d6fb0091f2541f9925018c2bdbb0138a95bab06b4c6b38abf4b7144b2575422263b78fb3c6fd09e76d90a25a8d35a6d4720dc169794d42c95aa22ecc6d5f

-## Changelog since v1.20.0-alpha.1
+## Changelog since v1.21.0-alpha.1

+## Urgent Upgrade Notes
+
+### (No, really, you MUST read this before you upgrade)
+
+ - Remove storage metrics `storage_operation_errors_total`, since we already have `storage_operation_status_count`. A new `status` field is added to `storage_operation_duration_seconds`, so that storage operation latency can be tracked for every status. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage]
+
## Changes by Kind

### Deprecation

-- Action-required: kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle]
-- Action-required: kubeadm: remove the deprecated feature --experimental-kustomize from kubeadm commands. The feature was replaced with --experimental-patches in 1.19. To migrate see the --help description for the --experimental-patches flag. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. 
([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] +- Remove the TokenRequest and TokenRequestProjection feature gates ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- Removing experimental windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] ### API Change -- GPU metrics provided by kubelet are now disabled by default ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Status of v1beta1 CRDs without "preserveUnknownFields:false" will show violation "spec.preserveUnknownFields: Invalid value: true: must be false" ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) [SIG API Machinery] +- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI] -### Feature +### Documentation -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. 
([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Allow configuring AWS LoadBalancer health check protocol via service annotations ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kubeadm: do not throw errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. Print warnings instead. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. 
([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- Send gce node startup scripts logs to console and journal ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) [SIG Cloud Provider and Node] -- Support kubectl delete orphan/foreground/background options ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) [SIG CLI and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set kubelet option `--volume-stats-agg-period` to negative value to disable volume calculations. ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### Bug or Regression -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Fix a bug where the endpoint slice controller was not mirroring the parent service labels to its corresponding endpoint slices ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) [SIG Apps and Network] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. 
([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing]
-- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider]
-- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may
-  fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node]
-- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle]
-- Kubeadm: warn but do not error out on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. ([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Port mapping allows to map the same `containerPort` to multiple `hostPort` without naming the mapping explicitly. ([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Network and Node]
-- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI]
+- Clean ReplicaSet by revision instead of creation timestamp in deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps]
+- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery]
+- Fix azure file migration issue ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) [SIG Auth, Cloud Provider and Storage]
+- Fix kubelet panicking after receiving the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- Fixed a bug where the kubelet could not start on Btrfs. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps]
+- Fixed the namespace not taking effect when exposing a deployment with --dry-run=client. 
([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI]
+- Fixed a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node]
+- Indentation of the `Resource Quota` block in `kubectl describe namespaces` output is now correct. ([#97946](https://github.com/kubernetes/kubernetes/pull/97946), [@dty1er](https://github.com/dty1er)) [SIG CLI]
+- KUBECTL_EXTERNAL_DIFF now accepts an equal sign for additional parameters. ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI]
+- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle]
+- Kubelet should ignore the cgroup driver check on Windows nodes. ([#97764](https://github.com/kubernetes/kubernetes/pull/97764), [@pacoxu](https://github.com/pacoxu)) [SIG Node and Windows]
+- Make podTopologyHints protected by a lock ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node]
+- Readjust kubelet_containers_per_pod_count bucket ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) [SIG Instrumentation and Node]
+- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling]
+- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Use network.Interface.VirtualMachine.ID to get the bound VM
+  Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]

### Other (Cleanup or Flake)

-- Added fine grained debugging to the intra-pod conformance test for helping easily resolve networking issues for nodes that might be unhealthy when running conformance or sonobuoy tests. 
([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) [SIG Network and Testing] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Enhance log information of verifyRunAsNonRoot, add pod, container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Errors from staticcheck: - vendor/k8s.io/client-go/discovery/cached/memory/memcache_test.go:94:2: this value of g is never used (SA4006) ([#95098](https://github.com/kubernetes/kubernetes/pull/95098), [@phunziker](https://github.com/phunziker)) [SIG API Machinery] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Masks ceph RBD adminSecrets in logs when logLevel >= 4 ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) [SIG Storage] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove offensive words from kubectl cluster-info command ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) [SIG Architecture, CLI and Testing] -- The following new metrics are available. - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) [SIG Instrumentation, Network and Node] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Migrate generic_scheduler.go and types.go to structured logging. 
([#98134](https://github.com/kubernetes/kubernetes/pull/98134), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Migrate proxy/winuserspace/proxier.go logs to structured logging ([#97941](https://github.com/kubernetes/kubernetes/pull/97941), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go logs to structured logging. ([#98252](https://github.com/kubernetes/kubernetes/pull/98252), [@lala123912](https://github.com/lala123912)) [SIG API Machinery and Auth] +- Migrate staging\src\k8s.io\apiserver\pkg\endpoints logs to structured logging ([#98093](https://github.com/kubernetes/kubernetes/pull/98093), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Node ([#96552](https://github.com/kubernetes/kubernetes/pull/96552), [@pandaamanda](https://github.com/pandaamanda)) [SIG Apps, Cloud Provider, Node and Scheduling] +- The kubectl alpha debug command was scheduled to be removed in v1.21. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] ## Dependencies ### Added -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) +_Nothing has changed._ ### Changed -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.5](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.5) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- k8s.io/kube-openapi: 6aeccd4 → 8b50664 -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.12 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → b3cf1e8 +- github.com/google/cadvisor: [v0.38.6 → v0.38.7](https://github.com/google/cadvisor/compare/v0.38.6...v0.38.7) +- k8s.io/gengo: 83324d8 → b6c5ce2 ### Removed _Nothing has changed._ -# v1.20.0-alpha.1 +# v1.21.0-alpha.1 -## Downloads for v1.20.0-alpha.1 +## Downloads for v1.21.0-alpha.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes.tar.gz) | e7daed6502ea07816274f2371f96fe1a446d0d7917df4454b722d9eb3b5ff6163bfbbd5b92dfe7a0c1d07328b8c09c4ae966e482310d6b36de8813aaf87380b5 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-src.tar.gz) | e91213a0919647a1215d4691a63b12d89a3e74055463a8ebd71dc1a4cabf4006b3660881067af0189960c8dab74f4a7faf86f594df69021901213ee5b56550ea +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes.tar.gz) | b2bacd5c3fc9f829e6269b7d2006b0c6e464ff848bb0a2a8f2fe52ad2d7c4438f099bd8be847d8d49ac6e4087f4d74d5c3a967acd798e0b0cb4d7a2bdb122997 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-src.tar.gz) | 518ac5acbcf23902fb1b902b69dbf3e86deca5d8a9b5f57488a15f185176d5a109558f3e4df062366af874eca1bcd61751ee8098b0beb9bcdc025d9a1c9be693 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | 
1f3add5f826fa989820d715ca38e8864b66f30b59c1abeacbb4bfb96b4e9c694eac6b3f4c1c81e0ee3451082d44828cb7515315d91ad68116959a5efbdaef1e1 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-386.tar.gz) | c62acdc8993b0a950d4b0ce0b45473bf96373d501ce61c88adf4007afb15c1d53da8d53b778a7eccac6c1624f7fdda322be9f3a8bc2d80aaad7b4237c39f5eaf -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1203ababfe00f9bc5be5c059324c17160a96530c1379a152db33564bbe644ccdb94b30eea15a0655bd652efb17895a46c31bbba19d4f5f473c2a0ff62f6e551f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | 31860088596e12d739c7aed94556c2d1e217971699b950c8417a3cea1bed4e78c9ff1717b9f3943354b75b4641d4b906cd910890dbf4278287c0d224837d9a7d -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8d469f37fe20d6e15b5debc13cce4c22e8b7a4f6a4ac787006b96507a85ce761f63b28140d692c54b5f7deb08697f8d5ddb9bbfa8f5ac0d9241fc7de3a3fe3cd -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 0d62ee1729cd5884946b6c73701ad3a570fa4d642190ca0fe5c1db0fb0cba9da3ac86a948788d915b9432d28ab8cc499e28aadc64530b7d549ee752a6ed93ec1 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 0fc0420e134ec0b8e0ab2654e1e102cebec47b48179703f1e1b79d51ee0d6da55a4e7304d8773d3cf830341ac2fe3cede1e6b0460fd88f7595534e0730422d5a -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-386.tar.gz) | 3fb53b5260f4888c77c0e4ff602bbcf6bf38c364d2769850afe2b8d8e8b95f7024807c15e2b0d5603e787c46af8ac53492be9e88c530f578b8a389e3bd50c099 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 2f44c93463d6b5244ce0c82f147e7f32ec2233d0e29c64c3c5759e23533aebd12671bf63e986c0861e9736f9b5259bb8d138574a7c8c8efc822e35cd637416c0 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | eaa7aea84a5ed954df5ec710cbeb6ec88b46465f43cb3d09aabe2f714b84a050a50bf5736089f09dbf1090f2e19b44823d656c917e3c8c877630756c3026f2b6 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-386.tar.gz) | 47f74b8d46ad1779c5b0b5f15aa15d5513a504eeb6f53db4201fbe9ff8956cb986b7c1b0e9d50a99f78e9e2a7f304f3fc1cc2fa239296d9a0dd408eb6069e975 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1a148e282628b008c8abd03dd12ec177ced17584b5115d92cd33dd251e607097d42e9da8c7089bd947134b900f85eb75a4740b6a5dd580c105455b843559df39 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | d13d2feb73bd032dc01f7e2955b98d8215a39fe1107d037a73fa1f7d06c3b93ebaa53ed4952d845c64454ef3cca533edb97132d234d50b6fb3bcbd8a8ad990eb +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8252105a17b09a78e9ad2c024e4e401a69764ac869708a071aaa06f81714c17b9e7c5b2eb8efde33f24d0b59f75c5da607d5e1e72bdf12adfbb8c829205cd1c1 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 297a9082df4988389dc4be30eb636dff49f36f5d87047bab44745884e610f46a17ae3a08401e2cab155b7c439f38057bfd8288418215f7dd3bf6a49dbe61ea0e 
+[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 04c06490dd17cd5dccfd92bafa14acf64280ceaea370d9635f23aeb6984d1beae6d0d1d1506edc6f30f927deeb149b989d3e482b47fbe74008b371f629656e79 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-386.tar.gz) | ec6e9e87a7d685f8751d7e58f24f417753cff5554a7229218cb3a08195d461b2e12409344950228e9fbbc92a8a06d35dd86242da6ff1e6652ec1fae0365a88c1 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 51039e6221d3126b5d15e797002ae01d4f0b10789c5d2056532f27ef13f35c5a2e51be27764fda68e8303219963126559023aed9421313bec275c0827fbcaf8a ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | ae82d14b1214e4100f0cc2c988308b3e1edd040a65267d0eddb9082409f79644e55387889e3c0904a12c710f91206e9383edf510990bee8c9ea2e297b6472551 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | 9a2a5828b7d1ddb16cc19d573e99a4af642f84129408e6203eeeb0558e7b8db77f3269593b5770b6a976fe9df4a64240ed27ad05a4bd43719e55fce1db0abf58 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | ed700dd226c999354ce05b73927388d36d08474c15333ae689427de15de27c84feb6b23c463afd9dd81993315f31eb8265938cfc7ecf6f750247aa42b9b33fa9 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | abb7a9d726538be3ccf5057a0c63ff9732b616e213c6ebb81363f0c49f1e168ce8068b870061ad7cba7ba1d49252f94cf00a5f68cec0f38dc8fce4e24edc5ca6 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 3a51888af1bfdd2d5b0101d173ee589c1f39240e4428165f5f85c610344db219625faa42f00a49a83ce943fb079be873b1a114a62003fae2f328f9bf9d1227a4 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | 4edf820930c88716263560275e3bd7fadb8dc3700b9f8e1d266562e356e0abeb1a913f536377dab91218e3940b447d6bf1da343b85da25c2256dc4dcde5798dd +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | b15213e53a8ab4ba512ce6ef9ad42dd197d419c61615cd23de344227fd846c90448d8f3d98e555b63ba5b565afa627cca6b7e3990ebbbba359c96f2391302df1 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | 5be29cca9a9358fc68351ee63e99d57dc2ffce6e42fc3345753dbbf7542ff2d770c4852424158540435fa6e097ce3afa9b13affc40c8b3b69fe8406798f8068f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | 89fd99ab9ce85db0b94b86709932105efc883cc93959cf7ea9a39e79a4acea23064d7010eeb577450cccabe521c04b7ba47bbec212ed37edeed7cb04bad34518 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 2fbc30862c77d247aa8d96ab9d1a144599505287b0033a3a2d0988958e7bb2f2e8b67f52c1fec74b4ec47d74ba22cd0f6cb5c4228acbaa72b1678d5fece0254d ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | d0f28e3c38ca59a7ff1bfecb48a1ce97116520355d9286afdca1200d346c10018f5bbdf890f130a388654635a2e83e908b263ed45f8a88defca52a7c1d0a7984 
-[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | ed9d3f13028beb3be39bce980c966f82c4b39dc73beaae38cc075fea5be30b0309e555cb2af8196014f2cc9f0df823354213c314b4d6545ff6e30dd2d00ec90e -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | ad5b3268db365dcdded9a9a4bffc90c7df0f844000349accdf2b8fb5f1081e553de9b9e9fb25d5e8a4ef7252d51fa94ef94d36d2ab31d157854e164136f662c2 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | c4de2524e513996def5eeba7b83f7b406f17eaf89d4d557833a93bd035348c81fa9375dcd5c27cfcc55d73995449fc8ee504be1b3bd7b9f108b0b2f153cb05ae -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 9157b44e3e7bd5478af9f72014e54d1afa5cd19b984b4cd8b348b312c385016bb77f29db47f44aea08b58abf47d8a396b92a2d0e03f2fe8acdd30f4f9466cbdb -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 8b40a43c5e6447379ad2ee8aac06e8028555e1b370a995f6001018a62411abe5fbbca6060b3d1682c5cadc07a27d49edd3204e797af46368800d55f4ca8aa1de +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | 95658d321a0a371c0900b401d1469d96915310afbc4e4b9b11f031438bb188513b57d5a60b5316c3b0c18f541cda6f0ac42f59a76495f8abc743a067115da23a +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | f375acfb42aad6c65b833c270e7e3acfe9cd1d6b2441c33874e77faae263957f7acfe86f1b71f14298118595e4cc6952c7dea0c832f7f2e72428336f13034362 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | 43b4baccd58d74e7f48d096ab92f2bbbcdf47e30e7a3d2b56c6cc9f90002cfd4fefaac894f69bd5f9f4dbdb09a4749a77eb76b1b97d91746bd96fe94457879ab +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | e7962b522c6c7c14b9ee4c1d254d8bdd9846b2b33b0443fc9c4a41be6c40e5e6981798b720f0148f36263d5cc45d5a2bb1dd2f9ab2838e3d002e45b9bddeb7bf +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 49ebc97f01829e65f7de15be00b882513c44782eaadd1b1825a227e3bd3c73cc6aca8345af05b303d8c43aa2cb944a069755b2709effb8cc22eae621d25d4ba5 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 6e0fd7724b09e6befbcb53b33574e97f2db089f2eee4bbf391abb7f043103a5e6e32e3014c0531b88f9a3ca88887bbc68625752c44326f98dd53adb3a6d1bed8 -## Changelog since v1.20.0-alpha.0 +## Changelog since v1.20.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Azure blob disk feature(`kind`: `Shared`, `Dedicated`) has been deprecated, you should use `kind`: `Managed` in `kubernetes.io/azure-disk` storage class. ([#92905](https://github.com/kubernetes/kubernetes/pull/92905), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] - - CVE-2020-8559 (Medium): Privilege escalation from compromised node to cluster. See https://github.com/kubernetes/kubernetes/issues/92914 for more details. - The API Server will no longer proxy non-101 responses for upgrade requests. This could break proxied backends (such as an extension API server) that respond to upgrade requests with a non-101 response code. 
([#92941](https://github.com/kubernetes/kubernetes/pull/92941), [@tallclair](https://github.com/tallclair)) [SIG API Machinery]
+ - Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1 but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release]

## Changes by Kind

### Deprecation

-- Kube-apiserver: the componentstatus API is deprecated. This API provided status of etcd, kube-scheduler, and kube-controller-manager components, but only worked when those components were local to the API server, and when kube-scheduler and kube-controller-manager exposed unsecured health endpoints. Instead of this API, etcd health is included in the kube-apiserver health check and kube-scheduler/kube-controller-manager health checks can be made directly against those components' health endpoints. ([#93570](https://github.com/kubernetes/kubernetes/pull/93570), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Cluster Lifecycle]
-- Kubeadm: deprecate the "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. ([#92881](https://github.com/kubernetes/kubernetes/pull/92881), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed, please adopt to `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node]
-- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing]
+- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps]
+- Kubeadm: the deprecated command "alpha selfhosting pivot" has now been removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is now deprecated. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Kubeadm: the "kubeadm alpha certs" command has now been removed; please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle]
+- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds"; use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI]

### API Change

-- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery]
-- A new alpha-level field, `SupportsFsGroup`, has been introduced for CSIDrivers to allow them to specify whether they support volume ownership and permission modifications. The `CSIVolumeSupportFSGroup` feature gate must be enabled to allow this field to be used. ([#92001](https://github.com/kubernetes/kubernetes/pull/92001), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, CLI and Storage]
-- Added pod version skew strategy for seccomp profile to synchronize the deprecated annotations with the new API Server fields. Please see the corresponding section [in the KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190717-seccomp-ga.md#version-skew-strategy) for more detailed explanations. ([#91408](https://github.com/kubernetes/kubernetes/pull/91408), [@saschagrunert](https://github.com/saschagrunert)) [SIG Apps, Auth, CLI and Node]
-- Adds the ability to disable Accelerator/GPU metrics collected by Kubelet ([#91930](https://github.com/kubernetes/kubernetes/pull/91930), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node]
-- Custom Endpoints are now mirrored to EndpointSlices by a new EndpointSliceMirroring controller. ([#91637](https://github.com/kubernetes/kubernetes/pull/91637), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Cloud Provider, Instrumentation, Network and Testing]
-- External facing API podresources is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing]
-- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation]
-- Generic ephemeral volumes, a new alpha feature under the `GenericEphemeralVolume` feature gate, provide a more flexible alternative to `EmptyDir` volumes: as with `EmptyDir`, volumes are created and deleted for each pod automatically by Kubernetes. But because the normal provisioning process is used (`PersistentVolumeClaim`), storage can be provided by third-party storage vendors and all of the usual volume features work. Volumes don't need to be empty; for example, restoring from snapshot is supported. ([#92784](https://github.com/kubernetes/kubernetes/pull/92784), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Instrumentation, Node, Scheduling, Storage and Testing]
-- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing]
-- Kubernetes is now built with golang 1.15.0-rc.1.
- - The deprecated, legacy behavior of treating the CommonName field on X.509 serving certificates as a host name when no Subject Alternative Names are present is now disabled by default. It can be temporarily re-enabled by adding the value x509ignoreCN=0 to the GODEBUG environment variable. ([#93264](https://github.com/kubernetes/kubernetes/pull/93264), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Storage and Testing]
-- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling]
-- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node]
-- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth]
-- The kube-controller-manager managed signers can now have distinct signing certificates and keys. See the help about `--cluster-signing-[signer-name]-{cert,key}-file`. `--cluster-signing-{cert,key}-file` is still the default. ([#90822](https://github.com/kubernetes/kubernetes/pull/90822), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth]
-- When creating a networking.k8s.io/v1 Ingress API object, `spec.tls[*].secretName` values are required to pass validation rules for Secret API object names. ([#93929](https://github.com/kubernetes/kubernetes/pull/93929), [@liggitt](https://github.com/liggitt)) [SIG Network]
-- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows]
+- Change the APIVersion proto name of BoundObjectRef from aPIVersion to apiVersion. ([#97379](https://github.com/kubernetes/kubernetes/pull/97379), [@kebe7jun](https://github.com/kebe7jun)) [SIG Auth]
+- Promote Immutable Secrets/ConfigMaps feature to Stable.
+ This allows setting the `Immutable` field in Secret or ConfigMap objects to mark their contents as immutable. ([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing]

### Feature

-- ACTION REQUIRED : In CoreDNS v1.7.0, [metrics names have been changed](https://github.com/coredns/coredns/blob/master/notes/coredns-1.7.0.md#metric-changes) which will be backward incompatible with existing reporting formulas that use the old metrics' names. Adjust your formulas to the new names before upgrading.
+- Add flag --lease-max-object-size and metric etcd_lease_object_counts for kube-apiserver to configure and observe the maximum number of objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability]
+- Add flag --lease-reuse-duration-seconds for kube-apiserver to configure the etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability]
+- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth]
+- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle]
+- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node]
+- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows]
+- The apimachinery util/net function used to detect the bind address `ResolveBindAddress()`
+ takes into consideration global IP addresses on loopback interfaces when:
+ - the host has default routes
+ - there are no global IPs on those interfaces,
+ in order to support more complex network scenarios like BGP Unnumbered RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network]
+
+### Bug or Regression
+
+- ## Changelog

- Kubeadm now includes CoreDNS version v1.7.0. Some of the major changes include:
- - Fixed a bug that could cause CoreDNS to stop updating service records.
- - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set.
- - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin.
- - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names.
- - The federation plugin (allows for v1 Kubernetes federation) has been removed. - More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92651](https://github.com/kubernetes/kubernetes/pull/92651), [@rajansandeep](https://github.com/rajansandeep)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Add tags support for Azure File Driver ([#92825](https://github.com/kubernetes/kubernetes/pull/92825), [@ZeroMagic](https://github.com/ZeroMagic)) [SIG Cloud Provider and Storage] -- Added kube-apiserver metrics: apiserver_current_inflight_request_measures and, when API Priority and Fairness is enable, windowed_request_stats. ([#91177](https://github.com/kubernetes/kubernetes/pull/91177), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery, Instrumentation and Testing] -- Audit events for API requests to deprecated API versions now include a `"k8s.io/deprecated": "true"` audit annotation. If a target removal release is identified, the audit event includes a `"k8s.io/removal-release": "."` audit annotation as well. ([#92842](https://github.com/kubernetes/kubernetes/pull/92842), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Instrumentation] -- Cloud node-controller use InstancesV2 ([#91319](https://github.com/kubernetes/kubernetes/pull/91319), [@gongguan](https://github.com/gongguan)) [SIG Apps, Cloud Provider, Scalability and Storage] -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate the "--csr-only" and "--csr-dir" flags of the "kubeadm init phase certs" subcommands. Please use "kubeadm alpha certs generate-csr" instead. This new command allows you to generate new private keys and certificate signing requests for all the control-plane components, so that the certificates can be signed by an external CA. ([#92183](https://github.com/kubernetes/kubernetes/pull/92183), [@wallrj](https://github.com/wallrj)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubemark now supports both real and hollow nodes in a single cluster. 
([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. + ### General + - Fix priority expander falling back to a random choice even though there is a higher priority option to choose + - Clone `kubernetes/kubernetes` in `update-vendor.sh` shallowly, instead of fetching all revisions + - Speed up binpacking by reducing the number of PreFilter calls (call once per pod instead of #pods*#nodes times) + - Speed up finding unneeded nodes by 5x+ in very large clusters by reducing the number of PreFilter calls + - Expose `--max-nodes-total` as a metric + - Errors in `IncreaseSize` changed from type `apiError` to `cloudProviderError` + - Make `build-in-docker` and `test-in-docker` work on Linux systems with SELinux enabled + - Fix an error where existing nodes were not considered as destinations while finding place for pods in scale-down simulations + - Remove redundant log lines and reduce severity around parsing kubeEnv + - Don't treat nodes created by virtual kubelet as nodes from non-autoscaled node groups + - Remove redundant logging around calculating node utilization + - Add configurable `--network` and `--rm` flags for docker in `Makefile` + - Subtract DaemonSet pods' requests from node allocatable in the denominator while computing node utilization + - Include taints by condition when determining if a node is unready/still starting + - Fix `update-vendor.sh` to work on OSX and zsh + - Add best-effort eviction for DaemonSet pods while scaling down non-empty nodes + - Add build support for ARM64 - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: @mikedanese: - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. + ### AliCloud + - Add missing daemonsets and replicasets to ALI example cluster role - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. + ### Apache CloudStack + - Add support for Apache CloudStack - - make kube::util::find-binary not dependent on bazel-out/ structure + ### AWS + - Regenerate list of EC2 instances + - Fix pricing endpoint in AWS China Region - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. 
([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node to pod pid isolation - Promote SupportPodPidsLimit to GA to provide ability to limit pids per pod ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node and Testing] -- Rename pod_preemption_metrics to preemption_metrics. ([#93256](https://github.com/kubernetes/kubernetes/pull/93256), [@ahg-g](https://github.com/ahg-g)) [SIG Instrumentation and Scheduling] -- Server-side apply behavior has been regularized in the case where a field is removed from the applied configuration. Removed fields which have no other owners are deleted from the live object, or reset to their default value if they have one. Safe ownership transfers, such as the transfer of a `replicas` field from a user to an HPA without resetting to the default value are documented in [Transferring Ownership](/docs/reference/using-api/server-side-apply/#transferring-ownership) ([#92661](https://github.com/kubernetes/kubernetes/pull/92661), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Testing] -- Set CSIMigrationvSphere feature gates to beta. - Users should enable CSIMigration + CSIMigrationvSphere features and install the vSphere CSI Driver (https://github.com/kubernetes-sigs/vsphere-csi-driver) to move workload from the in-tree vSphere plugin "kubernetes.io/vsphere-volume" to vSphere CSI Driver. + ### Azure + - Add optional jitter on initial VMSS VM cache refresh, keep the refreshes spread over time + - Serve from cache for the whole period of ongoing throttling + - Fix unwanted VMSS VMs cache invalidations + - Enforce setting the number of retries if cloud provider backoff is enabled + - Don't update capacity if VMSS provisioning state is updating + - Support allocatable resources overrides via VMSS tags + - Add missing stable labels in template nodes + - Proactively set instance status to deleting on node deletions - Requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15 ([#92816](https://github.com/kubernetes/kubernetes/pull/92816), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support a smooth upgrade from client-side apply to server-side apply without conflicts, as well as support the corresponding downgrade. ([#90187](https://github.com/kubernetes/kubernetes/pull/90187), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery and Testing] -- Trace output in apiserver logs is more organized and comprehensive. Traces are nested, and for all non-long running request endpoints, the entire filter chain is instrumented (e.g. authentication check is included). 
([#88936](https://github.com/kubernetes/kubernetes/pull/88936), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Scheduling] -- `kubectl alpha debug` now supports debugging nodes by creating a debugging container running in the node's host namespaces. ([#92310](https://github.com/kubernetes/kubernetes/pull/92310), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node] - -### Failing Test - -- Kube-proxy iptables min-sync-period defaults to 1 sec. Previously, it was 0. ([#92836](https://github.com/kubernetes/kubernetes/pull/92836), [@aojea](https://github.com/aojea)) [SIG Network] - -### Bug or Regression - -- A panic in the apiserver caused by the `informer-sync` health checker is now fixed. ([#93600](https://github.com/kubernetes/kubernetes/pull/93600), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery] -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Adding fix to the statefulset controller to wait for pvc deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) [SIG Apps] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure: fix a bug that kube-controller-manager would panic if wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Azure: per VMSS VMSS VMs cache to prevent throttling on clusters having many attached VMSS ([#93107](https://github.com/kubernetes/kubernetes/pull/93107), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Both apiserver_request_duration_seconds metrics and RequestReceivedTimestamp field of an audit event take - into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Auth and Instrumentation] -- Build/lib/release: Explicitly use '--platform' in building server images + ### Cluster API + - Migrate interaction with the API from using internal types to using Unstructured + - Improve tests to work better with constrained resources + - Add support for node autodiscovery + - Add support for `--cloud-config` + - Update group identifier to use for Cluster API annotations + + ### Exoscale + - Add support for Exoscale - When we switched to go-runner for building the apiserver, - controller-manager, and scheduler server components, we no longer - reference the individual architectures in the image names, specifically - in the 'FROM' directive of the server image Dockerfiles. 
+ ### GCE + - Decrease the number of GCE Read Requests made while deleting nodes + - Base pricing of custom instances on their instance family type + - Add pricing information for missing machine types + - Add pricing information for different GPU types + - Ignore the new `topology.gke.io/zone` label when comparing groups + - Add missing stable labels to template nodes - As a result, server images for non-amd64 images copy in the go-runner - amd64 binary instead of the go-runner that matches that architecture. + ### HuaweiCloud + - Add auto scaling group support + - Implement node group by AS + - Implement getting desired instance number of node group + - Implement increasing node group size + - Implement TemplateNodeInfo + - Implement caching instances - This commit explicitly sets the '--platform=linux/${arch}' to ensure - we're pulling the correct go-runner arch from the manifest list. + ### IONOS + - Add support for IONOS - Before: - `FROM ${base_image}` + ### Kubemark + - Skip non-kubemark nodes while computing node infos for node groups. - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- CVE-2020-8557 (Medium): Node-local denial of service via container /etc/hosts file. See https://github.com/kubernetes/kubernetes/issues/93032 for more details. ([#92916](https://github.com/kubernetes/kubernetes/pull/92916), [@joelsmith](https://github.com/joelsmith)) [SIG Node] -- Do not add nodes labeled with kubernetes.azure.com/managed=false to backend pool of load balancer. ([#93034](https://github.com/kubernetes/kubernetes/pull/93034), [@matthias50](https://github.com/matthias50)) [SIG Cloud Provider] -- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Do not retry volume expansion if CSI driver returns FailedPrecondition error ([#92986](https://github.com/kubernetes/kubernetes/pull/92986), [@gnufied](https://github.com/gnufied)) [SIG Node and Storage] -- Dockershim security: pod sandbox now always run with `no-new-privileges` and `runtime/default` seccomp profile - dockershim seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90948](https://github.com/kubernetes/kubernetes/pull/90948), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when dual-stack feature gate become enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Endpoint controller requeues service after an endpoint deletion event occurs to confirm that deleted endpoints are undesired to mitigate the effects of an out of sync endpoint cache. ([#93030](https://github.com/kubernetes/kubernetes/pull/93030), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] -- EndpointSlice controllers now return immediately if they encounter an error creating, updating, or deleting resources. 
([#93908](https://github.com/kubernetes/kubernetes/pull/93908), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now copies labels from Endpoints to EndpointSlices. ([#93442](https://github.com/kubernetes/kubernetes/pull/93442), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now mirrors Endpoints that do not have a Service associated with them. ([#94171](https://github.com/kubernetes/kubernetes/pull/94171), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Ensure backoff step is set to 1 for Azure armclient. ([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Ensure getPrimaryInterfaceID not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Eviction requests for pods that have a non-zero DeletionTimestamp will always succeed ([#91342](https://github.com/kubernetes/kubernetes/pull/91342), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Extended DSR loadbalancer feature in winkernel kube-proxy to HNS versions 9.3-9.max, 10.2+ ([#93080](https://github.com/kubernetes/kubernetes/pull/93080), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Fix HandleCrash order ([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG API Machinery] -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug where loadbalancer deletion gets stuck because of missing resource group #75198 ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) [SIG Cloud Provider] -- Fix calling AttachDisk on a previously attached EBS volume ([#93567](https://github.com/kubernetes/kubernetes/pull/93567), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider, Storage and Testing] -- Fix detection of image filesystem, disk metrics for devicemapper, detection of OOM Kills on 5.0+ linux kernels. 
([#92919](https://github.com/kubernetes/kubernetes/pull/92919), [@dashpole](https://github.com/dashpole)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix instance not found issues when an Azure Node is recreated in a short time ([#93316](https://github.com/kubernetes/kubernetes/pull/93316), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix kube-apiserver /readyz to contain "informer-sync" check ensuring that internal informers are synced. ([#93670](https://github.com/kubernetes/kubernetes/pull/93670), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in EndpointSliceTracker for EndpointSliceMirroring controller. ([#93441](https://github.com/kubernetes/kubernetes/pull/93441), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. 
([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: determine the correct ip config based on ip family ([#93043](https://github.com/kubernetes/kubernetes/pull/93043), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- Fix: initial delay in mounting azure disk & file ([#93052](https://github.com/kubernetes/kubernetes/pull/93052), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed Ceph RBD volume expansion when no ceph.conf exists ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) [SIG Storage] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a bug whereby the allocation of reusable CPUs and devices was not being honored when the TopologyManager was enabled ([#93189](https://github.com/kubernetes/kubernetes/pull/93189), [@klueska](https://github.com/klueska)) [SIG Node] -- Fixed a panic in kubectl debug when pod has multiple init containers or ephemeral containers ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) [SIG CLI] -- Fixed a regression that sometimes prevented `kubectl portforward` to work when TCP and UDP services were configured on the same port ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) [SIG CLI] -- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed bug where kubectl top pod output is not sorted when --sort-by and --containers flags are used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixed memory leak in endpointSliceTracker ([#92838](https://github.com/kubernetes/kubernetes/pull/92838), [@tnqn](https://github.com/tnqn)) [SIG Apps and Network] -- Fixed node data lost in kube-scheduler for clusters with imbalance on number of nodes across zones ([#93355](https://github.com/kubernetes/kubernetes/pull/93355), [@maelk](https://github.com/maelk)) [SIG Scheduling] -- Fixed the EndpointSliceController to correctly create endpoints for IPv6-only pods. 
+ ### Magnum + - Add Magnum support in the Cluster Autoscaler helm chart - Fixed the EndpointController to allow IPv6 headless services, if the IPv6DualStack - feature gate is enabled, by specifying `ipFamily: IPv6` on the service. (This already - worked with the EndpointSliceController.) ([#91399](https://github.com/kubernetes/kubernetes/pull/91399), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Fixes a bug evicting pods after a taint with a limited tolerationSeconds toleration is removed from a node ([#93722](https://github.com/kubernetes/kubernetes/pull/93722), [@liggitt](https://github.com/liggitt)) [SIG Apps and Node] -- Fixes a bug where EndpointSlices would not be recreated after rapid Service recreation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Fixes a race condition in kubelet pod handling ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) [SIG Node] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes an issue that can result in namespaced custom resources being orphaned when their namespace is deleted, if the CRD defining the custom resource is removed concurrently with namespaces being deleted, then recreated. ([#93790](https://github.com/kubernetes/kubernetes/pull/93790), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] -- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Increased maximum IOPS of AWS EBS io1 volumes to 64,000 (current AWS maximum). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) [SIG Cloud Provider and Storage] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-aggregator certificates are dynamically loaded on change from disk ([#92791](https://github.com/kubernetes/kubernetes/pull/92791), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kube-apiserver: fixed a bug returning inconsistent results from list requests which set a field or label selector and set a paging limit ([#94002](https://github.com/kubernetes/kubernetes/pull/94002), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Kube-apiserver: jsonpath expressions with consecutive recursive descent operators are no longer evaluated for custom resource printer columns ([#93408](https://github.com/kubernetes/kubernetes/pull/93408), [@joelsmith](https://github.com/joelsmith)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kube-up now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. 
- - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin. - - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. - More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92718](https://github.com/kubernetes/kubernetes/pull/92718), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cloud Provider] -- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle] -- Kubeadm: ensure "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) [SIG Cluster Lifecycle] -- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CR ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more reseliant to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). 
([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove duplicate DNS names and IP addresses from generated certificates ([#92753](https://github.com/kubernetes/kubernetes/pull/92753), [@QianChenglong](https://github.com/QianChenglong)) [SIG Cluster Lifecycle] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- Kubelet: fix race condition in pluginWatcher ([#93622](https://github.com/kubernetes/kubernetes/pull/93622), [@knight42](https://github.com/knight42)) [SIG Node] -- Kuberuntime security: pod sandbox now always runs with `runtime/default` seccomp profile - kuberuntime seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90949](https://github.com/kubernetes/kubernetes/pull/90949), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- NONE ([#71269](https://github.com/kubernetes/kubernetes/pull/71269), [@DeliangFan](https://github.com/DeliangFan)) [SIG Node] -- New Azure instance types do now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Pods with invalid Affinity/AntiAffinity LabelSelectors will now fail scheduling when these plugins are enabled ([#93660](https://github.com/kubernetes/kubernetes/pull/93660), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Reverted devicemanager for Windows node added in 1.19rc1. ([#93263](https://github.com/kubernetes/kubernetes/pull/93263), [@liggitt](https://github.com/liggitt)) [SIG Node and Windows] -- Scheduler bugfix: Scheduler doesn't lose pod information when nodes are quickly recreated. This could happen when nodes are restarted or quickly recreated reusing a nodename. ([#93938](https://github.com/kubernetes/kubernetes/pull/93938), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scalability, Scheduling and Testing] -- The EndpointSlice controller now waits for EndpointSlice and Node caches to be synced before starting. ([#94086](https://github.com/kubernetes/kubernetes/pull/94086), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- The terminationGracePeriodSeconds from pod spec is respected for the mirror pod. 
([#92442](https://github.com/kubernetes/kubernetes/pull/92442), [@tedyu](https://github.com/tedyu)) [SIG Node and Testing] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Updated Cluster Autoscaler to 1.19.0; ([#93577](https://github.com/kubernetes/kubernetes/pull/93577), [@vivekbagade](https://github.com/vivekbagade)) [SIG Autoscaling and Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] + ### Packet + - Allow empty nodepools + - Add support for multiple nodepools + - Add pricing support + + ## Image + Image: `k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0` ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) [SIG Cloud Provider] +- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Add limited lines to log when having tail option ([#93920](https://github.com/kubernetes/kubernetes/pull/93920), [@zhouya0](https://github.com/zhouya0)) [SIG Node] +- Avoid systemd-logind loading configuration warning ([#97950](https://github.com/kubernetes/kubernetes/pull/97950), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing] +- Copy annotations with empty value when deployment rolls back ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Detach volumes from vSphere nodes not tracked by attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- Fix kubectl label error when local=true is set. ([#97440](https://github.com/kubernetes/kubernetes/pull/97440), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Fix Azure file share not deleted issue when the namespace is deleted ([#97417](https://github.com/kubernetes/kubernetes/pull/97417), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing] +- Fix kubectl-convert import known versions ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) [SIG CLI and Testing] +- Fix missing cadvisor machine metrics. 
([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node] +- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node] +- Fix the regression with the slow pods termination. Before this fix pods may take an additional time to terminate - up to one minute. Reversing the change that ensured that CNI resources cleaned up when the pod is removed on API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node] +- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage] +- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider] +- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed a bug in kubelet that will saturate CPU utilization after containerd got restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node] +- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network] +- Ignore update pod with no new images in alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node] +- Kubeadm now installs version 3.4.13 of etcd when creating a cluster with v1.19 ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: avoid detection of the container runtime for commands that do not need it ([#97625](https://github.com/kubernetes/kubernetes/pull/97625), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug in the host memory detection code on 32bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. 
([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Performance regression [#97685](https://github.com/kubernetes/kubernetes/issues/97685) has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] +- Remove deprecated --cleanup-ipvs flag of kube-proxy, and make --cleanup flag always to flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- The current version of the container image publicly exposed IP serving a /metrics endpoint to the Internet. The new version of the container image serves /metrics endpoint on a different port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider] +- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage] - Users will see increase in time for deletion of pods and also guarantee that removal of pod from api server would mean deletion of all the resources from container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node] -- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.rules[*].http` values are now validated consistently when the `host` field contains a wildcard. ([#93954](https://github.com/kubernetes/kubernetes/pull/93954), [@Miciah](https://github.com/Miciah)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Storage and Testing] +- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- Warning about using a deprecated volume plugin is logged only once. ([#96751](https://github.com/kubernetes/kubernetes/pull/96751), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] ### Other (Cleanup or Flake) -- --cache-dir sets cache directory for both http and discovery, defaults to $HOME/.kube/cache ([#92910](https://github.com/kubernetes/kubernetes/pull/92910), [@soltysh](https://github.com/soltysh)) [SIG API Machinery and CLI] -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. 
([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.2 and debian-iptables@v12.1.1 ([#93667](https://github.com/kubernetes/kubernetes/pull/93667), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.3 and debian-iptables@v12.1.2 ([#93916](https://github.com/kubernetes/kubernetes/pull/93916), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to go-runner:buster-v2.0.0 ([#94167](https://github.com/kubernetes/kubernetes/pull/94167), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Fix kubelet to properly log when a container is started. Before, sometimes the log said that a container is dead and was restarted when it was started for the first time. This only happened when using pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) [SIG Node] -- Fix: license issue in blob disk feature ([#92824](https://github.com/kubernetes/kubernetes/pull/92824), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixes the flooding warning messages about setting volume ownership for configmap/secret volumes ([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) [SIG Instrumentation, Node and Storage] -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Kube-up: defaults to limiting critical pods to the kube-system namespace to match behavior prior to 1.17 ([#93121](https://github.com/kubernetes/kubernetes/pull/93121), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider and Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove support for the "ci/k8s-master" version label. This label has been removed in the Kubernetes CI release process and would no longer work in kubeadm. You can use the "ci/latest" version label instead. 
See kubernetes/test-infra#18517 ([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubernetes is now built with go1.15.0 ([#93939](https://github.com/kubernetes/kubernetes/pull/93939), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Kubernetes is now built with go1.15.0-rc.2 ([#93827](https://github.com/kubernetes/kubernetes/pull/93827), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] -- Lock ExternalPolicyForExternalIP to default, this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To ensure the code be strong, add unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update Golang to v1.14.5 - - Update repo-infra to 0.0.7 (to support go1.14.5 and go1.13.13) - - Includes: - - bazelbuild/bazel-toolchains@3.3.2 - - bazelbuild/rules_go@v0.22.7 ([#93088](https://github.com/kubernetes/kubernetes/pull/93088), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update Golang to v1.14.6 - - Update repo-infra to 0.0.8 (to support go1.14.6 and go1.13.14) - - Includes: - - bazelbuild/bazel-toolchains@3.4.0 - - bazelbuild/rules_go@v0.22.8 ([#93198](https://github.com/kubernetes/kubernetes/pull/93198), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.9 ([#92349](https://github.com/kubernetes/kubernetes/pull/92349), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). 
To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Delete deprecated mixed protocol annotation ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: fix a whitespace issue in the output of the "kubeadm join" command shown as the output of "kubeadm init" and "kubeadm token create --print-join-command" ([#97413](https://github.com/kubernetes/kubernetes/pull/97413), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the error messaging when the user provides an invalid discovery token CA certificate hash. ([#97290](https://github.com/kubernetes/kubernetes/pull/97290), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Migrate log messages in pkg/scheduler/{scheduler.go,factory.go} to structured logging ([#97509](https://github.com/kubernetes/kubernetes/pull/97509), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- Migrate proxy/iptables/proxier.go logs to structured logging ([#97678](https://github.com/kubernetes/kubernetes/pull/97678), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some scheduler log messages to structured logging ([#97349](https://github.com/kubernetes/kubernetes/pull/97349), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- NONE ([#97167](https://github.com/kubernetes/kubernetes/pull/97167), [@geegeea](https://github.com/geegeea)) [SIG Node] +- NetworkPolicy validation framework optimizations for rapidly verifying CNI's work correctly across several pods and namespaces ([#91592](https://github.com/kubernetes/kubernetes/pull/91592), [@jayunit100](https://github.com/jayunit100)) [SIG Network, Storage and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Scheduler plugin validation now provides all errors detected instead of the first one. ([#96745](https://github.com/kubernetes/kubernetes/pull/96745), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node, Scheduling and Testing] +- Storage related e2e testsuite redesign & cleanup ([#96573](https://github.com/kubernetes/kubernetes/pull/96573), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Storage and Testing] +- The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- The `AttachVolumeLimit` feature gate that is GA since v1.17 is now removed. 
([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Storage] +- The `CSINodeInfo` feature gate that is GA since v1.17 is unconditionally enabled, and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior`(locked to false) features have been promoted to GA. + To prevent control plane nodes being added to load balancers automatically, upgrade users need to add "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Apps, Cloud Provider and Network] + +### Uncategorized + +- Adding Brazilian Portuguese translation for kubectl ([#61595](https://github.com/kubernetes/kubernetes/pull/61595), [@cpanato](https://github.com/cpanato)) [SIG CLI] ## Dependencies ### Added -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/yuin/goldmark: [v1.1.27](https://github.com/yuin/goldmark/tree/v1.1.27) -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 +_Nothing has changed._ ### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.0](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.0) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.0](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.0) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/hcsshim: [v0.8.9 → 5eafd15](https://github.com/Microsoft/hcsshim/compare/v0.8.9...5eafd15) -- github.com/cilium/ebpf: [9f1617e → 1c8d4c9](https://github.com/cilium/ebpf/compare/9f1617e...1c8d4c9) -- github.com/containerd/cgroups: [bf292b2 → 0dbf7f0](https://github.com/containerd/cgroups/compare/bf292b2...0dbf7f0) -- github.com/coredns/corefile-migration: [v1.0.8 → v1.0.10](https://github.com/coredns/corefile-migration/compare/v1.0.8...v1.0.10) -- github.com/evanphx/json-patch: [e83c0a1 → v4.9.0+incompatible](https://github.com/evanphx/json-patch/compare/e83c0a1...v4.9.0) -- github.com/google/cadvisor: [8450c56 → v0.37.0](https://github.com/google/cadvisor/compare/8450c56...v0.37.0) -- github.com/json-iterator/go: [v1.1.9 → v1.1.10](https://github.com/json-iterator/go/compare/v1.1.9...v1.1.10) -- github.com/opencontainers/go-digest: 
[v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [1b94395 → 819fcc6](https://github.com/opencontainers/runc/compare/1b94395...819fcc6) -- github.com/prometheus/client_golang: [v1.6.0 → v1.7.1](https://github.com/prometheus/client_golang/compare/v1.6.0...v1.7.1) -- github.com/prometheus/common: [v0.9.1 → v0.10.0](https://github.com/prometheus/common/compare/v0.9.1...v0.10.0) -- github.com/prometheus/procfs: [v0.0.11 → v0.1.3](https://github.com/prometheus/procfs/compare/v0.0.11...v0.1.3) -- github.com/rubiojr/go-vhd: [0bfd3b3 → 02e2102](https://github.com/rubiojr/go-vhd/compare/0bfd3b3...02e2102) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/urfave/cli: [v1.22.1 → v1.22.2](https://github.com/urfave/cli/compare/v1.22.1...v1.22.2) -- go.etcd.io/etcd: 54ba958 → dd1b699 -- golang.org/x/crypto: bac4c82 → 75b2880 -- golang.org/x/mod: v0.1.0 → v0.3.0 -- golang.org/x/net: d3edc99 → ab34263 -- golang.org/x/tools: c00d67e → c1934b7 -- k8s.io/kube-openapi: 656914f → 6aeccd4 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: 6e3d28b → d5654de +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.38.6](https://github.com/google/cadvisor/compare/v0.38.5...v0.38.6) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- k8s.io/system-validators: v1.2.0 → v1.3.0 ### Removed -- github.com/godbus/dbus: [ade71ed](https://github.com/godbus/dbus/tree/ade71ed) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- sigs.k8s.io/structured-merge-diff/v3: v3.0.0 -- vbom.ml/util: db5cfe1 +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 diff --git a/content/en/docs/tasks/administer-cluster/declare-network-policy.md b/content/en/docs/tasks/administer-cluster/declare-network-policy.md index fed4a77f9de69..7acbaa9e7d50b 100644 --- a/content/en/docs/tasks/administer-cluster/declare-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/declare-network-policy.md @@ -18,6 +18,7 @@ This document helps you get started using the Kubernetes [NetworkPolicy API](/do Make sure you've configured a network provider with network policy support. 
There are a number of network providers that support NetworkPolicy, including: +* [Antrea](/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy/) * [Calico](/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy/) * [Cilium](/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy/) * [Kube-router](/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy/) diff --git a/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md b/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md index 919f522c7c492..dadc653f4e196 100644 --- a/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md +++ b/content/en/docs/tasks/administer-cluster/enabling-topology-aware-hints.md @@ -27,7 +27,7 @@ The following prerequisite is needed in order to enable topology aware hints: ## Enable Topology Aware Hints To enable service topology hints, enable the `TopologyAwareHints` [feature -gate](docs/reference/command-line-tools-reference/feature-gates/) for the +gate](/docs/reference/command-line-tools-reference/feature-gates/) for the kube-apiserver, kube-controller-manager, and kube-proxy: ``` diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md index 02fdc958fa2b0..9ed45bd07f1db 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver.md @@ -45,7 +45,7 @@ A minimal example of configuring the field explicitly: # kubeadm-config.yaml kind: ClusterConfiguration apiVersion: kubeadm.k8s.io/v1beta2 -kubernetesVersion: v1.21 +kubernetesVersion: v1.21.0 --- kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 @@ -60,7 +60,7 @@ kubeadm init --config kubeadm-config.yaml {{< note >}} Kubeadm uses the same `KubeletConfiguration` for all nodes in the cluster. -The `KubeletConfiguration` is stored in a [ConfigMap](docs/concepts/configuration/configmap) +The `KubeletConfiguration` is stored in a [ConfigMap](/docs/concepts/configuration/configmap) object under the `kube-system` namespace. Executing the sub commands `init`, `join` and `upgrade` would result in kubeadm diff --git a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index dc7af4a32941b..62e66d1a8f5ec 100644 --- a/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/en/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -207,3 +207,71 @@ After a certificate is signed using your preferred method, the certificate and t Kubeadm does not support rotation or replacement of CA certificates out of the box. For more information about manual rotation or replacement of CA, see [manual rotation of CA certificates](/docs/tasks/tls/manual-rotation-of-ca-certificates/). + +## Enabling signed kubelet serving certificates {#kubelet-serving-certs} + +By default the kubelet serving certificate deployed by kubeadm is self-signed. +This means a connection from external services like the +[metrics-server](https://github.com/kubernetes-sigs/metrics-server) to a +kubelet cannot be secured with TLS. 
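For illustration only (a sketch that is not part of the page being patched; the node address is a placeholder, and 10250 is the kubelet's default serving port), you can inspect the certificate a kubelet presents to see that it is self-signed, i.e. that its issuer and subject are identical:

```shell
# Replace <node-ip> with the address of one of your nodes.
# For a self-signed certificate, the printed issuer and subject are the same entity.
openssl s_client -connect <node-ip>:10250 -showcerts </dev/null 2>/dev/null \
  | openssl x509 -noout -issuer -subject
```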
+
+To configure the kubelets in a new kubeadm cluster to obtain properly signed serving
+certificates you must pass the following minimal configuration to `kubeadm init`:
+
+```yaml
+apiVersion: kubeadm.k8s.io/v1beta2
+kind: ClusterConfiguration
+---
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+serverTLSBootstrap: true
+```
+
+If you have already created the cluster you must adapt it by doing the following:
+- Find and edit the `kubelet-config-{{< skew latestVersion >}}` ConfigMap in the `kube-system` namespace.
+In that ConfigMap, the `config` key has a
+[KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)
+document as its value. Edit the KubeletConfiguration document to set `serverTLSBootstrap: true`.
+- On each node, add the `serverTLSBootstrap: true` field in `/var/lib/kubelet/config.yaml`
+and restart the kubelet with `systemctl restart kubelet`.
+
+The field `serverTLSBootstrap: true` will enable the bootstrap of kubelet serving
+certificates by requesting them from the `certificates.k8s.io` API. One known limitation
+is that the CSRs (Certificate Signing Requests) for these certificates cannot be automatically
+approved by the default signer in the kube-controller-manager -
+[`kubernetes.io/kubelet-serving`](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers).
+This will require action from the user or a third party controller.
+
+These CSRs can be viewed using:
+
+```shell
+kubectl get csr
+NAME        AGE     SIGNERNAME                      REQUESTOR                      CONDITION
+csr-9wvgt   112s    kubernetes.io/kubelet-serving   system:node:worker-1           Pending
+csr-lz97v   1m58s   kubernetes.io/kubelet-serving   system:node:control-plane-1    Pending
+```
+
+To approve them you can do the following:
+```shell
+kubectl certificate approve <CSR-name>
+```
+
+By default, these serving certificates will expire after one year. Kubeadm sets the
+`KubeletConfiguration` field `rotateCertificates` to `true`, which means that close
+to expiration a new set of CSRs for the serving certificates will be created and must
+be approved to complete the rotation. To understand more, see
+[Certificate Rotation](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/#certificate-rotation).
+
+If you are looking for a solution for automatic approval of these CSRs it is recommended
+that you contact your cloud provider and ask if they have a CSR signer that verifies
+the node identity with an out-of-band mechanism.
+
+{{% thirdparty-content %}}
+
+Third party custom controllers can be used:
+- [kubelet-rubber-stamp](https://github.com/kontena/kubelet-rubber-stamp)
+
+Such a controller is not a secure mechanism unless it not only verifies the CommonName
+in the CSR but also verifies the requested IPs and domain names. This would prevent
+a malicious actor with access to a kubelet client certificate from creating
+CSRs requesting serving certificates for any IP or domain name.
diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md
new file mode 100644
index 0000000000000..36da1839e9950
--- /dev/null
+++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/antrea-network-policy.md
@@ -0,0 +1,24 @@
+---
+title: Use Antrea for NetworkPolicy
+content_type: task
+weight: 10
+---
+
+
+This page shows how to install and use the Antrea CNI plugin on Kubernetes.
+For background on Project Antrea, read the [Introduction to Antrea](https://antrea.io/docs/).
+
+## {{% heading "prerequisites" %}}
+
+You need to have a Kubernetes cluster. Follow the
+[kubeadm getting started guide](/docs/reference/setup-tools/kubeadm/) to bootstrap one.
+
+
+
+## Deploying Antrea with kubeadm
+
+Follow the [Getting Started](https://github.com/vmware-tanzu/antrea/blob/main/docs/getting-started.md) guide to deploy Antrea for kubeadm.
+
+## {{% heading "whatsnext" %}}
+
+Once your cluster is running, you can follow [Declare Network Policy](/docs/tasks/administer-cluster/declare-network-policy/) to try out Kubernetes NetworkPolicy.
diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
index f8ebea0963041..77a9ac76473e9 100644
--- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
+++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes.md
@@ -30,7 +30,7 @@ getting killed by the kubelet before they are up and running.
 
 ## {{% heading "prerequisites" %}}
 
-{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+{{< include "task-tutorial-prereqs.md" >}}
 
diff --git a/content/en/docs/tasks/debug-application-cluster/audit.md b/content/en/docs/tasks/debug-application-cluster/audit.md
index 2f8e1c98751a1..c44caf66b59e1 100644
--- a/content/en/docs/tasks/debug-application-cluster/audit.md
+++ b/content/en/docs/tasks/debug-application-cluster/audit.md
@@ -251,5 +251,7 @@ By default truncate is disabled in both `webhook` and `log`, a cluster administr
 ## {{% heading "whatsnext" %}}
 
 * Learn about [Mutating webhook auditing annotations](/docs/reference/access-authn-authz/extensible-admission-controllers/#mutating-webhook-auditing-annotations).
-* Read the [reference for `audit.k8s.io` API group](/docs/reference/config-api/apiserver-audit.v1/).
+* Learn more about [`Event`](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Event)
+  and the [`Policy`](/docs/reference/config-api/apiserver-audit.v1/#audit-k8s-io-v1-Policy)
+  resource types by reading the Audit configuration reference.
 
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-application.md b/content/en/docs/tasks/debug-application-cluster/debug-application.md
index f927ba5e5bc67..477c9b8248135 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-application.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-application.md
@@ -144,7 +144,7 @@ Verify that the pod's `containerPort` matches up with the Service's `targetPort`
 
 #### Network traffic is not forwarded
 
-Please see [debugging service](/docs/tasks/debug-application-cluster/debug-service.md) for more information.
+Please see [debugging service](/docs/tasks/debug-application-cluster/debug-service/) for more information.
 
 ## {{% heading "whatsnext" %}}
 
diff --git a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
index fdde133345f09..391efe4376adf 100644
--- a/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
+++ b/content/en/docs/tasks/debug-application-cluster/debug-cluster.md
@@ -102,7 +102,7 @@ This is an incomplete list of things that could go wrong, and how to adjust your
 - Action: Use IaaS providers reliable storage (e.g.
GCE PD or AWS EBS volume) for VMs with apiserver+etcd - Mitigates: Apiserver backing storage lost -- Action: Use [high-availability](/docs/admin/high-availability) configuration +- Action: Use [high-availability](/docs/setup/production-environment/tools/kubeadm/high-availability/) configuration - Mitigates: Control plane node shutdown or control plane components (scheduler, API server, controller-manager) crashing - Will tolerate one or more simultaneous node or component failures - Mitigates: API server backing storage (i.e., etcd's data directory) lost diff --git a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md index 671637c084616..1e21306e5f2be 100644 --- a/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md +++ b/content/en/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning.md @@ -72,7 +72,7 @@ after upgrading the objects to a new stored version. Removing an old version: 1. Ensure all clients are fully migrated to the new version. The kube-apiserver - logs can reviewed to help identify any clients that are still accessing via + logs can be reviewed to help identify any clients that are still accessing via the old version. 1. Set `served` to `false` for the old version in the `spec.versions` list. If any clients are still unexpectedly using the old version they may begin reporting diff --git a/content/en/docs/tasks/job/indexed-parallel-processing-static.md b/content/en/docs/tasks/job/indexed-parallel-processing-static.md index 08ada554716b1..b5492eed6ea4c 100644 --- a/content/en/docs/tasks/job/indexed-parallel-processing-static.md +++ b/content/en/docs/tasks/job/indexed-parallel-processing-static.md @@ -40,7 +40,7 @@ non-parallel, use of [Job](/docs/concepts/workloads/controllers/job/). To be able to create Indexed Jobs, make sure to enable the `IndexedJob` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) -on the [API server](docs/reference/command-line-tools-reference/kube-apiserver/) +on the [API server](/docs/reference/command-line-tools-reference/kube-apiserver/) and the [controller manager](/docs/reference/command-line-tools-reference/kube-controller-manager/). diff --git a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md index 3ea3c50e8d120..a41bab673686e 100644 --- a/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md +++ b/content/en/docs/tasks/manage-kubernetes-objects/kustomization.md @@ -114,6 +114,98 @@ metadata: name: example-configmap-2-g2hdhfc6tk ``` +To use a generated ConfigMap in a Deployment, reference it by the name of the configMapGenerator. Kustomize will automatically replace this name with the generated name. 
+
+This is an example deployment that uses a generated ConfigMap:
+
+```shell
+# Create an application.properties file
+cat <<EOF >application.properties
+FOO=Bar
+EOF
+
+cat <<EOF >deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  labels:
+    app: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - name: app
+        image: my-app
+        volumeMounts:
+        - name: config
+          mountPath: /config
+      volumes:
+      - name: config
+        configMap:
+          name: example-configmap-1
+EOF
+
+cat <<EOF >./kustomization.yaml
+resources:
+- deployment.yaml
+configMapGenerator:
+- name: example-configmap-1
+  files:
+  - application.properties
+EOF
+```
+
+Generate the ConfigMap and Deployment:
+
+```shell
+kubectl kustomize ./
+```
+
+The generated Deployment will refer to the generated ConfigMap by name:
+
+```yaml
+apiVersion: v1
+data:
+  application.properties: |
+    FOO=Bar
+kind: ConfigMap
+metadata:
+  name: example-configmap-1-g4hk9g2ff8
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: my-app
+  name: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - image: my-app
+        name: app
+        volumeMounts:
+        - mountPath: /config
+          name: config
+      volumes:
+      - configMap:
+          name: example-configmap-1-g4hk9g2ff8
+        name: config
+```
+
 #### secretGenerator
 
 You can generate Secrets from files or literal key-value pairs. To generate a Secret from a file, add an entry to the `files` list in `secretGenerator`. Here is an example of generating a Secret with a data item from a file:
@@ -170,6 +262,53 @@ metadata:
 type: Opaque
 ```
 
+Like ConfigMaps, generated Secrets can be used in Deployments by referring to the name of the secretGenerator:
+
+```shell
+# Create a password.txt file
+cat <<EOF >./password.txt
+username=admin
+password=secret
+EOF
+
+cat <<EOF >deployment.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  labels:
+    app: my-app
+spec:
+  selector:
+    matchLabels:
+      app: my-app
+  template:
+    metadata:
+      labels:
+        app: my-app
+    spec:
+      containers:
+      - name: app
+        image: my-app
+        volumeMounts:
+        - name: password
+          mountPath: /secrets
+      volumes:
+      - name: password
+        secret:
+          secretName: example-secret-1
+EOF
+
+cat <<EOF >./kustomization.yaml
+resources:
+- deployment.yaml
+secretGenerator:
+- name: example-secret-1
+  files:
+  - password.txt
+EOF
+```
+
 #### generatorOptions
 
 The generated ConfigMaps and Secrets have a content hash suffix appended. This ensures that a new ConfigMap or Secret is generated when the contents are changed. To disable the behavior of appending a suffix, one can use `generatorOptions`. Besides that, it is also possible to specify cross-cutting options for generated ConfigMaps and Secrets.
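For illustration, here is a minimal sketch of such cross-cutting options (the generator name `example-configmap-3` and the label/annotation values are assumptions made up for this sketch, not taken from the patched page): `disableNameSuffixHash: true` suppresses the content-hash suffix, while `labels` and `annotations` are applied to every generated object.

```shell
cat <<EOF >./kustomization.yaml
configMapGenerator:
- name: example-configmap-3
  literals:
  - FOO=Bar
generatorOptions:
  # Keep the generated name stable instead of appending a content-hash suffix
  disableNameSuffixHash: true
  # Cross-cutting labels and annotations added to every generated ConfigMap and Secret
  labels:
    type: generated
  annotations:
    note: generated
EOF

kubectl kustomize ./
```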
@@ -815,14 +954,14 @@ deployment.apps "dev-my-nginx" deleted | commonLabels | map[string]string | labels to add to all resources and selectors | | commonAnnotations | map[string]string | annotations to add to all resources | | resources | []string | each entry in this list must resolve to an existing resource configuration file | -| configmapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L99) | Each entry in this list generates a ConfigMap | -| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L106) | Each entry in this list generates a Secret | -| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/kustomization.go#L109) | Modify behaviors of all ConfigMap and Secret generator | +| configMapGenerator | [][ConfigMapArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/configmapargs.go#L7) | Each entry in this list generates a ConfigMap | +| secretGenerator | [][SecretArgs](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/secretargs.go#L7) | Each entry in this list generates a Secret | +| generatorOptions | [GeneratorOptions](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/generatoroptions.go#L7) | Modify behaviors of all ConfigMap and Secret generator | | bases | []string | Each entry in this list should resolve to a directory containing a kustomization.yaml file | | patchesStrategicMerge | []string | Each entry in this list should resolve a strategic merge patch of a Kubernetes object | -| patchesJson6902 | [][Json6902](https://github.com/kubernetes-sigs/kustomize/blob/release-kustomize-v4.0/api/types/patchjson6902.go#L8) | Each entry in this list should resolve to a Kubernetes object and a Json Patch | -| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L31) | Each entry is to capture text from one resource's field | -| images | [][Image](https://github.com/kubernetes-sigs/kustomize/tree/master/api/types/image.go#L23) | Each entry is to modify the name, tags and/or digest for one image without creating patches | +| patchesJson6902 | [][Patch](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/patch.go#L10) | Each entry in this list should resolve to a Kubernetes object and a Json Patch | +| vars | [][Var](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/var.go#L19) | Each entry is to capture text from one resource's field | +| images | [][Image](https://github.com/kubernetes-sigs/kustomize/blob/master/api/types/image.go#L8) | Each entry is to modify the name, tags and/or digest for one image without creating patches | | configurations | []string | Each entry in this list should resolve to a file containing [Kustomize transformer configurations](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs) | | crds | []string | Each entry in this list should resolve to an OpenAPI definition file for Kubernetes types | diff --git a/content/en/docs/tasks/network/validate-dual-stack.md b/content/en/docs/tasks/network/validate-dual-stack.md index 5e11cb3057866..bc90dea4ead0f 100644 --- a/content/en/docs/tasks/network/validate-dual-stack.md +++ b/content/en/docs/tasks/network/validate-dual-stack.md @@ -40,9 +40,10 @@ a00:100::/24 ``` There should be one IPv4 block and one IPv6 block allocated. 
-Validate that the node has an IPv4 and IPv6 interface detected (replace node name with a valid node from the cluster. In this example the node name is k8s-linuxpool1-34450317-0): +Validate that the node has an IPv4 and IPv6 interface detected. Replace node name with a valid node from the cluster. In this example the node name is `k8s-linuxpool1-34450317-0`: + ```shell -kubectl get nodes k8s-linuxpool1-34450317-0 -o go-template --template='{{range .status.addresses}}{{printf "%s: %s \n" .type .address}}{{end}}' +kubectl get nodes k8s-linuxpool1-34450317-0 -o go-template --template='{{range .status.addresses}}{{printf "%s: %s\n" .type .address}}{{end}}' ``` ``` Hostname: k8s-linuxpool1-34450317-0 @@ -52,9 +53,10 @@ InternalIP: 2001:1234:5678:9abc::5 ### Validate Pod addressing -Validate that a Pod has an IPv4 and IPv6 address assigned. (replace the Pod name with a valid Pod in your cluster. In this example the Pod name is pod01) +Validate that a Pod has an IPv4 and IPv6 address assigned. Replace the Pod name with a valid Pod in your cluster. In this example the Pod name is `pod01`: + ```shell -kubectl get pods pod01 -o go-template --template='{{range .status.podIPs}}{{printf "%s \n" .ip}}{{end}}' +kubectl get pods pod01 -o go-template --template='{{range .status.podIPs}}{{printf "%s\n" .ip}}{{end}}' ``` ``` 10.244.1.4 @@ -72,6 +74,7 @@ You can also validate Pod IPs using the Downward API via the `status.podIPs` fie ``` The following command prints the value of the `MY_POD_IPS` environment variable from within a container. The value is a comma separated list that corresponds to the Pod's IPv4 and IPv6 addresses. + ```shell kubectl exec -it pod01 -- set | grep MY_POD_IPS ``` diff --git a/content/en/docs/tasks/tools/install-kubectl-linux.md b/content/en/docs/tasks/tools/install-kubectl-linux.md index 243dbf4e0dd54..d64ef99b13f65 100644 --- a/content/en/docs/tasks/tools/install-kubectl-linux.md +++ b/content/en/docs/tasks/tools/install-kubectl-linux.md @@ -12,8 +12,7 @@ card: ## {{% heading "prerequisites" %}} -You must use a kubectl version that is within one minor version difference of your cluster. -For example, a v1.2 client should work with v1.1, v1.2, and v1.3 master. +You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes. Using the latest version of kubectl helps avoid unforeseen issues. ## Install kubectl on Linux diff --git a/content/en/docs/tasks/tools/install-kubectl-macos.md b/content/en/docs/tasks/tools/install-kubectl-macos.md index b4fa864985445..b748a38c6f526 100644 --- a/content/en/docs/tasks/tools/install-kubectl-macos.md +++ b/content/en/docs/tasks/tools/install-kubectl-macos.md @@ -12,8 +12,7 @@ card: ## {{% heading "prerequisites" %}} -You must use a kubectl version that is within one minor version difference of your cluster. -For example, a v1.2 client should work with v1.1, v1.2, and v1.3 master. +You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes. Using the latest version of kubectl helps avoid unforeseen issues. ## Install kubectl on macOS @@ -29,17 +28,28 @@ The following methods exist for installing kubectl on macOS: 1. 
Download the latest release: - ```bash + {{< tabs name="download_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" - ``` + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl" + {{< /tab >}} + {{< /tabs >}} {{< note >}} To download a specific version, replace the `$(curl -L -s https://dl.k8s.io/release/stable.txt)` portion of the command with the specific version. - For example, to download version {{< param "fullversion" >}} on macOS, type: + For example, to download version {{< param "fullversion" >}} on Intel macOS, type: ```bash - curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/amd64/kubectl" + ``` + + And for macOS on Apple Silicon, type: + + ```bash + curl -LO "https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/darwin/arm64/kubectl" ``` {{< /note >}} @@ -48,10 +58,15 @@ The following methods exist for installing kubectl on macOS: Download the kubectl checksum file: - ```bash - curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256" - ``` - + {{< tabs name="download_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl.sha256" + {{< /tab >}} + {{< /tabs >}} + Validate the kubectl binary against the checksum file: ```bash diff --git a/content/en/docs/tasks/tools/install-kubectl-windows.md b/content/en/docs/tasks/tools/install-kubectl-windows.md index 09e8217626f60..11f79b6d94709 100644 --- a/content/en/docs/tasks/tools/install-kubectl-windows.md +++ b/content/en/docs/tasks/tools/install-kubectl-windows.md @@ -12,8 +12,7 @@ card: ## {{% heading "prerequisites" %}} -You must use a kubectl version that is within one minor version difference of your cluster. -For example, a v1.2 client should work with v1.1, v1.2, and v1.3 master. +You must use a kubectl version that is within one minor version difference of your cluster. For example, a v{{< skew latestVersion >}} client can communicate with v{{< skew prevMinorVersion >}}, v{{< skew latestVersion >}}, and v{{< skew nextMinorVersion >}} control planes. Using the latest version of kubectl helps avoid unforeseen issues. ## Install kubectl on Windows @@ -21,7 +20,6 @@ Using the latest version of kubectl helps avoid unforeseen issues. 
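As a quick, illustrative way to apply the skew rule described above (a sketch, not part of the patched pages; flag availability assumed for kubectl releases of this era), you can compare the client and cluster versions after installing:

```shell
# Client version only (works without cluster access)
kubectl version --client
# Client and server versions together; the minor versions should differ by at most one
kubectl version --short
```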
The following methods exist for installing kubectl on Windows: - [Install kubectl binary with curl on Windows](#install-kubectl-binary-with-curl-on-windows) -- [Install with PowerShell from PSGallery](#install-with-powershell-from-psgallery) - [Install on Windows using Chocolatey or Scoop](#install-on-windows-using-chocolatey-or-scoop) - [Install on Windows as part of the Google Cloud SDK](#install-on-windows-as-part-of-the-google-cloud-sdk) @@ -76,33 +74,6 @@ The following methods exist for installing kubectl on Windows: If you have installed Docker Desktop before, you may need to place your `PATH` entry before the one added by the Docker Desktop installer or remove the Docker Desktop's `kubectl`. {{< /note >}} -### Install with PowerShell from PSGallery - -If you are on Windows and using the [PowerShell Gallery](https://www.powershellgallery.com/) package manager, you can install and update kubectl with PowerShell. - -1. Run the installation commands (making sure to specify a `DownloadLocation`): - - ```powershell - Install-Script -Name 'install-kubectl' -Scope CurrentUser -Force - install-kubectl.ps1 [-DownloadLocation ] - ``` - - {{< note >}} - If you do not specify a `DownloadLocation`, `kubectl` will be installed in the user's `temp` Directory. - {{< /note >}} - - The installer creates `$HOME/.kube` and instructs it to create a config file. - -1. Test to ensure the version you installed is up-to-date: - - ```powershell - kubectl version --client - ``` - -{{< note >}} -Updating the installation is performed by rerunning the two commands listed in step 1. -{{< /note >}} - ### Install on Windows using Chocolatey or Scoop 1. To install kubectl on Windows you can use either [Chocolatey](https://chocolatey.org) package manager or [Scoop](https://scoop.sh) command-line installer. diff --git a/content/en/examples/admin/cloud/ccm-example.yaml b/content/en/examples/admin/cloud/ccm-example.yaml index 20aafb31e5cdc..3bce7b58fa6e1 100644 --- a/content/en/examples/admin/cloud/ccm-example.yaml +++ b/content/en/examples/admin/cloud/ccm-example.yaml @@ -1,4 +1,4 @@ -# This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster. +# This is an example of how to setup cloud-controller-manager as a Daemonset in your cluster. # It assumes that your masters can run pods and has the role node-role.kubernetes.io/master # Note that this Daemonset will not work straight out of the box for your cloud, this is # meant to be a guideline. diff --git a/content/es/docs/concepts/configuration/configmap.md b/content/es/docs/concepts/configuration/configmap.md index b607f0b82def9..ce16f99aca605 100644 --- a/content/es/docs/concepts/configuration/configmap.md +++ b/content/es/docs/concepts/configuration/configmap.md @@ -75,7 +75,7 @@ Hay cuatro maneras diferentes de usar un ConfigMap para configurar un contenedor dentro de un {{< glossary_tooltip text="Pod" term_id="pod" >}}: 1. Argumento en la linea de comandos como entrypoint de un contenedor -1. Variable de enorno de un contenedor +1. Variable de entorno de un contenedor 1. Como fichero en un volumen de solo lectura, para que lo lea la aplicación 1. 
Escribir el código para ejecutar dentro de un {{< glossary_tooltip text="Pod" term_id="pod" >}} que utiliza la API para leer el ConfigMap diff --git a/content/es/docs/tasks/tools/install-kubectl.md b/content/es/docs/tasks/tools/install-kubectl.md index af1abeb63dc79..fdca3db92c58f 100644 --- a/content/es/docs/tasks/tools/install-kubectl.md +++ b/content/es/docs/tasks/tools/install-kubectl.md @@ -188,7 +188,7 @@ Si estás en macOS y utilizas el gestor de paquetes [Macports](https://macports. ### Instalar el binario de kubectl con curl en Windows -1. Descargar la última entrega {{< param "fullversion" >}} de [este link]((https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). +1. Descargar la última entrega {{< param "fullversion" >}} de [este link](https://storage.googleapis.com/kubernetes-release/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe). o si tiene `curl` instalada, utiliza este comando: diff --git a/content/fr/docs/concepts/cluster-administration/logging.md b/content/fr/docs/concepts/cluster-administration/logging.md index 465ec742b1332..0932de7406790 100644 --- a/content/fr/docs/concepts/cluster-administration/logging.md +++ b/content/fr/docs/concepts/cluster-administration/logging.md @@ -82,7 +82,7 @@ conteneur a crashé. Si le Pod a plusieurs conteneurs, il faut spécifier le nom du conteneur dont on veut récupérer le journal d'évènement. Dans notre exemple le conteneur s'appelle -`count` donc vous pouvez utiliser `kubectl logs counter count`. Plus de détails +`count` donc vous pouvez utiliser `kubectl logs counter count`. Plus de détails dans la [documentation de `kubectl logs`] (/docs/reference/generated/kubectl/kubectl-commands#logs) diff --git a/content/fr/docs/concepts/workloads/controllers/statefulset.md b/content/fr/docs/concepts/workloads/controllers/statefulset.md new file mode 100644 index 0000000000000..87286aeaa43ce --- /dev/null +++ b/content/fr/docs/concepts/workloads/controllers/statefulset.md @@ -0,0 +1,278 @@ +--- +title: StatefulSets +content_type: concept +weight: 30 +--- + + + +StatefulSet est l'objet de l'API de charge de travail utilisé pour gérer des applications avec état (*stateful*). + +{{< glossary_definition term_id="statefulset" length="all" >}} + + + + +## Utiliser des StatefulSets + +Les StatefulSets sont utiles pour des applications qui nécessitent une ou plusieurs des choses suivantes : + +* Des identifiants réseau stables et uniques. +* Un stockage persistant stable. +* Un déploiement et une mise à l'échelle ordonnés et contrôlés. +* Des mises à jour continues (*rolling update*) ordonnées et automatisées. + +Ci-dessus, stable est synonyme de persistance suite au (re)scheduling de Pods. +Si une application ne nécessite aucun identifiant stable ou de déploiement, suppression ou +mise à l'échelle stables, vous devriez déployer votre application en utilisant un objet de charge de travail +fournissant un ensemble de réplicas sans état (*stateless*). + +Un [Deployment](/fr/docs/concepts/workloads/controllers/deployment/) ou +[ReplicaSet](/fr/docs/concepts/workloads/controllers/replicaset/) peut être mieux adapté pour vos applications sans état. + +## Limitations + +* Le stockage pour un Pod donné doit être provisionné soit par un [approvisionneur de PersistentVolume](https://github.com/kubernetes/examples/tree/{{< param "githubbranch" >}}/staging/persistent-volume-provisioning/README.md) basé sur un `storage class` donné, soit pré-provisionné par un admin. 
+* Supprimer et/ou réduire l'échelle d'un StatefulSet à zéro ne supprimera *pas* les volumes associés avec le StatefulSet. Ceci est fait pour garantir la sécurité des données, ce qui a généralement plus de valeur qu'une purge automatique de toutes les ressources relatives à un StatefulSet. +* Les StatefulSets nécessitent actuellement un [Service Headless](/fr/docs/concepts/services-networking/service/#headless-services) qui est responsable de l'identité réseau des Pods. Vous êtes responsable de la création de ce Service. +* Les StatefulSets ne fournissent aucune garantie de la terminaison des pods lorsqu'un StatefulSet est supprimé. Pour avoir une terminaison ordonnée et maîtrisée des pods du StatefulSet, il est possible de réduire l'échelle du StatefulSet à 0 avant de le supprimer. +* Lors de l'utilisation de [Rolling Updates](#rolling-updates) avec la + [Politique de gestion des Pods](#politiques-de-gestion-dun-pod) par défaut (`OrderedReady`), + il est possible de tomber dans un état indéfini nécessitant une + [intervention manuelle pour réparer](#rollback-forcé). + +## Composants + +L'exemple ci-dessous décrit les composants d'un StatefulSet. + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx # doit correspondre à .spec.template.metadata.labels + serviceName: "nginx" + replicas: 3 # est 1 par défaut + template: + metadata: + labels: + app: nginx # doit correspondre à .spec.selector.matchLabels + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: "my-storage-class" + resources: + requests: + storage: 1Gi +``` + +Dans l'exemple ci-dessus : + +* Un Service Headless, appelé `nginx`, est utilisé pour contrôler le domaine réseau. +* Le StatefulSet, appelé `web`, a une Spec indiquant que 3 réplicas du container nginx seront démarrés dans des Pods. +* Le `volumeClaimTemplates` fournira un stockage stable utilisant des [PersistentVolumes](/docs/concepts/storage/persistent-volumes/) provisionnés par un approvisionneur de PersistentVolume. + +Le nom d'un objet StatefulSet doit être un +[nom de sous-domaine DNS](/docs/concepts/overview/working-with-objects/names#dns-subdomain-names) valide. + +## Sélecteur de Pod + +Vous devez renseigner le champ `.spec.selector` d'un StatefulSet pour qu'il corresponde aux labels de son `.spec.template.metadata.labels`. Avant Kubernetes 1.8, le champ `.spec.selector` était mis par défaut s'il était omis. Pour les versions 1.8 et ultérieures, ne pas spécifier de sélecteur de Pod résulte en une erreur de validation lors de la création du StatefulSet. + +## Identité du Pod + +Les Pods d'un StatefulSet ont une identité unique comprenant un ordinal, une identité réseau stable et un stockage stable. +L'identité est accrochée au Pod, indépendamment du noeud sur lequel il est (re)programmé. + +### Index Ordinal + +Pour un StatefulSet avec N réplicas, chaque Pod du StatefulSet se verra assigné un ordinal entier, de 0 à N-1, +unique sur l'ensemble des pods. 
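+
+À titre purement indicatif (cet exemple ne figure pas dans la page d'origine et la sortie est approximative), on peut constater cette numérotation ordinale en listant les Pods créés par le StatefulSet `web` de l'exemple ci-dessus :
+
+```shell
+# Liste les Pods du StatefulSet "web" via le label défini dans son template
+kubectl get pods -l app=nginx
+# NAME    READY   STATUS    RESTARTS   AGE
+# web-0   1/1     Running   0          1m
+# web-1   1/1     Running   0          1m
+# web-2   1/1     Running   0          1m
+```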
+ +### ID réseau stable + +Chaque Pod dans un StatefulSet dérive son nom d'hôte du nom du StatefulSet +et de l'ordinal du Pod. Le modèle pour le nom d'hôte généré est +`$(nom statefulset)-$(ordinal)`. L'exemple ci-dessus créera trois Pods +nommés `web-0,web-1,web-2`. +Un StatefulSet peut utiliser un [Service Headless](/docs/concepts/services-networking/service/#headless-services) +pour contrôler le domaine de ses Pods. Le domaine pris en charge par ce Service prend la forme : +`$(nom service).$(namespace).svc.cluster.local`, où "cluster.local" est le domaine du cluster. +Chaque fois qu'un Pod est créé, il obtient un sous-domaine DNS correspondant, prenant la forme : +`$(nom pod).$(domaine du service gouvernant)`, où le service gouvernant est défini par le +champ `serviceName` du StatefulSet. + +En fonction de la façon dont est configuré le DNS dans votre cluster, vous ne pourrez peut-être pas rechercher immédiatement +le nom DNS d'un pod nouvellement exécuté. Ce problème peut se produire lorsque d'autres clients dans le +cluster ont déjà envoyé des requêtes pour le nom d'hôte du Pod avant sa création. +La mise en cache négative (normale pour le DNS) signifie que les résultats des recherches précédentes ayant échoué sont +mémorisés et réutilisés, même après que le Pod ait démarré, pendant au moins quelques secondes. + +Si vous avez besoin de découvrir les Pods rapidement après leur création, vous avez plusieurs options : + +- Interrogez directement l'API Kubernetes (par exemple, à l'aide d'un watch) plutôt que de vous fier aux recherches DNS. +- Réduisez le temps de mise en cache dans votre fournisseur de DNS Kubernetes (cela signifie généralement modifier le ConfigMap de CoreDNS, qui met actuellement en cache pendant 30 secondes). + +Comme mentionné dans la section [limitations](#limitations), vous êtes responsable de +créer le [Service Headless](/docs/concepts/services-networking/service/#headless-services) +responsable de l'identité réseau des Pods. + +Voici quelques exemples de choix pour le domaine du cluster, le nom du service, +le nom du StatefulSet et comment cela affecte les noms DNS des pods du StatefulSet. + +Domaine Cluster | Service (ns/nom) | StatefulSet (ns/nom) | Domaine StatefulSet | DNS Pod | Nom d'hôte | +--------------- | ----------------- | -------------------- | ------------------------------- | -------------------------------------------- | ------------ | + cluster.local | default/nginx | default/web | nginx.default.svc.cluster.local | web-{0..N-1}.nginx.default.svc.cluster.local | web-{0..N-1} | + cluster.local | foo/nginx | foo/web | nginx.foo.svc.cluster.local | web-{0..N-1}.nginx.foo.svc.cluster.local | web-{0..N-1} | + kube.local | foo/nginx | foo/web | nginx.foo.svc.kube.local | web-{0..N-1}.nginx.foo.svc.kube.local | web-{0..N-1} | + +{{< note >}} +Le domaine cluster sera `cluster.local` à moins qu'il soit +[configuré autrement](/docs/concepts/services-networking/dns-pod-service/). +{{< /note >}} + +### Stockage stable + +Kubernetes crée un [PersistentVolume](/docs/concepts/storage/persistent-volumes/) pour chaque +VolumeClaimTemplate. Dans l'exemple nginx ci-dessus, chaque Pod se verra affecter un unique PersistentVolume +avec un StorageClass de `my-storage-class` et 1 Gib de stockage provisionné. Si aucun StorageClass +n'est spécifié, alors le StorageClass par défaut sera utilisé. Lorsqu'un Pod est (re)schedulé +sur un noeud, ses `volumeMounts` montent les PersistentVolumes associés aux +PersistentVolumeClaims. 
Notez que les PersistentVolumes associés avec les PersistentVolumeClaims des Pods
+ne sont pas supprimés lorsque les Pods, ou le StatefulSet, sont supprimés.
+Ceci doit être fait manuellement.
+
+### Étiquette du nom de Pod
+
+Lorsque le StatefulSet {{< glossary_tooltip term_id="controller" >}} crée un Pod,
+il ajoute une étiquette, `statefulset.kubernetes.io/pod-name`, renseignée avec le nom du Pod.
+Cette étiquette vous permet d'attacher un Service à un Pod spécifique du StatefulSet.
+
+## Garanties de déploiement et de mise à l'échelle
+
+* Pour un StatefulSet avec N réplicas, lorsque les Pods sont déployés, ils sont créés de manière séquentielle, dans l'ordre {0..N-1}.
+* Lorsque les Pods sont supprimés, ils sont terminés dans l'ordre inverse, {N-1..0}.
+* Avant qu'une opération de mise à l'échelle soit appliquée à un Pod, tous ses prédécesseurs doivent être Running et Ready.
+* Avant qu'un Pod soit terminé, tous ses successeurs doivent être complètement arrêtés.
+
+Le StatefulSet ne devrait pas spécifier un `pod.Spec.TerminationGracePeriodSeconds` à 0. Cette pratique
+est dangereuse et fortement déconseillée. Pour plus d'explications, veuillez vous référer à [forcer la suppression de Pods de StatefulSet](/docs/tasks/run-application/force-delete-stateful-set-pod/).
+
+Lorsque l'exemple nginx ci-dessus est créé, trois Pods seront déployés dans l'ordre
+web-0, web-1, web-2. web-1 ne sera pas déployé avant que web-0 soit
+[Running et Ready](/fr/docs/concepts/workloads/pods/pod-lifecycle/), et web-2 ne sera pas déployé avant que
+web-1 soit Running et Ready. Si web-0 venait à échouer, après que web-1 soit Running et Ready, mais avant que
+web-2 soit lancé, web-2 ne serait pas lancé avant que web-0 soit correctement relancé et redevienne Running et Ready.
+
+Si un utilisateur venait à mettre à l'échelle l'exemple déployé en patchant le StatefulSet pour que
+`replicas=1`, web-2 serait terminé en premier. web-1 ne serait pas terminé avant que web-2
+ne soit complètement arrêté et supprimé. Si web-0 venait à échouer après que web-2 soit terminé et complètement arrêté,
+mais avant que web-1 soit terminé, web-1 ne serait pas terminé avant que web-0 soit Running et Ready.
+
+### Politiques de gestion d'un Pod
+
+Dans Kubernetes 1.7 et ultérieurs, le StatefulSet vous permet d'assouplir ses garanties d'ordre,
+tout en préservant ses garanties d'unicité et d'identité via son champ `.spec.podManagementPolicy`.
+
+#### Gestion de Pod OrderedReady
+
+La gestion de Pod `OrderedReady` est la valeur par défaut pour les StatefulSets. Elle implémente le comportement décrit [ci-dessus](#garanties-de-déploiement-et-de-mise-à-l-échelle).
+
+#### Gestion de Pod Parallel
+
+La gestion de Pod `Parallel` indique au contrôleur de StatefulSet de lancer ou
+terminer tous les Pods en parallèle, et de ne pas attendre que les Pods deviennent Running
+et Ready ou complètement terminés avant de lancer ou terminer un autre
+Pod. Cette option affecte seulement le comportement pour les opérations de mise à l'échelle.
+Les mises à jour ne sont pas affectées.
+
+## Stratégies de mise à jour
+
+Dans Kubernetes 1.7 et ultérieurs, le champ `.spec.updateStrategy` d'un StatefulSet vous permet
+de configurer et désactiver les rolling updates automatisés pour les conteneurs, étiquettes,
+requête/limites de ressources, et annotations pour les Pods d'un StatefulSet.
+
+### On Delete
+
+La stratégie de mise à jour `OnDelete` implémente l'ancien comportement (1.6 et précédents).
Lorsque
+`.spec.updateStrategy.type` d'un StatefulSet est mis à `OnDelete`, le contrôleur de StatefulSet
+ne mettra pas à jour automatiquement les Pods dans un StatefulSet.
+Les utilisateurs doivent supprimer manuellement les Pods pour forcer le contrôleur à créer de nouveaux
+Pods qui reflètent les modifications faites à un `.spec.template` d'un StatefulSet.
+
+### Rolling Updates
+
+La stratégie de mise à jour `RollingUpdate` implémente le rolling update automatisé pour les Pods d'un
+StatefulSet. C'est la stratégie par défaut lorsque `.spec.updateStrategy` n'est pas spécifié.
+Lorsqu'un `.spec.updateStrategy.type` d'un StatefulSet est mis à `RollingUpdate`, le contrôleur de
+StatefulSet va supprimer et recréer chaque Pod d'un StatefulSet. Il va procéder dans le même ordre
+que pour la terminaison d'un Pod (de l'ordinal le plus grand au plus petit), mettant à jour chaque Pod,
+un seul à la fois. Il va attendre qu'un Pod mis à jour soit Running et Ready avant de mettre à jour
+son prédécesseur.
+
+#### Partitions
+
+La stratégie de mise à jour `RollingUpdate` peut être partitionnée, en spécifiant une
+`.spec.updateStrategy.rollingUpdate.partition`. Si une partition est spécifiée, tous les Pods ayant un
+ordinal plus grand ou égal à la partition seront mis à jour lorsque le
+`.spec.template` du StatefulSet sera mis à jour. Tous les Pods ayant un ordinal inférieur à la partition
+ne seront pas mis à jour, et, même s'ils sont supprimés, ils seront recréés avec l'ancienne version. Si une
+`.spec.updateStrategy.rollingUpdate.partition` d'un StatefulSet est plus grande que son `.spec.replicas`,
+les mises à jour de son `.spec.template` ne seront pas propagées à ses Pods.
+Dans la plupart des cas vous n'aurez pas à utiliser de partition, mais elles sont utiles si vous désirez
+organiser une mise à jour, déployer une version canari, ou effectuer un déploiement par étapes.
+
+#### Rollback forcé
+
+En utilisant des [Rolling Updates](#rolling-updates) avec la
+[politique de gestion d'un Pod](#politiques-de-gestion-dun-pod) par défaut (`OrderedReady`),
+il est possible de se retrouver dans un état incohérent nécessitant une intervention manuelle pour réparation.
+
+Si vous mettez à jour le template de Pod dans une configuration qui ne devient jamais Running et
+Ready (par exemple, du fait d'un mauvais binaire ou d'une erreur de configuration au niveau de l'application),
+le StatefulSet va arrêter le rollout et attendre.
+
+Dans cet état, il n'est pas suffisant de revenir à une bonne configuration du template de Pod.
+En raison d'une [erreur connue](https://github.com/kubernetes/kubernetes/issues/67250),
+le StatefulSet va continuer à attendre que le Pod en échec devienne Ready
+(ce qui n'arrive jamais) avant qu'il tente de revenir à la bonne configuration.
+
+Après être revenu au bon template, vous devez aussi supprimer tous les Pods que le StatefulSet
+avait déjà essayé de démarrer avec la mauvaise configuration.
+Le StatefulSet va alors commencer à recréer les Pods en utilisant le bon template.
+
+## {{% heading "whatsnext" %}}
+
+* Suivre un exemple de [déploiement d'une application stateful](/docs/tutorials/stateful-application/basic-stateful-set/).
+* Suivre un exemple de [déploiement de Cassandra avec des Stateful Sets](/docs/tutorials/stateful-application/cassandra/).
+* Suivre un exemple d'[exécution d'une application stateful redondante](/docs/tasks/run-application/run-replicated-stateful-application/).
diff --git a/content/fr/docs/reference/glossary/statefulset.md b/content/fr/docs/reference/glossary/statefulset.md
new file mode 100755
index 0000000000000..2acf3370a193d
--- /dev/null
+++ b/content/fr/docs/reference/glossary/statefulset.md
@@ -0,0 +1,22 @@
+---
+title: StatefulSet
+id: statefulset
+date: 2018-04-12
+full_link: /fr/docs/concepts/workloads/controllers/statefulset/
+short_description: >
+  Gère le déploiement et la mise à l'échelle d'un ensemble de Pods, avec un stockage durable et des identifiants persistants pour chaque Pod.
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+- storage
+---
+ Gère le déploiement et la mise à l'échelle d'un ensemble de {{< glossary_tooltip text="Pods" term_id="pod" >}}, *et fournit des garanties sur l'ordre et l'unicité* de ces Pods.
+
+
+
+Comme un {{< glossary_tooltip term_id="deployment" >}}, un StatefulSet gère des Pods qui sont basés sur une même spécification de conteneur. Contrairement à un Deployment, un StatefulSet maintient une identité pour chacun de ces Pods. Ces Pods sont créés à partir de la même spec, mais ne sont pas interchangeables : chacun a un identifiant persistant qu'il garde à travers tous ses re-scheduling.
+
+Si vous voulez utiliser des volumes de stockage pour fournir de la persistance à votre charge de travail, vous pouvez utiliser un StatefulSet comme partie de la solution. Même si des Pods individuels d'un StatefulSet sont susceptibles d'échouer, les identifiants persistants des Pods rendent plus facile de faire correspondre les volumes existants aux nouveaux Pods remplaçant ceux ayant échoué.
diff --git a/content/fr/docs/tasks/configure-pod-container/configure-pod-configmap.md b/content/fr/docs/tasks/configure-pod-container/configure-pod-configmap.md
new file mode 100644
index 0000000000000..6e9c4c5ba61a5
--- /dev/null
+++ b/content/fr/docs/tasks/configure-pod-container/configure-pod-configmap.md
@@ -0,0 +1,684 @@
+---
+title: Configurer un pod pour utiliser une ConfigMap
+content_template: templates/task
+weight: 150
+card:
+  name: tasks
+  weight: 50
+---
+
+
+
+Les ConfigMaps vous permettent de découpler les artefacts de configuration du contenu de l'image pour garder les applications conteneurisées portables.
+Cette page fournit une série d'exemples d'utilisation montrant comment créer des ConfigMaps et configurer des pods à l'aide des données stockées dans des ConfigMaps.
+
+## {{% heading "prerequisites" %}}
+
+{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}}
+
+
+
+## Créer un ConfigMap
+
+Vous pouvez utiliser soit `kubectl create configmap`, soit un générateur ConfigMap dans `kustomization.yaml`, pour créer un ConfigMap.
+Notez que `kubectl` prend en charge `kustomization.yaml` à partir de la version 1.14.
+
+### Créer un ConfigMap à l'aide de kubectl create configmap
+
+Utilisez la commande `kubectl create configmap` pour créer des ConfigMaps depuis des [dossiers](#create-configmaps-from-directories), [fichiers](#create-configmaps-from-files), ou des [valeurs littérales](#create-configmaps-from-literal-values):
+
+```shell
+kubectl create configmap <nom-de-la-configmap> <source-de-données>
+```
+
+où `<nom-de-la-configmap>` est le nom que vous souhaitez attribuer au ConfigMap et `<source-de-données>` est le répertoire, le fichier ou la valeur littérale à partir de laquelle récupérer les données.
+
+La source de données correspond à une paire clé-valeur dans ConfigMap, où
+
+* clé = le nom du fichier ou la clé que vous avez fournie sur la ligne de commande, et
+* valeur = le contenu du fichier ou la valeur littérale que vous avez fournie sur la ligne de commande.
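+
+Par exemple (esquisse purement illustrative : le nom `demo-config` et la clé `log_level` ne proviennent pas de cette page), une valeur littérale fournie sur la ligne de commande devient directement une paire clé-valeur dans la section `data` :
+
+```shell
+# Crée un ConfigMap "demo-config" contenant la clé "log_level" avec la valeur "debug"
+kubectl create configmap demo-config --from-literal=log_level=debug
+```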
+ +Vous pouvez utiliser [`kubectl describe`](/docs/reference/generated/kubectl/kubectl-commands/#describe) ou [`kubectl get`](/docs/reference/generated/kubectl/kubectl-commands/#get) pour récupérer des informations sur un ConfigMap. + +#### Créer des ConfigMaps à partir de répertoires + +Vous pouvez utiliser `kubectl create configmap` pour créer un ConfigMap à partir de plusieurs fichiers dans le même répertoire. + +Par exemple: + +```shell +# Créez le répertoire local +mkdir -p configure-pod-container/configmap/ + +# Téléchargez les exemples de fichiers dans le répertoire `configure-pod-container/configmap/` +wget https://kubernetes.io/examples/configmap/game.properties -O configure-pod-container/configmap/game.properties +wget https://kubernetes.io/examples/configmap/ui.properties -O configure-pod-container/configmap/ui.properties + +# Créer la configmap +kubectl create configmap game-config --from-file=configure-pod-container/configmap/ +``` + +combine le contenu du répertoire `configure-pod-container/configmap/` + +```shell +game.properties +ui.properties +``` + +dans le ConfigMap suivant: + +```shell +kubectl describe configmaps game-config +``` + +où la sortie est similaire à ceci: + +```text +Name: game-config +Namespace: default +Labels: +Annotations: + +Data +==== +game.properties: 158 bytes +ui.properties: 83 bytes +``` + +Les fichiers `game.properties` et `ui.properties` dans le répertoire `configure-pod-container/configmap/` sont représentés dans la section `data` de la ConfigMap. + +```shell +kubectl get configmaps game-config -o yaml +``` + +La sortie est similaire à ceci: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2016-02-18T18:52:05Z + name: game-config + namespace: default + resourceVersion: "516" + uid: b4952dc3-d670-11e5-8cd0-68f728db1985 +data: + game.properties: | + enemies=aliens + lives=3 + enemies.cheat=true + enemies.cheat.level=noGoodRotten + secret.code.passphrase=UUDDLRLRBABAS + secret.code.allowed=true + secret.code.lives=30 + ui.properties: | + color.good=purple + color.bad=yellow + allow.textmode=true + how.nice.to.look=fairlyNice +``` + +#### Créer des ConfigMaps à partir de fichiers + +Vous pouvez utiliser `kubectl create configmap` pour créer un ConfigMap à partir d'un fichier individuel ou de plusieurs fichiers. + +Par exemple, + +```shell +kubectl create configmap game-config-2 --from-file=configure-pod-container/configmap/game.properties +``` + +produirait le ConfigMap suivant: + +```shell +kubectl describe configmaps game-config-2 +``` + +où la sortie est similaire à ceci: + +```text +Name: game-config-2 +Namespace: default +Labels: +Annotations: + +Data +==== +game.properties: 158 bytes +``` + +Vous pouvez passer l'argument `--from-file` plusieurs fois pour créer un ConfigMap à partir de plusieurs sources de données. + +```shell +kubectl create configmap game-config-2 --from-file=configure-pod-container/configmap/game.properties --from-file=configure-pod-container/configmap/ui.properties +``` + +Décrivez la ConfigMap crée `game-config-2`: + +```shell +kubectl describe configmaps game-config-2 +``` + +La sortie est similaire à ceci: + +```text +Name: game-config-2 +Namespace: default +Labels: +Annotations: + +Data +==== +game.properties: 158 bytes +ui.properties: 83 bytes +``` + +Utilisez l'option `--from-env-file` pour créer un ConfigMap à partir d'un fichier env, par exemple: + +```shell +# Les fichiers env contiennent une liste de variables d'environnement. 
+# Ces règles de syntaxe s'appliquent:
+# Chaque ligne d'un fichier env doit être au format VAR=VAL.
+# Les lignes commençant par # (c'est-à-dire les commentaires) sont ignorées.
+# Les lignes vides sont ignorées.
+# Il n'y a pas de traitement spécial des guillemets (c'est-à-dire qu'ils feront partie de la valeur ConfigMap).
+
+# Téléchargez les exemples de fichiers dans le dossier `configure-pod-container/configmap/`
+wget https://kubernetes.io/examples/configmap/game-env-file.properties -O configure-pod-container/configmap/game-env-file.properties
+
+# Le fichier env `game-env-file.properties` ressemble à ceci
+cat configure-pod-container/configmap/game-env-file.properties
+enemies=aliens
+lives=3
+allowed="true"
+
+# Ce commentaire et la ligne vide au-dessus sont ignorés
+```
+
+```shell
+kubectl create configmap game-config-env-file \
+       --from-env-file=configure-pod-container/configmap/game-env-file.properties
+```
+
+produirait le ConfigMap suivant:
+
+```shell
+kubectl get configmap game-config-env-file -o yaml
+```
+
+où la sortie est similaire à ceci:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  creationTimestamp: 2017-12-27T18:36:28Z
+  name: game-config-env-file
+  namespace: default
+  resourceVersion: "809965"
+  uid: d9d1ca5b-eb34-11e7-887b-42010a8002b8
+data:
+  allowed: '"true"'
+  enemies: aliens
+  lives: "3"
+```
+
+{{< caution >}}
+Lorsque vous passez plusieurs fois `--from-env-file` pour créer un ConfigMap à partir de plusieurs sources de données, seul le dernier fichier env est utilisé.
+{{< /caution >}}
+
+Le comportement consistant à passer plusieurs fois `--from-env-file` est démontré par:
+
+```shell
+# Téléchargez les exemples de fichiers dans le répertoire `configure-pod-container/configmap/`
+wget https://k8s.io/examples/configmap/ui-env-file.properties -O configure-pod-container/configmap/ui-env-file.properties
+
+# Créez le configmap
+kubectl create configmap config-multi-env-files \
+        --from-env-file=configure-pod-container/configmap/game-env-file.properties \
+        --from-env-file=configure-pod-container/configmap/ui-env-file.properties
+```
+
+produirait le ConfigMap suivant:
+
+```shell
+kubectl get configmap config-multi-env-files -o yaml
+```
+
+où la sortie est similaire à ceci:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  creationTimestamp: 2017-12-27T18:38:34Z
+  name: config-multi-env-files
+  namespace: default
+  resourceVersion: "810136"
+  uid: 252c4572-eb35-11e7-887b-42010a8002b8
+data:
+  color: purple
+  how: fairlyNice
+  textmode: "true"
+```
+
+#### Définissez la clé à utiliser lors de la création d'un ConfigMap à partir d'un fichier
+
+Vous pouvez définir une clé autre que le nom de fichier à utiliser dans la section `data` de votre ConfigMap lorsque vous utilisez l'argument `--from-file`:
+
+```shell
+kubectl create configmap game-config-3 --from-file=<ma-clé>=<chemin-du-fichier>
+```
+
+où `<ma-clé>` est la clé que vous souhaitez utiliser dans la ConfigMap et `<chemin-du-fichier>` est l'emplacement du fichier de source de données que vous souhaitez que la clé représente.
+ +Par exemple: + +```shell +kubectl create configmap game-config-3 --from-file=game-special-key=configure-pod-container/configmap/game.properties +``` + +produirait la ConfigMap suivante: + +```shell +kubectl get configmaps game-config-3 -o yaml +``` + +où la sortie est similaire à ceci: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2016-02-18T18:54:22Z + name: game-config-3 + namespace: default + resourceVersion: "530" + uid: 05f8da22-d671-11e5-8cd0-68f728db1985 +data: + game-special-key: | + enemies=aliens + lives=3 + enemies.cheat=true + enemies.cheat.level=noGoodRotten + secret.code.passphrase=UUDDLRLRBABAS + secret.code.allowed=true + secret.code.lives=30 +``` + +#### Créer des ConfigMaps à partir de valeurs littérales + +Vous pouvez utiliser `kubectl create configmap` avec l'argument `--from-literal` définir une valeur littérale à partir de la ligne de commande: + +```shell +kubectl create configmap special-config --from-literal=special.how=very --from-literal=special.type=charm +``` + +Vous pouvez transmettre plusieurs paires clé-valeur. +Chaque paire fournie sur la ligne de commande est représentée comme une entrée distincte dans la section `data` de la ConfigMap. + +```shell +kubectl get configmaps special-config -o yaml +``` + +La sortie est similaire à ceci: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2016-02-18T19:14:38Z + name: special-config + namespace: default + resourceVersion: "651" + uid: dadce046-d673-11e5-8cd0-68f728db1985 +data: + special.how: very + special.type: charm +``` + +### Créer un ConfigMap à partir du générateur + +`kubectl` supporte `kustomization.yaml` depuis 1.14. +Vous pouvez également créer un ConfigMap à partir de générateurs, puis l'appliquer pour créer l'objet sur l'Apiserver. +Les générateurs doivent être spécifiés dans un `kustomization.yaml` à l'intérieur d'un répertoire. + +#### Générer des ConfigMaps à partir de fichiers + +Par exemple, pour générer un ConfigMap à partir de fichiers `configure-pod-container/configmap/game.properties` + +```shell +# Create a kustomization.yaml file with ConfigMapGenerator +cat <./kustomization.yaml +configMapGenerator: +- name: game-config-4 + files: + - configure-pod-container/configmap/game.properties +EOF +``` + +Appliquer le dossier kustomization pour créer l'objet ConfigMap. + +```shell +kubectl apply -k . +configmap/game-config-4-m9dm2f92bt created +``` + +Vous pouvez vérifier que le ConfigMap a été créé comme ceci: + +```text +kubectl get configmap +NAME DATA AGE +game-config-4-m9dm2f92bt 1 37s + + +kubectl describe configmaps/game-config-4-m9dm2f92bt +Name: game-config-4-m9dm2f92bt +Namespace: default +Labels: +Annotations: kubectl.kubernetes.io/last-applied-configuration: + {"apiVersion":"v1","data":{"game.properties":"enemies=aliens\nlives=3\nenemies.cheat=true\nenemies.cheat.level=noGoodRotten\nsecret.code.p... + +Data +==== +game.properties: +---- +enemies=aliens +lives=3 +enemies.cheat=true +enemies.cheat.level=noGoodRotten +secret.code.passphrase=UUDDLRLRBABAS +secret.code.allowed=true +secret.code.lives=30 +Events: +``` + +Notez que le nom ConfigMap généré a un suffixe obtenu par hachage de son contenu. +Cela garantit qu'un nouveau ConfigMap est généré chaque fois que le contenu est modifié. + +#### Définissez la clé à utiliser lors de la génération d'un ConfigMap à partir d'un fichier + +Vous pouvez définir une clé autre que le nom de fichier à utiliser dans le générateur ConfigMap. 
+Par exemple, pour générer un ConfigMap à partir du fichier `configure-pod-container/configmap/game.properties` +avec la clé `game-special-key` + +```shell +# Créer un fichier kustomization.yaml avec ConfigMapGenerator +cat <./kustomization.yaml +configMapGenerator: +- name: game-config-5 + files: + - game-special-key=configure-pod-container/configmap/game.properties +EOF +``` + +Appliquer le dossier kustomization pour créer l'objet ConfigMap. + +```text +kubectl apply -k . +configmap/game-config-5-m67dt67794 created +``` + +#### Générer des ConfigMaps à partir de littéraux + +Pour générer un ConfigMap à partir de littéraux `special.type=charm` et `special.how=very`, vous pouvez spécifier le générateur ConfigMap dans `kustomization.yaml` comme + +```shell +# Create a kustomization.yaml file with ConfigMapGenerator +cat <./kustomization.yaml +configMapGenerator: +- name: special-config-2 + literals: + - special.how=very + - special.type=charm +EOF +``` + +Appliquez le dossier kustomization pour créer l'objet ConfigMap. + +```text +kubectl apply -k . +configmap/special-config-2-c92b5mmcf2 created +``` + +## Définir des variables d'environnement de conteneur à l'aide des données ConfigMap + +### Définissez une variable d'environnement de conteneur avec les données d'une seule ConfigMap + +1. Définissez une variable d'environnement comme paire clé-valeur dans un ConfigMap: + + ```shell + kubectl create configmap special-config --from-literal=special.how=very + ``` + +1. Attribuez la valeur `special.how` défini dans ConfigMap à la variable d'environnement `SPECIAL_LEVEL_KEY` dans la spécification du Pod. + + {{< codenew file="pods/pod-single-configmap-env-variable.yaml" >}} + + Créez le pod: + + ```shell + kubectl create -f https://kubernetes.io/examples/pods/pod-single-configmap-env-variable.yaml + ``` + + Maintenant, la sortie du Pod comprend une variable d'environnement `SPECIAL_LEVEL_KEY=very`. + +### Définir des variables d'environnement de conteneur avec des données de plusieurs ConfigMaps + +* Comme avec l'exemple précédent, créez d'abord les ConfigMaps. + + {{< codenew file="configmap/configmaps.yaml" >}} + + Créez le ConfigMap: + + ```shell + kubectl create -f https://kubernetes.io/examples/configmap/configmaps.yaml + ``` + +* Définissez les variables d'environnement dans la spécification Pod. + + {{< codenew file="pods/pod-multiple-configmap-env-variable.yaml" >}} + + Créez le pod: + + ```shell + kubectl create -f https://kubernetes.io/examples/pods/pod-multiple-configmap-env-variable.yaml + ``` + + Maintenant, la sortie du Pod comprend des variables d'environnement `SPECIAL_LEVEL_KEY=very` et `LOG_LEVEL=INFO`. + +## Configurer toutes les paires clé-valeur dans un ConfigMap en tant que variables d'environnement de conteneur + +{{< note >}} +Cette fonctionnalité est disponible dans Kubernetes v1.6 et versions ultérieures. +{{< /note >}} + +* Créez un ConfigMap contenant plusieurs paires clé-valeur. + + {{< codenew file="configmap/configmap-multikeys.yaml" >}} + + Créez le ConfigMap: + + ```shell + kubectl create -f https://kubernetes.io/examples/configmap/configmap-multikeys.yaml + ``` + +* Utilisez `envFrom` pour définir toutes les données du ConfigMap en tant que variables d'environnement du conteneur. + La clé de ConfigMap devient le nom de la variable d'environnement dans le pod. 
+ + {{< codenew file="pods/pod-configmap-envFrom.yaml" >}} + + Créez le pod: + + ```shell + kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-envFrom.yaml + ``` + + Maintenant, la sortie du Pod comprend les variables d'environnement `SPECIAL_LEVEL=very` et `SPECIAL_TYPE=charm`. + +## Utiliser des variables d'environnement définies par ConfigMap dans les commandes du Pod + +Vous pouvez utiliser des variables d'environnement définies par ConfigMap dans la section `command` de la spécification du Pod en utilisant la syntaxe de substitution Kubernetes `$(VAR_NAME)`. + +Par exemple, la spécification de pod suivante + +{{< codenew file="pods/pod-configmap-env-var-valueFrom.yaml" >}} + +créé en exécutant + +```shell +kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-env-var-valueFrom.yaml +``` + +produit la sortie suivante dans le conteneur `test-container`: + +```shell +very charm +``` + +## Ajouter des données ConfigMap à un volume + +Comme expliqué dans [Créer des ConfigMaps à partir de fichiers](#create-configmaps-from-files), lorsque vous créez un ConfigMap à l'aide `--from-file`, le nom de fichier devient une clé stockée dans la section `data` du ConfigMap. +Le contenu du fichier devient la valeur de la clé. + +Les exemples de cette section se réfèrent à un ConfigMap nommé special-config, illustré ci-dessous. + +{{< codenew file="configmap/configmap-multikeys.yaml" >}} + +Créez le ConfigMap: + +```shell +kubectl create -f https://kubernetes.io/examples/configmap/configmap-multikeys.yaml +``` + +### Remplissez un volume avec des données stockées dans un ConfigMap + +Ajoutez le nom ConfigMap sous la section `volumes` de la spécification Pod. +Ceci ajoute les données ConfigMap au répertoire spécifié comme `volumeMounts.mountPath` (dans ce cas, `/etc/config`). +La section `command` répertorie les fichiers de répertoire dont les noms correspondent aux clés de ConfigMap. + +{{< codenew file="pods/pod-configmap-volume.yaml" >}} + +Créez le pod: + +```shell +kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-volume.yaml +``` + +Lorsque le pod s'exécute, la commande `ls /etc/config/` produit la sortie ci-dessous: + +```shell +SPECIAL_LEVEL +SPECIAL_TYPE +``` + +{{< caution >}} +S'il y a des fichiers dans le dossier `/etc/config/`, ils seront supprimés. +{{< /caution >}} + +### Ajouter un configmap à un chemin spécifique dans un volume + +Utilisez le champ `path` pour spécifier le chemin de fichier souhaité pour les éléments de configmap spécifiques. +Dans ce cas, le `SPECIAL_LEVEL` sera monté dans le volume `config-volume` au chemin `/etc/config/keys`. + +{{< codenew file="pods/pod-configmap-volume-specific-key.yaml" >}} + +Créez le Pod : + +```shell +kubectl create -f https://kubernetes.io/examples/pods/pod-configmap-volume-specific-key.yaml +``` + +Lorsque le pod fonctionne, la commande `cat /etc/config/keys` produit la sortie ci-dessous : + +```shell +very +``` + +{{< caution >}} +Comme avant, tous les fichiers précédents dans le répertoire `/etc/config/` seront supprimés. +{{< /caution >}} + +### Projections de clés pour des chemins et des autorisations de fichiers spécifiques + +Vous pouvez projeter des clés vers des chemins spécifiques avec des autorisations spécifiques fichiers par fichiers. +Le guide de l'utilisateur [Secrets](/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod) explique la syntaxe. 
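+
+Voici une esquisse indicative (elle ne figure pas dans la page d'origine) d'un extrait de la section `spec` d'un Pod : elle reprend la ConfigMap `special-config` ci-dessus et projette la clé `SPECIAL_LEVEL` vers un chemin précis, avec des permissions restreintes :
+
+```yaml
+  volumes:
+    - name: config-volume
+      configMap:
+        name: special-config
+        items:
+          - key: SPECIAL_LEVEL
+            # chemin du fichier projeté, relatif au point de montage du volume
+            path: keys/special-level
+            # permissions du fichier projeté (0400 : lecture seule pour le propriétaire)
+            mode: 0400
+```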
+ +### Les ConfigMaps montées sont mises à jour automatiquement + +Lorsqu'une ConfigMap déjà consommée dans un volume est mise à jour, les clés projetées sont éventuellement mises à jour elles aussi. +Kubelet vérifie si la ConfigMap montée est fraîche à chaque synchronisation périodique. +Cependant, il utilise son cache local basé sur le ttl pour obtenir la valeur actuelle de la ConfigMap. +Par conséquent, le délai total entre le moment où la ConfigMap est mise à jour et le moment où les nouvelles clés sont projetées vers le pod peut être aussi long que la période de synchronisation de kubelet (1 minute par défaut) + le ttl du cache ConfigMaps (1 minute par défaut) dans kubelet. +Vous pouvez déclencher un rafraîchissement immédiat en mettant à jour l'une des annotations du pod. + +{{< note >}} +Un conteneur utilisant un ConfigMap comme volume [subPath](/docs/concepts/storage/volumes/#using-subpath) ne recevra pas les mises à jour de ConfigMap. +{{< /note >}} + + + +## Comprendre le lien entre les ConfigMaps et les Pods + +La ressource API ConfigMap stocke les données de configuration sous forme de paires clé-valeur. +Les données peuvent être consommées dans des pods ou fournir les configurations des composants du système tels que les contrôleurs. +ConfigMap est similaire à [Secrets](/docs/concepts/configuration/secret/), mais fournit un moyen de travailler avec des chaînes de caractères qui ne contiennent pas d'informations sensibles. +Les utilisateurs comme les composants du système peuvent stocker des données de configuration dans un ConfigMap. + +{{< note >}} +Les ConfigMaps doivent faire référence aux fichiers de propriétés, et non les remplacer. +Pensez à la ConfigMap comme représentant quelque chose de similaire au répertoire `/etc` de Linux et à son contenu. +Par exemple, si vous créez un [volume Kubernetes](/docs/concepts/storage/volumes/) à partir d'une ConfigMap, chaque élément de données de la ConfigMap est représenté par un fichier individuel dans le volume. +{{< /note >}} + +Le champ `data` de la ConfigMap contient les données de configuration. +Comme le montre l'exemple ci-dessous, cela peut être simple -- comme des propriétés individuelles définies à l'aide de `--from-literal` -- ou complexe -- comme des fichiers de configuration ou des blobs JSON définis à l'aide de `--from-file`. + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + creationTimestamp: 2016-02-18T19:14:38Z + name: example-config + namespace: default +data: + # example of a simple property defined using --from-literal + example.property.1: hello + example.property.2: world + # example of a complex property defined using --from-file + example.property.file: |- + property.1=value-1 + property.2=value-2 + property.3=value-3 +``` + +### Restrictions + +* Vous devez créer un ConfigMap avant de le référencer dans une spécification de Pod (sauf si vous marquez le ConfigMap comme "facultatif"). + Si vous faites référence à un ConfigMap qui n'existe pas, le Pod ne démarrera pas. + De même, les références à des clés qui n'existent pas dans la ConfigMap empêcheront le pod de démarrer. + +* Si vous utilisez `envFrom` pour définir des variables d'environnement à partir de ConfigMaps, les clés considérées comme invalides seront ignorées. + Le pod sera autorisé à démarrer, mais les noms invalides seront enregistrés dans le journal des événements (`InvalidVariableNames`). + Le message du journal énumère chaque clé sautée. 
+ Par exemple : + + ```shell + kubectl get events + ``` + + Le résultat est similaire à celui-ci : + + ```text + LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT TYPE REASON SOURCE MESSAGE + 0s 0s 1 dapi-test-pod Pod Warning InvalidEnvironmentVariableNames {kubelet, 127.0.0.1} Keys [1badkey, 2alsobad] from the EnvFrom configMap default/myconfig were skipped since they are considered invalid environment variable names. + ``` + +* Les ConfigMaps résident dans un {{< glossary_tooltip term_id="namespace" >}}. + Un ConfigMap ne peut être référencé que par des pods résidant dans le même namespace. + +* Vous ne pouvez pas utiliser des ConfigMaps pour {{< glossary_tooltip text="static pods" term_id="static-pod" >}}, car le Kubelet ne le supporte pas. + +{{% heading "whatsnext" %}} + +* Suivez un exemple concret de [Configurer Redis en utilisant un ConfigMap](/docs/tutorials/configuration/configure-redis-using-configmap/). diff --git a/content/fr/examples/configmap/configmap-multikeys.yaml b/content/fr/examples/configmap/configmap-multikeys.yaml new file mode 100644 index 0000000000000..289702d123caf --- /dev/null +++ b/content/fr/examples/configmap/configmap-multikeys.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: special-config + namespace: default +data: + SPECIAL_LEVEL: very + SPECIAL_TYPE: charm diff --git a/content/fr/examples/configmap/configmaps.yaml b/content/fr/examples/configmap/configmaps.yaml new file mode 100644 index 0000000000000..91b9f29755c2e --- /dev/null +++ b/content/fr/examples/configmap/configmaps.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: special-config + namespace: default +data: + special.how: very +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: env-config + namespace: default +data: + log_level: INFO diff --git a/content/fr/examples/pods/pod-configmap-env-var-valueFrom.yaml b/content/fr/examples/pods/pod-configmap-env-var-valueFrom.yaml new file mode 100644 index 0000000000000..00827ec98aaa3 --- /dev/null +++ b/content/fr/examples/pods/pod-configmap-env-var-valueFrom.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/echo", "$(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ] + env: + - name: SPECIAL_LEVEL_KEY + valueFrom: + configMapKeyRef: + name: special-config + key: SPECIAL_LEVEL + - name: SPECIAL_TYPE_KEY + valueFrom: + configMapKeyRef: + name: special-config + key: SPECIAL_TYPE + restartPolicy: Never diff --git a/content/fr/examples/pods/pod-configmap-envFrom.yaml b/content/fr/examples/pods/pod-configmap-envFrom.yaml new file mode 100644 index 0000000000000..70ae7e5bcfaf9 --- /dev/null +++ b/content/fr/examples/pods/pod-configmap-envFrom.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh", "-c", "env" ] + envFrom: + - configMapRef: + name: special-config + restartPolicy: Never diff --git a/content/fr/examples/pods/pod-configmap-volume-specific-key.yaml b/content/fr/examples/pods/pod-configmap-volume-specific-key.yaml new file mode 100644 index 0000000000000..72e38fd83635c --- /dev/null +++ b/content/fr/examples/pods/pod-configmap-volume-specific-key.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh","-c","cat /etc/config/keys" ] + 
volumeMounts: + - name: config-volume + mountPath: /etc/config + volumes: + - name: config-volume + configMap: + name: special-config + items: + - key: SPECIAL_LEVEL + path: keys + restartPolicy: Never diff --git a/content/fr/examples/pods/pod-configmap-volume.yaml b/content/fr/examples/pods/pod-configmap-volume.yaml new file mode 100644 index 0000000000000..478c2e8d2b7ab --- /dev/null +++ b/content/fr/examples/pods/pod-configmap-volume.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh", "-c", "ls /etc/config/" ] + volumeMounts: + - name: config-volume + mountPath: /etc/config + volumes: + - name: config-volume + configMap: + # Indiquez le nom de la ConfigMap contenant les fichiers que vous souhaitez ajouter au conteneur + name: special-config + restartPolicy: Never diff --git a/content/fr/examples/pods/pod-multiple-configmap-env-variable.yaml b/content/fr/examples/pods/pod-multiple-configmap-env-variable.yaml new file mode 100644 index 0000000000000..4790a9c661c84 --- /dev/null +++ b/content/fr/examples/pods/pod-multiple-configmap-env-variable.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh", "-c", "env" ] + env: + - name: SPECIAL_LEVEL_KEY + valueFrom: + configMapKeyRef: + name: special-config + key: special.how + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: env-config + key: log_level + restartPolicy: Never diff --git a/content/fr/examples/pods/pod-single-configmap-env-variable.yaml b/content/fr/examples/pods/pod-single-configmap-env-variable.yaml new file mode 100644 index 0000000000000..09d6f4a696fdd --- /dev/null +++ b/content/fr/examples/pods/pod-single-configmap-env-variable.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dapi-test-pod +spec: + containers: + - name: test-container + image: k8s.gcr.io/busybox + command: [ "/bin/sh", "-c", "env" ] + env: + # Définie la variable d'environnement + - name: SPECIAL_LEVEL_KEY + valueFrom: + configMapKeyRef: + # La ConfigMap contenant la valeur que vous voulez attribuer à SPECIAL_LEVEL_KEY + name: special-config + # Spécifier la clé associée à la valeur + key: special.how + restartPolicy: Never diff --git a/content/id/docs/concepts/architecture/controller.md b/content/id/docs/concepts/architecture/controller.md index bff7130eb7241..096dd75085543 100644 --- a/content/id/docs/concepts/architecture/controller.md +++ b/content/id/docs/concepts/architecture/controller.md @@ -60,7 +60,7 @@ Job adalah sumber daya dalam Kubernetes yang menjalankan a {{< glossary_tooltip term_id="pod" >}}, atau mungkin beberapa Pod sekaligus, untuk melakukan sebuah pekerjaan dan kemudian berhenti. -(Setelah [dijadwalkan](../../../../en/docs/concepts/scheduling/), objek Pod +(Setelah [dijadwalkan](../../../../en/docs/concepts/scheduling-eviction/), objek Pod akan menjadi bagian dari keadaan yang diinginkan oleh kubelet). 
Ketika _controller job_ melihat tugas baru, maka _controller_ itu memastikan bahwa, diff --git a/content/id/docs/concepts/architecture/nodes.md b/content/id/docs/concepts/architecture/nodes.md index a4cae113e6764..685d54ddddb8f 100644 --- a/content/id/docs/concepts/architecture/nodes.md +++ b/content/id/docs/concepts/architecture/nodes.md @@ -166,7 +166,7 @@ Pada kasus ini, kontroler node berasumsi ada masalah pada jaringan master, dan m Mulai dari Kubernetes 1.6, kontroler node juga bertanggung jawab untuk melakukan eviction pada pod-pod yang berjalan di atas node dengan taints `NoExecute`, ketika pod-pod tersebut sudah tidak lagi tolerate terhadap taints. Sebagai tambahan, hal ini di-nonaktifkan secara default pada fitur alpha, kontroler node bertanggung jawab untuk menambahkan taints yang berhubungan dengan masalah pada node, seperti terputus atau `NotReady`. -Lihat [dokumentasi ini](/id/docs/concepts/configuration/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha. +Lihat [dokumentasi ini](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk bahasan detail tentang taints `NoExecute` dan fitur alpha. Mulai dari versi 1.8, kontroler node bisa diatur untuk bertanggung jawab pada pembuatan taints yang merepresentasikan node condition. Ini merupakan fitur alpha untuk versi 1.8. diff --git a/content/id/docs/concepts/containers/runtime-class.md b/content/id/docs/concepts/containers/runtime-class.md index 73252a03e4af3..539fbb7038cfd 100644 --- a/content/id/docs/concepts/containers/runtime-class.md +++ b/content/id/docs/concepts/containers/runtime-class.md @@ -45,7 +45,7 @@ soal bagaimana melakukan konfigurasi untuk implementasi CRI yang kamu miliki. Untuk saat ini, RuntimeClass berasumsi bahwa semua _node_ di dalam klaster punya konfigurasi yang sama (homogen). Jika ada _node_ yang punya konfigurasi berbeda dari yang lain (heterogen), maka perbedaan ini harus diatur secara independen di luar RuntimeClass -melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/id/docs/concepts/configuration/assign-pod-node/)). +melalui fitur _scheduling_ (lihat [Menempatkan Pod pada Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/)). {{< /note >}} Seluruh konfigurasi memiliki nama `handler` yang terkait, dijadikan referensi oleh RuntimeClass. diff --git a/content/id/docs/concepts/overview/working-with-objects/labels.md b/content/id/docs/concepts/overview/working-with-objects/labels.md index 306edc0bfbdf3..7b4b125062120 100644 --- a/content/id/docs/concepts/overview/working-with-objects/labels.md +++ b/content/id/docs/concepts/overview/working-with-objects/labels.md @@ -220,6 +220,6 @@ selector: #### Memilih kumpulan Node Salah satu contoh penggunaan pemilihan dengan menggunakan label yaitu untuk membatasi suatu kumpulan Node tertentu yang dapat digunakan oleh Pod. -Lihat dokumentasi pada [pemilihan Node](/id/docs/concepts/configuration/assign-pod-node/) untuk informasi lebih lanjut. +Lihat dokumentasi pada [pemilihan Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/) untuk informasi lebih lanjut. 
diff --git a/content/id/docs/concepts/scheduling-eviction/_index.md b/content/id/docs/concepts/scheduling-eviction/_index.md new file mode 100644 index 0000000000000..0d080ed79dd4f --- /dev/null +++ b/content/id/docs/concepts/scheduling-eviction/_index.md @@ -0,0 +1,5 @@ +--- +title: "Penjadwalan dan Pengusiran" +weight: 90 +--- + diff --git a/content/id/docs/concepts/configuration/assign-pod-node.md b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md similarity index 98% rename from content/id/docs/concepts/configuration/assign-pod-node.md rename to content/id/docs/concepts/scheduling-eviction/assign-pod-node.md index ee9e8bf2f4a88..f8654f42769fb 100644 --- a/content/id/docs/concepts/configuration/assign-pod-node.md +++ b/content/id/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -114,7 +114,7 @@ Berikut ini contoh dari pod yang menggunakan afinitas node: Aturan afinitas node tersebut menyatakan pod hanya bisa ditugaskan pada node dengan label yang memiliki kunci `kubernetes.io/e2e-az-name` dan bernilai `e2e-az1` atau `e2e-az2`. Selain itu, dari semua node yang memenuhi kriteria tersebut, mode dengan label dengan kunci `another-node-label-key` and bernilai `another-node-label-value` harus lebih diutamakan. -Kamu dapat meilhat operator `In` digunakan dalam contoh berikut. Sitaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/id/docs/concepts/configuration/taint-and-toleration/) untuk menolak pod dari node tertentu. +Kamu dapat meilhat operator `In` digunakan dalam contoh berikut. Sitaksis afinitas node yang baru mendukung operator-operator berikut: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. Kamu dapat menggunakan `NotIn` dan `DoesNotExist` untuk mewujudkan perilaku node anti-afinitas, atau menggunakan [node taints](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk menolak pod dari node tertentu. Jika kamu menyatakan `nodeSelector` dan `nodeAffinity`. *keduanya* harus dipenuhi agar pod dapat dijadwalkan pada node kandidat. @@ -284,7 +284,7 @@ Lihat [tutorial ZooKeeper](/docs/tutorials/stateful-application/zookeeper/#toler Untuk informasi lebih lanjut tentang afinitas/anti-afinitas antar pod, lihat [design doc](https://git.k8s.io/community/contributors/design-proposals/scheduling/podaffinity.md). -Kamu juga dapat mengecek [Taints](/id/docs/concepts/configuration/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod. +Kamu juga dapat mengecek [Taints](/id/docs/concepts/scheduling-eviction/taint-and-toleration/), yang memungkinkan sebuah *node* untuk *menolak* sekumpulan pod. 
## nodeName diff --git a/content/id/docs/concepts/scheduling/kube-scheduler.md b/content/id/docs/concepts/scheduling-eviction/kube-scheduler.md similarity index 99% rename from content/id/docs/concepts/scheduling/kube-scheduler.md rename to content/id/docs/concepts/scheduling-eviction/kube-scheduler.md index 6f7efab3d9773..f55b1d942d716 100644 --- a/content/id/docs/concepts/scheduling/kube-scheduler.md +++ b/content/id/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -94,7 +94,7 @@ penilaian oleh penjadwal: ## {{% heading "whatsnext" %}} -* Baca tentang [penyetelan performa penjadwal](/id/docs/concepts/scheduling/scheduler-perf-tuning/) +* Baca tentang [penyetelan performa penjadwal](/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning/) * Baca tentang [pertimbangan penyebarang topologi pod](/id/docs/concepts/workloads/pods/pod-topology-spread-constraints/) * Baca [referensi dokumentasi](/docs/reference/command-line-tools-reference/kube-scheduler/) untuk _kube-scheduler_ * Pelajari tentang [mengkonfigurasi beberapa penjadwal](/docs/tasks/administer-cluster/configure-multiple-schedulers/) diff --git a/content/id/docs/concepts/configuration/resource-bin-packing.md b/content/id/docs/concepts/scheduling-eviction/resource-bin-packing.md similarity index 100% rename from content/id/docs/concepts/configuration/resource-bin-packing.md rename to content/id/docs/concepts/scheduling-eviction/resource-bin-packing.md diff --git a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md b/content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md similarity index 97% rename from content/id/docs/concepts/scheduling/scheduler-perf-tuning.md rename to content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 3689ecf7cb6f7..3e94be54329a2 100644 --- a/content/id/docs/concepts/scheduling/scheduler-perf-tuning.md +++ b/content/id/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -8,7 +8,7 @@ weight: 70 {{< feature-state for_k8s_version="v1.14" state="beta" >}} -[kube-scheduler](/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler) +[kube-scheduler](/id/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler) merupakan penjadwal (_scheduler_) Kubernetes bawaan yang bertanggung jawab terhadap penempatan Pod-Pod pada seluruh Node di dalam sebuah klaster. @@ -66,7 +66,7 @@ Kamu bisa mengatur ambang batas untuk menentukan berapa banyak jumlah Node minim persentase bagian dari seluruh Node di dalam klaster kamu. kube-scheduler akan mengubahnya menjadi bilangan bulat berisi jumlah Node. Saat penjadwalan, jika kube-scheduler mengidentifikasi cukup banyak Node-Node layak untuk melewati jumlah persentase yang diatur, maka kube-scheduler -akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran] (/id/docs/concepts/scheduling/kube-scheduler/#kube-scheduler-implementation). +akan berhenti mencari Node-Node layak dan lanjut ke [fase penskoran] (/id/docs/concepts/scheduling-eviction/kube-scheduler/#kube-scheduler-implementation). [Bagaimana penjadwal mengecek Node](#bagaimana-penjadwal-mengecek-node) menjelaskan proses ini secara detail. 
diff --git a/content/id/docs/concepts/scheduling/scheduling-framework.md b/content/id/docs/concepts/scheduling-eviction/scheduling-framework.md similarity index 100% rename from content/id/docs/concepts/scheduling/scheduling-framework.md rename to content/id/docs/concepts/scheduling-eviction/scheduling-framework.md diff --git a/content/id/docs/concepts/configuration/taint-and-toleration.md b/content/id/docs/concepts/scheduling-eviction/taint-and-toleration.md similarity index 100% rename from content/id/docs/concepts/configuration/taint-and-toleration.md rename to content/id/docs/concepts/scheduling-eviction/taint-and-toleration.md diff --git a/content/id/docs/concepts/scheduling/_index.md b/content/id/docs/concepts/scheduling/_index.md deleted file mode 100644 index 8903577124f58..0000000000000 --- a/content/id/docs/concepts/scheduling/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: "Penjadwalan" -weight: 90 ---- - diff --git a/content/id/docs/concepts/services-networking/service.md b/content/id/docs/concepts/services-networking/service.md index 5136a00415e95..3c85c5abbf49a 100644 --- a/content/id/docs/concepts/services-networking/service.md +++ b/content/id/docs/concepts/services-networking/service.md @@ -745,7 +745,7 @@ dan tidak akan menerima trafik apa pun. Untuk menghasilkan distribusi trafik yang merata, kamu dapat menggunakan _DaemonSet_ atau melakukan spesifikasi -[pod anti-affinity](/id/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) +[pod anti-affinity](/id/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature) agar `Pod` tidak di-_assign_ ke _node_ yang sama. NLB juga dapat digunakan dengan anotasi [internal load balancer](/id/docs/concepts/services-networking/service/#internal-load-balancer). diff --git a/content/id/docs/concepts/storage/storage-classes.md b/content/id/docs/concepts/storage/storage-classes.md index 2897399e80995..c5fc71a8de8a4 100644 --- a/content/id/docs/concepts/storage/storage-classes.md +++ b/content/id/docs/concepts/storage/storage-classes.md @@ -149,10 +149,10 @@ PersistentVolumeClaim dibuat. PersistentVolume akan dipilih atau di-_provisionin sesuai dengan topologi yang dispesifikasikan oleh limitasi yang diberikan oleh mekanisme _scheduling_ Pod. Hal ini termasuk, tetapi tidak hanya terbatas pada, [persyaratan sumber daya](/id/docs/concepts/configuration/manage-compute-resources-container), -[_node selector_](/id/docs/concepts/configuration/assign-pod-node/#nodeselector), +[_node selector_](/id/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector), [afinitas dan -anti-afinitas Pod](/id/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity), +anti-afinitas Pod](/id/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity), -serta [_taint_ dan _toleration_](/id/docs/concepts/configuration/taint-and-toleration). +serta [_taint_ dan _toleration_](/id/docs/concepts/scheduling-eviction/taint-and-toleration). Beberapa _plugin_ di bawah ini mendukung `WaitForFirstConsumer` dengan _provisioning_ dinamis: diff --git a/content/id/docs/concepts/workloads/controllers/daemonset.md b/content/id/docs/concepts/workloads/controllers/daemonset.md index 0b1c0e71e92f1..905a0a193aa9f 100644 --- a/content/id/docs/concepts/workloads/controllers/daemonset.md +++ b/content/id/docs/concepts/workloads/controllers/daemonset.md @@ -97,8 +97,8 @@ membuat Pod dengan nilai yang berbeda di sebuah Node untuk _testing_.
Jika kamu menspesifikasikan `.spec.template.spec.nodeSelector`, maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [selektor -Node](/id/docs/concepts/configuration/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, -maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/id/docs/concepts/configuration/assign-pod-node/). +Node](/id/docs/concepts/scheduling-eviction/assign-pod-node/). Demikian juga, jika kamu menspesifikasikan `.spec.template.spec.affinity`, +maka _controller_ DaemonSet akan membuat Pod pada Node yang cocok dengan [Node affinity](/id/docs/concepts/scheduling-eviction/assign-pod-node/). Jika kamu tidak menspesifikasikan sama sekali, maka _controller_ DaemonSet akan membuat Pod pada semua Node. diff --git a/content/id/docs/reference/glossary/taint.md b/content/id/docs/reference/glossary/taint.md new file mode 100644 index 0000000000000..1545a025fde4f --- /dev/null +++ b/content/id/docs/reference/glossary/taint.md @@ -0,0 +1,18 @@ +--- +title: Taint +id: taint +date: 2019-01-11 +full_link: /id/docs/concepts/scheduling-eviction/taint-and-toleration/ +short_description: > + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_(kunci), _value_(nilai), dan _effect_(efek). Taint mencegah penjadwalan Pod pada Node atau grup Node. + +aka: +tags: +- core-object +- fundamental +--- + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_(kunci), _value_(nilai), dan _effect_(efek). Taint mencegah penjadwalan {{< glossary_tooltip text="Pod" term_id="pod" >}} pada {{< glossary_tooltip text="Node" term_id="node" >}} atau grup dari Node. + + + +Taint dan {{< glossary_tooltip text="toleransi" term_id="toleration" >}} bekerja sama untuk memastikan bahwa Pod tidak dijadwalkan ke Node yang tidak sesuai. Satu atau lebih taint dapat diterapkan pada Node. Sebuah Node seharusnya hanya menjadwalkan Pod dengan toleransi yang cocok untuk taint yang dikonfigurasi. diff --git a/content/id/docs/reference/glossary/toleration.md b/content/id/docs/reference/glossary/toleration.md new file mode 100644 index 0000000000000..45a2ac7f7c7dc --- /dev/null +++ b/content/id/docs/reference/glossary/toleration.md @@ -0,0 +1,17 @@ +--- +title: Toleransi (Toleration) +id: toleration +date: 2019-01-11 +full_link: /docs/concepts/scheduling-eviction/taint-and-toleration/ +short_description: > + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_(kunci), _value_(nilai), dan _effect_(efek). Toleransi memungkinkan penjadwalan Pod pada Node atau grup dari Node yang memiliki taint yang cocok. +aka: +tags: +- core-object +- fundamental +--- + Objek inti yang terdiri dari tiga properti yang diperlukan: _key_(kunci), _value_(nilai), dan _effect_(efek). Toleransi memungkinkan penjadwalan Pod pada Node atau grup dari Node yang memiliki {{< glossary_tooltip text="taints" term_id="taint" >}} yang cocok. + + + +Toleransi dan {{< glossary_tooltip text="taints" term_id="taint" >}} bekerja sama untuk memastikan bahwa Pod tidak dijadwalkan ke Node yang tidak sesuai. Satu atau lebih taint dapat diterapkan pada Node. Sebuah Node seharusnya hanya menjadwalkan Pod dengan toleransi yang cocok untuk taint yang dikonfigurasi. 
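To make the three taint properties concrete, the sketch below assumes a Node has been tainted with key `dedicated`, value `experimental`, and effect `NoSchedule` (for example with `kubectl taint nodes node1 dedicated=experimental:NoSchedule`); only a Pod carrying a matching toleration, such as the hypothetical one shown here, can then be scheduled onto that Node:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo       # hypothetical name
spec:
  containers:
  - name: app
    image: nginx              # illustrative image
  tolerations:
  - key: "dedicated"          # key of the taint
    operator: "Equal"
    value: "experimental"     # value of the taint
    effect: "NoSchedule"      # effect of the taint
```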
diff --git a/content/id/docs/setup/learning-environment/_index.md b/content/id/docs/setup/learning-environment/_index.md index 4c116a9bc08a6..cdfe637e6402a 100644 --- a/content/id/docs/setup/learning-environment/_index.md +++ b/content/id/docs/setup/learning-environment/_index.md @@ -2,3 +2,34 @@ title: Lingkungan Pembelajaran weight: 20 --- + +## kind + +[`kind`](https://kind.sigs.k8s.io/docs/) memberikan kamu kemampuan untuk +menjalankan Kubernetes pada komputer lokal kamu. Perangkat ini membutuhkan +[Docker](https://docs.docker.com/get-docker/) yang sudah diinstal dan +terkonfigurasi. + +Halaman [Memulai Cepat](https://kind.sigs.k8s.io/docs/user/quick-start/) `kind` +memperlihatkan kepada kamu tentang apa yang perlu kamu lakukan agar `kind` dapat +berjalan dan bekerja. + +Melihat Memulai Cepat Kind + +## minikube + +Seperti halnya dengan `kind`, [`minikube`](https://minikube.sigs.k8s.io/) +merupakan perangkat yang memungkinkan kamu untuk menjalankan Kubernetes +secara lokal. `minikube` menjalankan sebuah klaster Kubernetes dengan +satu node saja dalam komputer pribadi (termasuk Windows, macOS dan Linux) +sehingga kamu dapat mencoba Kubernetes atau untuk pekerjaan pengembangan +sehari-hari. + +Kamu bisa mengikuti petunjuk resmi +[Memulai!](https://minikube.sigs.k8s.io/docs/start/) +`minikube` jika kamu ingin fokus agar perangkat ini terinstal. + +Lihat Panduan Memulai! Minikube + +Setelah kamu memiliki `minikube` yang bekerja, kamu bisa menggunakannya +untuk [menjalankan aplikasi contoh](/id/docs/tutorials/hello-minikube/). diff --git a/content/id/docs/setup/learning-environment/minikube.md b/content/id/docs/setup/learning-environment/minikube.md deleted file mode 100644 index 8729968738903..0000000000000 --- a/content/id/docs/setup/learning-environment/minikube.md +++ /dev/null @@ -1,514 +0,0 @@ ---- -title: Instalasi Kubernetes dengan Minikube -weight: 30 -content_type: concept ---- - - - -Minikube adalah alat yang memudahkan untuk menjalankan Kubernetes pada komputer lokal. Minikube menjalankan satu Node klaster Kubernetes di dalam _Virtual Machine_ (VM) pada laptop kamu untuk pengguna yang ingin mencoba Kubernetes atau mengembangkannya. - - - - -## Fitur Minikube - -Minikube mendukung fitur Kubernetes berikut: - -* DNS -* NodePort -* {{< glossary_tooltip text="ConfigMap" term_id="configmap" >}} dan {< glossary_tooltip text="Secret" term_id="secret" >}} -* _Dashboard_ -* _Container runtime_: [Docker](https://www.docker.com/), [CRI-O](https://cri-o.io/), dan [containerd](https://github.com/containerd/containerd) -* {{< glossary_tooltip text="CNI" term_id="cni" >}} -* Ingress - -## Instalasi - -Lihat [Instalasi Minikube](/id/docs/tasks/tools/install-minikube/). - -## Memulai Cepat - -Demonstrasi singkat ini memandu kamu tentang bagaimana memulai, menggunakan dan menghapus Minikube secara lokal. Ikuti langkah berikut untuk memulai dan menjelajahi Minikube. - -1. Mulailah Minikube dan buatlah sebuah klaster: - - ```shell - minikube start - ``` - - Keluaran menyerupai: - - ``` - Starting local Kubernetes cluster... - Running pre-create checks... - Creating machine... - Starting local Kubernetes cluster... - ``` - Untuk informasi lebih lanjut mengenai bagaimana memulai klaster pada versi Kubernetes tertentu, VM atau Container _runtime_, lihatlah [Memulai klaster](#memulai-klaster). - -2. Kini kamu bisa berinteraksi dengan klaster kamu dengan kubectl. Untuk informasi lebih lanjut, lihatlah [Interaksi dengan klaster kamu](#interaksi-dengan-klaster-kamu). 
- - Mari kita buat Kubernetes Deployment menggunakan _image_ bernama `echoserver`, yaitu sebuah server HTTP sederhana dan buka layanan pada porta 8080 dengan menggunakan opsi `--port`. - - ```shell - kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.10 - ``` - - Keluaran menyerupai: - - ``` - deployment.apps/hello-minikube created - ``` -3. Untuk mengakses Deployment `hello-minikube`, bukalah dia sebagai sebuah Service: - - ```shell - kubectl expose deployment hello-minikube --type=NodePort --port=8080 - ``` - - Opsi `--type=NodePort` menentukan tipe Service. - - Keluarannya menyerupai: - - ``` - service/hello-minikube exposed - ``` - -4. Pod `hello-minikube` saat ini telah dibuat namun kamu harus menunggu hingga Pod selesai dijalankan sebelum dapat mengaksesnya melalui Service yang telah dibuka. - - Cek apakah Pod sudah berjalan dan beroperasi: - - ```shell - kubectl get pod - ``` - - Jika keluaran menampilkan `STATUS` sebagai `ContainerCreating`, maka Pod sedang dalam proses pembuatan: - - ``` - NAME READY STATUS RESTARTS AGE - hello-minikube-3383150820-vctvh 0/1 ContainerCreating 0 3s - ``` - - Jika keluaran menampilkan `STATUS` sebagai `Running`, maka Pod sudah berjalan dan beroperasi: - - ``` - NAME READY STATUS RESTARTS AGE - hello-minikube-3383150820-vctvh 1/1 Running 0 13s - ``` - -5. Ambil URL Service yang telah dibuka untuk melihat Service secara detail: - - ```shell - minikube service hello-minikube --url - ``` - -6. Untuk melihat detail dari klaster lokal kamu, salin dan tempel URL yang kamu dapatkan dari keluaran pada peramban kamu. - - Keluarannya menyerupai: - - ``` - Hostname: hello-minikube-7c77b68cff-8wdzq - - Pod Information: - -no pod information available- - - Server values: - server_version=nginx: 1.13.3 - lua: 10008 - - Request Information: - client_address=172.17.0.1 - method=GET - real path=/ - query= - request_version=1.1 - request_scheme=http - request_uri=http://192.168.99.100:8080/ - - Request Headers: - accept=*/* - host=192.168.99.100:30674 - user-agent=curl/7.47.0 - - Request Body: - -no body in request- - ``` - - Jika kamu tidak lagi membutuhkan Service dan klaster, maka kamu bisa menghapusnya. - -7. Hapuslah Service `hello-minikube`: - - ```shell - kubectl delete services hello-minikube - ``` - - Keluarannya menyerupai: - - ``` - service "hello-minikube" deleted - ``` - -8. Hapuslah Deployment `hello-minikube`: - - ```shell - kubectl delete deployment hello-minikube - ``` - - Keluarannya menyerupai: - - ``` - deployment.extensions "hello-minikube" deleted - ``` - -9. Hentikanlah klaster Minikube lokal: - - ```shell - minikube stop - ``` - - Keluarannya menyerupai: - - ``` - Stopping "minikube"... - "minikube" stopped. - ``` - - Untuk informasi lebih lanjut, lihatlah [Menghentikan Klaster](#menghentikan-klaster). - -10. Hapuslah klaster Minikube lokal - - ```shell - minikube delete - ``` - Keluarannya menyerupai: - ``` - Deleting "minikube" ... - The "minikube" cluster has been deleted. - ``` - Untuk informasi lebih lanjut, lihat [Menghapus Klaster](#menghapus-klaster). - -## Mengelola Klaster - -### Memulai Klaster - -Perintah `minikube start` bisa digunakan untuk memulai klaster kamu. -Perintah ini membuat dan mengonfigurasi sebuah mesin virtual yang menjalankan klaster Kubernetes dengan satu Node. -Perintah ini juga mengonfigurasi instalasi [kubectl](/id/docs/user-guide/kubectl-overview/) untuk berkomunikasi dengan klaster ini. 
- -{{< note >}} -Jika kamu menggunakan proksi web, maka kamu harus meneruskan informasi berikut ini ke perintah `minikube start`: - -```shell -https_proxy= minikube start --docker-env http_proxy= --docker-env https_proxy= --docker-env no_proxy=192.168.99.0/24 -``` -Sayangnya, pengaturan dengan _environment variable_ saja tidak berguna. - -Minikube juga membuat konteks "minikube", dan menetapkannya sebagai bawaan di kubectl. -Untuk kembali menggunakan konteks ini, jalankan perintah: `kubectl config use-context minikube`. -{{< /note >}} - -#### Menentukan Versi Kubernetes - -Kamu bisa menentukan versi Kubernetes yang digunakan oleh Minikube dengan -menambahkan `--kubernetes-version` ke perintah `minikube start`. Sebagai -contoh, untuk menjalankan versi {{}}, kamu akan menjalankan perintah berikut: - -``` -minikube start --kubernetes-version {{< param "fullversion" >}} -``` -#### Menentukan _driver_ VM - -Kamu bisa mengubah _driver_ VM dengan menambahkan tanda `--driver=` pada `minikube start`. -Sebagai contoh: -```shell -minikube start --driver= -``` - -Minikube mendukung _driver_ berikut ini: -{{< note >}} -Lihat [_DRIVER_](https://minikube.sigs.k8s.io/docs/reference/drivers/) untuk detail tentang _driver_ yang didukung dan proses instalasi _plugin_. -{{< /note >}} - -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/docker/)) docker -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/virtualbox/)) virtualbox -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/drivers/podman/)) podman (TAHAP EXPERIMEN) -* vmwarefusion -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/kvm2/)) kvm2 -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperkit/)) hyperkit -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/hyperv/)) hyperv -Perlu diingat bahwa IP dibawah adalah dinamik dan bisa berubah. IP ini bisa diambil dengan `minikube ip`. -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/vmware/)) vmware (_driver_ VMware terpadu) -* ([instalasi driver](https://minikube.sigs.k8s.io/docs/reference/drivers/parallels/)) parallels -* none (menjalankan komponen Kubernetes pada hos dan bukan pada mesin virtual. Kamu harus menjalankan Linux dan harus menginstal {{}}.) - -{{< caution >}} -Jika kamu menggunakan _driver_ `none`, beberapa komponen Kubernetes dijalankan sebagai Container istimewa yang memiliki efek samping di luar lingkungan Minikube. Efek samping tersebut berarti bahwa _driver_ `none` tidak direkomendasikan untuk komputer pribadi. -{{< /caution >}} - -#### Memulai klaster pada _runtime_ kontainer alternatif -Kamu bisa memulai Minikube pada _runtime_ kontainer berikut. 
-{{< tabs name="container_runtimes" >}} -{{% tab name="containerd" %}} -Untuk menggunakan [containerd](https://github.com/containerd/containerd) sebagai _runtime_ kontainer, jalankan: -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --container-runtime=containerd \ - --bootstrapper=kubeadm -``` - -Atau kamu bisa menggunakan versi yang diperpanjang: - -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --extra-config=kubelet.container-runtime=remote \ - --extra-config=kubelet.container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --extra-config=kubelet.image-service-endpoint=unix:///run/containerd/containerd.sock \ - --bootstrapper=kubeadm -``` -{{% /tab %}} -{{% tab name="CRI-O" %}} -Untuk menggunakan [CRI-O](https://cri-o.io/) sebagain _runtime_ kontainer, jalankan: -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --container-runtime=cri-o \ - --bootstrapper=kubeadm -``` -Atau kamu bisa menggunakan versi yang diperpanjang: - -```bash -minikube start \ - --network-plugin=cni \ - --enable-default-cni \ - --extra-config=kubelet.container-runtime=remote \ - --extra-config=kubelet.container-runtime-endpoint=/var/run/crio.sock \ - --extra-config=kubelet.image-service-endpoint=/var/run/crio.sock \ - --bootstrapper=kubeadm -``` -{{% /tab %}} -{{< /tabs >}} - -#### Menggunakan _image_ lokal degan menggunakan kembali _daemon_ Docker - -Saat menggunakan sebuah VM untuk Kubernetes, akan lebih baik jika _daemon_ Docker bawaan Minikube digunakan kembali. Menggunakan kembali _daemon_ bawaan membuat kamu tidak perlu membangun registri Docker pada mesin hos kamu dan mengunggah _image_ ke dalamnya. Namun, kamu dapat membangun di dalam _daemon_ Docker yang sama dengan Minikube, yang tentunya dapat mempercepat percobaan lokal. - -{{< note >}} -Pastikan untuk memberi _tag_ pada Docker _image_ kamu dengan sesuatu selain `latest` dan gunakan _tag_ tersebut untuk menarik _image_. Karena `:latest` adalah bawaan, dengan kebijakan penarikan _image_ bawaan, yaitu `Always`, kesalahan penarikan _image_ (`ErrImagePull`) akhirnya dapat terjadi jika kamu tidak memiliki _image_ Docker di register Docker bawaan (biasanya DockerHub). -{{< /note >}} - -Untuk bekerja dengan _daemon_ Docker pada mesin Mac/Linux, jalankan baris terakhir dari `minikube docker-env`. - -Kamu sekarang dapat menggunakan Docker di terminal mesin Mac/Linux kamu untuk berkomunikasi dengan _daemon_ Docker di dalam VM Minikube: - -```shell -docker ps -``` - -{{< note >}} -Pada Centos 7, Docker bisa memberikan kesalahan berikut: - -``` -Could not read CA certificate "/etc/docker/ca.pem": open /etc/docker/ca.pem: no such file or directory -``` - -Kamu bisa memperbaikinya dengan memperbaharui /etc/sysconfig/docker untuk memastikan bahwa lingkungan Minikube dikenali: - -```shell -< DOCKER_CERT_PATH=/etc/docker ---- -> if [ -z "${DOCKER_CERT_PATH}" ]; then -> DOCKER_CERT_PATH=/etc/docker -> fi -``` -{{< /note >}} - -### Mengonfigurasi Kubernetes - -Minikube memiliki sebuah fitur "pengonfigurasi" yang memperbolehkan pengguna untuk mengkonfigurasi komponen Kubernetes dengan sembarang nilai. -Untuk menggunakan fitur ini, kamu bisa menggunakan _flag_ `--extra-config` pada perintah `minikube start`. - -_Flag_ ini berulang, jadi kamu bisa menggunakannya beberapa kali dengan beberapa nilai yang berbeda untuk mengatur beberapa opsi. 
- -_Flag_ ini menerima sebuah _string_ dalam format `component.key=value`, di mana `component` adalah salah satu _string_ dari list di bawah, `key` adalah nilai dari _struct_ configurasi dan `value` adalah nilai yang digunakan. - -Kunci yang valid bisa ditemukan dengan memeriksa dokumentasi `componentconfigs` Kubernetes untuk setiap komponen. -Berikut adalah dokumentasi untuk setiap konfigurasi yang didukung: - -* [kubelet](https://godoc.org/k8s.io/kubernetes/pkg/kubelet/apis/config#KubeletConfiguration) -* [apiserver](https://godoc.org/k8s.io/kubernetes/cmd/kube-apiserver/app/options#ServerRunOptions) -* [proxy](https://godoc.org/k8s.io/kubernetes/pkg/proxy/apis/config#KubeProxyConfiguration) -* [controller-manager](https://godoc.org/k8s.io/kubernetes/pkg/controller/apis/config#KubeControllerManagerConfiguration) -* [etcd](https://godoc.org/github.com/coreos/etcd/etcdserver#ServerConfig) -* [scheduler](https://godoc.org/k8s.io/kubernetes/pkg/scheduler/apis/config#KubeSchedulerConfiguration) - -#### Contoh - -Untuk mengubah pengaturan `MaxPods` menjadi 5 pada Kubelet, gunakan _flag_ ini: `--extra-config=kubelet.MaxPods=5`. - -Fitur ini juga mendukung _struct_ yang berulang. Untuk mengubah pengaturan `LeaderElection.LeaderElect` menjadi `true` pada penjadwal, gunakan _flag_: `--extra-config=scheduler.LeaderElection.LeaderElect=true`. - -Untuk mengatur `AuthorizationMode` pada `apiserver` menjadi `RBAC`, kamu bisa menggunakan: `--extra-config=apiserver.authorization-mode=RBAC`. - -### Menghentikan klaster -Perintah `minikube stop` bisa digunakan untuk menghentikan klaster kamu. -Perintah ini menghentikan mesin virtual Minikube, tapi mempertahankan semua status dan data klaster. -Memulai klaster lagi akan mengembalikannya ke keadaan sebelumnya. - -### Menghapus klaster -Perintah `minikube delete` bisa digunakan untuk menghapus klaster kamu. -Perintah ini menghentikan dan menghapus mesin virtual Minikube. Tidak ada data atau _state_ yang dipertahankan. - -### Memperbaharui Minikube -Jika kamu menggunakan MacOS dan [Brew Package Manager](https://brew.sh/) sudah terpasang, jalankan: - -```shell -brew update -brew upgrade minikube -``` - -## Interaksi dengan Klaster Kamu - -### Kubectl - -Perintah `minikube start` membuat sebuah [konteks kubectl](/id/docs/reference/generated/kubectl/kubectl-commands#-em-set-context-em-) yang disebut "minikube". -Konteks ini menyimpan pengaturan untuk berkomunikasi dengan klaster Minikube kamu. - -Minikube menetapkan konteks ini sebagai bawaan secara otomatis, tetapi jika kamu ingin mengubah kembali ke konteks tersebut di kemudian hari, gunakan: - -`kubectl config use-context minikube` - -Atau berikan konteks untuk setiap perintah seperti ini: - -`kubectl get pods --context=minikube` - -### Dashboard - -Untuk mengakses [Kubernetes Dashboard](/docs/tasks/access-application-cluster/web-ui-dashboard/), gunakan perintah ini pada terminal setelah memulai Minikube untuk mendapatkan alamatnya: - -```shell -minikube dashboard -``` - -### Service - -Untuk mengakses Service yang dibuka via NodePort, jalankan perintah ini pada terminal setelah memulai Minikube untuk mendapatkan alamat: - -```shell -minikube service [-n NAMESPACE] [--url] NAME -``` - -## Jaringan - -Mesin virtual Minikube dibuka ke sistem hos melalui alamat IP _host-only_ , yang bisa didapatkan dengan perintah `minikube ip`. -Seluruh Service dengan jenis `NodePort` bisa diakses melalui alamat IP pada NodePort. 
- -Untuk mementukan NodePort pada Service kamu, kamu bisa menggunakan perintah `kubectl` sebagai berikut: - -`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'` - -## PersistentVolume - -Minikube mendukung [PersistentVolume](/id/docs/concepts/storage/persistent-volumes/) dengan jenis `hostPath`. -PersistenVolume ini dipetakan ke direktori di dalam mesin virtual Minikube. - -Mesin virtual Minikube melakukan _booting_ ke tmpfs, sehingga sebagian besar direktori tidak akan bertahan setelah di _reboot_ (`minikube stop`). - -Namun, Minikube diatur untuk mempertahankan berkas yang tersimpan didalam direktori hos berikut: - -* `/data` -* `/var/lib/minikube` -* `/var/lib/docker` - -Ini adalah contoh pengaturan PersistentVolume untuk mempertahankan data di dalam direktori `/data`: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv0001 -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - hostPath: - path: /data/pv0001/ -``` - -## Folder hos yang di _mount_ -Beberapa _driver_ akan memasang folder _hos_ dalam VM sehingga kamu dapat dengan mudah berbagi berkas antara VM dan hos. Saat ini, hal tersebut tidak dapat dikonfigurasi dan berbeda untuk setiap _driver_ dan sistem operasi yang kamu gunakan. - -{{< note >}} -Berbagi folder hos belum diimplementasikan pada _driver_ KVM. -{{< /note >}} - -| Driver | OS | HostFolder | VM | -| --- | --- | --- | --- | -| VirtualBox | Linux | /home | /hosthome | -| VirtualBox | macOS | /Users | /Users | -| VirtualBox | Windows | C://Users | /c/Users | -| VMware Fusion | macOS | /Users | /mnt/hgfs/Users | -| Xhyve | macOS | /Users | /Users | - -## Registri Container Pribadi - -Untuk mengakses registri Container pribadi, ikuti langkah berikut pada [halaman ini](/id/docs/concepts/containers/images/). - -Kami merekomendasi penggunaan `ImagePullSecrets`, tetapi jika kamu ingin mengonfigurasi akses pada virtual mesin Minikube, kamu bisa menempatkan `.dockercfg` pada direktori `/home/docker` atau `config.json` dalam direktori `/home/docker/.docker`. - -## Tambahan (_Add-on_) - -Supaya Minikube memulai atau memulai kembali kustom tambahan dengan benar, -tempatkan tambahan yang ingin kamu jalankan di dalam direktori `~/.minikube/addons`. -Tambahan dalam folder akan dipindahkan ke virtual mesin Minikube dan dijalankan setiap kali Minikube -dimulai atau dimulai ulang. - -## Menggunakan Minikube dengan Proksi HTTP - -Minikube membuat sebuah mesin virtual yang memasukkan Kubernetes dan _daemon_ Docker. -Ketika Kubernetes berusaha untuk menjadwalkan Container dengan Docker, _daemon_ Docker mungkin membutuhkan -akses jaringan eksternal untuk menarik Container. - -Jika kamu berada di belakang _proxy_ HTTP, kamu mungkin perlu menyediakan Docker dengan pengaturan proksi. -Untuk melakukan ini, berikan _environment variable_ yang dibutuhkan sebagai _flag_ pada saat `minikube start`. - -Contoh: - -```shell -minikube start --docker-env http_proxy=http://$YOURPROXY:PORT \ - --docker-env https_proxy=https://$YOURPROXY:PORT -``` - -Jika alamat mesin virtual kamu adalah 192.168.99.100, maka ada kemungkinan pengaturan proksi kamu akan mencegah `kubectl` untuk mencapainya. -Untuk melewatkan konfigurasi _proxy_ untuk alamat IP ini, kamu harus memodifikasi pengaturan _no_proxy` kamu. Kamu bisa melakukannya dengan: - -```shell -export no_proxy=$no_proxy,$(minikube ip) -``` - -## Masalah yang Diketahui - -Fitur yang memerlukan banyak Node tidak akan berfungsi dalam Minikube. 
- -## Desain - -Minikube menggunakan [libmachine](https://github.com/docker/machine/tree/master/libmachine) untuk menyediakan mesin virtual, dan [kubeadm](https://github.com/kubernetes/kubeadm) untuk menyediakan klaster Kubernetes. - -Untuk info lebih lanjut tentang Minikube, lihat [proposal](https://git.k8s.io/community/contributors/design-proposals/cluster-lifecycle/local-cluster-ux.md). - -## Tautan Tambahan - -* **Tujuan and Non-Tujuan**: Untuk tujuan dan non-tujuan dari projek Minikube, lihat [roadmap](https://minikube.sigs.k8s.io/docs/contrib/roadmap/). -* **Petunjuk Pengembangan**: Lihat [Berkontribusi](https://minikube.sigs.k8s.io/docs/contrib/) untuk ikhtisar bagaimana cara mengirimkan _pull request_. -* **Membangun Minikube**: Untuk instruksi bagaimana membangun atau mengetes Minikube dari sumber kode, lihat [petunjuk membangun](https://minikube.sigs.k8s.io/docs/contrib/building/). -* **Menambahkan Dependensi Baru**: Untuk instruksi bagaimana menambahkan dependensi baru ke Minikube, lihat [petunjuk penambahan dependensi](https://minikube.sigs.k8s.io/docs/contrib/drivers/). -* **Menambahkan Addon Baru**: Untuk instruksi bagaimana menambahkan tambahan baru untuk Minikube, lihat [petunjuk menambahkan addon baru](https://minikube.sigs.k8s.io/docs/contrib/addons/). -* **MicroK8s**: Pengguna Linux yang ingin menghindari penggunaan mesin virtual, bisa mempertimbangkan [MicroK8s](https://microk8s.io/) sebagai alternatif. - -## Komunitas - -Kontribusi, pertanyaan, dan komentar sangat diharapkan! Pengembang Minikube berkumpul dalam [Slack](https://kubernetes.slack.com) di _channel_ #minikube (dapatkan undangan [di sini](http://slack.kubernetes.io/)). Kami juga memiliki [milis kubernetes-dev Google Groups](https://groups.google.com/forum/#!forum/kubernetes-dev). Jika kamu memposting sesuatu, awali subjek kamu dengan "minikube: ". diff --git a/content/id/docs/tasks/administer-cluster/sysctl-cluster.md b/content/id/docs/tasks/administer-cluster/sysctl-cluster.md index 9adbb50a9f9fb..7120f087fa44f 100644 --- a/content/id/docs/tasks/administer-cluster/sysctl-cluster.md +++ b/content/id/docs/tasks/administer-cluster/sysctl-cluster.md @@ -156,7 +156,7 @@ Sangat disarankan untuk menggunakan Kubernetes [fitur _taints and toleration_](/ Pod dengan sysctl _unsafe_ akan gagal diluncurkan pada sembarang Node yang belum mengaktifkan kedua sysctl _unsafe_ secara eksplisit. Seperti halnya sysctl _node-level_ sangat disarankan untuk menggunakan [fitur _taints and toleration_](/docs/reference/generated/kubectl/kubectl-commands/#taint) atau -[pencemaran dalam Node](/docs/concepts/scheduling-eviction/taint-and-toleration/) +[pencemaran dalam Node](/id/docs/concepts/scheduling-eviction/taint-and-toleration/) untuk Pod dalam Node yang tepat. ## PodSecurityPolicy diff --git a/content/id/docs/tutorials/hello-minikube.md b/content/id/docs/tutorials/hello-minikube.md index faba283d89bdf..398c5a3a3f8d9 100644 --- a/content/id/docs/tutorials/hello-minikube.md +++ b/content/id/docs/tutorials/hello-minikube.md @@ -15,11 +15,11 @@ card: -Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.js Halo Dunia di Kubernetes, dengan [Minikube](/docs/getting-started-guides/minikube) dan Katacoda. +Tutorial ini menunjukkan bagaimana caranya menjalankan aplikasi sederhana Node.js Halo Dunia di Kubernetes, dengan [`minikube`](/docs/getting-started-guides/minikube) dan Katacoda. Katacoda menyediakan environment Kubernetes secara gratis di dalam browser. 
{{< note >}} -Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/id/docs/tasks/tools/install-minikube/) kamu. +Kamupun bisa mengikuti tutorial ini kalau sudah instalasi minikube di lokal. Silakan lihat [memulai `minikube`](https://minikube.sigs.k8s.io/docs/start/) untuk instruksi instalasi. {{< /note >}} @@ -27,7 +27,7 @@ Kamupun bisa mengikuti tutorial ini kalau sudah instalasi [Minikube di lokal](/i ## {{% heading "objectives" %}} -* Deploy aplikasi halo dunia pada Minikube. +* Deploy aplikasi halo dunia pada minikube. * Jalankan aplikasinya. * Melihat log aplikasi. @@ -54,7 +54,7 @@ Untuk info lebih lanjut tentang perintah `docker build`, baca [dokumentasi Docke {{< kat-button >}} - {{< note >}}Kalau kamu memilih instalasi Minikube secara lokal, jalankan `minikube start`.{{< /note >}} + {{< note >}}Kalau kamu memilih instalasi minikube secara lokal, jalankan `minikube start`.{{< /note >}} 2. Buka dasbor Kubernetes di dalam browser: @@ -147,7 +147,7 @@ Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes ``` Untuk penyedia cloud yang memiliki load balancer, sebuah alamat IP eksternal akan disediakan untuk mengakses Servis tersebut. - Pada Minikube, tipe `LoadBalancer` membuat Servis tersebut dapat diakses melalui perintah `minikube service`. + Pada minikube, tipe `LoadBalancer` membuat Servis tersebut dapat diakses melalui perintah `minikube service`. 3. Jalankan perintah berikut: @@ -163,7 +163,7 @@ Supaya Kontainer `hello-node` bisa diakses dari luar jaringan virtual Kubernetes ## Aktifkan addons -Minikube punya beberapa addons yang bisa diaktifkan, dinon-aktifkan, maupun dibuka di dalam environment Kubernetes lokal. +Perangkat minikube meliputi sekumpulan {{< glossary_tooltip text="addons" term_id="addons" >}} bawaan yang bisa diaktifkan, dinonaktifkan, maupun dibuka di dalam environment Kubernetes lokal. 1. Daftar addons yang ada saat ini: @@ -249,13 +249,13 @@ kubectl delete service hello-node kubectl delete deployment hello-node ``` -Kamu juga boleh mematikan mesin virtual (VM) untuk Minikube: +Kamu juga boleh mematikan mesin virtual atau _virtual machine_ (VM) untuk minikube: ```shell minikube stop ``` -Kamu juga boleh menghapus Minikube VM: +Kamu juga boleh menghapus minikube VM: ```shell minikube delete diff --git a/content/id/examples/application/job/cronjob.yaml b/content/id/examples/application/job/cronjob.yaml index 2ce31233c3cd4..34ab2a3f06007 100644 --- a/content/id/examples/application/job/cronjob.yaml +++ b/content/id/examples/application/job/cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: hello diff --git a/content/it/docs/concepts/containers/images.md b/content/it/docs/concepts/containers/images.md new file mode 100644 index 0000000000000..43fe439ee0b88 --- /dev/null +++ b/content/it/docs/concepts/containers/images.md @@ -0,0 +1,316 @@ +--- +title: Immagini +content_type: concept +weight: 10 +--- + + + +L'immagine di un container rappresenta dati binari che incapsulano un'applicazione e +tutte le sue dipendenze software. Le immagini sono costituite da pacchetti software +eseguibili che possono essere avviati in modalità standalone e su cui si possono fare +ipotesi ben precise circa l'ambiente in cui vengono eseguiti. 
+ +Tipicamente viene creata un'immagine di un'applicazione ed effettuato il _push_ +su un registry (un repository pubblico di immagini) prima di poterne fare riferimento esplicito in un +{{< glossary_tooltip text="Pod" term_id="pod" >}} + +Questa pagina va a delineare nello specifico il concetto di immagine di un container. + + + +## I nomi delle immagini + +Alle immagini dei container vengono normalmente attribuiti nomi come `pause`, `example/mycontainer`, o `kube-apiserver`. +Le immagini possono anche contenere l'hostname del registry in cui le immagini sono pubblicate; +ad esempio: `registro.fittizio.esempio/nomeimmagine`, +ed è possibile che sia incluso nel nome anche il numero della porta; ad esempio: `registro.fittizio.esempio:10443/nomeimmagine`. + +Se non si specifica l'hostname di un registry, Kubernetes assume che ci si riferisca al registry pubblico di Docker. + +Dopo la parte relativa al nome dell'immagine si può aggiungere un _tag_ (come comunemente avviene per comandi come `docker` e `podman`). +I tag permettono l'identificazione di differenti versioni della stessa serie di immagini. + +I tag delle immagini sono composti da lettere minuscole e maiuscole, numeri, underscore (`_`), +punti (`.`), e trattini (`-`). +Esistono regole aggiuntive relative a dove i caratteri separatori (`_`, `-`, and `.`) +possano essere inseriti nel tag di un'immagine. +Se non si specifica un tag, Kubernetes assume il tag `latest` che va a definire l'immagine disponibile più recente. + +{{< caution >}} +Evitate di utilizzare il tag `latest` quando si rilasciano dei container in produzione, +in quanto risulta difficile tracciare quale versione dell'immagine sia stata avviata e persino più difficile +effettuare un rollback ad una versione precente. + +Invece, meglio specificare un tag specifico come ad esempio `v1.42.0`. +{{< /caution >}} + +## Aggiornamento delle immagini + +Quando un {{< glossary_tooltip text="Deployment" term_id="deployment" >}}, +{{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}, Pod, o qualsiasi altro +oggetto che includa un Pod template viene creato per la prima volta, la policy di default per il pull di tutti i container nel Pod +è impostata su `IfNotPresent` (se non presente) se non specificato diversamente. +Questa policy permette al +{{< glossary_tooltip text="kubelet" term_id="kubelet" >}} di evitare di fare il pull +di un'immagine se questa è già presente. + +Se necessario, si può forzare il pull in ogni occasione in uno dei seguenti modi: + +- impostando `imagePullPolicy` (specifica per il pull delle immagini) del container su `Always` (sempre). +- omettendo `imagePullPolicy` ed usando il tag `:latest` (più recente) per l'immagine da utilizzare; + Kubernetes imposterà la policy su `Always` (sempre). +- omettendo `imagePullPolicy` ed il tag per l'immagine da utilizzare. +- abilitando l'admission controller [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages). + +{{< note >}} +Il valore dell'impostazione `imagePullPolicy` del container è sempre presente quando l'oggetto viene creato per la prima volta +e non viene aggiornato se il tag dell'immagine dovesse cambiare successivamente. + +Ad esempio, creando un Deployment con un'immagine il cui tag _non_ è +`:latest`, e successivamente aggiornando il tag di quell'immagine a `:latest`, il campo + `imagePullPolicy` _non_ cambierà su `Always`. +È necessario modificare manualmente la policy di pull di ogni oggetto dopo la sua creazione. 
+{{< /note >}} + +Quando `imagePullPolicy` è definito senza un valore specifico, esso è impostato su `Always`. + +## Multi-architecture support nelle immagini + +Oltre a fornire immagini binarie, un _container registry_ può fornire un [indice delle immagini disponibili per un container](https://github.com/opencontainers/image-spec/blob/master/image-index.md). +L'indice di un'immagine può puntare a più [file manifest](https://github.com/opencontainers/image-spec/blob/master/manifest.md) ciascuno per una versione specifica dell'architettura di un container. +L'idea è che si può avere un unico nome per una stessa immagine (ad esempio: `pause`, `example/mycontainer`, `kube-apiserver`) e permettere a diversi sistemi di recuperare l'immagine binaria corretta a seconda dell'architettura della macchina che la sta utilizzando. + + +Kubernetes stesso tipicamente nomina le immagini dei container tramite il suffisso `-$(ARCH)`. +Per la garantire la retrocompatibilità è meglio generare le vecchie immagini con dei suffissi. +L'idea è quella di generare, ad esempio, l'immagine `pause` con un manifest che include tutte le architetture supportate, +affiancata, ad esempio, da `pause-amd64` che è retrocompatibile per le vecchie configurazioni o per quei file YAML +in cui sono specificate le immagini con i suffissi. + +## Utilizzare un private registry + +I private registry possono richiedere l'utilizzo di chiavi per accedere alle immagini in essi contenute. +Le credenziali possono essere fornite in molti modi: + - configurando i nodi in modo tale da autenticarsi al private registry + - tutti i pod possono acquisire informazioni da qualsiasi private registry configurato + - è necessario che l'amministratore del cluster configuri i nodi in tal senso + - tramite pre-pulled images (immagini pre-caricate sui nodi) + - tutti i pod possono accedere alle immagini salvate sulla cache del nodo a cui si riferiscono + - è necessario effettuare l'accesso come root di sistema su ogni nodo per inserire questa impostazione + - specificando _ImagePullSecrets_ su un determinato pod + - solo i pod che forniscono le proprie chiavi hanno la possibilità di accedere al private registry + - tramite estensioni locali o specifiche di un _Vendor_ + - se si sta utilizzando una configurazione personalizzata del nodo oppure se manualmente, o tramite il _cloud provider_, + si implementa un meccanismo di autenticazione del nodo presso il _container registry_. + +Di seguito la spiegazione dettagliata di queste opzioni. + +### Configurazione dei nodi per l'autenticazione ad un private registry + +Se si sta utilizzando Docker sui nodi, si può configurare il _Docker container runtime_ +per autenticare il nodo presso un private container registry. + +Questo è un approccio possibile se si ha il controllo sulle configurazioni del nodo. + +{{< note >}} +Kubernetes di default supporta solo le sezioni `auths` e `HttpHeaders` nelle configurazioni relative a Docker. +Eventuali _helper_ per le credenziali di Docker (`credHelpers` o `credsStore`) non sono supportati. +{{< /note >}} + + +Docker salva le chiavi per i registri privati in `$HOME/.dockercfg` oppure nel file `$HOME/.docker/config.json`. +Inserendo lo stesso file nella lista seguente, kubelet lo utilizzerà per recuperare le credenziali quando deve fare il _pull_ delle immagini. 
+ +* `{--root-dir:-/var/lib/kubelet}/config.json` +* `{cwd of kubelet}/config.json` +* `${HOME}/.docker/config.json` +* `/.docker/config.json` +* `{--root-dir:-/var/lib/kubelet}/.dockercfg` +* `{cwd of kubelet}/.dockercfg` +* `${HOME}/.dockercfg` +* `/.dockercfg` + +{{< note >}} +Potrebbe essere necessario impostare `HOME=/root` esplicitamente come variabile d'ambiente del processo _kubelet_. +{{< /note >}} + +Di seguito i passi consigliati per configurare l'utilizzo di un private registry da parte dei nodi del _cluster_. +In questo esempio, eseguire i seguenti comandi sul proprio desktop/laptop: + + 1. Esegui `docker login [server]` per ogni _set_ di credenziali che vuoi utilizzare. Questo comando aggiornerà `$HOME/.docker/config.json` sul tuo PC. + 1. Controlla il file `$HOME/.docker/config.json` in un editor di testo per assicurarti che contenga le credenziali che tu voglia utilizzare. + 1. Recupera la lista dei tuoi nodi; ad esempio: + - se vuoi utilizzare i nomi: `nodes=$( kubectl get nodes -o jsonpath='{range.items[*].metadata}{.name} {end}' )` + - se vuoi recuperare gli indirizzi IP: `nodes=$( kubectl get nodes -o jsonpath='{range .items[*].status.addresses[?(@.type=="ExternalIP")]}{.address} {end}' )` + 1. Copia il tuo file locale `.docker/config.json` in uno dei path sopra riportati nella lista di ricerca. + - ad esempio, per testare il tutto: `for n in $nodes; do scp ~/.docker/config.json root@"$n":/var/lib/kubelet/config.json; done` + +{{< note >}} +Per i cluster di produzione, utilizza un configuration management tool per poter applicare le impostazioni su tutti i nodi laddove necessario. +{{< /note >}} + +Puoi fare una verifica creando un Pod che faccia uso di un'immagine privata; ad esempio: + +```shell +kubectl apply -f - <}} +Questo approccio è possibile se si ha il controllo sulla configurazione del nodo. +Non funzionerà qualora il cloud provider gestisca i nodi e li sostituisca automaticamente. +{{< /note >}} + +Kubelet di default prova a fare il pull di ogni immagine dal registry specificato. +Tuttavia, qualora la proprietà `imagePullPolicy` (specifica di pull dell'immagine) del container sia impostata su `IfNotPresent` (vale a dire, se non è già presente) oppure su `Never` (mai), +allora l'immagine locale è utilizzata (in via preferenziale o esclusiva, rispettivamente). + +Se si vuole fare affidamento a immagini pre-scaricate per non dover incorrere in una fase di autenticazione presso il registry, +bisogna assicurarsi che tutti i nodi nel cluster abbiano scaricato le stesse versioni delle immagini. + +Questa procedura può essere utilizzata per accelerare il processo di creazione delle istanze o come alternativa all'autenticazione presso un private registry. + +Tutti i pod avranno accesso in lettura a qualsiasi immagine pre-scaricata. + +### Specificare la proprietà imagePullSecrets su un Pod + +{{< note >}} +Questo approccio è quello consigliato per l'avvio di container a partire da immagini presenti in registri privati. +{{< /note >}} + +Kubernetes da la possibilità di specificare le chiavi del _container registry_ su un Pod. 
+ +#### Creare un Secret tramite Docker config + +Esegui il comando seguente, sostituendo i valori riportati in maiuscolo con quelli corretti: + +```shell +kubectl create secret docker-registry --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL +``` + +Se possiedi il file delle credenziali per Docker, anziché utilizzare il comando quì sopra +puoi importare il file di credenziali come un Kubernetes +{{< glossary_tooltip text="Secrets" term_id="secret" >}}. +[Creare un Secret a partire da credenziali Docker](/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials) fornisce la spiegazione dettagliata su come fare. + +Ciò è particolarmente utile se si utilizzano più _container registry_ privati, +in quanto il comando `kubectl create secret docker-registry` genera un Secret che +funziona con un solo private registry. + +{{< note >}} +I Pod possono fare riferimento ai Secret per il pull delle immagini soltanto nel proprio _namespace_, +quindi questo procedimento deve essere svolto per ogni _namespace_. +{{< /note >}} + +#### Fare riferimento ad imagePullSecrets in un Pod + +È possibile creare pod che referenzino quel Secret aggiungendo la sezione `imagePullSecrets` alla definizione del Pod. + +Ad esempio: + +```shell +cat < pod.yaml +apiVersion: v1 +kind: Pod +metadata: + name: foo + namespace: awesomeapps +spec: + containers: + - name: foo + image: janedoe/awesomeapp:v1 + imagePullSecrets: + - name: myregistrykey +EOF + +cat <> ./kustomization.yaml +resources: +- pod.yaml +EOF +``` + +Questo deve esser fatto per ogni Pod che utilizzi un private registry. + +Comunque, le impostazioni relative a questo campo possono essere automatizzate inserendo la sezione _imagePullSecrets_ +nella definizione della risorsa [ServiceAccount](/docs/tasks/configure-pod-container/configure-service-account/). + +Visitare la pagina [Aggiungere ImagePullSecrets ad un Service Account](/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account) per istruzioni più dettagliate. + +Puoi utilizzarlo in congiunzione al file `.docker/config.json` configurato per ogni nodo. In questo caso, si applicherà un _merge_ delle credenziali. + +## Casi d'uso + +Ci sono varie soluzioni per configurare i private registry. Di seguito, alcuni casi d'uso comuni e le soluzioni suggerite. + +1. Cluster in cui sono utilizzate soltanto immagini non proprietarie (ovvero _open-source_). In questo caso non sussiste il bisogno di nascondere le immagini. + - Utilizza immagini pubbliche da Docker hub. + - Nessuna configurazione richiesta. + - Alcuni _cloud provider_ mettono in _cache_ o effettuano il _mirror_ di immagini pubbliche, il che migliora la disponibilità delle immagini e ne riduce il tempo di _pull_. +1. Cluster con container avviati a partire da immagini proprietarie che dovrebbero essere nascoste a chi è esterno all'organizzazione, ma + visibili a tutti gli utenti abilitati nel cluster. + - Utilizza un private [Docker registry](https://docs.docker.com/registry/). + - Esso può essere ospitato da [Docker Hub](https://hub.docker.com/signup), o da qualche altra piattaforma. + - Configura manualmente il file .docker/config.json su ogni nodo come descritto sopra. + - Oppure, avvia un private registry dietro il tuo firewall con accesso in lettura libero. + - Non è necessaria alcuna configurazione di Kubernetes. 
+ - Utilizza un servizio di _container registry_ che controlli l'accesso alle immagini + - Esso funzionerà meglio con una configurazione del cluster basata su _autoscaling_ che con una configurazione manuale del nodo. + - Oppure, su un cluster dove la modifica delle configurazioni del nodo non è conveniente, utilizza `imagePullSecrets`. +1. Cluster con immagini proprietarie, alcune delle quali richiedono un controllo sugli accessi. + - Assicurati che l'_admission controller_ [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) sia attivo. Altrimenti, tutti i Pod potenzialmente possono avere accesso a tutte le immagini. + - Sposta i dati sensibili un un _Secret_, invece di inserirli in un'immagine. +1. Un cluster multi-tenant dove ogni tenant necessiti di un private registry. + - Assicurati che l'_admission controller_ [AlwaysPullImages](/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) sia attivo. Altrimenti, tutti i Pod di tutti i tenant potrebbero potenzialmente avere accesso a tutte le immagini. + - Avvia un private registry che richieda un'autorizzazione all'accesso. + - Genera delle credenziali di registry per ogni tenant, inseriscile in dei _Secret_, e popola i _Secret_ per ogni _namespace_ relativo ad ognuno dei tenant. + - Il singolo tenant aggiunge così quel _Secret_ all'impostazione _imagePullSecrets_ di ogni _namespace_. + + +Se si ha la necessità di accedere a più registri, si può generare un _Secret_ per ognuno di essi. +Kubelet farà il _merge_ di ogni `imagePullSecrets` in un singolo file virtuale `.docker/config.json`. + +## {{% heading "whatsnext" %}} + +* Leggi [OCI Image Manifest Specification](https://github.com/opencontainers/image-spec/blob/master/manifest.md) \ No newline at end of file diff --git a/content/ja/docs/concepts/configuration/pod-priority-preemption.md b/content/ja/docs/concepts/configuration/pod-priority-preemption.md new file mode 100644 index 0000000000000..c06b3b41b0a0b --- /dev/null +++ b/content/ja/docs/concepts/configuration/pod-priority-preemption.md @@ -0,0 +1,240 @@ +--- +title: Podの優先度とプリエンプション +content_type: concept +weight: 70 +--- + + + +{{< feature-state for_k8s_version="v1.14" state="stable" >}} + +[Pod](/ja/docs/concepts/workloads/pods/)は _priority_(優先度)を持つことができます。 +優先度は他のPodに対する相対的なPodの重要度を示します。 +もしPodをスケジューリングできないときには、スケジューラーはそのPodをスケジューリングできるようにするため、優先度の低いPodをプリエンプトする(追い出す)ことを試みます。 + + + + + + +{{< warning >}} +クラスターの全てのユーザーが信用されていない場合、悪意のあるユーザーが可能な範囲で最も高い優先度のPodを作成することが可能です。これは他のPodが追い出されたりスケジューリングできない状態を招きます。 +管理者はResourceQuotaを使用して、ユーザーがPodを高い優先度で作成することを防ぐことができます。 + +詳細は[デフォルトで優先度クラスの消費を制限する](/ja/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) +を参照してください。 +{{< /warning >}} + +## 優先度とプリエンプションを使う方法 + +優先度とプリエンプションを使うには、 + +1. 1つまたは複数の[PriorityClass](#priorityclass)を追加します + +1. 
追加したPriorityClassを[`priorityClassName`](#pod-priority)に設定したPodを作成します。 + もちろんPodを直接作る必要はありません。 + 一般的には`priorityClassName`をDeploymentのようなコレクションオブジェクトのPodテンプレートに追加します。 + +これらの手順のより詳しい情報については、この先を読み進めてください。 + +{{< note >}} +Kubernetesには最初から既に2つのPriorityClassが設定された状態になっています。 +`system-cluster-critical`と`system-node-critical`です。 +これらは汎用のクラスであり、[重要なコンポーネントが常に最初にスケジュールされることを保証する](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/)ために使われます。 +{{< /note >}} + +## PriorityClass + +PriorityClassはnamespaceによらないオブジェクトで、優先度クラスの名称から優先度を表す整数値への対応を定義します。 +PriorityClassオブジェクトのメタデータの`name`フィールドにて名称を指定します。 +値は`value`フィールドで指定し、必須です。 +値が大きいほど、高い優先度を示します。 +PriorityClassオブジェクトの名称は[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)として適切であり、かつ`system-`から始まってはいけません。 + +PriorityClassオブジェクトは10億以下の任意の32ビットの整数値を持つことができます。 +それよりも大きな値は通常はプリエンプトや追い出すべきではない重要なシステム用のPodのために予約されています。 +クラスターの管理者は割り当てたい優先度に対して、PriorityClassオブジェクトを1つずつ作成すべきです。 + +PriorityClassは任意でフィールド`globalDefault`と`description`を設定可能です。 +`globalDefault`フィールドは`priorityClassName`が指定されないPodはこのPriorityClassを使うべきであることを示します。`globalDefault`がtrueに設定されたPriorityClassはシステムで一つのみ存在可能です。`globalDefault`が設定されたPriorityClassが存在しない場合は、`priorityClassName`が設定されていないPodの優先度は0に設定されます。 + +`description`フィールドは任意の文字列です。クラスターの利用者に対して、PriorityClassをどのような時に使うべきか示すことを意図しています。 + +### PodPriorityと既存のクラスターに関する注意 + +- もし既存のクラスターをこの機能がない状態でアップグレードすると、既存のPodの優先度は実質的に0になります。 + +- `globalDefault`が`true`に設定されたPriorityClassを追加しても、既存のPodの優先度は変わりません。PriorityClassのそのような値は、PriorityClassが追加された以後に作成されたPodのみに適用されます。 + +- PriorityClassを削除した場合、削除されたPriorityClassの名前を使用する既存のPodは変更されませんが、削除されたPriorityClassの名前を使うPodをそれ以上作成することはできなくなります。 + +### PriorityClassの例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "この優先度クラスはXYZサービスのPodに対してのみ使用すべきです。" +``` + +## 非プリエンプトのPriorityClass {#non-preempting-priority-class} + +{{< feature-state for_k8s_version="v1.19" state="beta" >}} + +`PreemptionPolicy: Never`と設定されたPodは、スケジューリングのキューにおいて他の優先度の低いPodよりも優先されますが、他のPodをプリエンプトすることはありません。 +スケジューリングされるのを待つ非プリエンプトのPodは、リソースが十分に利用可能になるまでスケジューリングキューに残ります。 +非プリエンプトのPodは、他のPodと同様に、スケジューラーのバックオフの対象になります。これは、スケジューラーがPodをスケジューリングしようと試みたものの失敗した場合、低い頻度で再試行するようにして、より優先度の低いPodが先にスケジューリングされることを許します。 + +非プリエンプトのPodは、他の優先度の高いPodにプリエンプトされる可能性はあります。 + +`PreemptionPolicy`はデフォルトでは`PreemptLowerPriority`に設定されており、これが設定されているPodは優先度の低いPodをプリエンプトすることを許容します。これは既存のデフォルトの挙動です。 +`PreemptionPolicy`を`Never`に設定すると、これが設定されたPodはプリエンプトを行わないようになります。 + +ユースケースの例として、データサイエンスの処理を挙げます。 +ユーザーは他の処理よりも優先度を高くしたいジョブを追加できますが、そのとき既存の実行中のPodの処理結果をプリエンプトによって破棄させたくはありません。 +`PreemptionPolicy: Never`が設定された優先度の高いジョブは、他の既にキューイングされたPodよりも先に、クラスターのリソースが「自然に」開放されたときにスケジューリングされます。 + +### 非プリエンプトのPriorityClassの例 + +```yaml +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority-nonpreempting +value: 1000000 +preemptionPolicy: Never +globalDefault: false +description: "この優先度クラスは他のPodをプリエンプトさせません。" +``` + +## Podの優先度 {#pod-priority} + +一つ以上のPriorityClassがあれば、仕様にPriorityClassを指定したPodを作成することができるようになります。優先度のアドミッションコントローラーは`priorityClassName`フィールドを使用し、優先度の整数値を設定します。PriorityClassが見つからない場合、そのPodの作成は拒否されます。 + +下記のYAMLは上記の例で作成したPriorityClassを使用するPodの設定の例を示します。優先度のアドミッションコントローラーは仕様を確認し、このPodの優先度は1000000であると設定します。 + +```yaml +apiVersion: v1 +kind: Pod +metadata: + name: nginx + labels: + env: test +spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + priorityClassName: high-priority +``` 
+ +### スケジューリング順序におけるPodの優先度の効果 + +Podの優先度が有効な場合、スケジューラーは待機状態のPodをそれらの優先度順に並べ、スケジューリングキューにおいてより優先度の低いPodよりも前に来るようにします。その結果、その条件を満たしたときには優先度の高いPodは優先度の低いPodより早くスケジューリングされます。優先度の高いPodがスケジューリングできない場合は、スケジューラーは他の優先度の低いPodのスケジューリングも試みます。 + +## プリエンプション + +Podが作成されると、スケジューリング待ちのキューに入り待機状態になります。スケジューラーはキューからPodを取り出し、ノードへのスケジューリングを試みます。Podに指定された条件を全て満たすノードが見つからない場合は、待機状態のPodのためにプリエンプションロジックが発動します。待機状態のPodをPと呼ぶことにしましょう。プリエンプションロジックはPよりも優先度の低いPodを一つ以上追い出せばPをスケジューリングできるようになるノードを探します。そのようなノードがあれば、優先度の低いPodはノードから追い出されます。Podが追い出された後に、Pはノードへスケジューリング可能になります。 + +### ユーザーへ開示される情報 + +Pod PがノードNのPodをプリエンプトした場合、ノードNの名称がPのステータスの`nominatedNodeName`フィールドに設定されます。このフィールドはスケジューラーがPod Pのために予約しているリソースの追跡を助け、ユーザーにクラスターにおけるプリエンプトに関する情報を与えます。 + +Pod Pは必ずしも「指名したノード」へスケジューリングされないことに注意してください。Podがプリエンプトされると、そのPodは終了までの猶予期間を得ます。スケジューラーがPodの終了を待つ間に他のノードが利用可能になると、スケジューラーは他のノードをPod Pのスケジューリング先にします。この結果、Podの`nominatedNodeName`と`nodeName`は必ずしも一致しません。また、スケジューラーがノードNのPodをプリエンプトさせた後に、Pod Pよりも優先度の高いPodが来た場合、スケジューラーはノードNをその新しい優先度の高いPodへ与えます。このような場合は、スケジューラーはPod Pの`nominatedNodeName`を消去します。これによって、スケジューラーはPod Pが他のノードのPodをプリエンプトさせられるようにします。 + +### プリエンプトの制限 + +#### プリエンプトされるPodの正常終了 + +Podがプリエンプトされると、[猶予期間](/ja/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)が与えられます。 + +Podは作業を完了し、終了するために十分な時間が与えられます。仮にそうでない場合、強制終了されます。この猶予期間によって、スケジューラーがPodをプリエンプトした時刻と、待機状態のPod Pがノード Nにスケジュール可能になるまでの時刻の間に間が開きます。この間、スケジューラーは他の待機状態のPodをスケジュールしようと試みます。プリエンプトされたPodが終了したら、スケジューラーは待ち行列にあるPodをスケジューリングしようと試みます。そのため、Podがプリエンプトされる時刻と、Pがスケジュールされた時刻には間が開くことが一般的です。この間を最小にするには、優先度の低いPodの猶予期間を0または小さい値にする方法があります。 + +#### PodDisruptionBudgetは対応するが、保証されない + +[PodDisruptionBudget](/docs/concepts/workloads/pods/disruptions/) (PDB)は、アプリケーションのオーナーが冗長化されたアプリケーションのPodが意図的に中断される数の上限を設定できるようにするものです。KubernetesはPodをプリエンプトする際にPDBに対応しますが、PDBはベストエフォートで考慮します。スケジューラーはプリエンプトさせたとしてもPDBに違反しないPodを探します。そのようなPodが見つからない場合でもプリエンプションは実行され、PDBに反しますが優先度の低いPodが追い出されます。 + +#### 優先度の低いPodにおけるPod間のアフィニティ + +次の条件が真の場合のみ、ノードはプリエンプションの候補に入ります。 +「待機状態のPodよりも優先度の低いPodをノードから全て追い出したら、待機状態のPodをノードへスケジュールできるか」 + +{{< note >}} +プリエンプションは必ずしも優先度の低いPodを全て追い出しません。 +優先度の低いPodを全て追い出さなくても待機状態のPodがスケジューリングできる場合、一部のPodのみ追い出されます。 +このような場合であったとしても、上記の条件は真である必要があります。偽であれば、そのノードはプリエンプションの対象とはされません。 +{{< /note >}} + +待機状態のPodが、優先度の低いPodとの間でPod間のアフィニティを持つ場合、Pod間のアフィニティはそれらの優先度の低いPodがなければ満たされません。この場合、スケジューラーはノードのどのPodもプリエンプトしようとはせず、代わりに他のノードを探します。スケジューラーは適切なノードを探せる場合と探せない場合があります。この場合、待機状態のPodがスケジューリングされる保証はありません。 + +この問題に対して推奨される解決策は、優先度が同一または高いPodに対してのみPod間のアフィニティを作成することです。 + +#### 複数ノードに対するプリエンプション + +Pod PがノードNにスケジューリングできるよう、ノードNがプリエンプションの対象となったとします。 +他のノードのPodがプリエンプトされた場合のみPが実行可能になることもあります。下記に例を示します。 + +* Pod PをノードNに配置することを検討します。 +* Pod QはノードNと同じゾーンにある別のノードで実行中です。 +* Pod Pはゾーンに対するQへのアンチアフィニティを持ちます (`topologyKey: topology.kubernetes.io/zone`)。 +* Pod Pと、ゾーン内の他のPodに対しては他のアンチアフィニティはない状態です。 +* Pod PをノードNへスケジューリングするには、Pod Qをプリエンプトすることが考えられますが、スケジューラーは複数ノードにわたるプリエンプションは行いません。そのため、Pod PはノードNへはスケジューリングできないとみなされます。 + +Pod Qがそのノードから追い出されると、Podアンチアフィニティに違反しなくなるので、Pod PはノードNへスケジューリング可能になります。 + +複数ノードに対するプリエンプションに関しては、十分な需要があり、合理的な性能を持つアルゴリズムを見つけられた場合に、追加することを検討する可能性があります。 + +## トラブルシューティング + +Podの優先度とプリエンプションは望まない副作用をもたらす可能性があります。 +いくつかの起こりうる問題と、その対策について示します。 + +### Podが不必要にプリエンプトされる + +プリエンプションは、リソースが不足している場合に優先度の高い待機状態のPodのためにクラスターの既存のPodを追い出します。 +誤って高い優先度をPodに割り当てると、意図しない高い優先度のPodはクラスター内でプリエンプションを引き起こす可能性があります。Podの優先度はPodの仕様の`priorityClassName`フィールドにて指定されます。優先度を示す整数値へと変換された後、`podSpec`の`priority`へ設定されます。 + 
+この問題に対処するには、Podの`priorityClassName`をより低い優先度に変更するか、このフィールドを未設定にすることができます。`priorityClassName`が未設定の場合、デフォルトでは優先度は0とされます。 + +Podがプリエンプトされたとき、プリエンプトされたPodのイベントが記録されます。 +プリエンプションはPodに必要なリソースがクラスターにない場合のみ起こるべきです。 +このような場合、プリエンプションはプリエンプトされるPodよりも待機状態のPodの優先度が高い場合のみ発生します。 +プリエンプションは待機状態のPodがない場合や待機状態のPodがプリエンプト対象のPod以下の優先度を持つ場合には決して発生しません。そのような状況でプリエンプションが発生した場合、問題を報告してください。 + +### Podはプリエンプトされたが、プリエンプトさせたPodがスケジューリングされない + +Podがプリエンプトされると、それらのPodが要求した猶予期間が与えられます。そのデフォルトは30秒です。 +Podがその期間内に終了しない場合、強制終了されます。プリエンプトされたPodがなくなれば、プリエンプトさせたPodはスケジューリング可能です。 + +プリエンプトさせたPodがプリエンプトされたPodの終了を待っている間に、より優先度の高いPodが同じノードに対して作成されることもあります。この場合、スケジューラーはプリエンプトさせたPodの代わりに優先度の高いPodをスケジューリングします。 + +これは予期された挙動です。優先度の高いPodは優先度の低いPodに取って代わります。 + +### 優先度の高いPodが優先度の低いPodより先にプリエンプトされる + +スケジューラーは待機状態のPodが実行可能なノードを探します。ノードが見つからない場合、スケジューラーは任意のノードから優先度の低いPodを追い出し、待機状態のPodのためのリソースを確保しようとします。 +仮に優先度の低いPodが動いているノードが待機状態のPodを動かすために適切ではない場合、スケジューラーは他のノードで動いているPodと比べると、優先度の高いPodが動いているノードをプリエンプションの対象に選ぶことがあります。この場合もプリエンプトされるPodはプリエンプトを起こしたPodよりも優先度が低い必要があります。 + +複数のノードがプリエンプションの対象にできる場合、スケジューラーは優先度が最も低いPodのあるノードを選ぼうとします。しかし、そのようなPodがPodDisruptionBudgetを持っており、プリエンプトするとPDBに反する場合はスケジューラーは優先度の高いPodのあるノードを選ぶこともあります。 + +複数のノードがプリエンプションの対象として利用可能で、上記の状況に当てはまらない場合、スケジューラーは優先度の最も低いノードを選択します。 + +## Podの優先度とQoSの相互作用 {#interactions-of-pod-priority-and-qos} + +Podの優先度と{{< glossary_tooltip text="QoSクラス" term_id="qos-class" >}}は直交する機能で、わずかに相互作用がありますが、デフォルトではQoSクラスによる優先度の設定の制約はありません。スケジューラーのプリエンプションのロジックはプリエンプションの対象を決めるときにQoSクラスは考慮しません。 +プリエンプションはPodの優先度を考慮し、優先度が最も低いものを候補とします。より優先度の高いPodは優先度の低いPodを追い出すだけではプリエンプトを起こしたPodのスケジューリングに不十分な場合と、`PodDisruptionBudget`により優先度の低いPodが保護されている場合のみ対象になります。 + +QoSとPodの優先度の両方を考慮するコンポーネントは[リソース不足によりkubeletがPodを追い出す](/docs/tasks/administer-cluster/out-of-resource/)のみです。 +kubeletは追い出すPodの順位付けを次の順で行います。枯渇したリソースを要求以上に使用しているか、優先度、枯渇したリソースの消費量の複数のPodの要求に対する相対値。 +詳細は[エンドユーザーのPodの追い出し](/docs/tasks/administer-cluster/out-of-resource/#evicting-end-user-pods)を参照してください。 + + +kubeletによるリソース不足時のPodの追い出しでは、リソースの消費が要求を超えないPodは追い出されません。優先度の低いPodのリソースの利用量がその要求を超えていなければ、追い出されることはありません。より優先度が高く、要求を超えてリソースを使用しているPodが追い出されます。 + + +## {{% heading "whatsnext" %}} + +* PriorityClassと関連付けてResourceQuotaを使用することに関して [デフォルトで優先度クラスの消費を制限する](/ja/docs/concepts/policy/resource-quotas/#limit-priority-class-consumption-by-default) diff --git a/content/ja/docs/concepts/containers/_index.md b/content/ja/docs/concepts/containers/_index.md index 5b10416c0f83c..fd3506ea40658 100755 --- a/content/ja/docs/concepts/containers/_index.md +++ b/content/ja/docs/concepts/containers/_index.md @@ -21,7 +21,7 @@ no_list: true ## コンテナイメージ [コンテナイメージ](/docs/concepts/containers/images/)はすぐに実行可能なソフトウェアパッケージで、アプリケーションの実行に必要なものをすべて含んています。コードと必要なランタイム、アプリケーションとシステムのライブラリ、そして必須な設定項目のデフォルト値を含みます。 -設計上、コンテナは不変で、既に実行中のコンテナのコードを変更することはできません。コンテナ化されたアプリケーションがあり変更したい場合は、変更を含んだ新しいコンテナをビルドし、コンテナを再作成して、更新されたイメージから起動する必要があります。 +設計上、コンテナは不変で、既に実行中のコンテナのコードを変更することはできません。コンテナ化されたアプリケーションがあり変更したい場合は、変更を含んだ新しいイメージをビルドし、コンテナを再作成して、更新されたイメージから起動する必要があります。 ## コンテナランタイム diff --git a/content/ja/docs/concepts/policy/resource-quotas.md b/content/ja/docs/concepts/policy/resource-quotas.md index 7b00056fcf564..e9381ce89de71 100644 --- a/content/ja/docs/concepts/policy/resource-quotas.md +++ b/content/ja/docs/concepts/policy/resource-quotas.md @@ -484,7 +484,7 @@ count/secrets 1 4 リソースクォータは集約されたクラスターリソースを分割しますが、ノードに対しては何の制限も行わないことに注意して下さい。例: 複数の名前空間のPodは同一のノード上で稼働する可能性があります。 -## デフォルトで優先度クラスの消費を制限する +## デフォルトで優先度クラスの消費を制限する {#limit-priority-class-consumption-by-default} 
例えば"cluster-services"のように、条件に一致するクォータオブジェクトが存在する場合に限り、特定の優先度のPodを名前空間で許可することが望ましい場合があります。 diff --git a/content/ja/docs/contribute/new-content/_index.md b/content/ja/docs/contribute/new-content/_index.md new file mode 100644 index 0000000000000..f9cb2e1301495 --- /dev/null +++ b/content/ja/docs/contribute/new-content/_index.md @@ -0,0 +1,4 @@ +--- +title: 新しいコンテンツの貢献 +weight: 20 +--- diff --git a/content/ja/docs/contribute/new-content/overview.md b/content/ja/docs/contribute/new-content/overview.md new file mode 100644 index 0000000000000..e3db3744e92a2 --- /dev/null +++ b/content/ja/docs/contribute/new-content/overview.md @@ -0,0 +1,54 @@ +--- +title: 新しいコンテンツの貢献の概要 +linktitle: 概要 +content_type: concept +main_menu: true +weight: 5 +--- + + + +このセクションでは、新しいコンテンツの貢献を行う前に知っておくべき情報を説明します。 + + + +## 貢献の基本 + +- KubernetesのドキュメントはMarkdownで書き、Kubernetesのウェブサイトは[Hugo](https://gohugo.io/)を使ってビルドします。 +- ソースは[GitHub](https://github.com/kubernetes/website)にあります。Kubernetesのドキュメントは`/content/en/docs/`にあります。リファレンスドキュメントの一部は、`update-imported-docs/`ディレクトリ内のスクリプトから自動的に生成されます。 +- [Page content types](/docs/contribute/style/page-content-types/)にHugoによるドキュメントのコンテンツの見え方を記述しています。 +- 標準のHugoのshortcodeに加えて、多数の[カスタムのHugo shortcode](/docs/contribute/style/hugo-shortcodes/)を使用してコンテンツの見え方をコントロールしています。 +- ドキュメントのソースは`/content/`内にある複数の言語で利用できます。各言語はそれぞれ[ISO 639-1標準](https://www.loc.gov/standards/iso639-2/php/code_list.php)で定義された2文字のコードの名前のフォルダを持ちます。たとえば、英語のドキュメントのソースは`/content/en/docs/`内に置かれています。 +- 複数言語でのドキュメントへの貢献や新しい翻訳の開始に関する情報については、[Kubernetesのドキュメントを翻訳する](/docs/contribute/localization)を参照してください。 + +## 始める前に {#before-you-begin} + +### CNCF CLAに署名する {#sign-the-cla} + +すべてのKubernetesのコントリビューターは、[コントリビューターガイド](https://github.com/kubernetes/community/blob/master/contributors/guide/README.md)を読み、[Contributor License Agreement(コントリビューターライセンス契約、CLA)への署名](https://github.com/kubernetes/community/blob/master/CLA.md)を**必ず行わなければなりません**。 + +CLAへの署名が完了していないコントリビューターからのpull requestは、自動化されたテストで失敗します。名前とメールアドレスは`git config`コマンドで表示されるものに一致し、gitの名前とメールアドレスはCNCF CLAで使われたものに一致しなければなりません。 + +### どのGitブランチを使用するかを選ぶ + +pull requestをオープンするときは、どのブランチをベースにして作業するかをあらかじめ知っておく必要があります。 + +シナリオ | ブランチ +:---------|:------------ +現在のリリースに対する既存または新しい英語のコンテンツ | `master` +機能変更のリリースに対するコンテンツ | 機能変更が含まれるメジャーおよびマイナーバージョンに対応する、`dev-`というパターンのブランチを使います。たとえば、機能変更が`v{{< skew nextMinorVersion >}}`に含まれる場合、ドキュメントの変更は``dev-{{< skew nextMinorVersion >}}``ブランチに追加します。 +他の言語内のコンテンツ(翻訳) | 各翻訳対象の言語のルールに従います。詳しい情報は、[翻訳のブランチ戦略](/docs/contribute/localization/#branching-strategy)を読んでください。 + +それでも選ぶべきブランチがわからないときは、Slack上の`#sig-docs`チャンネルで質問してください。 + +{{< note >}} +すでにpull requestを作成していて、ベースブランチが間違っていたことに気づいた場合は、作成者であるあなただけがベースブランチを変更できます。 +{{< /note >}} + +### 言語ごとのPR + +pull requestはPRごとに1つの言語に限定してください。複数の言語に同一の変更を行う必要がある場合は、言語ごとに別々のPRを作成してください。 + +## コントリビューターのためのツール + +`kubernetes/website`リポジトリ内の[doc contributors tools](https://github.com/kubernetes/website/tree/master/content/en/docs/doc-contributor-tools)ディレクトリには、コントリビューターとしての旅を楽にしてくれるツールがあります。 diff --git a/content/ja/docs/reference/_index.md b/content/ja/docs/reference/_index.md index 0496a730c441e..aca4c278b5a36 100644 --- a/content/ja/docs/reference/_index.md +++ b/content/ja/docs/reference/_index.md @@ -16,7 +16,7 @@ content_type: concept ## APIリファレンス -* [Kubernetes API概要](/docs/reference/using-api/api-overview/) - Kubernetes APIの概要です。 +* [Kubernetes API概要](/docs/reference/using-api/) - Kubernetes APIの概要です。 * [Kubernetes APIリファレンス {{< latest-version >}}](/docs/reference/generated/kubernetes-api/{{< latest-version 
>}}/) ## APIクライアントライブラリー @@ -30,9 +30,9 @@ content_type: concept ## CLIリファレンス -* [kubectl](/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。 +* [kubectl](/ja/docs/reference/kubectl/overview/) - コマンドの実行やKubernetesクラスターの管理に使う主要なCLIツールです。 * [JSONPath](/ja/docs/reference/kubectl/jsonpath/) - kubectlで[JSONPath記法](https://goessner.net/articles/JsonPath/)を使うための構文ガイドです。 -* [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。 +* [kubeadm](ja/docs/reference/setup-tools/kubeadm/) - セキュアなKubernetesクラスターを簡単にプロビジョニングするためのCLIツールです。 ## コンポーネントリファレンス diff --git a/content/ja/docs/reference/access-authn-authz/rbac.md b/content/ja/docs/reference/access-authn-authz/rbac.md index fee25f323e95b..04409b3b8ea90 100644 --- a/content/ja/docs/reference/access-authn-authz/rbac.md +++ b/content/ja/docs/reference/access-authn-authz/rbac.md @@ -43,7 +43,7 @@ ClusterRolesにはいくつかの用途があります。ClusterRoleを利用し 2. Namespaceに属するリソースに対する権限を定義し、すべてのNamespaceにわたって付与する 3. クラスター単位でスコープされているリソースに対するアクセス許可を定義する -NamespaceでRoleを定義する場合は、Roleを使用します。クラスター全体でRoleを定義する婆は、ClusterRoleを使用します +NamespaceでRoleを定義する場合は、Roleを使用します。クラスター全体でRoleを定義する場合は、ClusterRoleを使用します #### Roleの例 diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md new file mode 100644 index 0000000000000..f9572ca1f4551 --- /dev/null +++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-config-file.md @@ -0,0 +1,183 @@ +--- +title: 設定ファイルを使用してSecretを管理する +content_type: task +weight: 20 +description: リソース設定ファイルを使用してSecretを作成する +--- + + + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} + + + +## 設定ファイルを作成する + +あらかじめYAMLまたはJSON形式でSecretのマニフェストを作成したうえで、オブジェクトを作成することができます。 +[Secret](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#secret-v1-core)リソースには、`data`と`stringData`の2つのマップが含まれています。 +`data`フィールドは任意のデータを格納するのに使用され、base64でエンコードされます。 +`stringData`フィールドは利便性のために用意されており、Secretデータをエンコードされていない文字列として提供することができます。 +`data`と`stringData`のキーは、英数字、`-`、`_`、`.`で構成されている必要があります。 + +たとえば、`data`フィールドを使用して2つの文字列をSecretに格納するには、次のように文字列をbase64に変換します: + +```shell +echo -n 'admin' | base64 +``` + +出力は次のようになります: + +``` +YWRtaW4= +``` + +```shell +echo -n '1f2d1e2e67df' | base64 +``` + +出力は次のようになります: + +``` +MWYyZDFlMmU2N2Rm +``` + +以下のようなSecret設定ファイルを記述します: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= + password: MWYyZDFlMmU2N2Rm +``` + +なお、Secretオブジェクトの名前は、有効な[DNSサブドメイン名](/ja/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)である必要があります。 + +{{< note >}} +SecretデータのシリアライズされたJSONおよびYAMLの値は、base64文字列としてエンコードされます。 +文字列中の改行は不正で、含まれていてはなりません。 +Darwin/macOSで`base64`ユーティリティーを使用する場合、長い行を分割するために`-b`オプションを使用するのは避けるべきです。 +逆に、Linux ユーザーは、`base64` コマンドにオプション`-w 0`を追加するか、`-w`オプションが利用できない場合には、パイプライン`base64 | tr -d '\n'`を追加する*必要があります*。 +{{< /note >}} + +特定のシナリオでは、代わりに`stringData`フィールドを使用できます。 +このフィールドでは、base64エンコードされていない文字列を直接Secretに入れることができ、Secretの作成時や更新時には、その文字列がエンコードされます。 + +たとえば、設定ファイルを保存するためにSecretを使用しているアプリケーションをデプロイする際に、デプロイプロセス中に設定ファイルの一部を入力したい場合などが考えられます。 + +たとえば、次のような設定ファイルを使用しているアプリケーションの場合: + +```yaml +apiUrl: "https://my.api.com/api/v1" +username: "" +password: "" +``` + +次のような定義でSecretに格納できます: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +stringData: + config.yaml: | + apiUrl: "https://my.api.com/api/v1" + username: + password: +``` + +## 
Secretを作成する + +[`kubectl apply`](/docs/reference/generated/kubectl/kubectl-commands#apply)でSecretを作成します: + +```shell +kubectl apply -f ./secret.yaml +``` + +出力は次のようになります: + +``` +secret/mysecret created +``` + +## Secretを確認する + +`stringData`フィールドは、書き込み専用の便利なフィールドです。Secretを取得する際には決して出力されません。たとえば、次のようなコマンドを実行した場合: + +```shell +kubectl get secret mysecret -o yaml +``` + +出力は次のようになります: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:40:59Z + name: mysecret + namespace: default + resourceVersion: "7225" + uid: c280ad2e-e916-11e8-98f2-025000000001 +type: Opaque +data: + config.yaml: YXBpVXJsOiAiaHR0cHM6Ly9teS5hcGkuY29tL2FwaS92MSIKdXNlcm5hbWU6IHt7dXNlcm5hbWV9fQpwYXNzd29yZDoge3twYXNzd29yZH19 +``` + +`kubectl get`と`kubectl describe`コマンドはデフォルトではSecretの内容を表示しません。 +これは、Secretが不用意に他人にさらされたり、ターミナルログに保存されたりしないようにするためです。 +エンコードされたデータの実際の内容を確認するには、[Secretのデコード](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/#decoding-secret)を参照してください。 + +`username`などのフィールドが`data`と`stringData`の両方に指定されている場合は、`stringData`の値が使われます。 +たとえば、以下のようなSecretの定義の場合: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: mysecret +type: Opaque +data: + username: YWRtaW4= +stringData: + username: administrator +``` + +結果は以下の通りです: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + creationTimestamp: 2018-11-15T20:46:46Z + name: mysecret + namespace: default + resourceVersion: "7579" + uid: 91460ecb-e917-11e8-98f2-025000000001 +type: Opaque +data: + username: YWRtaW5pc3RyYXRvcg== +``` + +`YWRtaW5pc3RyYXRvcg==`をデコードすると`administrator`となります。 + +## クリーンアップ + +作成したSecretを削除するには次のコマンドを実行します: + +```shell +kubectl delete secret mysecret +``` + +## {{% heading "whatsnext" %}} + +- [Secretのコンセプト](/ja/docs/concepts/configuration/secret/)を読む +- [kubectlを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl/)方法を知る +- [kustomizeを使用してSecretを管理する](/docs/tasks/configmap-secret/managing-secret-using-kustomize/)方法を知る diff --git a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md index fb8c89c1e3580..9e498de8acbba 100644 --- a/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md +++ b/content/ja/docs/tasks/configmap-secret/managing-secret-using-kubectl.md @@ -142,5 +142,5 @@ kubectl delete secret db-user-pass ## {{% heading "whatsnext" %}} - [Secretのコンセプト](/ja/docs/concepts/configuration/secret/)を読む -- [設定ファイルを使用してSecretを管理する](/docs/tasks/configmap-secret/managing-secret-using-config-file/)方法を知る +- [設定ファイルを使用してSecretを管理する](/ja/docs/tasks/configmap-secret/managing-secret-using-config-file/)方法を知る - [kustomizeを使用してSecretを管理する](/docs/tasks/configmap-secret/managing-secret-using-kustomize/)方法を知る diff --git a/content/ja/docs/tasks/job/_index.md b/content/ja/docs/tasks/job/_index.md new file mode 100644 index 0000000000000..bde073017bc07 --- /dev/null +++ b/content/ja/docs/tasks/job/_index.md @@ -0,0 +1,6 @@ +--- +title: "Jobの実行" +description: 並列処理を使用してJobを実行します。 +weight: 50 +--- + diff --git a/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md new file mode 100644 index 0000000000000..17855a66d6067 --- /dev/null +++ b/content/ja/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -0,0 +1,175 @@ +--- +title: CronJobを使用して自動化タスクを実行する +min-kubernetes-server-version: v1.21 +content_type: task +weight: 10 +--- + + + +CronJobは、Kubernetes 
v1.21で一般利用(GA)に昇格しました。古いバージョンのKubernetesを使用している場合、正確な情報を参照できるように、使用しているバージョンのKubernetesのドキュメントを参照してください。古いKubernetesのバージョンでは、`batch/v1` CronJob APIはサポートされていません。 + +{{< glossary_tooltip text="CronJob" term_id="cronjob" >}}を使用すると、{{< glossary_tooltip text="Job" term_id="job" >}}を時間ベースのスケジュールで実行できるようになります。この自動化されたJobは、LinuxまたはUNIXシステム上の[Cron](https://ja.wikipedia.org/wiki/Cron)のように実行されます。 + +CronJobは、バックアップやメールの送信など、定期的なタスクや繰り返しのタスクを作成する時に便利です。CronJobはそれぞれのタスクを、たとえばアクティビティが少ない期間など、特定の時間にスケジューリングすることもできます。 + +CronJobには制限と特性があります。たとえば、特定の状況下では、1つのCronJobが複数のJobを作成する可能性があるため、Jobは冪等性を持つようにしなければいけません。 + +制限に関する詳しい情報については、[CronJob](/ja/docs/concepts/workloads/controllers/cron-jobs/)を参照してください。 + +## {{% heading "prerequisites" %}} + +* {{< include "task-tutorial-prereqs.md" >}} + + + +## CronJobを作成する + +CronJobには設定ファイルが必要です。次の例のCronJobの`.spec`は、現在の時刻とhelloというメッセージを1分ごとに表示します。 + +{{< codenew file="application/job/cronjob.yaml" >}} + +次のコマンドで例のCronJobを実行します。 + +```shell +kubectl create -f https://k8s.io/examples/application/job/cronjob.yaml +``` + +出力は次のようになります。 + +``` +cronjob.batch/hello created +``` + +CronJobを作成したら、次のコマンドで状態を取得します。 + +```shell +kubectl get cronjob hello +``` + +出力は次のようになります。 + +``` +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +hello */1 * * * * False 0 10s +``` + +コマンドの結果からわかるように、CronJobはまだスケジュールされておらず、まだ何のJobも実行していません。約1分以内にJobが作成されるのを見てみましょう。 + +```shell +kubectl get jobs --watch +``` + +出力は次のようになります。 + +``` +NAME COMPLETIONS DURATION AGE +hello-4111706356 0/1 0s +hello-4111706356 0/1 0s 0s +hello-4111706356 1/1 5s 5s +``` + +"hello"CronJobによってスケジュールされたJobが1つ実行中になっていることがわかります。Jobを見るのをやめて、再度CronJobを表示して、Jobがスケジュールされたことを確認してみます。 + +```shell +kubectl get cronjob hello +``` + +出力は次のようになります。 + +``` +NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE +hello */1 * * * * False 0 50s 75s +``` + +CronJob`hello`が、`LAST SCHEDULE`で指定された時間にJobを正しくスケジュールしたことが確認できるはずです。現在、activeなJobの数は0です。つまり、Jobは完了または失敗したことがわかります。 + +それでは、最後にスケジュールされたJobの作成と、Podの1つの標準出力を表示してみましょう。 + +{{< note >}} +Jobの名前とPodの名前は異なります。 +{{< /note >}} + +```shell +# "hello-4111706356" の部分は、あなたのシステム上のJobの名前に置き換えてください。 +pods=$(kubectl get pods --selector=job-name=hello-4111706356 --output=jsonpath={.items[*].metadata.name}) +``` + +Podのログを表示します。 + +```shell +kubectl logs $pods +``` + +出力は次のようになります。 + +``` +Fri Feb 22 11:02:09 UTC 2019 +Hello from the Kubernetes cluster +``` + +## CronJobの削除 + +CronJobが必要なくなったときは、`kubectl delete cronjob `で削除します。 + +```shell +kubectl delete cronjob hello +``` + +CronJobを削除すると、すべてのJobと、そのJobが作成したPodが削除され、追加のJobの作成が停止されます。Jobの削除について詳しく知りたい場合は、[ガベージコレクション](/ja/docs/concepts/workloads/controllers/garbage-collection/)を読んでください。 + +## CronJobのspecを書く {#writing-a-cron-job-spec} + +すべてのKubernetesの設定と同じように、CronJobにも`apiVersion`、`kind`、`metadata`のフィールドが必要です。設定ファイルの扱い方についての一般的な情報については、[アプリケーションのデプロイ](/ja/docs/tasks/run-application/run-stateless-application-deployment/)と[kubectlを使用してリソースを管理する](/ja/docs/concepts/overview/working-with-objects/object-management/)を読んでください。 + +CronJobの設定には、[`.spec`セクション](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)も必要です。 + +{{< note >}} +CronJobの特に`spec`へのすべての修正は、それ以降の実行にのみ適用されます。 +{{< /note >}} + +### Schedule + +`.spec.schedule`は、`.spec`には必須のフィールドです。`0 * * * *`や`@hourly`などの[Cron](https://ja.wikipedia.org/wiki/Cron)形式の文字列を取り、Jobの作成と実行のスケジュール時間を指定します。 + +フォーマットにはVixie cronのステップ値(step value)も指定できます。[FreeBSDのマニュアル](https://www.freebsd.org/cgi/man.cgi?crontab%285%29)では次のように説明されています。 + +> 
ステップ値は範囲指定と組み合わせて使用できます。範囲の後ろに`/`を付けると、範囲全体で指定したnumberの値ごとにスキップすることを意味します。たとえば、`0-23/2`をhoursフィールドに指定すると、2時間毎にコマンド実行を指定することになります(V7標準では代わりに`0,2,4,6,8,10,12,14,16,18,20,22`と指定する必要があります)。ステップはアスタリスクの後ろにつけることもできます。そのため、「2時間毎に実行」したい場合は、単純に`*/2`と指定できます。 + +{{< note >}} +スケジュール内の疑問符`?`はアスタリスク`*`と同じ意味を持ちます。つまり、与えられたフィールドには任意の値が使えるという意味になります。 +{{< /note >}} + +### Job Template + +`.spec.jobTemplate`はJobのテンプレートであり、必須です。[Job](/docs/concepts/workloads/controllers/job/)と完全に同一のスキーマを持ちますが、フィールドがネストされている点と、`apiVersion`と`kind`が存在しない点だけが異なります。Jobの`.spec`を書くための情報については、[JobのSpecを書く](/docs/concepts/workloads/controllers/job/#writing-a-job-spec)を参照してください。 + +### Starting Deadline + +`.spec.startingDeadlineSeconds`フィールドはオプションです。何かの理由でスケジュールに間に合わなかった場合に適用される、Jobの開始のデッドライン(締め切り)を秒数で指定します。デッドラインを過ぎると、CronJobはJobを開始しません。この場合にデッドラインに間に合わなかったJobは、失敗したJobとしてカウントされます。もしこのフィールドが指定されなかった場合、Jobはデッドラインを持ちません。 + +`.spec.startingDeadlineSeconds`フィールドがnull以外に設定された場合、CronJobコントローラーはJobの作成が期待される時間と現在時刻との間の時間を計測します。もしその差が制限よりも大きかった場合、その実行はスキップされます。 + +たとえば、この値が`200`に設定された場合、実際のスケジュールの最大200秒後までに作成されるJobだけが許可されます。 + +### Concurrency Policy + +`.spec.concurrencyPolicy`フィールドもオプションです。このフィールドは、このCronJobで作成されたJobの並列実行をどのように扱うかを指定します。specには以下のconcurrency policyのいずれかを指定します。 + +* `Allow` (デフォルト): CronJobがJobを並列に実行することを許可します。 +* `Forbid`: CronJobの並列実行を禁止します。もし新しいJobの実行時に過去のJobがまだ完了していなかった場合、CronJobは新しいJobの実行をスキップします。 +* `Replace`: もし新しいJobの実行の時間になっても過去のJobの実行が完了していなかった場合、CronJobは現在の実行中のJobを新しいJobで置換します。 + +concurrency policyは、同じCronJobが作成したJobにのみ適用されます。もし複数のCronJobがある場合、それぞれのJobの並列実行は常に許可されます。 + +### Suspend + +`.spec.suspend`フィールドもオプションです。このフィールドを`true`に設定すると、すべての後続の実行がサスペンド(一時停止)されます。この設定はすでに実行開始したJobには適用されません。デフォルトはfalseです。 + +{{< caution >}} +スケジュールされた時間中にサスペンドされた実行は、見逃されたJob(missed job)としてカウントされます。[starting deadline](#starting-deadline)が設定されていない既存のCronJob`.spec.suspend`が`true`から`false`に変更されると、見逃されたJobは即座にスケジュールされます。 +{{< /caution >}} + +### Job History Limit + +`.spec.successfulJobsHistoryLimit`と`.spec.failedJobsHistoryLimit`フィールドはオプションです。これらのフィールドには、完了したJobと失敗したJobをいくつ保持するかを指定します。デフォルトでは、それぞれ3と1に設定されます。リミットを`0`に設定すると、対応する種類のJobを実行完了後に何も保持しなくなります。 diff --git a/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 445742e1d6c57..a395a412ba59e 100644 --- a/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/ja/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -10,7 +10,7 @@ Horizontal Pod Autoscalerは、Deployment、ReplicaSetまたはStatefulSetとい このドキュメントはphp-apacheサーバーに対しHorizontal Pod Autoscalerを有効化するという例に沿ってウォークスルーで説明していきます。Horizontal Pod Autoscalerの動作についてのより詳細な情報を知りたい場合は、[Horizontal Pod Autoscalerユーザーガイド](/docs/tasks/run-application/horizontal-pod-autoscale/)をご覧ください。 -## {{% heading "前提条件" %}} +## {{% heading "prerequisites" %}} この例ではバージョン1.2以上の動作するKubernetesクラスターおよびkubectlが必要です。 [Metrics API](https://github.com/kubernetes/metrics)を介してメトリクスを提供するために、[Metrics server](https://github.com/kubernetes-sigs/metrics-server)によるモニタリングがクラスター内にデプロイされている必要があります。 diff --git a/content/ja/docs/tutorials/clusters/_index.md b/content/ja/docs/tutorials/clusters/_index.md new file mode 100755 index 0000000000000..0f3a08fdda0ab --- /dev/null +++ b/content/ja/docs/tutorials/clusters/_index.md @@ -0,0 +1,4 @@ +--- +title: "クラスター" +weight: 60 +--- diff --git a/content/ja/docs/tutorials/clusters/apparmor.md b/content/ja/docs/tutorials/clusters/apparmor.md new file mode 100644 index 
0000000000000..5ee0201d71bcc --- /dev/null +++ b/content/ja/docs/tutorials/clusters/apparmor.md @@ -0,0 +1,375 @@ +--- +title: AppArmorを使用してコンテナのリソースへのアクセスを制限する +content_type: tutorial +weight: 10 +--- + + + +{{< feature-state for_k8s_version="v1.4" state="beta" >}} + +AppArmorは、Linux標準のユーザー・グループをベースとしたパーミッションを補完するLinuxカーネルのセキュリティモジュールであり、プログラムのアクセスを限定されたリソースセットに制限するために利用されます。AppArmorを設定することで、任意のアプリケーションの攻撃サーフェイスとなりうる面を減らしたり、より優れた多重の防御を提供できます。AppArmorは、たとえばLinuxのcapability、ネットワークアクセス、ファイルのパーミッションなど、特定のプログラムやコンテナに必要なアクセスを許可するようにチューニングされたプロファイルにより設定を行います。各プロファイルは、許可されなかったリソースへのアクセスをブロックする*enforcing*モードと、ルール違反を報告するだけの*complain*モードのいずれかで実行できます。 + +AppArmorを利用すれば、コンテナに許可することを制限したりシステムログを通してよりよい監査を提供することで、デプロイをよりセキュアにする助けになります。しかし、AppArmorは銀の弾丸ではなく、アプリケーションコードの悪用からの防御を強化できるだけであることを心に留めておくことが重要です。制限の強い優れたプロファイルを提供し、アプリケーションとクラスターを別の角度から強化することが重要です。 + +## {{% heading "objectives" %}} + +* プロファイルをノードに読み込む方法の例を見る +* Pod上でプロファイルを矯正する方法を学ぶ +* プロファイルが読み込まれたかを確認する方法を学ぶ +* プロファイルに違反した場合に何が起こるのかを見る +* プロファイルが読み込めなかった場合に何が起こるのかを見る + +## {{% heading "prerequisites" %}} + +以下のことを確認してください。 + +1. Kubernetesのバージョンがv1.4以上であること。KubernetesのAppArmorのサポートはv1.4で追加されました。v1.4より古いバージョンのKubernetesのコンポーネントは、新しいAppArmorのアノテーションを認識できないため、AppArmorの設定を与えたとしても**黙って無視されてしまいます**。Podが期待した保護を確実に受けられるようにするためには、次のようにノードのKubeletのバージョンを確認することが重要です。 + + ```shell + kubectl get nodes -o=jsonpath=$'{range .items[*]}{@.metadata.name}: {@.status.nodeInfo.kubeletVersion}\n{end}' + ``` + ``` + gke-test-default-pool-239f5d02-gyn2: v1.4.0 + gke-test-default-pool-239f5d02-x1kf: v1.4.0 + gke-test-default-pool-239f5d02-xwux: v1.4.0 + ``` + +2. AppArmorカーネルモジュールが有効であること。LinuxカーネルがAppArmorプロファイルを強制するためには、AppArmorカーネルモジュールのインストールと有効化が必須です。UbuntuやSUSEなどのディストリビューションではデフォルトで有効化されますが、他の多くのディストリビューションでのサポートはオプションです。モジュールが有効になっているかチェックするには、次のように`/sys/module/apparmor/parameters/enabled`ファイルを確認します。 + + ```shell + cat /sys/module/apparmor/parameters/enabled + Y + ``` + + KubeletがAppArmorをサポートしていれば(>= v1.4)、カーネルモジュールが有効になっていない場合にはAppArmorオプションが付いたPodを拒否します。 + + {{< note >}} + UbuntuはAppArmorに対して、アップストリームのLinuxにマージしていない多数のパッチを当てています。その中には、追加のフックや機能を加えるパッチも含まれます。Kubernetesはアップストリームのバージョンでのみテストされており、その他の機能に対するサポートを約束していません。 + {{< /note >}} + +3. コンテナランタイムがAppArmorをサポートしていること。現在、Kubernetesがサポートするすべての一般的なコンテナランタイム、{{< glossary_tooltip term_id="docker">}}、{{< glossary_tooltip term_id="cri-o" >}}、{{< glossary_tooltip term_id="containerd" >}}などは、AppArmorをサポートしています。関連するランタイムのドキュメントを参照して、クラスターがAppArmorを利用するための要求を満たしているかどうかを検証してください。 + +4. プロファイルが読み込まれていること。AppArmorがPodに適用されるのは、各コンテナが実行されるべきAppArmorプロファイルを指定したときです。もし指定されたプロファイルがまだカーネルに読み込まれていなければ、Kubelet(>= v1.4)はPodを拒否します。どのプロファイルがノードに読み込まれているのかを確かめるには、次のようなコマンドを実行して`/sys/kernel/security/apparmor/profiles`をチェックします。 + + ```shell + ssh gke-test-default-pool-239f5d02-gyn2 "sudo cat /sys/kernel/security/apparmor/profiles | sort" + ``` + ``` + apparmor-test-deny-write (enforce) + apparmor-test-audit-write (enforce) + docker-default (enforce) + k8s-nginx (enforce) + ``` + + ノード上でのプロファイルの読み込みの詳細については、[プロファイルを使用したノードのセットアップ](#setting-up-nodes-with-profiles)を参照してください。 + +KubeletのバージョンがAppArmorサポートに対応しているもの(>= v1.4)である限り、Kubeletは必要条件を1つでも満たさないAppArmorオプションが付けられたPodをリジェクトします。また、ノード上のAppArmorのサポートは、次のようにready conditionのメッセージで確認することもできます(ただし、この機能は将来のリリースで削除される可能性があります)。 + +```shell +kubectl get nodes -o=jsonpath=$'{range .items[*]}{@.metadata.name}: {.status.conditions[?(@.reason=="KubeletReady")].message}\n{end}' +``` +``` +gke-test-default-pool-239f5d02-gyn2: kubelet is posting ready status. 
AppArmor enabled +gke-test-default-pool-239f5d02-x1kf: kubelet is posting ready status. AppArmor enabled +gke-test-default-pool-239f5d02-xwux: kubelet is posting ready status. AppArmor enabled +``` + + + +## Podをセキュアにする + +{{< note >}} +AppArmorは現在beta版であるため、オプションはアノテーションとして指定します。将来サポートが一般利用可能(GA)になれば、アノテーションは第1級のフィールドで置き換えられます(詳細については、[一般利用可能(General Availability)への更新パス](#upgrade-path-to-general-availability)を参照してください)。 +{{< /note >}} + +AppArmorのプロファイルは*各コンテナごとに*指定します。Podのコンテナで実行するAppArmorのプロファイルを指定するには、Podのメタデータに次のようなアノテーションを追加します。 + +```yaml +container.apparmor.security.beta.kubernetes.io/: +``` + +ここで、``はプロファイルを適用するコンテナの名前であり、``には適用するプロファイルを指定します。`profile_ref`は次の値のうち1つを指定します。 + +* `runtime/default`: ランタイムのデフォルトのプロファイルを適用する +* `localhost/`: ``という名前でホストにロードされたプロファイルを適用する +* `unconfined`: いかなるプロファイルもロードされないことを示す + +アノテーションとプロファイルの名前のフォーマットの詳細については、[APIリファレンス](#api-reference)を参照してください。 + +KubernetesのAppArmorの強制では、まずはじめにすべての前提条件が満たされているかどうかをチェックします。その後、強制を行うためにプロファイルの選択をコンテナランタイムに委ねます。前提条件が満たされなかった場合、Podはリジェクトされ、実行されません。 + +プロファイルが適用されたかどうか確認するには、AppArmor securityオプションがコンテナ作成イベントに一覧されているかどうかを確認します。 + +```shell +kubectl get events | grep Created +``` +``` +22s 22s 1 hello-apparmor Pod spec.containers{hello} Normal Created {kubelet e2e-test-stclair-node-pool-31nt} Created container with docker id 269a53b202d3; Security:[seccomp=unconfined apparmor=k8s-apparmor-example-deny-write] +``` + +proc attrを調べることで、コンテナのルートプロセスが正しいプロファイルで実行されているかどうかを直接確認することもできます。 + +```shell +kubectl exec cat /proc/1/attr/current +``` +``` +k8s-apparmor-example-deny-write (enforce) +``` + +## 例 {#example} + +*この例は、クラスターがすでにAppArmorのサポート付きでセットアップ済みであることを前提としています。* + +まず、使用したいプロファイルをノード上に読み込む必要があります。このプロファイルは、すべてのファイル書き込みを拒否します。 + +```shell +#include + +profile k8s-apparmor-example-deny-write flags=(attach_disconnected) { + #include + + file, + + # Deny all file writes. + deny /** w, +} +``` + +Podがどのノードにスケジュールされるかは予測できないため、プロファイルはすべてのノードに読み込ませる必要があります。この例では、単純にSSHを使ってプロファイルをインストールしますが、[プロファイルを使用したノードのセットアップ](#setting-up-nodes-with-profiles)では、他のアプローチについて議論しています。 + +```shell +NODES=( + # SSHでアクセス可能なノードのドメイン名 + gke-test-default-pool-239f5d02-gyn2.us-central1-a.my-k8s + gke-test-default-pool-239f5d02-x1kf.us-central1-a.my-k8s + gke-test-default-pool-239f5d02-xwux.us-central1-a.my-k8s) +for NODE in ${NODES[*]}; do ssh $NODE 'sudo apparmor_parser -q < + +profile k8s-apparmor-example-deny-write flags=(attach_disconnected) { + #include + + file, + + # Deny all file writes. 
+ deny /** w, +} +EOF' +done +``` + +次に、deny-writeプロファイルを使用した単純な "Hello AppArmor" Podを実行します。 + +{{< codenew file="pods/security/hello-apparmor.yaml" >}} + +```shell +kubectl create -f ./hello-apparmor.yaml +``` + +Podイベントを確認すると、PodコンテナがAppArmorプロファイル "k8s-apparmor-example-deny-write" を使用して作成されたことがわかります。 + +```shell +kubectl get events | grep hello-apparmor +``` +``` +14s 14s 1 hello-apparmor Pod Normal Scheduled {default-scheduler } Successfully assigned hello-apparmor to gke-test-default-pool-239f5d02-gyn2 +14s 14s 1 hello-apparmor Pod spec.containers{hello} Normal Pulling {kubelet gke-test-default-pool-239f5d02-gyn2} pulling image "busybox" +13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Pulled {kubelet gke-test-default-pool-239f5d02-gyn2} Successfully pulled image "busybox" +13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Created {kubelet gke-test-default-pool-239f5d02-gyn2} Created container with docker id 06b6cd1c0989; Security:[seccomp=unconfined apparmor=k8s-apparmor-example-deny-write] +13s 13s 1 hello-apparmor Pod spec.containers{hello} Normal Started {kubelet gke-test-default-pool-239f5d02-gyn2} Started container with docker id 06b6cd1c0989 +``` + +コンテナがこのプロファイルで実際に実行されていることを確認するために、コンテナのproc attrをチェックします。 + +```shell +kubectl exec hello-apparmor cat /proc/1/attr/current +``` +``` +k8s-apparmor-example-deny-write (enforce) +``` + +最後に、ファイルへの書き込みを行おうとすることで、プロファイルに違反すると何が起こるか見てみましょう。 + +```shell +kubectl exec hello-apparmor touch /tmp/test +``` +``` +touch: /tmp/test: Permission denied +error: error executing remote command: command terminated with non-zero exit code: Error executing in Docker Container: 1 +``` + +まとめとして、読み込まれていないプロファイルを指定しようとするとどうなるのか見てみましょう。 + +```shell +kubectl create -f /dev/stdin < +Annotations: container.apparmor.security.beta.kubernetes.io/hello=localhost/k8s-apparmor-example-allow-write +Status: Pending +Reason: AppArmor +Message: Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded +IP: +Controllers: +Containers: + hello: + Container ID: + Image: busybox + Image ID: + Port: + Command: + sh + -c + echo 'Hello AppArmor!' 
&& sleep 1h + State: Waiting + Reason: Blocked + Ready: False + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from default-token-dnz7v (ro) +Conditions: + Type Status + Initialized True + Ready False + PodScheduled True +Volumes: + default-token-dnz7v: + Type: Secret (a volume populated by a Secret) + SecretName: default-token-dnz7v + Optional: false +QoS Class: BestEffort +Node-Selectors: +Tolerations: +Events: + FirstSeen LastSeen Count From SubobjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 23s 23s 1 {default-scheduler } Normal Scheduled Successfully assigned hello-apparmor-2 to e2e-test-stclair-node-pool-t1f5 + 23s 23s 1 {kubelet e2e-test-stclair-node-pool-t1f5} Warning AppArmor Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded +``` + +PodのステータスはPendingとなり、`Pod Cannot enforce AppArmor: profile +"k8s-apparmor-example-allow-write" is not loaded`(PodはAppArmorを強制できません: プロファイル "k8s-apparmor-example-allow-write" はロードされていません)という役に立つエラーメッセージが表示されています。同じメッセージのイベントも記録されています。 + +## 管理 + +### プロファイルを使用したノードのセットアップ {#setting-up-nodes-with-profiles} + +現在、KubernetesはAppArmorのプロファイルをノードに読み込むネイティブの仕組みは提供していません。しかし、プロファイルをセットアップする方法は、以下のように様々な方法があります。 + +* 各ノード上に正しいプロファイルがロードされていることを保証するPodを実行する[DaemonSet](/ja/docs/concepts/workloads/controllers/daemonset/)を利用する方法。[ここ](https://git.k8s.io/kubernetes/test/images/apparmor-loader)に実装例があります。 +* ノードの初期化時に初期化スクリプト(例: Salt、Ansibleなど)や初期化イメージを使用する。 +* [例](#example)で示したような方法で、プロファイルを各ノードにコピーし、SSHで読み込む。 + +スケジューラーはどのプロファイルがどのノードに読み込まれているのかがわからないため、すべてのプロファイルがすべてのノードに読み込まれていなければなりません。もう1つのアプローチとしては、各プロファイル(あるいはプロファイルのクラス)ごとにノードラベルを追加し、[node selector](/ja/docs/concepts/scheduling-eviction/assign-pod-node/)を用いてPodが必要なプロファイルを読み込んだノードで実行されるようにする方法もあります。 + +### PodSecurityPolicyを使用したプロファイルの制限 + +PodSecurityPolicy extensionが有効になっている場合、クラスタ全体でAppArmorn制限が適用されます。PodSecurityPolicyを有効にするには、`apiserver`上で次のフラグを設定する必要があります。 + +``` +--enable-admission-plugins=PodSecurityPolicy[,others...] +``` + +AppArmorのオプションはPodSecurityPolicy上でアノテーションとして指定します。 + +```yaml +apparmor.security.beta.kubernetes.io/defaultProfileName: +apparmor.security.beta.kubernetes.io/allowedProfileNames: [,others...] 
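+# 記入例(値は本ページの例で使用している名前を流用した仮のものです):
+# apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
+# apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default,localhost/k8s-apparmor-example-deny-write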
+``` + +defaultProfileNameオプションには、何も指定されなかった場合にコンテナにデフォルトで適用されるプロファイルを指定します。allowedProfileNamesオプションには、Podコンテナの実行が許可されるプロファイルのリストを指定します。両方のオプションが指定された場合、デフォルトは許可されなければいけません。プロファイルはコンテナ上で同じフォーマットで指定されます。完全な仕様については、[APIリファレンス](#api-reference)を参照してください。 + +### AppArmorの無効化 + +クラスタ上でAppArmorを利用可能にしたくない場合、次のコマンドラインフラグで無効化できます。 + +``` +--feature-gates=AppArmor=false +``` + +無効化すると、AppArmorプロファイルを含むPodは"Forbidden"エラーで検証に失敗します。ただし、デフォルトのdockerは非特権Pod上では"docker-default"というプロファイルを常に有効化し(AppArmorカーネルモジュールが有効である場合)、フィーチャーゲートで無効化したとしても有効化し続けることに注意してください。AppArmorを無効化するオプションは、AppArmorが一般利用(GA)になったときに削除される予定です。 + +### AppArmorを使用するKubernetes v1.4にアップグレードする + +クラスタをv1.4にアップグレードするために、AppArmorに関する操作は必要ありません。ただし、既存のPodがAppArmorのアノテーションを持っている場合、検証(またはPodSecurityPolicy admission)は行われません。もしpermissiveなプロファイルがノードに読み込まれていた場合、悪意のあるユーザーがPodの権限を上述のdocker-defaultより昇格させるために、permissiveなプロファイルを再適用する恐れがあります。これが問題となる場合、`apparmor.security.beta.kubernetes.io`のアノテーションを含むすべてのPodのクラスターをクリーンアップすることを推奨します。 + +### 一般利用可能(General Availability)への更新パス {#upgrade-path-to-general-availability} + +AppArmorが一般利用可能(GA)になったとき、現在アノテーションで指定しているオプションはフィールドに変換されます。移行中のすべてのアップグレードとダウングレードの経路をサポートするのは非常に微妙であるため、以降が必要になったときに詳細に説明する予定です。最低2リリースの間はフィールドとアノテーションの両方がサポートされるようにする予定です。最低2リリースの後は、アノテーションは明示的に拒否されるようになります。 + +## Profilesの作成 + +AppArmorのプロファイルを正しく指定するのはやっかいな作業です。幸い、その作業を補助するツールがいくつかあります。 + +* `aa-genprof`および`aa-logprof`は、アプリケーションの動作とログを監視することによりプロファイルのルールを生成します。詳しい説明については、[AppArmor documentation](https://gitlab.com/apparmor/apparmor/wikis/Profiling_with_tools)を参照してください。 +* [bane](https://github.com/jfrazelle/bane)は、Docker向けのAppArmorのプロファイル・ジェネレータです。簡略化されたプロファイル言語を使用しています。 + +プロファイルの生成には、アプリケーションを開発用ワークステーション上でDockerで実行することを推奨します。しかし、実際にPodが実行されるKubernetesノード上でツールを実行してはいけない理由はありません。 + +AppArmorに関する問題をデバッグするには、システムログをチェックして、特に何が拒否されたのかを確認できます。AppArmorのログは`dmesg`にverboseメッセージを送り、エラーは通常システムログまたは`journalctl`で確認できます。詳しい情報は、[AppArmor failures](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Failures)で提供されています。 + +## APIリファレンス {#api-reference} + +### Podアノテーション + +コンテナが実行するプロファイルを指定します。 + +- **key**: `container.apparmor.security.beta.kubernetes.io/` + ここで、``はPod内のコンテナの名前を一致させます。Pod内の各コンテナごとに別々のプロファイルを指定できます。 +- **value**: 下で説明するプロファイルのリファレンス + +### プロファイルのリファレンス + +- `runtime/default`: デフォルトのランタイムプロファイルを指します。 + - (PodSecurityPolicyのデフォルトを設定せずに)プロファイルを指定しない場合と同等ですが、AppArmorを有効化する必要があります。 + - Dockerの場合、非特権コンテナでは[`docker-default`](https://docs.docker.com/engine/security/apparmor/)プロファイルが選択され、特権コンテナではunconfined(プロファイルなし)が選択されます。 +- `localhost/`: 名前で指定されたノード(localhost)に読み込まれたプロファイルを指します。 + - 利用できるプロファイル名の詳細は[core policy reference](https://gitlab.com/apparmor/apparmor/wikis/AppArmor_Core_Policy_Reference#profile-names-and-attachment-specifications)で説明されています。 +- `unconfined`: これは実質的にコンテナ上のAppArmorを無効化します。 + +これ以外のプロファイルリファレンスはすべて無効です。 + +### PodSecurityPolicyアノテーション + +何も指定されなかった場合にコンテナに適用するデフォルトのプロファイルは、以下のように指定します。 + +* **key**: `apparmor.security.beta.kubernetes.io/defaultProfileName` +* **value**: 上で説明したプロファイルのリファレンス + +Podコンテナが指定することを許可するプロファイルのリストは、以下のように指定します。 + +* **key**: `apparmor.security.beta.kubernetes.io/allowedProfileNames` +* **value**: カンマ区切りの上述のプロファイルリファレンスのリスト + - プロファイル名ではエスケープしたカンマは不正な文字ではありませんが、ここでは明示的に許可されません。 + +## {{% heading "whatsnext" %}} + +追加のリソースとしては以下のものがあります。 + +* [Quick guide to the AppArmor profile language](https://gitlab.com/apparmor/apparmor/wikis/QuickProfileLanguage) +* [AppArmor core policy reference](https://gitlab.com/apparmor/apparmor/wikis/Policy_Layout) diff --git 
a/content/ja/examples/admin/cloud/ccm-example.yaml b/content/ja/examples/admin/cloud/ccm-example.yaml index 4c98162a70db9..96b78331744c1 100644 --- a/content/ja/examples/admin/cloud/ccm-example.yaml +++ b/content/ja/examples/admin/cloud/ccm-example.yaml @@ -1,4 +1,4 @@ -# This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster. +# This is an example of how to setup cloud-controller-manager as a Daemonset in your cluster. # It assumes that your masters can run pods and has the role node-role.kubernetes.io/master # Note that this Daemonset will not work straight out of the box for your cloud, this is # meant to be a guideline. diff --git a/content/ko/docs/concepts/architecture/cloud-controller.md b/content/ko/docs/concepts/architecture/cloud-controller.md index f7666ef0c3c6b..fe7fda364a4e6 100644 --- a/content/ko/docs/concepts/architecture/cloud-controller.md +++ b/content/ko/docs/concepts/architecture/cloud-controller.md @@ -206,6 +206,8 @@ rules: [클라우드 컨트롤러 매니저 관리](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager)에는 클라우드 컨트롤러 매니저의 실행과 관리에 대한 지침이 있다. +클라우드 컨트롤러 매니저를 사용하기 위해 HA 컨트롤 플레인을 업그레이드하려면, [클라우드 컨트롤러 매니저를 사용하기 위해 복제된 컨트롤 플레인 마이그레이션 하기](/docs/tasks/administer-cluster/controller-manager-leader-migration/)를 참고한다. + 자체 클라우드 컨트롤러 매니저를 구현하거나 기존 프로젝트를 확장하는 방법을 알고 싶은가? 클라우드 컨트롤러 매니저는 Go 인터페이스를 사용해서 모든 클라우드 플러그인을 구현할 수 있다. 구체적으로, [kubernetes/cloud-provider](https://github.com/kubernetes/cloud-provider)의 [`cloud.go`](https://github.com/kubernetes/cloud-provider/blob/release-1.17/cloud.go#L42-L62)에 정의된 `CloudProvider` 인터페이스를 사용한다. diff --git a/content/ko/docs/concepts/architecture/control-plane-node-communication.md b/content/ko/docs/concepts/architecture/control-plane-node-communication.md index 1831692bdf36e..52fa728043b2d 100644 --- a/content/ko/docs/concepts/architecture/control-plane-node-communication.md +++ b/content/ko/docs/concepts/architecture/control-plane-node-communication.md @@ -8,14 +8,15 @@ aliases: -이 문서는 컨트롤 플레인(실제로는 API 서버)과 쿠버네티스 클러스터 사이에 대한 통신 경로의 목록을 작성한다. 이는 사용자가 신뢰할 수 없는 네트워크(또는 클라우드 공급자의 완전한 퍼블릭 IP)에서 클러스터를 실행할 수 있도록 네트워크 구성을 강화하기 위한 맞춤 설치를 할 수 있도록 한다. +이 문서는 컨트롤 플레인(API 서버)과 쿠버네티스 클러스터 사이에 대한 통신 경로의 목록을 작성한다. 이는 사용자가 신뢰할 수 없는 네트워크(또는 클라우드 공급자의 완전한 퍼블릭 IP)에서 클러스터를 실행할 수 있도록 네트워크 구성을 강화하기 위한 맞춤 설치를 할 수 있도록 한다. ## 노드에서 컨트롤 플레인으로의 통신 -쿠버네티스에는 "허브 앤 스포크(hub-and-spoke)" API 패턴을 가지고 있다. 노드(또는 노드에서 실행되는 파드들)의 모든 API 사용은 API 서버에서 종료된다(다른 컨트롤 플레인 컴포넌트 중 어느 것도 원격 서비스를 노출하도록 설계되지 않았다). API 서버는 하나 이상의 클라이언트 [인증](/docs/reference/access-authn-authz/authentication/) 형식이 활성화된 보안 HTTPS 포트(일반적으로 443)에서 원격 연결을 수신하도록 구성된다. +쿠버네티스에는 "허브 앤 스포크(hub-and-spoke)" API 패턴을 가지고 있다. 노드(또는 노드에서 실행되는 파드들)의 모든 API 사용은 API 서버에서 종료된다. 다른 컨트롤 플레인 컴포넌트 중 어느 것도 원격 서비스를 노출하도록 설계되지 않았다. API 서버는 하나 이상의 클라이언트 [인증](/docs/reference/access-authn-authz/authentication/) 형식이 활성화된 보안 HTTPS 포트(일반적으로 443)에서 원격 연결을 수신하도록 구성된다. + 특히 [익명의 요청](/docs/reference/access-authn-authz/authentication/#anonymous-requests) 또는 [서비스 어카운트 토큰](/docs/reference/access-authn-authz/authentication/#service-account-tokens)이 허용되는 경우, 하나 이상의 [권한 부여](/ko/docs/reference/access-authn-authz/authorization/) 형식을 사용해야 한다. 노드는 유효한 클라이언트 자격 증명과 함께 API 서버에 안전하게 연결할 수 있도록 클러스터에 대한 공개 루트 인증서로 프로비전해야 한다. 예를 들어, 기본 GKE 배포에서, kubelet에 제공되는 클라이언트 자격 증명은 클라이언트 인증서 형식이다. kubelet 클라이언트 인증서의 자동 프로비저닝은 [kubelet TLS 부트스트랩](/docs/reference/command-line-tools-reference/kubelet-tls-bootstrapping/)을 참고한다. @@ -50,20 +51,20 @@ API 서버에서 kubelet으로의 연결은 다음의 용도로 사용된다. 
### API 서버에서 노드, 파드 및 서비스로의 통신 -API 서버에서 노드, 파드 또는 서비스로의 연결은 기본적으로 일반 HTTP 연결로 연결되므로 인증되거나 암호화되지 않는다. API URL에서 노드, 파드 또는 서비스 이름을 접두어 `https:` 로 사용하여 보안 HTTPS 연결을 통해 실행될 수 있지만, HTTPS 엔드포인트가 제공한 인증서의 유효성을 검증하지 않거나 클라이언트 자격 증명을 제공하지 않으므로 연결이 암호화되는 동안 무결성을 보장하지 않는다. 이러한 연결은 신뢰할 수 없는 네트워크 및/또는 공용 네트워크에서 실행하기에 **현재는 안전하지 않다** . +API 서버에서 노드, 파드 또는 서비스로의 연결은 기본적으로 일반 HTTP 연결로 연결되므로 인증되거나 암호화되지 않는다. API URL에서 노드, 파드 또는 서비스 이름을 접두어 `https:` 로 사용하여 보안 HTTPS 연결을 통해 실행될 수 있지만, HTTPS 엔드포인트가 제공한 인증서의 유효성을 검증하지 않거나 클라이언트 자격 증명을 제공하지 않는다. 그래서 연결이 암호화되는 동안 무결성을 보장하지 않는다. 이러한 연결은 신뢰할 수 없는 네트워크 및/또는 공용 네트워크에서 실행하기에 **현재는 안전하지 않다** . ### SSH 터널 쿠버네티스는 SSH 터널을 지원하여 컨트롤 플레인에서 노드로의 통신 경로를 보호한다. 이 구성에서, API 서버는 클러스터의 각 노드에 SSH 터널을 시작하고(포트 22에서 수신 대기하는 ssh 서버에 연결) 터널을 통해 kubelet, 노드, 파드 또는 서비스로 향하는 모든 트래픽을 전달한다. 이 터널은 트래픽이 노드가 실행 중인 네트워크 외부에 노출되지 않도록 한다. -SSH 터널은 현재 더 이상 사용되지 않으므로 수행 중인 작업이 어떤 것인지 모른다면 사용하면 안된다. Konnectivity 서비스는 이 통신 채널을 대체한다. +SSH 터널은 현재 더 이상 사용되지 않으므로, 수행 중인 작업이 어떤 것인지 모른다면 사용하면 안된다. Konnectivity 서비스는 이 통신 채널을 대체한다. ### Konnectivity 서비스 {{< feature-state for_k8s_version="v1.18" state="beta" >}} -SSH 터널을 대체하는 Konnectivity 서비스는 컨트롤 플레인에서 클러스터 통신에 TCP 레벨 프록시를 제공한다. Konnectivity 서비스는 컨트롤 플레인 네트워크와 노드 네트워크에서 각각 실행되는 Konnectivity 서버와 Konnectivity 에이전트의 두 부분으로 구성된다. Konnectivity 에이전트는 Konnectivity 서버에 대한 연결을 시작하고 네트워크 연결을 유지한다. +SSH 터널을 대체하는 Konnectivity 서비스는 컨트롤 플레인에서 클러스터 통신에 TCP 레벨 프록시를 제공한다. Konnectivity 서비스는 컨트롤 플레인 네트워크의 Konnectivity 서버와 노드 네트워크의 Konnectivity 에이전트, 두 부분으로 구성된다. Konnectivity 에이전트는 Konnectivity 서버에 대한 연결을 시작하고 네트워크 연결을 유지한다. Konnectivity 서비스를 활성화한 후, 모든 컨트롤 플레인에서 노드로의 트래픽은 이 연결을 통과한다. [Konnectivity 서비스 태스크](/docs/tasks/extend-kubernetes/setup-konnectivity/)에 따라 클러스터에서 Konnectivity 서비스를 설정한다. diff --git a/content/ko/docs/concepts/architecture/nodes.md b/content/ko/docs/concepts/architecture/nodes.md index cb65b2832a503..291b7e82ce6b2 100644 --- a/content/ko/docs/concepts/architecture/nodes.md +++ b/content/ko/docs/concepts/architecture/nodes.md @@ -63,6 +63,16 @@ kubelet이 노드의 `metadata.name` 필드와 일치하는 API 서버에 등록 노드 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. +### 노드 이름 고유성 + +[이름](/ko/docs/concepts/overview/working-with-objects/names#names)은 노드를 식별한다. 두 노드는 +동시에 같은 이름을 가질 수 없다. 쿠버네티스는 또한 같은 이름의 리소스가 +동일한 객체라고 가정한다. 노드의 경우, 동일한 이름을 사용하는 인스턴스가 동일한 +상태(예: 네트워크 설정, 루트 디스크 내용)를 갖는다고 암시적으로 가정한다. 인스턴스가 +이름을 변경하지 않고 수정된 경우 이로 인해 불일치가 발생할 수 있다. 노드를 대폭 교체하거나 +업데이트해야 하는 경우, 기존 노드 오브젝트를 먼저 API 서버에서 제거하고 +업데이트 후 다시 추가해야 한다. + ### 노드에 대한 자체-등록 kubelet 플래그 `--register-node`는 참(기본값)일 경우, kubelet 은 API 서버에 @@ -233,6 +243,7 @@ apiserver로부터 삭제되어 그 이름을 사용할 수 있는 결과를 낳 - 노드가 계속 접근 불가할 경우 나중에 노드로부터 정상적인 종료를 이용해서 모든 파드를 축출 한다. ConditionUnknown을 알리기 시작하는 기본 타임아웃 값은 40초 이고, 파드를 축출하기 시작하는 값은 5분이다. + 노드 컨트롤러는 매 `--node-monitor-period` 초 마다 각 노드의 상태를 체크한다. #### 하트비트 @@ -273,6 +284,7 @@ ConditionFalse 다.). - 클러스터가 작으면 (즉 `--large-cluster-size-threshold` 노드 이하면 - 기본값 50) 축출은 중지되고, 그렇지 않으면 축출 비율은 초당 `--secondary-node-eviction-rate`(기본값 0.01)로 감소된다. + 이 정책들이 가용성 영역 단위로 실행되어지는 이유는 나머지가 연결되어 있는 동안 하나의 가용성 영역이 마스터로부터 분할되어 질 수도 있기 때문이다. 만약 클러스터가 여러 클라우드 제공사업자의 가용성 영역에 걸쳐 있지 않으면, @@ -329,14 +341,27 @@ ConditionFalse 다.). 자세한 내용은 [노드의 컨트롤 토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)을 본다. -## 그레이스풀(Graceful) 노드 셧다운 +## 그레이스풀(Graceful) 노드 셧다운 {#graceful-node-shutdown} -{{< feature-state state="alpha" for_k8s_version="v1.20" >}} +{{< feature-state state="beta" for_k8s_version="v1.21" >}} + +kubelet은 노드 시스템 셧다운을 감지하고 노드에서 실행 중인 파드를 종료하려고 시도한다. 
-`GracefulNodeShutdown` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한 경우 kubelet은 노드 시스템 종료를 감지하고 노드에서 실행 중인 파드를 종료한다. Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로세스](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)를 따르도록 한다. -`GracefulNodeShutdown` 기능 게이트가 활성화되면 kubelet은 [systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/)를 사용하여 주어진 기간 동안 노드 종료를 지연시킨다. 종료 중에 kubelet은 두 단계로 파드를 종료시킨다. +그레이스풀 노드 셧다운 기능은 +[systemd inhibitor locks](https://www.freedesktop.org/wiki/Software/systemd/inhibit/)를 +사용하여 주어진 기간 동안 노드 종료를 지연시키므로 systemd에 의존한다. + +그레이스풀 노드 셧다운은 1.21에서 기본적으로 활성화된 `GracefulNodeShutdown` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 제어된다. + +기본적으로, 아래 설명된 두 구성 옵션, +`ShutdownGracePeriod` 및 `ShutdownGracePeriodCriticalPods` 는 모두 0으로 설정되어 있으므로, +그레이스풀 노드 셧다운 기능이 활성화되지 않는다. +기능을 활성화하려면, 두 개의 kubelet 구성 설정을 적절하게 구성하고 0이 아닌 값으로 설정해야 한다. + +그레이스풀 셧다운 중에 kubelet은 다음의 두 단계로 파드를 종료한다. 1. 노드에서 실행 중인 일반 파드를 종료시킨다. 2. 노드에서 실행 중인 [중요(critical) 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료시킨다. @@ -345,9 +370,13 @@ Kubelet은 노드가 종료되는 동안 파드가 일반 [파드 종료 프로 * `ShutdownGracePeriod`: * 노드가 종료를 지연해야 하는 총 기간을 지정한다. 이것은 모든 일반 및 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 파드 종료에 필요한 총 유예 기간에 해당한다. * `ShutdownGracePeriodCriticalPods`: - * 노드 종료 중에 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료하는 데 사용되는 기간을 지정한다. 이는 `ShutdownGracePeriod`보다 작아야 한다. + * 노드 종료 중에 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)를 종료하는 데 사용되는 기간을 지정한다. 이 값은 `ShutdownGracePeriod` 보다 작아야 한다. -예를 들어 `ShutdownGracePeriod=30s`, `ShutdownGracePeriodCriticalPods=10s` 인 경우 kubelet은 노드 종료를 30 초까지 지연시킨다. 종료하는 동안 처음 20(30-10) 초는 일반 파드의 유예 종료에 할당되고, 마지막 10 초는 [중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 종료에 할당된다. +예를 들어, `ShutdownGracePeriod=30s`, +`ShutdownGracePeriodCriticalPods=10s` 인 경우, kubelet은 노드 종료를 30초까지 +지연시킨다. 종료하는 동안 처음 20(30-10)초는 일반 파드의 +유예 종료에 할당되고, 마지막 10초는 +[중요 파드](/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/#marking-pod-as-critical)의 종료에 할당된다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/cluster-administration/addons.md b/content/ko/docs/concepts/cluster-administration/addons.md index 811f77362632d..7e67dd604f769 100644 --- a/content/ko/docs/concepts/cluster-administration/addons.md +++ b/content/ko/docs/concepts/cluster-administration/addons.md @@ -16,6 +16,7 @@ content_type: concept ## 네트워킹과 네트워크 폴리시 * [ACI](https://www.github.com/noironetworks/aci-containers)는 Cisco ACI로 통합 컨테이너 네트워킹 및 네트워크 보안을 제공한다. +* [Antrea](https://antrea.io/)는 레이어 3/4에서 작동하여 쿠버네티스를 위한 네트워킹 및 보안 서비스를 제공하며, Open vSwitch를 네트워킹 데이터 플레인으로 활용한다. * [Calico](https://docs.projectcalico.org/latest/introduction/)는 네트워킹 및 네트워크 폴리시 제공자이다. Calico는 유연한 네트워킹 옵션을 지원하므로 BGP 유무에 관계없이 비-오버레이 및 오버레이 네트워크를 포함하여 가장 상황에 맞는 옵션을 선택할 수 있다. Calico는 동일한 엔진을 사용하여 서비스 메시 계층(service mesh layer)에서 호스트, 파드 및 (이스티오(istio)와 Envoy를 사용하는 경우) 애플리케이션에 대한 네트워크 폴리시를 적용한다. * [Canal](https://github.com/tigera/canal/tree/master/k8s-install)은 Flannel과 Calico를 통합하여 네트워킹 및 네트워크 폴리시를 제공한다. * [Cilium](https://github.com/cilium/cilium)은 L3 네트워크 및 네트워크 폴리시 플러그인으로 HTTP/API/L7 폴리시를 투명하게 시행할 수 있다. 라우팅 및 오버레이/캡슐화 모드를 모두 지원하며, 다른 CNI 플러그인 위에서 작동할 수 있다. 
diff --git a/content/ko/docs/concepts/cluster-administration/logging.md b/content/ko/docs/concepts/cluster-administration/logging.md index 695c747a5d7c6..85f3e4efde166 100644 --- a/content/ko/docs/concepts/cluster-administration/logging.md +++ b/content/ko/docs/concepts/cluster-administration/logging.md @@ -83,12 +83,15 @@ kubectl logs counter [`configure-helper` 스크립트](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/cluster/gce/gci/configure-helper.sh)를 통해 자세히 알 수 있다. +**CRI 컨테이너 런타임** 을 사용할 때, kubelet은 로그를 로테이션하고 로깅 디렉터리 구조를 관리한다. kubelet은 +이 정보를 CRI 컨테이너 런타임에 전송하고 런타임은 컨테이너 로그를 지정된 위치에 기록한다. 두 개의 kubelet 플래그 `container-log-max-size` 및 `container-log-max-files` 를 사용하여 각 로그 파일의 최대 크기와 각 컨테이너에 허용되는 최대 파일 수를 각각 구성할 수 있다. + 기본 로깅 예제에서와 같이 [`kubectl logs`](/docs/reference/generated/kubectl/kubectl-commands#logs)를 실행하면, 노드의 kubelet이 요청을 처리하고 로그 파일에서 직접 읽는다. kubelet은 로그 파일의 내용을 반환한다. {{< note >}} -만약, 일부 외부 시스템이 로테이션을 수행한 경우, +만약, 일부 외부 시스템이 로테이션을 수행했거나 CRI 컨테이너 런타임이 사용된 경우, `kubectl logs` 를 통해 최신 로그 파일의 내용만 사용할 수 있다. 예를 들어, 10MB 파일이 있으면, `logrotate` 가 로테이션을 수행하고 두 개의 파일이 생긴다. (크기가 10MB인 파일 하나와 비어있는 파일) diff --git a/content/ko/docs/concepts/cluster-administration/manage-deployment.md b/content/ko/docs/concepts/cluster-administration/manage-deployment.md index d61d111274bae..abcc4c2cd58e0 100644 --- a/content/ko/docs/concepts/cluster-administration/manage-deployment.md +++ b/content/ko/docs/concepts/cluster-administration/manage-deployment.md @@ -278,7 +278,7 @@ pod/my-nginx-2035384211-u3t6x labeled ``` 먼저 "app=nginx" 레이블이 있는 모든 파드를 필터링한 다음, "tier=fe" 레이블을 지정한다. -방금 레이블을 지정한 파드를 보려면, 다음을 실행한다. +레이블을 지정한 파드를 보려면, 다음을 실행한다. ```shell kubectl get pods -l app=nginx -L tier diff --git a/content/ko/docs/concepts/cluster-administration/proxies.md b/content/ko/docs/concepts/cluster-administration/proxies.md index df431575785a1..ab1f6611fd770 100644 --- a/content/ko/docs/concepts/cluster-administration/proxies.md +++ b/content/ko/docs/concepts/cluster-administration/proxies.md @@ -39,7 +39,7 @@ weight: 90 - UDP, TCP, SCTP를 이용하여 프락시 한다. - HTTP는 이해하지 못한다. - 로드 밸런싱을 제공한다. - - 단지 서비스에 도달하는데 사용한다. + - 서비스에 도달하는데만 사용한다. 1. API 서버 앞단의 프락시/로드밸런서 @@ -61,7 +61,3 @@ weight: 90 ## 요청을 리다이렉트하기 프락시는 리다이렉트 기능을 대체했다. 리다이렉트는 더 이상 사용하지 않는다. - - - - diff --git a/content/ko/docs/concepts/cluster-administration/system-metrics.md b/content/ko/docs/concepts/cluster-administration/system-metrics.md index 08b7b79d0d59e..737c1ded252bc 100644 --- a/content/ko/docs/concepts/cluster-administration/system-metrics.md +++ b/content/ko/docs/concepts/cluster-administration/system-metrics.md @@ -130,7 +130,7 @@ cloudprovider_gce_api_request_duration_seconds { request = "list_disk"} ### kube-scheduler 메트릭 -{{< feature-state for_k8s_version="v1.20" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} 스케줄러는 실행 중인 모든 파드의 요청(request)된 리소스와 요구되는 제한(limit)을 보고하는 선택적 메트릭을 노출한다. 이러한 메트릭은 용량 계획(capacity planning) 대시보드를 구축하고, 현재 또는 과거 스케줄링 제한을 평가하고, 리소스 부족으로 스케줄할 수 없는 워크로드를 빠르게 식별하고, 실제 사용량을 파드의 요청과 비교하는 데 사용할 수 있다. @@ -148,6 +148,24 @@ kube-scheduler는 각 파드에 대해 구성된 리소스 [요청과 제한](/k 메트릭은 HTTP 엔드포인트 `/metrics/resources`에 노출되며 스케줄러의 `/metrics` 엔드포인트 와 동일한 인증이 필요하다. 이러한 알파 수준의 메트릭을 노출시키려면 `--show-hidden-metrics-for-version=1.20` 플래그를 사용해야 한다. +## 메트릭 비활성화 + +커맨드 라인 플래그 `--disabled-metrics` 를 통해 메트릭을 명시적으로 끌 수 있다. 이 방법이 필요한 이유는 메트릭이 성능 문제를 일으키는 경우을 예로 들 수 있다. 입력값은 비활성화되는 메트릭 목록이다(예: `--disabled-metrics=metric1,metric2`). + +## 메트릭 카디널리티(cardinality) 적용 + +제한되지 않은 차원의 메트릭은 계측하는 컴포넌트에서 메모리 문제를 일으킬 수 있다. 
리소스 사용을 제한하려면, `--allow-label-value` 커맨드 라인 옵션을 사용하여 메트릭 항목에 대한 레이블 값의 허용 목록(allow-list)을 동적으로 구성한다. + +알파 단계에서, 플래그는 메트릭 레이블 허용 목록으로 일련의 매핑만 가져올 수 있다. +각 매핑은 `,=` 형식이다. 여기서 +`` 는 허용되는 레이블 이름의 쉼표로 구분된 목록이다. + +전체 형식은 다음과 같다. +`--allow-label-value ,=', ...', ,=', ...', ...`. + +예시는 다음과 같다. +`--allow-label-value number_count_metric,odd_number='1,3,5', number_count_metric,even_number='2,4,6', date_gauge_metric,weekend='Saturday,Sunday'` + ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/configuration/configmap.md b/content/ko/docs/concepts/configuration/configmap.md index 50ef8b3632f2b..841feb8fb34fe 100644 --- a/content/ko/docs/concepts/configuration/configmap.md +++ b/content/ko/docs/concepts/configuration/configmap.md @@ -223,7 +223,7 @@ spec: 현재 볼륨에서 사용된 컨피그맵이 업데이트되면, 프로젝션된 키도 마찬가지로 업데이트된다. kubelet은 모든 주기적인 동기화에서 마운트된 컨피그맵이 최신 상태인지 확인한다. 그러나, kubelet은 로컬 캐시를 사용해서 컨피그맵의 현재 값을 가져온다. -캐시 유형은 [KubeletConfiguration 구조체](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)의 +캐시 유형은 [KubeletConfiguration 구조체](/docs/reference/config-api/kubelet-config.v1beta1/)의 `ConfigMapAndSecretChangeDetectionStrategy` 필드를 사용해서 구성할 수 있다. 컨피그맵은 watch(기본값), ttl 기반 또는 API 서버로 직접 모든 요청을 리디렉션할 수 있다. @@ -233,11 +233,12 @@ kubelet은 모든 주기적인 동기화에서 마운트된 컨피그맵이 최 지연을 지켜보거나, 캐시의 ttl 또는 0에 상응함). 환경 변수로 사용되는 컨피그맵은 자동으로 업데이트되지 않으며 파드를 다시 시작해야 한다. + ## 변경할 수 없는(immutable) 컨피그맵 {#configmap-immutable} -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -쿠버네티스 베타 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과 +쿠버네티스 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과 컨피그맵을 변경할 수 없는 것으로 설정하는 옵션을 제공한다. 컨피그맵을 광범위하게 사용하는 클러스터(최소 수만 개의 고유한 컨피그맵이 파드에 마운트)의 경우 데이터 변경을 방지하면 다음과 같은 이점이 있다. diff --git a/content/ko/docs/concepts/configuration/manage-resources-containers.md b/content/ko/docs/concepts/configuration/manage-resources-containers.md index 8f499fbc6a12b..ccd3ee929060b 100644 --- a/content/ko/docs/concepts/configuration/manage-resources-containers.md +++ b/content/ko/docs/concepts/configuration/manage-resources-containers.md @@ -21,9 +21,6 @@ feature: 컨테이너가 사용할 수 있도록 해당 시스템 리소스의 최소 _요청_ 량을 예약한다. - - - ## 요청 및 제한 @@ -72,7 +69,7 @@ Huge page는 노드 커널이 기본 페이지 크기보다 훨씬 큰 메모리 이것은 `memory` 및 `cpu` 리소스와는 다르다. {{< /note >}} -CPU와 메모리를 통칭하여 *컴퓨트 리소스* 또는 그냥 *리소스* 라고 한다. 컴퓨트 +CPU와 메모리를 통칭하여 *컴퓨트 리소스* 또는 *리소스* 라고 한다. 컴퓨트 리소스는 요청, 할당 및 소비될 수 있는 측정 가능한 수량이다. 이것은 [API 리소스](/ko/docs/concepts/overview/kubernetes-api/)와는 다르다. 파드 및 @@ -441,7 +438,9 @@ kubelet은 각 `emptyDir` 볼륨, 컨테이너 로그 디렉터리 및 쓰기 프로젝트 쿼터를 사용하려면, 다음을 수행해야 한다. -* kubelet 구성에서 `LocalStorageCapacityIsolationFSQuotaMonitoring=true` +* [kubelet 구성](/docs/reference/config-api/kubelet-config.v1beta1/)의 + `featureGates` 필드 또는 `--feature-gates` 커맨드 라인 플래그를 사용하여 + `LocalStorageCapacityIsolationFSQuotaMonitoring=true` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한다. @@ -449,6 +448,7 @@ kubelet은 각 `emptyDir` 볼륨, 컨테이너 로그 디렉터리 및 쓰기 프로젝트 쿼터가 활성화되어 있는지 확인한다. 모든 XFS 파일시스템은 프로젝트 쿼터를 지원한다. ext4 파일시스템의 경우, 파일시스템이 마운트되지 않은 상태에서 프로젝트 쿼터 추적 기능을 활성화해야 한다. + ```bash # ext4인 /dev/block-device가 마운트되지 않은 경우 sudo tune2fs -O project -Q prjquota /dev/block-device @@ -518,9 +518,8 @@ JSON-Pointer로 해석된다. 더 자세한 내용은, 클러스터-레벨의 확장된 리소스는 노드에 연결되지 않는다. 이들은 일반적으로 리소스 소비와 리소스 쿼터를 처리하는 스케줄러 익스텐더(extender)에 의해 관리된다. -[스케줄러 정책 구성](https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/scheduler/api/v1/types.go#L31)에서 -스케줄러 익스텐더가 -처리하는 확장된 리소스를 지정할 수 있다. 
+[스케줄러 정책 구성](/docs/reference/config-api/kube-scheduler-policy-config.v1/)에서 +스케줄러 익스텐더가 처리하는 확장된 리소스를 지정할 수 있다. **예제:** @@ -743,23 +742,13 @@ LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-0 컨테이너가 `reason:OOM Killed`(`OOM` 은 메모리 부족(Out Of Memory)의 약자) 때문에 종료된 것을 알 수 있다. - - - - - ## {{% heading "whatsnext" %}} - * [컨테이너와 파드에 메모리 리소스를 할당](/ko/docs/tasks/configure-pod-container/assign-memory-resource/)하는 핸즈온 경험을 해보자. - * [컨테이너와 파드에 CPU 리소스를 할당](/docs/tasks/configure-pod-container/assign-cpu-resource/)하는 핸즈온 경험을 해보자. - * 요청과 제한의 차이점에 대한 자세한 내용은, [리소스 QoS](https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md)를 참조한다. - * [컨테이너](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) API 레퍼런스 읽어보기 - * [ResourceRequirements](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#resourcerequirements-v1-core) API 레퍼런스 읽어보기 - * XFS의 [프로젝트 쿼터](https://xfs.org/docs/xfsdocs-xml-dev/XFS_User_Guide/tmp/en-US/html/xfs-quotas.html)에 대해 읽어보기 +* [kube-scheduler 정책 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/)에 대해 더 읽어보기 diff --git a/content/ko/docs/concepts/configuration/secret.md b/content/ko/docs/concepts/configuration/secret.md index 4637c1a63d6e1..a4544397d7a0f 100644 --- a/content/ko/docs/concepts/configuration/secret.md +++ b/content/ko/docs/concepts/configuration/secret.md @@ -109,7 +109,7 @@ empty-secret Opaque 0 2m6s ``` 해당 `DATA` 열은 시크릿에 저장된 데이터 아이템의 수를 보여준다. -이 경우, `0` 은 비어 있는 시크릿을 방금 하나 생성하였다는 것을 의미한다. +이 경우, `0` 은 비어 있는 시크릿을 하나 생성하였다는 것을 의미한다. ### 서비스 어카운트 토큰 시크릿 @@ -667,7 +667,7 @@ cat /etc/foo/password 볼륨에서 현재 사용되는 시크릿이 업데이트되면, 투영된 키도 결국 업데이트된다. kubelet은 마운트된 시크릿이 모든 주기적인 동기화에서 최신 상태인지 여부를 확인한다. 그러나, kubelet은 시크릿의 현재 값을 가져 오기 위해 로컬 캐시를 사용한다. -캐시의 유형은 [KubeletConfiguration 구조체](https://github.com/kubernetes/kubernetes/blob/{{< param "docsbranch" >}}/staging/src/k8s.io/kubelet/config/v1beta1/types.go)의 +캐시의 유형은 [KubeletConfiguration 구조체](/docs/reference/config-api/kubelet-config.v1beta1/)의 `ConfigMapAndSecretChangeDetectionStrategy` 필드를 사용하여 구성할 수 있다. 시크릿은 watch(기본값), ttl 기반 또는 API 서버로 모든 요청을 직접 리디렉션하여 전파할 수 있다. @@ -749,9 +749,9 @@ echo $SECRET_PASSWORD ## 변경할 수 없는(immutable) 시크릿 {#secret-immutable} -{{< feature-state for_k8s_version="v1.19" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} -쿠버네티스 베타 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 +쿠버네티스 기능인 _변경할 수 없는 시크릿과 컨피그맵_ 은 개별 시크릿과 컨피그맵을 변경할 수 없는 것으로 설정하는 옵션을 제공한다. 시크릿을 광범위하게 사용하는 클러스터(최소 수만 개의 고유한 시크릿이 파드에 마운트)의 경우, 데이터 변경을 방지하면 다음과 같은 이점이 있다. @@ -760,8 +760,8 @@ echo $SECRET_PASSWORD - immutable로 표시된 시크릿에 대한 감시를 중단하여, kube-apiserver의 부하를 크게 줄임으로써 클러스터의 성능을 향상시킴 -이 기능은 v1.19부터 기본적으로 활성화된 `ImmutableEphemeralVolumes` [기능 -게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 +이 기능은 v1.19부터 기본적으로 활성화된 `ImmutableEphemeralVolumes` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)에 의해 제어된다. `immutable` 필드를 `true` 로 설정하여 변경할 수 없는 시크릿을 생성할 수 있다. 다음은 예시이다. ```yaml @@ -865,6 +865,7 @@ LASTSEEN FIRSTSEEN COUNT NAME KIND SUBOBJECT ### 사용 사례: 컨테이너 환경 변수로 사용하기 시크릿 정의를 작성한다. + ```yaml apiVersion: v1 kind: Secret @@ -877,6 +878,7 @@ data: ``` 시크릿을 생성한다. + ```shell kubectl apply -f mysecret.yaml ``` @@ -1173,14 +1175,12 @@ HTTP 요청을 처리하고, 복잡한 비즈니스 로직을 수행한 다음, 시크릿 API에 접근해야 하는 애플리케이션은 필요한 시크릿에 대한 `get` 요청을 수행해야 한다. 
이를 통해 관리자는 앱에 필요한 -[개별 인스턴스에 대한 접근을 허용 목록에 추가]( -/docs/reference/access-authn-authz/rbac/#referring-to-resources)하면서 모든 시크릿에 대한 접근을 +[개별 인스턴스에 대한 접근을 허용 목록에 추가](/docs/reference/access-authn-authz/rbac/#referring-to-resources)하면서 모든 시크릿에 대한 접근을 제한할 수 있다. `get` 반복을 통한 성능 향상을 위해, 클라이언트는 시크릿을 참조한 다음 리소스를 감시(`watch`)하고, 참조가 변경되면 시크릿을 다시 요청하는 리소스를 -설계할 수 있다. 덧붙여, 클라이언트에게 개별 리소스를 감시(`watch`)하도록 하는 ["대량 감시" API]( -https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md)도 +설계할 수 있다. 덧붙여, 클라이언트에게 개별 리소스를 감시(`watch`)하도록 하는 ["대량 감시" API](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/bulk_watch.md)도 제안되었으며, 쿠버네티스의 후속 릴리스에서 사용할 수 있을 것이다. diff --git a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md index d9a1137024976..f2ef1f10a9bf7 100644 --- a/content/ko/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/ko/docs/concepts/containers/container-lifecycle-hooks.md @@ -50,10 +50,11 @@ terminated 또는 completed 상태인 경우에는 `PreStop` 훅 요청이 실 ### 훅 핸들러 구현 컨테이너는 훅의 핸들러를 구현하고 등록함으로써 해당 훅에 접근할 수 있다. -구현될 수 있는 컨테이너의 훅 핸들러에는 두 가지 유형이 있다. +구현될 수 있는 컨테이너의 훅 핸들러에는 세 가지 유형이 있다. * Exec - 컨테이너의 cgroups와 네임스페이스 안에서, `pre-stop.sh`와 같은, 특정 커맨드를 실행. 커맨드에 의해 소비된 리소스는 해당 컨테이너에 대해 계산된다. +* TCP - 컨테이너의 특정 포트에 대한 TCP 연결을 연다. * HTTP - 컨테이너의 특정 엔드포인트에 대해서 HTTP 요청을 실행. ### 훅 핸들러 실행 diff --git a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index d5490601081dc..b543addee6f3f 100644 --- a/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -44,7 +44,7 @@ _선언_ 하거나 지정할 수 있게 해주며 쿠버네티스 오브젝트 클러스터 라이프사이클과 관계없이 실행 중인 클러스터에 커스텀 컨트롤러를 배포하고 업데이트할 수 있다. 커스텀 컨트롤러는 모든 종류의 리소스와 함께 작동할 수 있지만 커스텀 리소스와 결합할 때 특히 효과적이다. -[오퍼레이터 패턴](https://coreos.com/blog/introducing-operators.html)은 사용자 정의 +[오퍼레이터 패턴](/ko/docs/concepts/extend-kubernetes/operator/)은 사용자 정의 리소스와 커스텀 컨트롤러를 결합한다. 커스텀 컨트롤러를 사용하여 특정 애플리케이션에 대한 도메인 지식을 쿠버네티스 API의 익스텐션으로 인코딩할 수 있다. diff --git a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 5f58604cd71c4..3596c9f72e411 100644 --- a/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -192,10 +192,69 @@ kubelet은 gRPC 서비스를 제공하여 사용 중인 장치를 검색하고, // PodResourcesLister는 kubelet에서 제공하는 서비스로, 노드의 포드 및 컨테이너가 // 사용한 노드 리소스에 대한 정보를 제공한다. service PodResourcesLister { - rpc List(ListPodResourcesRequest) returns (ListPodResourcesResponse) {} + rpc GetAllocatableResources(AllocatableResourcesRequest) returns (AllocatableResourcesResponse) {} } ``` +`List` 엔드포인트는 독점적으로 할당된 CPU의 ID, 장치 플러그인에 의해 보고된 장치 ID, +이러한 장치가 할당된 NUMA 노드의 ID와 같은 세부 정보와 함께 +실행 중인 파드의 리소스에 대한 정보를 제공한다. 
+ +```gRPC +// ListPodResourcesResponse는 List 함수가 반환하는 응답이다 +message ListPodResourcesResponse { + repeated PodResources pod_resources = 1; +} + +// PodResources에는 파드에 할당된 노드 리소스에 대한 정보가 포함된다 +message PodResources { + string name = 1; + string namespace = 2; + repeated ContainerResources containers = 3; +} + +// ContainerResources는 컨테이너에 할당된 리소스에 대한 정보를 포함한다 +message ContainerResources { + string name = 1; + repeated ContainerDevices devices = 2; + repeated int64 cpu_ids = 3; +} + +// 토폴로지는 리소스의 하드웨어 토폴로지를 설명한다 +message TopologyInfo { + repeated NUMANode nodes = 1; +} + +// NUMA 노드의 NUMA 표현 +message NUMANode { + int64 ID = 1; +} + +// ContainerDevices는 컨테이너에 할당된 장치에 대한 정보를 포함한다 +message ContainerDevices { + string resource_name = 1; + repeated string device_ids = 2; + TopologyInfo topology = 3; +} +``` + +GetAllocatableResources는 워커 노드에서 처음 사용할 수 있는 리소스에 대한 정보를 제공한다. +kubelet이 APIServer로 내보내는 것보다 더 많은 정보를 제공한다. + +```gRPC +// AllocatableResourcesResponses에는 kubelet이 알고 있는 모든 장치에 대한 정보가 포함된다. +message AllocatableResourcesResponse { + repeated ContainerDevices devices = 1; + repeated int64 cpu_ids = 2; +} + +``` + +`ContainerDevices` 는 장치가 어떤 NUMA 셀과 연관되는지를 선언하는 토폴로지 정보를 노출한다. +NUMA 셀은 불분명한(opaque) 정수 ID를 사용하여 식별되며, 이 값은 +[kubelet에 등록할 때](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) 장치 플러그인이 보고하는 것과 일치한다. + + gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 소켓을 통해 제공된다. 장치 플러그인 리소스에 대한 모니터링 에이전트는 데몬 또는 데몬셋으로 배포할 수 있다. 표준 디렉터리 `/var/lib/kubelet/pod-resources` 에는 특권을 가진 접근이 필요하므로, 모니터링 @@ -204,7 +263,7 @@ gRPC 서비스는 `/var/lib/kubelet/pod-resources/kubelet.sock` 의 유닉스 `/var/lib/kubelet/pod-resources` 를 {{< glossary_tooltip text="볼륨" term_id="volume" >}}으로 마운트해야 한다. -"PodResources 서비스"를 지원하려면 `KubeletPodResources` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. +`PodResourcesLister service` 를 지원하려면 `KubeletPodResources` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. ## 토폴로지 관리자와 장치 플러그인 통합 diff --git a/content/ko/docs/concepts/overview/components.md b/content/ko/docs/concepts/overview/components.md index d4b77e9319080..b4c6079213217 100644 --- a/content/ko/docs/concepts/overview/components.md +++ b/content/ko/docs/concepts/overview/components.md @@ -30,8 +30,9 @@ card: 컨트롤 플레인 컴포넌트는 클러스터 내 어떠한 머신에서든지 동작할 수 있다. 그러나 간결성을 위하여, 구성 스크립트는 보통 동일 머신 상에 모든 컨트롤 플레인 컴포넌트를 구동시키고, -사용자 컨테이너는 해당 머신 상에 동작시키지 않는다. 다중-마스터-VM 설치 예제를 보려면 -[고가용성 클러스터 구성하기](/docs/admin/high-availability/)를 확인해본다. +사용자 컨테이너는 해당 머신 상에 동작시키지 않는다. 여러 VM에서 +실행되는 컨트롤 플레인 설정의 예제를 보려면 +[kubeadm을 사용하여 고가용성 클러스터 만들기](/docs/setup/production-environment/tools/kubeadm/high-availability/)를 확인해본다. ### kube-apiserver diff --git a/content/ko/docs/concepts/overview/working-with-objects/names.md b/content/ko/docs/concepts/overview/working-with-objects/names.md index 891ad4d07a21b..78b7addd43c02 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/names.md +++ b/content/ko/docs/concepts/overview/working-with-objects/names.md @@ -21,11 +21,15 @@ weight: 20 {{< glossary_definition term_id="name" length="all" >}} +{{< note >}} +물리적 호스트를 나타내는 노드와 같이 오브젝트가 물리적 엔티티를 나타내는 경우, 노드를 삭제한 후 다시 생성하지 않은 채 동일한 이름으로 호스트를 다시 생성하면, 쿠버네티스는 새 호스트를 불일치로 이어질 수 있는 이전 호스트로 취급한다. +{{< /note >}} + 다음은 리소스에 일반적으로 사용되는 세 가지 유형의 이름 제한 조건이다. 
### DNS 서브도메인 이름 -대부분의 리소스 유형에는 [RFC 1123](https://tools.ietf.org/html/rfc1123)에 정의된 대로 +대부분의 리소스 유형에는 [RFC 1123](https://tools.ietf.org/html/rfc1123)에 정의된 대로 DNS 서브도메인 이름으로 사용할 수 있는 이름이 필요하다. 이것은 이름이 다음을 충족해야 한다는 것을 의미한다. @@ -83,4 +87,3 @@ UUID는 ISO/IEC 9834-8 과 ITU-T X.667 로 표준화 되어 있다. * 쿠버네티스의 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)에 대해 읽기. * [쿠버네티스의 식별자와 이름](https://git.k8s.io/community/contributors/design-proposals/architecture/identifiers.md) 디자인 문서 읽기. - diff --git a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md index 905375bdc5628..ef75f4f0819b2 100644 --- a/content/ko/docs/concepts/overview/working-with-objects/namespaces.md +++ b/content/ko/docs/concepts/overview/working-with-objects/namespaces.md @@ -26,7 +26,7 @@ weight: 30 동일한 소프트웨어의 다른 버전과 같이 약간 다른 리소스를 분리하기 위해 여러 네임스페이스를 사용할 필요는 없다. 동일한 네임스페이스 내에서 리소스를 -구별하기 위해 [레이블](/ko/docs/concepts/overview/working-with-objects/labels/)을 +구별하기 위해 {{< glossary_tooltip text="레이블" term_id="label" >}}을 사용한다. ## 네임스페이스 다루기 @@ -109,6 +109,16 @@ kubectl api-resources --namespaced=true kubectl api-resources --namespaced=false ``` +## 자동 레이블링 + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +쿠버네티스 컨트롤 플레인은 `NamespaceDefaultLabelName` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 +활성화된 경우 모든 네임스페이스에 변경할 수 없는(immutable) {{< glossary_tooltip text="레이블" term_id="label" >}} +`kubernetes.io / metadata.name` 을 설정한다. +레이블 값은 네임스페이스 이름이다. + + ## {{% heading "whatsnext" %}} * [신규 네임스페이스 생성](/docs/tasks/administer-cluster/namespaces/#creating-a-new-namespace)에 대해 더 배우기. diff --git a/content/ko/docs/concepts/policy/pod-security-policy.md b/content/ko/docs/concepts/policy/pod-security-policy.md index e3c67a4ff9348..8afee5760b112 100644 --- a/content/ko/docs/concepts/policy/pod-security-policy.md +++ b/content/ko/docs/concepts/policy/pod-security-policy.md @@ -9,7 +9,9 @@ weight: 30 -{{< feature-state state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + +파드시큐리티폴리시(PodSecurityPolicy)는 쿠버네티스 v1.21부터 더이상 사용되지 않으며, v1.25에서 제거된다. 파드 시큐리티 폴리시를 사용하면 파드 생성 및 업데이트에 대한 세분화된 권한을 부여할 수 있다. diff --git a/content/ko/docs/concepts/policy/resource-quotas.md b/content/ko/docs/concepts/policy/resource-quotas.md index df23da5d57464..8e1d918ef423d 100644 --- a/content/ko/docs/concepts/policy/resource-quotas.md +++ b/content/ko/docs/concepts/policy/resource-quotas.md @@ -124,6 +124,10 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. | `limits.ephemeral-storage` | 네임스페이스의 모든 파드에서 로컬 임시 스토리지 제한의 합은 이 값을 초과할 수 없음. | | `ephemeral-storage` | `requests.ephemeral-storage` 와 같음. | +{{< note >}} +CRI 컨테이너 런타임을 사용할 때, 컨테이너 로그는 임시 스토리지 쿼터에 포함된다. 이로 인해 스토리지 쿼터를 소진한 파드가 예기치 않게 축출될 수 있다. 자세한 내용은 [로깅 아키텍처](/ko/docs/concepts/cluster-administration/logging/)를 참조한다. +{{< /note >}} + ## 오브젝트 수 쿼터 다음 구문을 사용하여 모든 표준 네임스페이스 처리된(namespaced) 리소스 유형에 대한 @@ -188,7 +192,8 @@ GPU 리소스를 다음과 같이 쿼터를 정의할 수 있다. | `NotTerminating` | `.spec.activeDeadlineSeconds is nil`에 일치하는 파드 | | `BestEffort` | 최상의 서비스 품질을 제공하는 파드 | | `NotBestEffort` | 서비스 품질이 나쁜 파드 | -| `PriorityClass` | 지정된 [프라이올리티 클래스](/ko/docs/concepts/configuration/pod-priority-preemption)를 참조하여 일치하는 파드. | +| `PriorityClass` | 지정된 [프라이어리티 클래스](/ko/docs/concepts/configuration/pod-priority-preemption)를 참조하여 일치하는 파드. | +| `CrossNamespacePodAffinity` | 크로스-네임스페이스 파드 [(안티)어피니티 용어]가 있는 파드 | `BestEffort` 범위는 다음의 리소스를 추적하도록 쿼터를 제한한다. 
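예를 들어, 다음은 `BestEffort` 범위를 사용하는 리소스쿼터의 최소 예시이다. 쿼터 이름과 네임스페이스는 설명을 위해 가정한 값이다.

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: besteffort-pod-quota   # 설명을 위해 가정한 이름
  namespace: example-ns        # 설명을 위해 가정한 네임스페이스
spec:
  hard:
    pods: "10"                 # BestEffort 파드 수를 최대 10개로 제한
  scopes:
  - BestEffort
```

이 쿼터는 해당 네임스페이스에서 BestEffort 서비스 품질(QoS)을 갖는 파드의 수만 추적한다.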
@@ -429,6 +434,63 @@ memory 0 20Gi pods 0 10 ``` +### 네임스페이스 간 파드 어피니티 쿼터 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +오퍼레이터는 네임스페이스를 교차하는 어피니티가 있는 파드를 가질 수 있는 네임스페이스를 +제한하기 위해 `CrossNamespacePodAffinity` 쿼터 범위를 사용할 수 있다. 특히, 파드 어피니티 용어의 +`namespaces` 또는 `namespaceSelector` 필드를 설정할 수 있는 파드를 제어한다. + +안티-어피니티 제약 조건이 있는 파드는 장애 도메인에서 다른 모든 네임스페이스의 파드가 예약되지 않도록 +차단할 수 있으므로 사용자가 네임스페이스 간 어피니티 용어를 +사용하지 못하도록 하는 것이 바람직할 수 있다. + +이 범위 오퍼레이터를 사용하면 `CrossNamespaceAffinity` 범위와 하드(hard) 제한이 0인 +네임스페이스에 리소스 쿼터 오브젝트를 생성하여 특정 네임스페이스(아래 예에서 `foo-ns`)가 네임스페이스 간 파드 어피니티를 +사용하는 파드를 사용하지 못하도록 방지할 수 있다. + +```yaml +apiVersion: v1 +kind: ResourceQuota +metadata: + name: disable-cross-namespace-affinity + namespace: foo-ns +spec: + hard: + pods: "0" + scopeSelector: + matchExpressions: + - scopeName: CrossNamespaceAffinity +``` + +오퍼레이터가 기본적으로 `namespaces` 및 `namespaceSelector` 사용을 허용하지 않고, +특정 네임스페이스에만 허용하려는 경우, kube-apiserver 플래그 --admission-control-config-file를 +다음의 구성 파일의 경로로 설정하여 `CrossNamespaceAffinity` 를 +제한된 리소스로 구성할 수 있다. + +```yaml +apiVersion: apiserver.config.k8s.io/v1 +kind: AdmissionConfiguration +plugins: +- name: "ResourceQuota" + configuration: + apiVersion: apiserver.config.k8s.io/v1 + kind: ResourceQuotaConfiguration + limitedResources: + - resource: pods + matchScopes: + - scopeName: CrossNamespaceAffinity +``` + +위의 구성을 사용하면, 파드는 생성된 네임스페이스에 `CrossNamespaceAffinity` 범위가 있는 리소스 쿼터 오브젝트가 있고, +해당 필드를 사용하는 파드 수보다 크거나 같은 하드 제한이 있는 경우에만 +파드 어피니티에서 `namespaces` 및 `namespaceSelector` 를 사용할 수 있다. + +이 기능은 알파이며 기본적으로 비활성화되어 있다. kube-apiserver 및 kube-scheduler 모두에서 +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +`PodAffinityNamespaceSelector` 를 설정하여 활성화할 수 있다. + ## 요청과 제한의 비교 {#requests-vs-limits} 컴퓨트 리소스를 할당할 때 각 컨테이너는 CPU 또는 메모리에 대한 요청과 제한값을 지정할 수 있다. diff --git a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md index dba31a6c7f0a0..8c095e4a27459 100644 --- a/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/ko/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -11,18 +11,17 @@ weight: 20 -{{< glossary_tooltip text="파드" term_id="pod" >}}를 특정한 {{< glossary_tooltip text="노드(들)" term_id="node" >}}에서만 동작하도록 하거나, -특정 노드들을 선호하도록 제한할 수 있다. -이를 수행하는 방법에는 여러 가지가 있으며, 권장되는 접근 방식은 모두 -[레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/)를 사용하여 선택한다. -보통 스케줄러가 자동으로 합리적인 배치(예: 노드들에 걸쳐 파드를 분배하거나, -자원이 부족한 노드에 파드를 배치하지 않는 등)를 수행하기에 이런 제약 조건은 필요하지 않지만 -간혹 파드가 배치되는 노드에 대해 더 많은 제어를 원할 수 있는 상황이 있다. +특정한 {{< glossary_tooltip text="노드(들)" term_id="node" >}} 집합에서만 동작하도록 +{{< glossary_tooltip text="파드" term_id="pod" >}}를 제한할 수 있다. +이를 수행하는 방법에는 여러 가지가 있으며 권장되는 접근 방식은 모두 +[레이블 셀렉터](/ko/docs/concepts/overview/working-with-objects/labels/)를 사용하여 선택을 용이하게 한다. +보통 스케줄러가 자동으로 합리적인 배치(예: 자원이 부족한 노드에 파드를 배치하지 않도록 +노드 간에 파드를 분배하는 등)를 수행하기에 이러한 제약 조건은 필요하지 않지만 +간혹 파드가 배포할 노드를 제어해야 하는 경우가 있다. 예를 들어 SSD가 장착된 머신에 파드가 연결되도록 하거나 또는 동일한 가용성 영역(availability zone)에서 많은 것을 통신하는 두 개의 서로 다른 서비스의 파드를 같이 배치할 수 있다. - ## 노드 셀렉터(nodeSelector) @@ -120,13 +119,13 @@ spec: 여기에 현재 `requiredDuringSchedulingIgnoredDuringExecution` 와 `preferredDuringSchedulingIgnoredDuringExecution` 로 부르는 두 가지 종류의 노드 어피니티가 있다. 전자는 파드가 노드에 스케줄되도록 *반드시* -규칙을 만족해야 하는 것(`nodeSelector` 와 같으나 보다 표현적인 구문을 사용해서)을 지정하고, +규칙을 만족해야 하는 것(`nodeSelector` 와 비슷하나 보다 표현적인 구문을 사용해서)을 지정하고, 후자는 스케줄러가 시도하려고는 하지만, 보증하지 않는 *선호(preferences)* 를 지정한다는 점에서 이를 각각 "엄격함(hard)" 과 "유연함(soft)" 으로 생각할 수 있다. 
이름의 "IgnoredDuringExecution" 부분은 `nodeSelector` 작동 방식과 유사하게 노드의 -레이블이 런타임 중에 변경되어 파드의 어피니티 규칙이 더 이상 충족되지 않으면 파드가 여전히 그 노드에서 +레이블이 런타임 중에 변경되어 파드의 어피니티 규칙이 더 이상 충족되지 않으면 파드가 그 노드에서 동작한다는 의미이다. 향후에는 파드의 노드 어피니티 요구 사항을 충족하지 않는 노드에서 파드를 제거한다는 -점을 제외하고는 `preferredDuringSchedulingIgnoredDuringExecution` 와 같은 `requiredDuringSchedulingIgnoredDuringExecution` 를 제공할 계획이다. +점을 제외하고는 `preferredDuringSchedulingIgnoredDuringExecution` 와 동일한 `requiredDuringSchedulingIgnoredDuringExecution` 를 제공할 계획이다. 따라서 `requiredDuringSchedulingIgnoredDuringExecution` 의 예로는 "인텔 CPU가 있는 노드에서만 파드 실행"이 될 수 있고, `preferredDuringSchedulingIgnoredDuringExecution` 의 예로는 "장애 조치 영역 XYZ에 파드 집합을 실행하려고 @@ -271,6 +270,18 @@ PodSpec에 지정된 NodeAffinity도 적용된다. 파드를 노드에 스케줄하려면 `requiredDuringSchedulingIgnoredDuringExecution` 어피니티와 안티-어피니티와 연관된 `matchExpressions` 가 모두 충족되어야 한다. +#### 네임스페이스 셀렉터 +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +사용자는 네임스페이스 집합에 대한 레이블 쿼리인 `namespaceSelector` 를 사용하여 일치하는 네임스페이스를 선택할 수도 있다. +어피니티 용어는 `namespaceSelector` 에서 선택한 네임스페이스와 `namespaces` 필드에 나열된 네임스페이스의 결합에 적용된다. +빈 `namespaceSelector` ({})는 모든 네임스페이스와 일치하는 반면, null 또는 빈 `namespaces` 목록과 +null `namespaceSelector` 는 "이 파드의 네임스페이스"를 의미한다. + +이 기능은 알파이며 기본적으로 비활성화되어 있다. kube-apiserver 및 kube-scheduler 모두에서 +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +`PodAffinityNamespaceSelector` 를 설정하여 활성화할 수 있다. + #### 더 실용적인 유스케이스 파드간 어피니티와 안티-어피니티는 레플리카셋, 스테이트풀셋, 디플로이먼트 등과 같은 diff --git a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md index 83059ba9315a3..86e67978c2137 100644 --- a/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/ko/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -86,6 +86,7 @@ _스코어링_ 단계에서 스케줄러는 목록에 남아있는 노드의 순 * [스케줄러 성능 튜닝](/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning/)에 대해 읽기 * [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)에 대해 읽기 * kube-scheduler의 [레퍼런스 문서](/docs/reference/command-line-tools-reference/kube-scheduler/) 읽기 +* [kube-scheduler 구성(v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 레퍼런스 읽기 * [멀티 스케줄러 구성하기](/docs/tasks/extend-kubernetes/configure-multiple-schedulers/)에 대해 배우기 * [토폴로지 관리 정책](/docs/tasks/administer-cluster/topology-manager/)에 대해 배우기 * [파드 오버헤드](/ko/docs/concepts/scheduling-eviction/pod-overhead/)에 대해 배우기 diff --git a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 03eec40faec13..9e049cd348165 100644 --- a/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/ko/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -24,8 +24,6 @@ API 서버에 해당 결정을 통지한다. 본 페이지에서는 상대적으로 큰 규모의 쿠버네티스 클러스터에 대한 성능 튜닝 최적화에 대해 설명한다. - - 큰 규모의 클러스터에서는 스케줄러의 동작을 튜닝하여 응답 시간 @@ -44,8 +42,10 @@ kube-scheduler 의 `percentageOfNodesToScore` 설정을 통해 `percentageOfNodesToScore` 를 100 보다 높게 설정해도 kube-scheduler는 마치 100을 설정한 것처럼 작동한다. -값을 변경하려면, kube-scheduler 구성 파일(이 파일은 `/etc/kubernetes/config/kube-scheduler.yaml` -일 수 있다)을 편집한 다음 스케줄러를 재시작 한다. +값을 변경하려면, +[kube-scheduler 구성 파일](/docs/reference/config-api/kube-scheduler-config.v1beta1/)을 +편집한 다음 스케줄러를 재시작한다. +대부분의 경우, 구성 파일은 `/etc/kubernetes/config/kube-scheduler.yaml` 에서 찾을 수 있다. 
이를 변경한 후에 다음을 실행해서 @@ -99,7 +99,6 @@ algorithmSource: percentageOfNodesToScore: 50 ``` - ### percentageOfNodesToScore 튜닝 `percentageOfNodesToScore`는 1과 100 사이의 값이어야 하며 @@ -159,3 +158,7 @@ percentageOfNodesToScore: 50 ``` 모든 노드를 검토한 후, 노드 1로 돌아간다. + +## {{% heading "whatsnext" %}} + +* [kube-scheduler 구성 레퍼런스(v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 확인 diff --git a/content/ko/docs/concepts/security/controlling-access.md b/content/ko/docs/concepts/security/controlling-access.md index 3b45be648cac0..9612159eb4807 100644 --- a/content/ko/docs/concepts/security/controlling-access.md +++ b/content/ko/docs/concepts/security/controlling-access.md @@ -38,7 +38,7 @@ API 서버가 하나 이상의 인증기 모듈을 실행하도록 구성한다. 인증기는 [여기](/docs/reference/access-authn-authz/authentication/)에서 더 자세히 서술한다. 인증 단계로 들어가는 것은 온전한 HTTP 요청이지만 -일반적으로 헤더 그리고/또는 클라이언트 인증서만 검사한다. +일반적으로 헤더 그리고/또는 클라이언트 인증서를 검사한다. 인증 모듈은 클라이언트 인증서, 암호 및 일반 토큰, 부트스트랩 토큰, JWT 토큰(서비스 어카운트에 사용됨)을 포함한다. diff --git a/content/ko/docs/concepts/services-networking/connect-applications-service.md b/content/ko/docs/concepts/services-networking/connect-applications-service.md index 9002778ede3e1..0848c357725ff 100644 --- a/content/ko/docs/concepts/services-networking/connect-applications-service.md +++ b/content/ko/docs/concepts/services-networking/connect-applications-service.md @@ -383,7 +383,7 @@ $ curl https://: -k

<title>Welcome to nginx!</title>

``` -이제 클라우드 로드 밸런서를 사용하도록 서비스를 재생성하고, `my-nginx` 서비스의 `Type` 을 `NodePort` 에서 `LoadBalancer` 로 변경한다. +이제 클라우드 로드 밸런서를 사용하도록 서비스를 재생성한다. `my-nginx` 서비스의 `Type` 을 `NodePort` 에서 `LoadBalancer` 로 변경한다. ```shell kubectl edit svc my-nginx diff --git a/content/ko/docs/concepts/services-networking/dual-stack.md b/content/ko/docs/concepts/services-networking/dual-stack.md index dcfb818650a58..821ca34989e68 100644 --- a/content/ko/docs/concepts/services-networking/dual-stack.md +++ b/content/ko/docs/concepts/services-networking/dual-stack.md @@ -11,11 +11,11 @@ weight: 70 -{{< feature-state for_k8s_version="v1.16" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} - IPv4/IPv6 이중 스택을 사용하면 {{< glossary_tooltip text="파드" term_id="pod" >}} 와 {{< glossary_tooltip text="서비스" term_id="service" >}} 에 IPv4와 IPv6 주소를 모두 할당 할 수 있다. +IPv4/IPv6 이중 스택 네트워킹을 사용하면 {{< glossary_tooltip text="파드" term_id="pod" >}}와 {{< glossary_tooltip text="서비스" term_id="service" >}}에 IPv4와 IPv6 주소를 모두 할당할 수 있다. -만약 쿠버네티스 클러스터에서 IPv4/IPv6 이중 스택 네트워킹을 활성화하면, 클러스터는 IPv4와 IPv6 주소의 동시 할당을 지원하게 된다. +IPv4/IPv6 이중 스택 네트워킹은 1.21부터 쿠버네티스 클러스터에 기본적으로 활성화되어 있고, IPv4 및 IPv6 주소를 동시에 할당할 수 있다. @@ -23,7 +23,7 @@ weight: 70 ## 지원되는 기능 -쿠버네티스 클러스터에서 IPv4/IPv6 이중 스택을 활성화하면 다음의 기능을 제공한다. +쿠버네티스 클러스터의 IPv4/IPv6 이중 스택은 다음의 기능을 제공한다. * 이중 스택 파드 네트워킹(파드 당 단일 IPv4와 IPv6 주소 할당) * IPv4와 IPv6 지원 서비스 @@ -40,34 +40,34 @@ IPv4/IPv6 이중 스택 쿠버네티스 클러스터를 활용하려면 다음 * 이중 스택 네트워킹을 위한 공급자의 지원(클라우드 공급자 또는 다른 방식으로 쿠버네티스 노드에 라우팅 가능한 IPv4/IPv6 네트워크 인터페이스를 제공할 수 있어야 한다.) * 이중 스택(예: Kubenet 또는 Calico)을 지원하는 네트워크 플러그인 -## IPv4/IPv6 이중 스택 활성화 +## IPv4/IPv6 이중 스택 구성 -IPv4/IPv6 이중 스택을 활성화 하려면, 클러스터의 관련 구성요소에 대해 `IPv6DualStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) 를 활성화 하고, 이중 스택 클러스터 네트워크 할당을 설정한다. +IPv4/IPv6 이중 스택을 사용하려면, 클러스터의 관련 구성 요소에 대해 `IPv6DualStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화한다. (1.21부터 IPv4/IPv6 이중 스택이 기본적으로 활성화된다.) + +IPv4/IPv6 이중 스택을 구성하려면, 이중 스택 클러스터 네트워크 할당을 설정한다. * kube-apiserver: - * `--feature-gates="IPv6DualStack=true"` * `--service-cluster-ip-range=,` * kube-controller-manager: - * `--feature-gates="IPv6DualStack=true"` * `--cluster-cidr=,` * `--service-cluster-ip-range=,` * `--node-cidr-mask-size-ipv4|--node-cidr-mask-size-ipv6` IPv4의 기본값은 /24 이고 IPv6의 기본값은 /64 이다. - * kubelet: - * `--feature-gates="IPv6DualStack=true"` * kube-proxy: * `--cluster-cidr=,` - * `--feature-gates="IPv6DualStack=true"` {{< note >}} IPv4 CIDR의 예: `10.244.0.0/16` (자신의 주소 범위를 제공하더라도) IPv6 CIDR의 예: `fdXY:IJKL:MNOP:15::/64` (이 형식으로 표시되지만, 유효한 주소는 아니다 - [RFC 4193](https://tools.ietf.org/html/rfc4193)을 본다.) +1.21부터, IPv4/IPv6 이중 스택은 기본적으로 활성화된다. +필요한 경우 kube-apiserver, kube-controller-manager, kubelet 및 kube-proxy 커맨드 라인에 +`--feature-gates="IPv6DualStack=false"` 를 지정하여 비활성화할 수 있다. {{< /note >}} ## 서비스 -클러스터에 이중 스택이 활성화된 경우 IPv4, IPv6 또는 둘 다를 사용할 수 있는 {{< glossary_tooltip text="서비스" term_id="service" >}}를 만들 수 있다. +IPv4, IPv6 또는 둘 다를 사용할 수 있는 {{< glossary_tooltip text="서비스" term_id="service" >}}를 생성할 수 있다. 서비스의 주소 계열은 기본적으로 첫 번째 서비스 클러스터 IP 범위의 주소 계열로 설정된다. (`--service-cluster-ip-range` 플래그를 통해 kube-apiserver에 구성) @@ -76,11 +76,9 @@ IPv6 CIDR의 예: `fdXY:IJKL:MNOP:15::/64` (이 형식으로 표시되지만, * `SingleStack`: 단일 스택 서비스. 컨트롤 플레인은 첫 번째로 구성된 서비스 클러스터 IP 범위를 사용하여 서비스에 대한 클러스터 IP를 할당한다. * `PreferDualStack`: - * 클러스터에 이중 스택이 활성화된 경우에만 사용된다. 서비스에 대해 IPv4 및 IPv6 클러스터 IP를 할당한다. - * 클러스터에 이중 스택이 활성화되지 않은 경우, 이 설정은 `SingleStack`과 동일한 동작을 따른다. + * 서비스에 IPv4 및 IPv6 클러스터 IP를 할당한다. 
(클러스터에 `--feature-gates="IPv6DualStack=false"` 가 있는 경우, 이 설정은 `SingleStack` 과 동일한 동작을 따른다.) * `RequireDualStack`: IPv4 및 IPv6 주소 범위 모두에서 서비스 `.spec.ClusterIPs`를 할당한다. * `.spec.ipFamilies` 배열의 첫 번째 요소의 주소 계열을 기반으로 `.spec.ClusterIPs` 목록에서 `.spec.ClusterIP`를 선택한다. - * 클러스터에는 이중 스택 네트워킹이 구성되어 있어야 한다. 단일 스택에 사용할 IP 계열을 정의하거나 이중 스택에 대한 IP 군의 순서를 정의하려는 경우, 서비스에서 옵션 필드 `.spec.ipFamilies`를 설정하여 주소 군을 선택할 수 있다. @@ -121,7 +119,7 @@ IPv6 CIDR의 예: `fdXY:IJKL:MNOP:15::/64` (이 형식으로 표시되지만, #### 기존 서비스의 이중 스택 기본값 -이 예제는 서비스가 이미있는 클러스터에서 이중 스택이 새로 활성화된 경우의 기본 동작을 보여준다. +이 예제는 서비스가 이미 있는 클러스터에서 이중 스택이 새로 활성화된 경우의 기본 동작을 보여준다. (`--feature-gates="IPv6DualStack=false"` 가 설정되지 않은 경우 기존 클러스터를 1.21로 업그레이드하면 이중 스택이 활성화된다.) 1. 클러스터에서 이중 스택이 활성화된 경우 기존 서비스 (`IPv4` 또는 `IPv6`)는 컨트롤 플레인이 `.spec.ipFamilyPolicy`를 `SingleStack`으로 지정하고 `.spec.ipFamilies`를 기존 서비스의 주소 계열로 설정한다. 기존 서비스 클러스터 IP는 `.spec.ClusterIPs`에 저장한다. @@ -237,3 +235,5 @@ spec: * [IPv4/IPv6 이중 스택 검증](/ko/docs/tasks/network/validate-dual-stack) 네트워킹 +* [kubeadm을 사용하여 이중 스택 네트워킹 활성화 +](/docs/setup/production-environment/tools/kubeadm/dual-stack-support/) diff --git a/content/ko/docs/concepts/services-networking/endpoint-slices.md b/content/ko/docs/concepts/services-networking/endpoint-slices.md index f75dc819c3c6e..4e12cf9ff210d 100644 --- a/content/ko/docs/concepts/services-networking/endpoint-slices.md +++ b/content/ko/docs/concepts/services-networking/endpoint-slices.md @@ -1,13 +1,13 @@ --- title: 엔드포인트슬라이스 content_type: concept -weight: 35 +weight: 45 --- -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} _엔드포인트슬라이스_ 는 쿠버네티스 클러스터 내의 네트워크 엔드포인트를 추적하는 간단한 방법을 제공한다. 이것은 엔드포인트를 더 확장하고, 확장 가능한 @@ -50,7 +50,7 @@ term_id="selector" >}}가 지정되면 컨트롤 플레인은 자동으로 리소스 샘플이 있다. ```yaml -apiVersion: discovery.k8s.io/v1beta1 +apiVersion: discovery.k8s.io/v1 kind: EndpointSlice metadata: name: example-abc @@ -67,13 +67,12 @@ endpoints: conditions: ready: true hostname: pod-1 - topology: - kubernetes.io/hostname: node-1 - topology.kubernetes.io/zone: us-west2-a + nodeName: node-1 + zone: us-west2-a ``` 기본적으로, 컨트롤 플레인은 각각 100개 이하의 엔드포인트를 -갖도록 엔드포인트슬라이스를 +갖도록 엔드포인트슬라이스를 생성하고 관리한다. `--max-endpoints-per-slice` {{< glossary_tooltip text="kube-controller-manager" term_id="kube-controller-manager" >}} 플래그를 사용하여, 최대 1000개까지 구성할 수 있다. @@ -98,9 +97,9 @@ endpoints: #### 준비 -`ready`는 파드의 `Ready` 조건에 매핑되는 조건이다. `Ready` 조건이 `True`로 설정된 실행 중인 파드는 -이 엔드포인트슬라이스 조건도 `true`로 설정되어야 한다. 호환성의 -이유로, 파드가 종료될 때 `ready`는 절대 `true`가 되면 안 된다. 컨슈머는 `serving` 조건을 참조하여 +`ready`는 파드의 `Ready` 조건에 매핑되는 조건이다. `Ready` 조건이 `True`로 설정된 실행 중인 파드는 +이 엔드포인트슬라이스 조건도 `true`로 설정되어야 한다. 호환성의 +이유로, 파드가 종료될 때 `ready`는 절대 `true`가 되면 안 된다. 컨슈머는 `serving` 조건을 참조하여 파드 종료 준비 상태(readiness)를 검사해야 한다. 이 규칙의 유일한 예외는 `spec.publishNotReadyAddresses`가 `true`로 설정된 서비스이다. 이러한 서비스의 엔드 포인트는 항상 `ready`조건이 `true`로 설정된다. @@ -110,16 +109,16 @@ endpoints: {{< feature-state for_k8s_version="v1.20" state="alpha" >}} `serving`은 종료 상태를 고려하지 않는다는 점을 제외하면 `ready` 조건과 동일하다. -엔드포인트슬라이스 API 컨슈머는 파드가 종료되는 동안 파드 준비 상태에 관심이 있다면 +엔드포인트슬라이스 API 컨슈머는 파드가 종료되는 동안 파드 준비 상태에 관심이 있다면 이 조건을 확인해야 한다. {{< note >}} `serving`은 `ready`와 거의 동일하지만 `ready`의 기존 의미가 깨지는 것을 방지하기 위해 추가되었다. -엔드포인트를 종료하기 위해 `ready`가 `true` 일 수 있다면 기존 클라이언트에게는 예상치 못한 일이 될 수 있다. +엔드포인트를 종료하기 위해 `ready`가 `true` 일 수 있다면 기존 클라이언트에게는 예상치 못한 일이 될 수 있다. 역사적으로 종료된 엔드포인트는 처음부터 엔드포인트 또는 엔드포인트슬라이스 API에 포함되지 않았기 때문이다. 
-이러한 이유로 `ready`는 엔드포인트 종료를 위해 _always_ `false`이며, -클라이언트가 `ready`에 대한 기존 의미와 관계없이 파드 종료 준비 상태를 +이러한 이유로 `ready`는 엔드포인트 종료를 위해 _always_ `false`이며, +클라이언트가 `ready`에 대한 기존 의미와 관계없이 파드 종료 준비 상태를 추적 할 수 있도록 v1.20에 새로운 조건 `serving`이 추가되었다. {{< /note >}} @@ -133,30 +132,26 @@ endpoints: ### 토폴로지 정보 {#토폴로지} -{{< feature-state for_k8s_version="v1.20" state="deprecated" >}} +엔드포인트슬라이스 내의 각 엔드 포인트는 관련 토폴로지 정보를 포함할 수 있다. +토폴로지 정보에는 엔드 포인트의 위치와 해당 노드 및 +영역에 대한 정보가 포함된다. 엔드포인트슬라이스의 다음의 엔드 포인트별 +필드에서 사용할 수 있다. + +*`nodeName` - 이 엔드 포인트가 있는 노드의 이름이다. +*`zone` - 이 엔드 포인트가 있는 영역이다. {{< note >}} -엔드포인트슬라이스의 토폴로지 필드는 사용 중단되었으며 향후 릴리스에서 제거된다. -토폴로지에서 `kubernetes.io/hostname`을 설정하는 대신 새로운 `nodeName` 필드가 -사용된다. 영역 및 리전을 커버하는 다른 토폴로지 필드는 -엔드포인트슬라이스 내의 모든 엔드포인트에 적용되는 -엔드포인트슬라이스 레이블을 이용해 더 잘 표현될 수 있다. +v1 API에서는, 전용 필드 `nodeName` 및 `zone` 을 위해 엔드 포인트별 +`topology` 가 효과적으로 제거되었다. + +`EndpointSlice` 리소스의 `endpoint` 필드에 임의의 토폴로지 필드를 +설정하는 것은 더 이상 사용되지 않으며, v1 API에서 지원되지 않는다. 대신, +v1 API는 개별 `nodeName` 및 `zone` 필드 설정을 지원한다. 이러한 +필드는 API 버전 간에 자동으로 번역된다. 예를 들어, +v1beta1 API의 `topology` 필드에 있는 `"topology.kubernetes.io/zone"` +키 값은 v1 API의 `zone` 필드로 접근할 수 있다. {{< /note >}} -엔드포인트슬라이스 내 각 엔드포인트는 연관된 토폴로지 정보를 포함할 수 있다. -이는 해당 노드, 영역 그리고 지역에 대한 정보가 포함된 -엔드포인트가 있는 위치를 나타나는데 사용 한다. 값을 사용할 수 있으면, -컨트롤 플레인은 엔드포인트슬라이스에 대해 다음의 토폴로지 레이블을 설정한다. - -* `kubernetes.io/hostname` - 이 엔드포인트가 있는 노드의 이름. -* `topology.kubernetes.io/zone` - 이 엔드포인트가 있는 영역의 이름. -* `topology.kubernetes.io/region` - 이 엔드포인트가 있는 지역의 이름. - -이런 레이블 값은 슬라이스의 각 엔드포인트와 연관된 리소스에서 -파생된다. 호스트 이름 레이블은 해당 파드의 -NodeName 필드 값을 나타낸다. 영역 및 지역 레이블은 해당 -노드에서 이름이 같은 값을 나타낸다. - ### 관리 대부분의 경우, 컨트롤 플레인(특히, 엔드포인트 슬라이스 diff --git a/content/ko/docs/concepts/services-networking/ingress.md b/content/ko/docs/concepts/services-networking/ingress.md index ec705e6a7c113..802cc486bfff2 100644 --- a/content/ko/docs/concepts/services-networking/ingress.md +++ b/content/ko/docs/concepts/services-networking/ingress.md @@ -218,7 +218,19 @@ Events: {{< codenew file="service/networking/external-lb.yaml" >}} IngressClass 리소스에는 선택적인 파라미터 필드가 있다. 이 클래스에 대한 -추가 구성을 참조하는데 사용할 수 있다. +추가 구현 별 구성을 참조하는데 사용할 수 있다. + +#### 네임스페이스 범위의 파라미터 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +`Parameters` 필드에는 인그레스 클래스 구성을 위해 네임스페이스 별 리소스를 참조하는 데 +사용할 수 있는 `scope` 및 `namespace` 필드가 있다. +`Scope` 필드의 기본값은 `Cluster` 이다. 즉, 기본값은 클러스터 범위의 +리소스이다. `Scope` 를 `Namespace` 로 설정하고 `Namespace` 필드를 +설정하면 특정 네임스페이스의 파라미터 리소스를 참조한다. + +{{< codenew file="service/networking/namespaced-params.yaml" >}} ### 사용중단(Deprecated) 어노테이션 @@ -257,7 +269,7 @@ IngressClass 리소스에는 선택적인 파라미터 필드가 있다. 이 클 {{< codenew file="service/networking/test-ingress.yaml" >}} -만약 `kubectl apply -f` 를 사용해서 생성한다면 방금 추가한 인그레스의 +만약 `kubectl apply -f` 를 사용해서 생성한다면 추가한 인그레스의 상태를 볼 수 있어야 한다. ```bash diff --git a/content/ko/docs/concepts/services-networking/network-policies.md b/content/ko/docs/concepts/services-networking/network-policies.md index d7872b1d92e02..c68d6f2862d82 100644 --- a/content/ko/docs/concepts/services-networking/network-policies.md +++ b/content/ko/docs/concepts/services-networking/network-policies.md @@ -220,18 +220,72 @@ __ipBlock__: 인그레스 소스 또는 이그레스 대상으로 허용할 IP C SCTP 프로토콜 네트워크폴리시를 지원하는 {{< glossary_tooltip text="CNI" term_id="cni" >}} 플러그인을 사용하고 있어야 한다. {{< /note >}} +## 포트 범위 지정 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +네트워크폴리시를 작성할 때, 단일 포트 대신 포트 범위를 대상으로 지정할 수 있다. + +다음 예와 같이 `endPort` 필드를 사용하면, 이 작업을 수행할 수 있다. 
+ +```yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 +``` + +위 규칙은 대상 포트가 32000에서 32768 사이에 있는 경우, 네임스페이스 `default` 에 레이블이 `db` 인 모든 파드가 TCP를 통해 `10.0.0.0/24` 범위 내의 모든 IP와 통신하도록 허용한다. + +이 필드를 사용할 때 다음의 제한 사항이 적용된다. +* 알파 기능으로, 기본적으로 비활성화되어 있다. 클러스터 수준에서 `endPort` 필드를 활성화하려면, 사용자(또는 클러스터 관리자)가 `--feature-gates=NetworkPolicyEndPort=true,…` 가 있는 API 서버에 대해 `NetworkPolicyEndPort` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. +* `endPort` 필드는 `port` 필드보다 크거나 같아야 한다. +* `endPort` 는 `port` 도 정의된 경우에만 정의할 수 있다. +* 두 포트 모두 숫자여야 한다. + +{{< note >}} +클러스터는 {{< glossary_tooltip text="CNI" term_id="cni" >}} 플러그인을 사용해야 한다. +네트워크폴리시 명세에서 `endPort` 필드를 지원한다. +{{< /note >}} + +## 이름으로 네임스페이스 지정 + +{{< feature-state state="beta" for_k8s_version="1.21" >}} + +쿠버네티스 컨트롤 플레인은 `NamespaceDefaultLabelName` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화된 경우 +모든 네임스페이스에 변경할 수 없는(immutable) 레이블 `kubernetes.io/metadata.name` 을 설정한다. +레이블의 값은 네임스페이스 이름이다. + +네트워크폴리시는 일부 오브젝트 필드가 있는 이름으로 네임스페이스를 대상으로 지정할 수 없지만, 표준화된 레이블을 사용하여 +특정 네임스페이스를 대상으로 지정할 수 있다. + ## 네트워크 정책으로 할 수 없는 것(적어도 아직은 할 수 없는) -쿠버네티스 1.20부터 다음의 기능은 네트워크폴리시 API에 존재하지 않지만, 운영 체제 컴포넌트(예: SELinux, OpenVSwitch, IPTables 등) 또는 Layer 7 기술(인그레스 컨트롤러, 서비스 메시 구현) 또는 어드미션 컨트롤러를 사용하여 제2의 해결책을 구현할 수 있다. 쿠버네티스의 네트워크 보안을 처음 사용하는 경우, 네트워크폴리시 API를 사용하여 다음의 사용자 스토리를 (아직) 구현할 수 없다는 점에 유의할 가치가 있다. 이러한 사용자 스토리 중 일부(전부는 아님)가 네트워크폴리시 API의 향후 릴리스에서 활발히 논의되고 있다. +쿠버네티스 {{< skew latestVersion >}}부터 다음의 기능은 네트워크폴리시 API에 존재하지 않지만, 운영 체제 컴포넌트(예: SELinux, OpenVSwitch, IPTables 등) 또는 Layer 7 기술(인그레스 컨트롤러, 서비스 메시 구현) 또는 어드미션 컨트롤러를 사용하여 제2의 해결책을 구현할 수 있다. 쿠버네티스의 네트워크 보안을 처음 사용하는 경우, 네트워크폴리시 API를 사용하여 다음의 사용자 스토리를 (아직) 구현할 수 없다는 점에 유의할 필요가 있다. - 내부 클러스터 트래픽이 공통 게이트웨이를 통과하도록 강제한다(서비스 메시나 기타 프록시와 함께 제공하는 것이 가장 좋을 수 있음). - TLS와 관련된 모든 것(이를 위해 서비스 메시나 인그레스 컨트롤러 사용). - 노드별 정책(이에 대해 CIDR 표기법을 사용할 수 있지만, 특히 쿠버네티스 ID로 노드를 대상으로 지정할 수 없음). -- 이름으로 네임스페이스나 서비스를 타겟팅한다(그러나, {{< glossary_tooltip text="레이블" term_id="label" >}}로 파드나 네임스페이스를 타겟팅할 수 있으며, 이는 종종 실행할 수 있는 해결 방법임). +- 이름으로 서비스를 타겟팅한다(그러나, {{< glossary_tooltip text="레이블" term_id="label" >}}로 파드나 네임스페이스를 타겟팅할 수 있으며, 이는 종종 실행할 수 있는 해결 방법임). - 타사 공급사가 이행한 "정책 요청"의 생성 또는 관리. - 모든 네임스페이스나 파드에 적용되는 기본 정책(이를 수행할 수 있는 타사 공급사의 쿠버네티스 배포본 및 프로젝트가 있음). - 고급 정책 쿼리 및 도달 가능성 도구. -- 단일 정책 선언에서 포트 범위를 대상으로 하는 기능. - 네트워크 보안 이벤트를 기록하는 기능(예: 차단되거나 수락된 연결). - 명시적으로 정책을 거부하는 기능(현재 네트워크폴리시 모델은 기본적으로 거부하며, 허용 규칙을 추가하는 기능만 있음). - 루프백 또는 들어오는 호스트 트래픽을 방지하는 기능(파드는 현재 로컬 호스트 접근을 차단할 수 없으며, 상주 노드의 접근을 차단할 수 있는 기능도 없음). diff --git a/content/ko/docs/concepts/services-networking/service-topology.md b/content/ko/docs/concepts/services-networking/service-topology.md index 567b9987911dd..47799ba9f73fc 100644 --- a/content/ko/docs/concepts/services-networking/service-topology.md +++ b/content/ko/docs/concepts/services-networking/service-topology.md @@ -1,10 +1,8 @@ --- -title: 서비스 토폴로지 -feature: - title: 서비스 토폴로지 - description: > - 클러스터 토폴로지를 기반으로 서비스 트래픽 라우팅. 
+ + +title: 토폴로지 키를 사용하여 토폴로지-인지 트래픽 라우팅 content_type: concept weight: 10 --- @@ -12,7 +10,16 @@ weight: 10 -{{< feature-state for_k8s_version="v1.17" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="deprecated" >}} + +{{< note >}} + +이 기능, 특히 알파 `topologyKeys` API는 쿠버네티스 v1.21부터 +더 이상 사용되지 않는다. +쿠버네티스 v1.21에 도입된 [토폴로지 인지 힌트](/docs/concepts/services-networking/topology-aware-hints/)는 +유사한 기능을 제공한다. + +{{}} _서비스 토폴로지_ 를 활성화 하면 서비스는 클러스터의 노드 토폴로지를 기반으로 트래픽을 라우팅한다. 예를 들어, 서비스는 트래픽을 @@ -20,33 +27,33 @@ _서비스 토폴로지_ 를 활성화 하면 서비스는 클러스터의 노 우선적으로 라우팅되도록 지정할 수 있다. - ## 소개 기본적으로 `ClusterIP` 또는 `NodePort` 서비스로 전송된 트래픽은 서비스의 -모든 백엔드 주소로 라우팅 될 수 있다. 쿠버네티스 1.7부터는 "외부(external)" -트래픽을 수신한 노드에서 실행중인 파드로 라우팅할 수 있었지만, -`ClusterIP` 서비스에서는 지원되지 않으며 더 복잡한 -토폴로지 — 영역별 라우팅과 같은 — 에서는 불가능 했다. -_서비스 토폴로지_ 기능은 서비스 생성자가 발신 노드와 수신 노드에 대해서 -노드 레이블에 기반한 트래픽 라우팅 정책을 정의할 수 있도록 -함으로써 이 문제를 해결한다. - -소스와 목적지의 노드 레이블 일치를 사용하여 운영자는 운영자의 요구 사항에 -적합한 메트릭에 대해서 서로 "근접(closer)" 하거나 "먼(farther)" -노드 그룹을 지정할 수 있다. 공용 클라우드의 많은 운영자들이 서비스 트래픽을 -동일한 영역에서 유지하는 것을 선호하는 것을 필요성의 예제로 볼 수 있다. 그 이유는 -지역간의 트래픽에는 관련 비용이 발생하지만 지역 내의 트래픽은 발생하지 않기 때문이다. -다른 일반적인 필요성으로는 DaemonSet이 관리하는 로컬 파드로 -트래픽을 라우팅 하거나, 대기시간을 최소화하기 위해 동일한 랙 상단(top-of-rack) 스위치에 -연결된 노드로 트래픽을 유지하는 것이 있다. +모든 백엔드 주소로 라우팅될 수 있다. 쿠버네티스 1.7을 사용하면 트래픽을 수신한 +동일한 노드에서 실행 중인 파드로 "외부(external)" 트래픽을 라우팅할 수 +있다. `ClusterIP` 서비스의 경우, 라우팅에 대한 동일한 노드 기본 설정이 +불가능했다. 또한 동일한 영역 내의 엔드 포인트에 대한 라우팅을 선호하도록 +클러스터를 구성할 수도 없다. +서비스에 `topologyKeys` 를 설정하면, 출발 및 대상 노드에 대한 +노드 레이블을 기반으로 트래픽을 라우팅하는 정책을 정의할 수 있다. + +소스와 목적지 사이의 레이블 일치를 통해 클러스터 운영자는 +서로 "근접(closer)"하거나 "먼(father)" 노드 그룹을 지정할 수 있다. +자신의 요구 사항에 맞는 메트릭을 나타내는 레이블을 정의할 수 있다. +예를 들어, 퍼블릭 클라우드에서는 지역 간의 트래픽에는 관련 비용이 발생(지역 내 +트래픽은 일반적으로 그렇지 않다)하기 때문에, 네트워크 트래픽을 동일한 지역 내에 유지하는 것을 +선호할 수 있다. 다른 일반적인 필요성으로는 데몬셋(DaemonSet)이 관리하는 +로컬 파드로 트래픽을 라우팅하거나, 대기 시간을 최소화하기 위해 +동일한 랙 상단(top-of-rack) 스위치에 연결된 노드로 트래픽을 +유지하는 것이 있다. ## 서비스 토폴로지 사용하기 -만약 클러스터에서 서비스 토폴로지가 활성화된 경우, 서비스 사양에서 +만약 클러스터에서 `ServiceTopology` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화된 경우, 서비스 사양에서 `topologyKeys` 필드를 지정해서 서비스 트래픽 라우팅을 제어할 수 있다. 이 필드는 이 서비스에 접근할 때 엔드포인트를 정렬하는데 사용되는 노드 레이블의 우선 순위 목록이다. 트래픽은 첫 번째 레이블 값이 해당 레이블의 @@ -196,5 +203,3 @@ spec: * [서비스 토폴로지 활성화하기](/docs/tasks/administer-cluster/enabling-service-topology)를 읽어보기. * [서비스와 애플리케이션 연결하기](/ko/docs/concepts/services-networking/connect-applications-service/)를 읽어보기. - - diff --git a/content/ko/docs/concepts/services-networking/service.md b/content/ko/docs/concepts/services-networking/service.md index b01a971cffaf9..e5aa794ae0bdb 100644 --- a/content/ko/docs/concepts/services-networking/service.md +++ b/content/ko/docs/concepts/services-networking/service.md @@ -187,9 +187,14 @@ ExternalName 서비스는 셀렉터가 없고 DNS명을 대신 사용하는 특수한 상황의 서비스이다. 자세한 내용은 이 문서 뒷부분의 [ExternalName](#externalname) 섹션을 참조한다. +### 초과 용량 엔드포인트 +엔드포인트 리소스에 1,000개가 넘는 엔드포인트가 있는 경우 쿠버네티스 v1.21(또는 그 이상) +클러스터는 해당 엔드포인트에 `endpoints.kubernetes.io/over-capacity: warning` 어노테이션을 추가한다. +이 어노테이션은 영향을 받는 엔드포인트 오브젝트가 용량을 초과했음을 나타낸다. + ### 엔드포인트슬라이스 -{{< feature-state for_k8s_version="v1.17" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} 엔드포인트슬라이스는 엔드포인트에 보다 확장 가능한 대안을 제공할 수 있는 API 리소스이다. 개념적으로 엔드포인트와 매우 유사하지만, 엔드포인트슬라이스를 @@ -513,8 +518,12 @@ API에서 `엔드포인트` 레코드를 생성하고, DNS 구성을 수정하 각 노드는 해당 포트 (모든 노드에서 동일한 포트 번호)를 서비스로 프록시한다. 서비스는 할당된 포트를 `.spec.ports[*].nodePort` 필드에 나타낸다. -포트를 프록시하기 위해 특정 IP를 지정하려면 kube-proxy의 `--nodeport-addresses` 플래그를 특정 IP 블록으로 설정할 수 있다. 이것은 쿠버네티스 v1.10부터 지원된다. 
-이 플래그는 쉼표로 구분된 IP 블록 목록 (예: 10.0.0.0/8, 192.0.2.0/25)을 사용하여 kube-proxy가 로컬 노드로 고려해야 하는 IP 주소 범위를 지정한다. +포트를 프록시하기 위해 특정 IP를 지정하려면, kube-proxy에 대한 +`--nodeport-addresses` 플래그 또는 +[kube-proxy 구성 파일](/docs/reference/config-api/kube-proxy-config.v1alpha1/)의 +동등한 `nodePortAddresses` 필드를 +특정 IP 블록으로 설정할 수 있다. +이 플래그는 쉼표로 구분된 IP 블록 목록(예: `10.0.0.0/8`, `192.0.2.0/25`)을 사용하여 kube-proxy가 로컬 노드로 고려해야 하는 IP 주소 범위를 지정한다. 예를 들어, `--nodeport-addresses=127.0.0.0/8` 플래그로 kube-proxy를 시작하면, kube-proxy는 NodePort 서비스에 대하여 루프백(loopback) 인터페이스만 선택한다. `--nodeport-addresses`의 기본 값은 비어있는 목록이다. 이것은 kube-proxy가 NodePort에 대해 사용 가능한 모든 네트워크 인터페이스를 고려해야 한다는 것을 의미한다. (이는 이전 쿠버네티스 릴리스와도 호환된다). @@ -530,7 +539,9 @@ NodePort를 사용하면 자유롭게 자체 로드 밸런싱 솔루션을 설 하나 이상의 노드 IP를 직접 노출시킬 수 있다. 이 서비스는 `:spec.ports[*].nodePort`와 -`.spec.clusterIP:spec.ports[*].port`로 표기된다. (kube-proxy에서 `--nodeport-addresses` 플래그가 설정되면, 는 NodeIP를 필터링한다.) +`.spec.clusterIP:spec.ports[*].port`로 표기된다. +kube-proxy에 대한 `--nodeport-addresses` 플래그 또는 kube-proxy 구성 파일의 +동등한 필드가 설정된 경우, `` 는 노드 IP를 필터링한다. 예를 들면 @@ -628,6 +639,25 @@ v1.20부터는 `spec.allocateLoadBalancerNodePorts` 필드를 `false`로 설정 이러한 노드 포트를 할당 해제하려면 모든 서비스 포트에서 `nodePorts` 항목을 명시적으로 제거해야 한다. 이 필드를 사용하려면 `ServiceLBNodePortControl` 기능 게이트를 활성화해야 한다. +#### 로드 밸런서 구현 클래스 지정 {#load-balancer-class} + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +v1.21부터는, `spec.loadBalancerClass` 필드를 설정하여 `LoadBalancer` 서비스 유형에 +대한 로드 밸런서 구현 클래스를 선택적으로 지정할 수 있다. +기본적으로, `spec.loadBalancerClass` 는 `nil` 이고 `LoadBalancer` 유형의 서비스는 +클라우드 공급자의 기본 로드 밸런서 구현을 사용한다. +`spec.loadBalancerClass` 가 지정되면, 지정된 클래스와 일치하는 로드 밸런서 +구현이 서비스를 감시하고 있다고 가정한다. +모든 기본 로드 밸런서 구현(예: 클라우드 공급자가 제공하는 +로드 밸런서 구현)은 이 필드가 설정된 서비스를 무시한다. +`spec.loadBalancerClass` 는 `LoadBalancer` 유형의 서비스에서만 설정할 수 있다. +한 번 설정하면 변경할 수 없다. +`spec.loadBalancerClass` 의 값은 "`internal-vip`" 또는 +"`example.com/internal-vip`" 와 같은 선택적 접두사가 있는 레이블 스타일 식별자여야 한다. +접두사가 없는 이름은 최종 사용자를 위해 예약되어 있다. +이 필드를 사용하려면 `ServiceLoadBalancerClass` 기능 게이트를 활성화해야 한다. + #### 내부 로드 밸런서 혼재된 환경에서는 서비스의 트래픽을 동일한 (가상) 네트워크 주소 블록 내로 @@ -785,8 +815,7 @@ TCP 및 SSL은 4 계층 프록시를 선택한다. ELB는 헤더를 수정하지 ``` 위의 예에서, 서비스에 `80`, `443`, `8443`의 3개 포트가 포함된 경우, -`443`, `8443`은 SSL 인증서를 사용하지만, `80`은 단순히 -프록시만 하는 HTTP이다. +`443`, `8443`은 SSL 인증서를 사용하지만, `80`은 프록시하는 HTTP이다. 쿠버네티스 v1.9부터는 서비스에 대한 HTTPS 또는 SSL 리스너와 함께 [사전에 정의된 AWS SSL 정책](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-policy-table.html)을 사용할 수 있다. 사용 가능한 정책을 확인하려면, `aws` 커맨드라인 툴을 사용한다. @@ -958,7 +987,8 @@ NLB는 특정 인스턴스 클래스에서만 작동한다. 
지원되는 인스 | 규칙 | 프로토콜 | 포트 | IP 범위 | IP 범위 설명 | |------|----------|---------|------------|---------------------| -| 헬스 체크 | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | VPC CIDR | kubernetes.io/rule/nlb/health=\ | +| 헬스 체크 | TCP | NodePort(s) (`.spec.healthCheckNodePort` for `.spec.externalTrafficPolicy = Local`) | Subnet CIDR | kubernetes.io/rule/nlb/health=\ | + | 클라이언트 트래픽 | TCP | NodePort(s) | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/client=\ | | MTU 탐색 | ICMP | 3,4 | `.spec.loadBalancerSourceRanges` (defaults to `0.0.0.0/0`) | kubernetes.io/rule/nlb/mtu=\ | diff --git a/content/ko/docs/concepts/storage/persistent-volumes.md b/content/ko/docs/concepts/storage/persistent-volumes.md index 05997eb0f3e9d..3a85139cd207b 100644 --- a/content/ko/docs/concepts/storage/persistent-volumes.md +++ b/content/ko/docs/concepts/storage/persistent-volumes.md @@ -29,7 +29,7 @@ _퍼시스턴트볼륨_ (PV)은 관리자가 프로비저닝하거나 [스토리 _퍼시스턴트볼륨클레임_ (PVC)은 사용자의 스토리지에 대한 요청이다. 파드와 비슷하다. 파드는 노드 리소스를 사용하고 PVC는 PV 리소스를 사용한다. 파드는 특정 수준의 리소스(CPU 및 메모리)를 요청할 수 있다. 클레임은 특정 크기 및 접근 모드를 요청할 수 있다(예: ReadWriteOnce, ReadOnlyMany 또는 ReadWriteMany로 마운트 할 수 있음. [AccessModes](#접근-모드) 참고). -퍼시스턴트볼륨클레임을 사용하면 사용자가 추상화된 스토리지 리소스를 사용할 수 있지만, 다른 문제들 때문에 성능과 같은 다양한 속성을 가진 퍼시스턴트볼륨이 필요한 경우가 일반적이다. 클러스터 관리자는 사용자에게 해당 볼륨의 구현 방법에 대한 세부 정보를 제공하지 않고 단순히 크기와 접근 모드와는 다른 방식으로 다양한 퍼시스턴트볼륨을 제공할 수 있어야 한다. 이러한 요구에는 _스토리지클래스_ 리소스가 있다. +퍼시스턴트볼륨클레임을 사용하면 사용자가 추상화된 스토리지 리소스를 사용할 수 있지만, 다른 문제들 때문에 성능과 같은 다양한 속성을 가진 퍼시스턴트볼륨이 필요한 경우가 일반적이다. 클러스터 관리자는 사용자에게 해당 볼륨의 구현 방법에 대한 세부 정보를 제공하지 않고 크기와 접근 모드와는 다른 방식으로 다양한 퍼시스턴트볼륨을 제공할 수 있어야 한다. 이러한 요구에는 _스토리지클래스_ 리소스가 있다. [실습 예제와 함께 상세한 내용](/ko/docs/tasks/configure-pod-container/configure-persistent-volume-storage/)을 참고하길 바란다. diff --git a/content/ko/docs/concepts/storage/volumes.md b/content/ko/docs/concepts/storage/volumes.md index 5323182966d1e..698ee14e726a6 100644 --- a/content/ko/docs/concepts/storage/volumes.md +++ b/content/ko/docs/concepts/storage/volumes.md @@ -34,7 +34,7 @@ weight: 10 더 이상 존재하지 않으면, 쿠버네티스는 임시(ephemeral) 볼륨을 삭제하지만, 퍼시스턴트(persistent) 볼륨은 삭제하지 않는다. -기본적으로 볼륨은 디렉터리일 뿐이며, 일부 데이터가 있을 수 있으며, 파드 +기본적으로 볼륨은 디렉터리이며, 일부 데이터가 있을 수 있으며, 파드 내 컨테이너에서 접근할 수 있다. 디렉터리의 생성 방식, 이를 지원하는 매체와 내용은 사용된 특정 볼륨의 유형에 따라 결정된다. @@ -149,14 +149,16 @@ EBS 볼륨이 파티션된 경우, 선택적 필드인 `partition: "}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} `azureFile` 의 `CSIMigration` 기능이 활성화된 경우, 기존 트리 내 플러그인에서 `file.csi.azure.com` 컨테이너 스토리지 인터페이스(CSI) 드라이버로 모든 플러그인 작업을 수행한다. 이 기능을 사용하려면, 클러스터에 [Azure 파일 CSI 드라이버](https://github.com/kubernetes-sigs/azurefile-csi-driver) 를 설치하고 `CSIMigration` 과 `CSIMigrationAzureFile` -알파 기능을 활성화해야 한다. +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 활성화해야 한다. + +Azure File CSI 드라이버는 동일한 볼륨을 다른 fsgroup에서 사용하는 것을 지원하지 않는다. Azurefile CSI 마이그레이션이 활성화된 경우, 다른 fsgroup에서 동일한 볼륨을 사용하는 것은 전혀 지원되지 않는다. ### cephfs @@ -205,14 +207,17 @@ spec: #### 오픈스택 CSI 마이그레이션 -{{< feature-state for_k8s_version="v1.18" state="beta" >}} - -Cinder의 `CSIMigration` 기능이 활성화된 경우, 기존 트리 내 플러그인에서 -`cinder.csi.openstack.org` 컨테이너 스토리지 인터페이스(CSI) -드라이버로 모든 플러그인 작업을 수행한다. 이 기능을 사용하려면, 클러스터에 [오픈스택 Cinder CSI -드라이버](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md)를 -설치하고 `CSIMigration` 과 `CSIMigrationOpenStack` -베타 기능을 활성화해야 한다. +{{< feature-state for_k8s_version="v1.21" state="beta" >}} + +Cinder의`CSIMigration` 기능은 Kubernetes 1.21에서 기본적으로 활성화됩니다. 
+기존 트리 내 플러그인에서 `cinder.csi.openstack.org` 컨테이너 스토리지 인터페이스(CSI) +드라이버로 모든 플러그인 작업을 수행한다. +[오픈스택 Cinder CSI 드라이버](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md)가 +클러스터에 설치되어 있어야 한다. +`CSIMigrationOpenStack` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +`false` 로 설정하여 클러스터에 대한 Cinder CSI 마이그레이션을 비활성화할 수 있다. +`CSIMigrationOpenStack` 기능을 비활성화하면, 트리 내 Cinder 볼륨 플러그인이 +Cinder 볼륨 스토리지 관리의 모든 측면을 담당한다. ### 컨피그맵(configMap) {#configmap} diff --git a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md index 7756c93cb0474..6935cf8fb404e 100644 --- a/content/ko/docs/concepts/workloads/controllers/cron-jobs.md +++ b/content/ko/docs/concepts/workloads/controllers/cron-jobs.md @@ -10,7 +10,7 @@ weight: 80 -{{< feature-state for_k8s_version="v1.8" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} _크론잡은_ 반복 일정에 따라 {{< glossary_tooltip term_id="job" text="잡" >}}을 만든다. @@ -115,12 +115,17 @@ Cannot determine if job needs to be started. Too many missed start time (> 100). 크론잡은 오직 그 일정에 맞는 잡 생성에 책임이 있고, 잡은 그 잡이 대표하는 파드 관리에 책임이 있다. -## 새 컨트롤러 +## 컨트롤러 버전 {#new-controller} -쿠버네티스 1.20부터 알파 기능으로 사용할 수 있는 크론잡 컨트롤러의 대체 구현이 있다. 크론잡 컨트롤러의 버전 2를 선택하려면, 다음의 [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) 플래그를 {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}에 전달한다. +쿠버네티스 v1.21부터 크론잡 컨트롤러의 두 번째 버전이 +기본 구현이다. 기본 크론잡 컨트롤러를 비활성화하고 +대신 원래 크론잡 컨트롤러를 사용하려면, `CronJobControllerV2` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/) +플래그를 {{< glossary_tooltip term_id="kube-controller-manager" text="kube-controller-manager" >}}에 전달하고, +이 플래그를 `false` 로 설정한다. 예를 들면, 다음과 같다. ``` ---feature-gates="CronJobControllerV2=true" +--feature-gates="CronJobControllerV2=false" ``` diff --git a/content/ko/docs/concepts/workloads/controllers/deployment.md b/content/ko/docs/concepts/workloads/controllers/deployment.md index e19720491dd58..ac782e700821e 100644 --- a/content/ko/docs/concepts/workloads/controllers/deployment.md +++ b/content/ko/docs/concepts/workloads/controllers/deployment.md @@ -706,7 +706,7 @@ nginx-deployment-618515232 11 11 11 7m 하나 이상의 업데이트를 트리거하기 전에 디플로이먼트를 일시 중지한 다음 다시 시작할 수 있다. 이렇게 하면 불필요한 롤아웃을 트리거하지 않고 일시 중지와 재개 사이에 여러 수정 사항을 적용할 수 있다. -* 예를 들어, 방금 생성된 디플로이먼트의 경우 +* 예를 들어, 생성된 디플로이먼트의 경우 디플로이먼트 상세 정보를 가져온다. ```shell kubectl get deploy diff --git a/content/ko/docs/concepts/workloads/controllers/job.md b/content/ko/docs/concepts/workloads/controllers/job.md index 64b5d3879d6cc..b9411ecc31226 100644 --- a/content/ko/docs/concepts/workloads/controllers/job.md +++ b/content/ko/docs/concepts/workloads/controllers/job.md @@ -16,7 +16,8 @@ weight: 50 잡에서 하나 이상의 파드를 생성하고 지정된 수의 파드가 성공적으로 종료될 때까지 계속해서 파드의 실행을 재시도한다. 파드가 성공적으로 완료되면, 성공적으로 완료된 잡을 추적한다. 지정된 수의 성공 완료에 도달하면, 작업(즉, 잡)이 완료된다. 잡을 삭제하면 잡이 생성한 -파드가 정리된다. +파드가 정리된다. 작업을 일시 중지하면 작업이 다시 재개될 때까지 활성 파드가 +삭제된다. 간단한 사례는 잡 오브젝트를 하나 생성해서 파드 하나를 안정적으로 실행하고 완료하는 것이다. 첫 번째 파드가 실패 또는 삭제된 경우(예로는 노드 하드웨어의 실패 또는 @@ -98,8 +99,8 @@ echo $pods pi-5rwd7 ``` -여기서 셀렉터는 잡의 셀렉터와 동일하다. `--output=jsonpath` 옵션은 반환된 목록의 -각각의 파드에서 이름을 가져와서 표현하는 방식을 지정한다. +여기서 셀렉터는 잡의 셀렉터와 동일하다. `--output = jsonpath` 옵션은 반환된 +목록에 있는 각 파드의 이름으로 표현식을 지정한다. 파드 중 하나를 표준 출력으로 본다. @@ -145,8 +146,8 @@ kubectl logs $pods - 파드가 성공적으로 종료하자마자 즉시 잡이 완료된다. 1. *고정적(fixed)인 완료 횟수* 를 가진 병렬 잡: - `.spec.completions` 에 0이 아닌 양수 값을 지정한다. 
- - 잡은 전체 작업을 나타내며 1에서 `.spec.completions` 까지의 범위의 각 값에 대해 한 개씩 성공한 파드가 있으면 완료된다. - - **아직 구현되지 않음:** 각 파드에게는 1부터 `.spec.completions` 까지의 범위 내의 서로 다른 인덱스가 전달된다. + - 잡은 전체 작업을 나타내며, `.spec.completions` 성공한 파드가 있을 때 완료된다. + - `.spec.completionMode="Indexed"` 를 사용할 때, 각 파드는 0에서 `.spec.completions-1` 범위 내의 서로 다른 인덱스를 가져온다. 1. *작업 큐(queue)* 가 있는 병렬 잡: - `.spec.completions` 를 지정하지 않고, `.spec.parallelism` 를 기본으로 한다. - 파드는 각자 또는 외부 서비스 간에 조정을 통해 각각의 작업을 결정해야 한다. 예를 들어 파드는 작업 큐에서 최대 N 개의 항목을 일괄로 가져올(fetch) 수 있다. @@ -166,7 +167,6 @@ _작업 큐_ 잡은 `.spec.completions` 를 설정하지 않은 상태로 두고 다른 유형의 잡을 사용하는 방법에 대한 더 자세한 정보는 [잡 패턴](#잡-패턴) 섹션을 본다. - #### 병렬 처리 제어하기 요청된 병렬 처리(`.spec.parallelism`)는 음수가 아닌 값으로 설정할 수 있다. @@ -185,6 +185,33 @@ _작업 큐_ 잡은 `.spec.completions` 를 설정하지 않은 상태로 두고 - 잡 컨트롤러는 동일한 잡에서 과도하게 실패한 이전 파드들로 인해 새로운 파드의 생성을 조절할 수 있다. - 파드가 정상적으로(gracefully) 종료되면, 중지하는데 시간이 소요된다. +### 완료 모드 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +인덱싱된 잡을 생성하려면, [API 서버](/docs/reference/command-line-tools-reference/kube-apiserver/) +및 [컨트롤러 관리자](/docs/reference/command-line-tools-reference/kube-controller-manager/)에서 +`IndexedJob` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +활성화해야 한다. +{{< /note >}} + +완료 횟수가 _고정적인 완료 횟수_ 즉, null이 아닌 `.spec.completions` 가 있는 잡은 +`.spec.completionMode` 에 지정된 완료 모드를 가질 수 있다. + +- `NonIndexed` (기본값): `.spec.completions` 가 성공적으로 + 완료된 파드가 있는 경우 작업이 완료된 것으로 간주된다. 즉, 각 파드 + 완료는 서로 상동하다(homologous). null `.spec.completions` 가 있는 + 잡은 암시적으로 `NonIndexed` 이다. +- `Indexed`: 잡의 파드는 `batch.kubernetes.io/job-completion-index` + 어노테이션에서 사용할 수 있는 0에서 `.spec.completions-1` 까지 연결된 완료 인덱스를 가져온다. + 각 인덱스에 대해 성공적으로 완료된 파드가 하나 있으면 작업이 완료된 것으로 + 간주된다. 이 모드를 사용하는 방법에 대한 자세한 내용은 + [정적 작업 할당을 사용한 병렬 처리를 위해 인덱싱된 잡](/docs/tasks/job/indexed-parallel-processing-static/)을 참고한다. + 참고로, 드물기는 하지만, 동일한 인덱스에 대해 둘 이상의 파드를 시작할 수 + 있지만, 그 중 하나만 완료 횟수에 포함된다. + + ## 파드와 컨테이너 장애 처리하기 파드내 컨테이너의 프로세스가 0이 아닌 종료 코드로 종료되었거나 컨테이너 메모리 제한을 @@ -348,12 +375,12 @@ spec: 여기에 트레이드오프가 요약되어있고, 2열에서 4열까지가 위의 트레이드오프에 해당한다. 패턴 이름은 예시와 더 자세한 설명을 위한 링크이다. -| 패턴 | 단일 잡 오브젝트 | 작업 항목보다 파드가 적은가? | 수정하지 않은 앱을 사용하는가? | Kube 1.1에서 작동하는가? | -| -------------------------------------------------------------------- |:-----------------:|:---------------------------:|:-------------------:|:-------------------:| -| [잡 템플릿 확장](/ko/docs/tasks/job/parallel-processing-expansion/) | | | ✓ | ✓ | -| [작업 항목 당 파드가 있는 큐](/docs/tasks/job/coarse-parallel-processing-work-queue/) | ✓ | | 때때로 | ✓ | -| [가변 파드 수를 가진 큐](/ko/docs/tasks/job/fine-parallel-processing-work-queue/) | ✓ | ✓ | | ✓ | -| 정적 작업이 할당된 단일 잡 | ✓ | | ✓ | | +| 패턴 | 단일 잡 오브젝트 | 작업 항목보다 파드가 적은가? | 수정되지 않은 앱을 사용하는가? | +| ----------------------------------------- |:-----------------:|:---------------------------:|:-------------------:| +| [작업 항목 당 파드가 있는 큐] | ✓ | | 때때로 | +| [가변 파드 수를 가진 큐] | ✓ | ✓ | | +| [정적 작업 할당을 사용한 인덱싱된 잡] | ✓ | | ✓ | +| [잡 템플릿 확장] | | | ✓ | `.spec.completions` 로 완료를 지정할 때, 잡 컨트롤러에 의해 생성된 각 파드는 동일한 [`사양`](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)을 갖는다. 이 의미는 @@ -364,16 +391,121 @@ spec: 이 표는 각 패턴에 필요한 `.spec.parallelism` 그리고 `.spec.completions` 설정을 보여준다. 여기서 `W` 는 작업 항목의 수이다. 
-| 패턴 | `.spec.completions` | `.spec.parallelism` | -| -------------------------------------------------------------------- |:-------------------:|:--------------------:| -| [잡 템플릿 확장](/ko/docs/tasks/job/parallel-processing-expansion/) | 1 | 1이어야 함 | -| [작업 항목 당 파드가 있는 큐](/docs/tasks/job/coarse-parallel-processing-work-queue/) | W | any | -| [가변 파드 수를 가진 큐](/ko/docs/tasks/job/fine-parallel-processing-work-queue/) | 1 | any | -| 정적 작업이 할당된 단일 잡 | W | any | +| 패턴 | `.spec.completions` | `.spec.parallelism` | +| ----------------------------------------- |:-------------------:|:--------------------:| +| [작업 항목 당 파드가 있는 큐] | W | any | +| [가변 파드 수를 가진 큐] | null | any | +| [정적 작업 할당을 사용한 인덱싱된 잡] | W | any | +| [잡 템플릿 확장] | 1 | 1이어야 함 | +[작업 항목 당 파드가 있는 큐]: /docs/tasks/job/coarse-parallel-processing-work-queue/ +[가변 파드 수를 가진 큐]: /docs/tasks/job/fine-parallel-processing-work-queue/ +[정적 작업 할당을 사용한 인덱싱된 잡]: /docs/tasks/job/indexed-parallel-processing-static/ +[잡 템플릿 확장]: /docs/tasks/job/parallel-processing-expansion/ ## 고급 사용법 +### 잡 일시 중지 + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +{{< note >}} +잡 일시 중지는 쿠버네티스 버전 1.21 이상에서 사용할 수 있다. 이 기능을 +사용하려면 [API 서버](/docs/reference/command-line-tools-reference/kube-apiserver/) +및 [컨트롤러 관리자](/docs/reference/command-line-tools-reference/kube-controller-manager/)에서 +`SuspendJob` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 +활성화해야 한다. +{{< /note >}} + +잡이 생성되면, 잡 컨트롤러는 잡의 요구 사항을 충족하기 위해 +즉시 파드 생성을 시작하고 잡이 완료될 때까지 +계속한다. 그러나, 잡의 실행을 일시적으로 중단하고 나중에 +다시 시작할 수도 있다. 잡을 일시 중지하려면, 잡의 `.spec.suspend` 필드를 true로 +업데이트할 수 있다. 나중에, 다시 재개하려면, false로 업데이트한다. +`.spec.suspend` 로 설정된 잡을 생성하면 일시 중지된 상태로 +생성된다. + +잡이 일시 중지에서 재개되면, 해당 `.status.startTime` 필드가 +현재 시간으로 재설정된다. 즉, 잡이 일시 중지 및 재개되면 `.spec.activeDeadlineSeconds` +타이머가 중지되고 재설정된다. + +잡을 일시 중지하면 모든 활성 파드가 삭제된다. 잡이 +일시 중지되면, SIGTERM 시그널로 [파드가 종료된다](/ko/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination). +파드의 정상 종료 기간이 적용되며 사용자의 파드는 이 기간 동안에 +이 시그널을 처리해야 한다. 나중에 진행 상황을 저장하거나 +변경 사항을 취소하는 작업이 포함될 수 있다. 이 방법으로 종료된 파드는 +잡의 `completions` 수에 포함되지 않는다. + +일시 중지된 상태의 잡 정의 예시는 다음과 같다. + +```shell +kubectl get job myjob -o yaml +``` + +```yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + suspend: true + parallelism: 1 + completions: 5 + template: + spec: + ... +``` + +잡의 상태를 사용하여 잡이 일시 중지되었는지 또는 과거에 일시 중지되었는지 +확인할 수 있다. + +```shell +kubectl get jobs/myjob -o yaml +``` + +```json +apiVersion: batch/v1 +kind: Job +# .metadata and .spec omitted +status: + conditions: + - lastProbeTime: "2021-02-05T13:14:33Z" + lastTransitionTime: "2021-02-05T13:14:33Z" + status: "True" + type: Suspended + startTime: "2021-02-05T13:13:48Z" +``` + +"True" 상태인 "Suspended" 유형의 잡의 컨디션은 잡이 +일시 중지되었음을 의미한다. 이 `lastTransitionTime` 필드는 잡이 일시 중지된 +기간을 결정하는 데 사용할 수 있다. 해당 컨디션의 상태가 "False"이면, 잡이 +이전에 일시 중지되었다가 현재 실행 중이다. 이러한 컨디션이 +잡의 상태에 없으면, 잡이 중지되지 않은 것이다. + +잡이 일시 중지 및 재개될 때에도 이벤트가 생성된다. + +```shell +kubectl describe jobs/myjob +``` + +``` +Name: myjob +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 12m job-controller Created pod: myjob-hlrpl + Normal SuccessfulDelete 11m job-controller Deleted pod: myjob-hlrpl + Normal Suspended 11m job-controller Job suspended + Normal SuccessfulCreate 3s job-controller Created pod: myjob-jvb44 + Normal Resumed 3s job-controller Job resumed +``` + +마지막 4개의 이벤트, 특히 "Suspended" 및 "Resumed" 이벤트는 +`.spec.suspend` 필드를 전환한 결과이다. 이 두 이벤트 사이의 시간동안 +파드가 생성되지 않았지만, 잡이 재개되자마자 파드 생성이 다시 +시작되었음을 알 수 있다. 
+ ### 자신의 파드 셀렉터를 지정하기 일반적으로 잡 오브젝트를 생성할 때 `.spec.selector` 를 지정하지 않는다. diff --git a/content/ko/docs/concepts/workloads/controllers/replicaset.md b/content/ko/docs/concepts/workloads/controllers/replicaset.md index 8966029620015..7cf399d24275c 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicaset.md +++ b/content/ko/docs/concepts/workloads/controllers/replicaset.md @@ -222,7 +222,7 @@ pod2 1/1 Running 0 36s ## 레플리카셋 매니페스트 작성하기 레플리카셋은 모든 쿠버네티스 API 오브젝트와 마찬가지로 `apiVersion`, `kind`, `metadata` 필드가 필요하다. -레플리카셋에 대한 kind 필드의 값은 항상 레플리카셋이다. +레플리카셋에 대한 `kind` 필드의 값은 항상 레플리카셋이다. 쿠버네티스 1.9에서의 레플리카셋의 kind에 있는 API 버전 `apps/v1`은 현재 버전이며, 기본으로 활성화 되어있다. API 버전 `apps/v1beta2`은 사용 중단(deprecated)되었다. API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한다. @@ -237,7 +237,7 @@ API 버전에 대해서는 `frontend.yaml` 예제의 첫 번째 줄을 참고한 우리는 `frontend.yaml` 예제에서 `tier: frontend`이라는 레이블을 하나 가지고 있다. 이 파드를 다른 컨트롤러가 취하지 않도록 다른 컨트롤러의 셀렉터와 겹치지 않도록 주의해야 한다. -템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#재시작-정책) 필드인 +템플릿의 [재시작 정책](/ko/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy) 필드인 `.spec.template.spec.restartPolicy`는 기본값인 `Always`만 허용된다. ### 파드 셀렉터 @@ -307,9 +307,51 @@ curl -X DELETE 'localhost:8080/apis/apps/v1/namespaces/default/replicasets/fron ### 레플리카셋의 스케일링 -레플리카셋을 손쉽게 스케일 업 또는 다운하는 방법은 단순히 `.spec.replicas` 필드를 업데이트 하면 된다. +레플리카셋을 손쉽게 스케일 업 또는 다운하는 방법은 단순히 `.spec.replicas` 필드를 업데이트하면 된다. 레플리카셋 컨트롤러는 일치하는 레이블 셀렉터가 있는 파드가 의도한 수 만큼 가용하고 운영 가능하도록 보장한다. +스케일 다운할 때, 레플리카셋 컨트롤러는 스케일 다운할 파드의 +우선순위를 정하기 위해 다음의 기준으로 가용 파드를 정렬하여 삭제할 파드를 결정한다. + 1. Pending 상태인 (+ 스케줄링할 수 없는) 파드가 먼저 스케일 다운된다. + 2. `controller.kubernetes.io/pod-deletion-cost` 어노테이션이 설정되어 있는 + 파드에 대해서는, 낮은 값을 갖는 파드가 먼저 스케일 다운된다. + 3. 더 많은 레플리카가 있는 노드의 파드가 더 적은 레플리카가 있는 노드의 파드보다 먼저 스케일 다운된다. + 4. 파드 생성 시간이 다르면, 더 최근에 생성된 파드가 + 이전에 생성된 파드보다 먼저 스케일 다운된다. + (`LogarithmicScaleDown` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화되어 있으면 생성 시간이 정수 로그 스케일로 버킷화된다) + +모든 기준에 대해 동등하다면, 스케일 다운할 파드가 임의로 선택된다. + +### 파드 삭제 비용 +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + +[`controller.kubernetes.io/pod-deletion-cost`](/docs/reference/labels-annotations-taints/#pod-deletion-cost) 어노테이션을 이용하여, +레플리카셋을 스케일 다운할 때 어떤 파드부터 먼저 삭제할지에 대한 우선순위를 설정할 수 있다. + +이 어노테이션은 파드에 설정되어야 하며, [-2147483647, 2147483647] 범위를 갖는다. +이 어노테이션은 하나의 레플리카셋에 있는 다른 파드와의 상대적 삭제 비용을 나타낸다. +삭제 비용이 낮은 파드는 삭제 비용이 높은 파드보다 삭제 우선순위가 높다. + +파드에 대해 이 값을 명시하지 않으면 기본값은 0이다. 음수로도 설정할 수 있다. +유효하지 않은 값은 API 서버가 거부한다. + +이 기능은 알파 상태이며 기본적으로는 비활성화되어 있다. +kube-apiserver와 kube-controller-manager에서 `PodDeletionCost` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 켜서 활성화할 수 있다. + +{{< note >}} +- 이 기능은 best-effort 방식으로 동작하므로, 파드 삭제 순서를 보장하지는 않는다. +- 이 값을 자주 바꾸는 것은 피해야 한다 (예: 메트릭 값에 따라 변경). +apiserver에서 많은 양의 파드 업데이트를 동반하기 때문이다. +{{< /note >}} + +#### 사용 예시 +한 애플리케이션 내의 여러 파드는 각각 사용률이 다를 수 있다. 스케일 다운 시, +애플리케이션은 사용률이 낮은 파드를 먼저 삭제하고 싶을 수 있다. 파드를 자주 +업데이트하는 것을 피하기 위해, 애플리케이션은 `controller.kubernetes.io/pod-deletion-cost` 값을 +스케일 다운하기 전에 1회만 업데이트해야 한다 (파드 사용률에 비례하는 값으로 설정). +이 방식은 Spark 애플리케이션의 드라이버 파드처럼 애플리케이션이 스스로 다운스케일링을 수행하는 경우에 유효하다. 
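다음은 `controller.kubernetes.io/pod-deletion-cost` 어노테이션을 설정한 파드의 간단한 예시이다. 파드 이름과 이미지는 설명을 위해 가정한 값이며, 어노테이션 값은 문자열로 지정해야 한다.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-app-pod        # 설명을 위해 가정한 파드 이름
  annotations:
    controller.kubernetes.io/pod-deletion-cost: "-100"  # 값이 낮을수록 먼저 스케일 다운된다
spec:
  containers:
  - name: app
    image: nginx               # 설명을 위해 가정한 이미지
```

같은 레플리카셋에 속한 다른 파드보다 낮은 삭제 비용을 가지므로, 스케일 다운 시 이 파드가 먼저 삭제 대상이 된다.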
+ ### 레플리카셋을 Horizontal Pod Autoscaler 대상으로 설정 레플리카셋은 diff --git a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md index 06ce54301238a..db69cf921c8d9 100644 --- a/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md +++ b/content/ko/docs/concepts/workloads/controllers/replicationcontroller.md @@ -54,7 +54,9 @@ kubectl 명령에서 숏컷으로 사용된다. ```shell kubectl apply -f https://k8s.io/examples/controllers/replication.yaml ``` + 출력 결과는 다음과 같다. + ``` replicationcontroller/nginx created ``` @@ -64,7 +66,9 @@ replicationcontroller/nginx created ```shell kubectl describe replicationcontrollers/nginx ``` + 출력 결과는 다음과 같다. + ``` Name: nginx Namespace: default @@ -103,14 +107,16 @@ Pods Status: 3 Running / 0 Waiting / 0 Succeeded / 0 Failed pods=$(kubectl get pods --selector=app=nginx --output=jsonpath={.items..metadata.name}) echo $pods ``` + 출력 결과는 다음과 같다. + ``` nginx-3ntk0 nginx-4ok8v nginx-qrm3m ``` 여기서 셀렉터는 레플리케이션컨트롤러(`kubectl describe` 의 출력에서 보인)의 셀렉터와 같고, -다른 형식의 파일인 `replication.yaml` 의 것과 동일하다. `--output=jsonpath` 옵션은 -반환된 목록의 각 파드에서 이름을 가져오는 표현식을 지정한다. +다른 형식의 파일인 `replication.yaml` 의 것과 동일하다. `--output=jsonpath` 은 +반환된 목록의 각 파드의 이름을 출력하도록 하는 옵션이다. ## 레플리케이션 컨트롤러의 Spec 작성 @@ -118,7 +124,7 @@ nginx-3ntk0 nginx-4ok8v nginx-qrm3m 다른 모든 쿠버네티스 컨피그와 마찬가지로 레플리케이션 컨트롤러는 `apiVersion`, `kind`, `metadata` 와 같은 필드가 필요하다. 레플리케이션 컨트롤러 오브젝트의 이름은 유효한 [DNS 서브도메인 이름](/ko/docs/concepts/overview/working-with-objects/names/#dns-서브도메인-이름)이어야 한다. -컨피그 파일의 동작에 관련된 일반적인 정보는 [쿠버네티스 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/)를 참고한다. +환경설정 파일의 동작에 관련된 일반적인 정보는 [쿠버네티스 오브젝트 관리](/ko/docs/concepts/overview/working-with-objects/object-management/)를 참고한다. 레플리케이션 컨트롤러는 또한 [`.spec` section](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status)도 필요하다. @@ -198,7 +204,7 @@ REST API나 Go 클라이언트 라이브러리를 사용하는 경우 레플리 ### 레플리케이션 컨트롤러에서 파드 격리 -파드는 레이블을 변경하여 레플리케이션 컨트롤러의 대상 셋에서 제거될 수 있다. 이 기술은 디버깅, 데이터 복구 등을 위해 서비스에서 파드를 제거하는데 사용될 수 있다. 이 방법으로 제거된 파드는 자동으로 교체된다 (레플리카 수가 변경되지 않는다고 가정). +파드는 레이블을 변경하여 레플리케이션 컨트롤러의 대상 셋에서 제거될 수 있다. 이 기술은 디버깅과 데이터 복구를 위해 서비스에서 파드를 제거하는 데 사용될 수 있다. 이 방법으로 제거된 파드는 자동으로 교체된다 (레플리카 수가 변경되지 않는다고 가정). ## 일반적인 사용법 패턴 @@ -208,8 +214,7 @@ REST API나 Go 클라이언트 라이브러리를 사용하는 경우 레플리 ### 스케일링 -레플리케이션컨트롤러는 `replicas` 필드를 설정하여 레플리카의 수를 늘리거나 줄인다. -레플리카를 수동으로 또는 오토 스케일링 제어 에이전트로 관리하도록 레플리케이션컨트롤러를 구성할 수 있다. +레플리케이션컨트롤러는 `replicas` 필드를 업데이트하여, 수동으로 또는 오토 스케일링 제어 에이전트를 통해, 레플리카의 수를 늘리거나 줄일 수 있다. ### 롤링 업데이트 @@ -246,7 +251,6 @@ REST API나 Go 클라이언트 라이브러리를 사용하는 경우 레플리 레플리케이션 컨트롤러는 조합 가능한 빌딩-블록 프리미티브가 되도록 고안되었다. 향후 사용자의 편의를 위해 더 상위 수준의 API 및/또는 도구와 그리고 다른 보완적인 기본 요소가 그 위에 구축 될 것으로 기대한다. 현재 kubectl이 지원하는 "매크로" 작업 (실행, 스케일)은 개념 증명의 예시이다. 예를 들어 [Asgard](https://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html)와 같이 레플리케이션 컨트롤러, 오토 스케일러, 서비스, 정책 스케줄링, 카나리 등을 관리할 수 있다. - ## API 오브젝트 레플리케이션 컨트롤러는 쿠버네티스 REST API의 최상위 수준의 리소스이다. @@ -261,8 +265,7 @@ API 오브젝트에 대한 더 자세한 것은 이것은 주로 [디플로이먼트](/ko/docs/concepts/workloads/controllers/deployment/)에 의해 파드의 생성, 삭제 및 업데이트를 오케스트레이션 하는 메커니즘으로 사용된다. 사용자 지정 업데이트 조정이 필요하거나 업데이트가 필요하지 않은 경우가 아니면 레플리카셋을 직접 사용하는 대신 디플로이먼트를 사용하는 것이 좋다. - -### 디플로이먼트 (권장되는) +### 디플로이먼트 (권장됨) [`Deployment`](/ko/docs/concepts/workloads/controllers/deployment/)는 기본 레플리카셋과 그 파드를 업데이트하는 상위 수준의 API 오브젝트이다. 선언적이며, 서버 사이드이고, 추가 기능이 있기 때문에 롤링 업데이트 기능을 원한다면 디플로이먼트를 권장한다. 
diff --git a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md index 5ed869fb576cc..4703b63b4a3c9 100644 --- a/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md +++ b/content/ko/docs/concepts/workloads/controllers/ttlafterfinished.md @@ -6,7 +6,7 @@ weight: 70 -{{< feature-state for_k8s_version="v1.12" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 제한하는 TTL (time to live) 메커니즘을 제공한다. TTL 컨트롤러는 현재 @@ -14,9 +14,9 @@ TTL 컨트롤러는 실행이 완료된 리소스 오브젝트의 수명을 처리하며, 파드와 커스텀 리소스와 같이 실행을 완료할 다른 리소스를 처리하도록 확장될 수 있다. -알파(Alpha) 고지 사항: 이 기능은 현재 알파이고, -kube-apiserver와 kube-controller-manager와 함께 -[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)로 `TTLAfterFinished` 를 활성화할 수 있다. +이 기능은 현재 베타이고 기본적으로 활성화되어 있다. +kube-apiserver와 kube-controller-manager에서 `TTLAfterFinished` +[기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)를 이용하여 비활성화할 수 있다. diff --git a/content/ko/docs/concepts/workloads/pods/disruptions.md b/content/ko/docs/concepts/workloads/pods/disruptions.md index bcfde559cb19e..9d2319ae6af96 100644 --- a/content/ko/docs/concepts/workloads/pods/disruptions.md +++ b/content/ko/docs/concepts/workloads/pods/disruptions.md @@ -89,7 +89,7 @@ weight: 60 ## 파드 disruption budgets -{{< feature-state for_k8s_version="v1.5" state="beta" >}} +{{< feature-state for_k8s_version="v1.21" state="stable" >}} 쿠버네티스는 자발적인 중단이 자주 발생하는 경우에도 고 가용성 애플리케이션을 실행하는 데 도움이 되는 기능을 제공한다. diff --git a/content/ko/docs/concepts/workloads/pods/init-containers.md b/content/ko/docs/concepts/workloads/pods/init-containers.md index 56f59c1d304c1..b7a1241fc26c6 100644 --- a/content/ko/docs/concepts/workloads/pods/init-containers.md +++ b/content/ko/docs/concepts/workloads/pods/init-containers.md @@ -313,17 +313,16 @@ myapp-pod 1/1 Running 0 9m 파드는 다음과 같은 사유로, 초기화 컨테이너들의 재-실행을 일으키는, 재시작을 수행할 수 있다. -* 사용자가 초기화 컨테이너 이미지의 변경을 일으키는 파드 스펙 업데이트를 수행했다. - Init Container 이미지를 변경하면 파드가 다시 시작된다. 앱 컨테이너 - 이미지의 변경은 앱 컨테이너만 재시작시킨다. -* 파드 인프라스트럭처 컨테이너가 재시작되었다. 이는 일반적인 상황이 아니며 노드에 +* 파드 인프라스트럭처 컨테이너가 재시작된 상황. 이는 일반적인 상황이 아니며 노드에 대해서 root 접근 권한을 가진 누군가에 의해서 수행됐을 것이다. -* 파드 내의 모든 컨테이너들이, 재시작을 강제하는 `restartPolicy` 가 항상(Always)으로 설정되어 있는, - 동안 종료되었다. 그리고 초기화 컨테이너의 완료 기록이 가비지 수집 - 때문에 유실되었다. - - - +* 초기화 컨테이너의 완료 기록이 가비지 수집 때문에 유실된 상태에서, + `restartPolicy`가 Always로 설정된 파드의 모든 컨테이너가 종료되어 + 모든 컨테이너를 재시작해야 하는 상황 + +초기화 컨테이너 이미지가 변경되거나 초기화 컨테이너의 완료 기록이 가비지 수집 +때문에 유실된 상태이면 파드는 재시작되지 않는다. 이는 쿠버네티스 버전 1.20 이상에 +적용된다. 이전 버전의 쿠버네티스를 사용하는 경우 해당 쿠버네티스 버전의 문서를 +참고한다. ## {{% heading "whatsnext" %}} diff --git a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md index aa154a4b42661..71523e183a515 100644 --- a/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/ko/docs/concepts/workloads/pods/pod-lifecycle.md @@ -312,8 +312,8 @@ kubelet은 실행 중인 컨테이너들에 대해서 선택적으로 세 가지 준비성 프로브는 활성 프로브와는 다르게 준비성에 특정된 엔드포인트를 확인한다. {{< note >}} -파드가 삭제될 때 단지 요청들을 흘려 보낼(drain) 목적으로, -준비성 프로브가 필요하지는 않다는 점을 유념해야 한다. 삭제 시에, 파드는 +파드가 삭제될 때 요청들을 흘려 보내기(drain) 위해 +준비성 프로브가 꼭 필요한 것은 아니다. 삭제 시에, 파드는 프로브의 존재 여부와 무관하게 자동으로 스스로를 준비되지 않은 상태(unready)로 변경한다. 파드는 파드 내의 모든 컨테이너들이 중지될 때까지 준비되지 않은 상태로 남아 있다. 
diff --git a/content/ko/docs/contribute/generate-ref-docs/_index.md b/content/ko/docs/contribute/generate-ref-docs/_index.md index 756c509206395..ad9d221f1599d 100644 --- a/content/ko/docs/contribute/generate-ref-docs/_index.md +++ b/content/ko/docs/contribute/generate-ref-docs/_index.md @@ -1,11 +1,11 @@ --- -title: 참조 문서 개요 +title: 레퍼런스 문서 개요 main_menu: true weight: 80 --- -이 섹션은 쿠버네티스 참조 가이드를 생성하는 방법에 대해 설명한다. +이 섹션은 쿠버네티스 레퍼런스 가이드를 생성하는 방법에 대해 설명한다. -참조 문서화 시스템을 빌드하려면, 다음의 가이드를 참고한다. +레퍼런스 문서를 생성하려면, 다음의 가이드를 참고한다. -* [참조 문서 생성에 대한 퀵스타트 가이드](/docs/contribute/generate-ref-docs/quickstart/) \ No newline at end of file +* [레퍼런스 문서 생성에 대한 퀵스타트 가이드](/ko/docs/contribute/generate-ref-docs/quickstart/) diff --git a/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md b/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md new file mode 100644 index 0000000000000..b7c77889d8278 --- /dev/null +++ b/content/ko/docs/contribute/generate-ref-docs/prerequisites-ref-docs.md @@ -0,0 +1,22 @@ + +### 필요 사항: {#Requirements} + +- 리눅스 또는 macOS 로 구동되는 개발 환경이 필요하다. + +- 다음의 도구들이 설치되어 있어야 한다. + + - [Python](https://www.python.org/downloads/) v3.7.x + - [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) + - [Golang](https://golang.org/doc/install) version 1.13+ + - [Pip](https://pypi.org/project/pip/) (PyYAML 설치에 필요함) + - [PyYAML](https://pyyaml.org/) v5.1.2 + - [make](https://www.gnu.org/software/make/) + - [gcc compiler/linker](https://gcc.gnu.org/) + - [Docker](https://docs.docker.com/engine/installation/) (`kubectl` 명령어 레퍼런스 업데이트에만 필요함) + +- 위에 나열된 도구들 (예: `Go` 바이너리나 `python`) 을 사용할 수 있도록 `PATH` 환경 변수를 알맞게 설정해야 한다. + +- GitHub 저장소로 풀 리퀘스트를 생성하는 방법을 알고 있어야 한다. +이를 위해 `kubernetes/website` 저장소를 개인 계정으로 포크해야 한다. +더 자세한 내용은 [로컬 포크에서 작업하기](/ko/docs/contribute/new-content/open-a-pr/#fork-the-repo)를 참조한다. + diff --git a/content/ko/docs/contribute/generate-ref-docs/quickstart.md b/content/ko/docs/contribute/generate-ref-docs/quickstart.md new file mode 100644 index 0000000000000..6e3fbb52636e1 --- /dev/null +++ b/content/ko/docs/contribute/generate-ref-docs/quickstart.md @@ -0,0 +1,257 @@ +--- +title: 퀵스타트 가이드 +content_type: task +weight: 40 +--- + + + +이 문서에서는 `update-imported-docs` 스크립트를 사용하여 +쿠버네티스 레퍼런스 문서를 생성하는 방법에 대해 설명한다. +이 스크립트는 특정 쿠버네티스 릴리스 버전에 대해 빌드 설정을 자동으로 수행하고 레퍼런스 문서를 생성한다. + +## {{% heading "prerequisites" %}} + +{{< include "prerequisites-ref-docs.md" >}} + + + +## `website` 저장소 클론하기 {#Getting-the-docs-repository} + +개인 계정에 있는 포크 버전의 `website` 저장소가 `kubernetes/website` 저장소의 master 브랜치만큼 최신인지 확인한 뒤, +개인 계정에 있는 포크 버전의 `website` 저장소를 로컬 개발 환경으로 클론한다. + +```shell +mkdir github.com +cd github.com +git clone git@github.com:/website.git +``` + +아래에서 사용될 '베이스 디렉터리'를 숙지해야 한다. 예를 들어 위에 안내된 대로 +저장소를 클론했다면, 베이스 디렉터리는 +`github.com/website` 가 된다. 이제 이 문서의 나머지 부분에서 `` 라는 구문이 나오면 +이 부분에 당신의 베이스 디렉터리를 대입하면 된다. + +{{< note>}} +만약 쿠버네티스 구성 도구와 API 레퍼런스에 기여하고 싶다면, +[업스트림 코드에 기여하기 (영문)](/docs/contribute/generate-ref-docs/contribute-upstream) 를 참조한다. +{{< /note >}} + +## `update-imported-docs` 스크립트 개요 {#Overview-of-update-imported-docs} + +`update-imported-docs` 스크립트는 `/update-imported-docs/` +디렉터리에 존재한다. + +이 스크립트는 다음 레퍼런스를 생성한다. + +* 구성요소 및 도구 레퍼런스 페이지 +* `kubectl` 명령어 레퍼런스 +* 쿠버네티스 API 레퍼런스 + +`update-imported-docs` 스크립트는 쿠버네티스 소스코드로부터 레퍼런스 문서를 +생성한다. 스크립트가 실행되면 개발 머신의 `/tmp` 디렉터리 아래에 임시 디렉터리를 +생성하고, 이 임시 디렉터리 아래에 레퍼런스 문서 생성에 필요한 `kubernetes/kubernetes` 저장소와 +`kubernetes-sigs/reference-docs` 저장소를 클론하며, +`GOPATH` 환경 변수를 이 임시 디렉터리로 지정한다. +또한 이 스크립트는 다음의 환경 변수를 설정한다. 
+ +* `K8S_RELEASE` +* `K8S_ROOT` +* `K8S_WEBROOT` + +스크립트가 정상적으로 실행되려면 인자 2개를 전달해야 한다. + +* 환경설정 YAML 파일 (`reference.yml`) +* 쿠버네티스 릴리스 버전 (예: `1.17`) + +환경설정 파일은 `generate-command` 라는 필드를 포함하는데, +이 필드에는 +`kubernetes-sigs/reference-docs/Makefile` 에 있는 Make 타겟들을 활용하여 빌드하는 일련의 과정이 명시되어 있다. +`K8S_RELEASE` 환경 변수는 릴리스 버전을 결정한다. + +`update-imported-docs` 스크립트는 다음의 과정을 수행한다. + +1. 환경설정 파일에 있는 관련 저장소를 클론한다. + 레퍼런스 문서 생성을 위해 + 기본적으로는 `kubernetes-sigs/reference-docs` 저장소를 클론하도록 되어 있다. +1. 클론한 안에서, 문서 생성에 필요한 사항을 준비하기 위한 명령어를 실행한 뒤, + HTML 파일과 마크다운 파일을 생성한다. +1. 생성된 HTML 파일과 마크다운 파일을 + 환경설정 파일에 명시된 규칙에 따라 `` 로 복사한다. +1. `kubectl`.md 에 있는 `kubectl` 명령어 링크들이 + `kubectl` 명령어 레퍼런스 페이지의 올바른 섹션으로 연결되도록 업데이트한다. + +생성된 파일이 `` 아래에 복사되었으면, +`kubernetes/website` 저장소로 [풀 리퀘스트를 생성](/ko/docs/contribute/new-content/open-a-pr/) +할 수 있다. + +## 환경설정 파일 형식 {#Configuration-file-format} + +각 환경설정 파일은 레퍼런스 생성을 위해 필요한 여러 저장소의 정보를 담을 수 있다. +필요한 경우, 환경설정 파일을 직접 수정하여 사용할 수도 있다. +또는, 다른 그룹의 문서를 임포트하기 위해 새로운 환경설정 파일을 작성할 수도 있다. +다음은 환경설정 YAML 파일의 예시이다. + +```yaml +repos: +- name: community + remote: https://github.com/kubernetes/community.git + branch: master + files: + - src: contributors/devel/README.md + dst: docs/imported/community/devel.md + - src: contributors/guide/README.md + dst: docs/imported/community/guide.md +``` + +이 도구에 의해 처리될 단일 페이지 마크다운 문서는 +[문서 스타일 가이드](/docs/contribute/style/style-guide/)의 내용을 만족해야 한다. + +## reference.yml 환경설정 파일 다루기 {#Customizing-reference-yml} + +`/update-imported-docs/reference.yml` 환경설정 파일을 열어 수정할 수 있다. +레퍼런스 문서 생성을 위해 명령어들이 어떻게 사용되고 있는지 파악하지 못했다면, +`generate-command` 필드의 내용은 수정하지 말아야 한다. +대부분의 경우 `reference.yml` 을 직접 수정해야 할 필요는 없다. +때때로, 업스트림 소스코드 업데이트 때문에 이 환경설정 파일을 수정해야 할 수도 있다. +(예: Golang 버전 의존성, 서드파티 라이브러리 변경 등) +만약 스크립트 사용 시 빌드 문제가 있다면, +[쿠버네티스 슬랙의 #sig-docs 채널](https://kubernetes.slack.com/archives/C1J0BPD2M)에서 SIG-Docs 팀에 문의하면 된다. + +{{< note >}} +`generate-command` 는 특정 저장소로부터 문서를 만들기 위한 +명령어나 스크립트를 실행하기 위해 사용할 수 있는 선택적 필드이다. +{{< /note >}} + +`reference.yml` 환경설정 파일에서, `files` 필드는 `src` 와 `dst` 필드를 포함한다. +`src` 필드에는 `kubernetes-sigs/reference-docs` 디렉터리 아래에 있는 생성된 마크다운 파일의 위치를 명시하고, +`dst` 필드에는 이 파일을 +`kubernetes/website` 디렉터리 아래의 어느 위치로 복사할지를 명시한다. +예시는 다음과 같다. + +```yaml +repos: +- name: reference-docs + remote: https://github.com/kubernetes-sigs/reference-docs.git + files: + - src: gen-compdocs/build/kube-apiserver.md + dst: content/en/docs/reference/command-line-tools-reference/kube-apiserver.md + ... +``` + +만약 하나의 `src` 디렉터리에서 하나의 `dst` 디렉터리로 많은 파일이 복사되어야 한다면, +`src` 필드에 와일드카드를 사용할 수 있다. +이 경우, `dst` 필드에는 단일 파일의 경로가 아니라 디렉터리의 경로를 명시해야 한다. +예시는 다음과 같다. + +```yaml + files: + - src: gen-compdocs/build/kubeadm*.md + dst: content/en/docs/reference/setup-tools/kubeadm/generated/ +``` + +## `update-imported-docs` 도구 실행하기 {#Running-the-update-imported-docs-tool} + +다음과 같이 `update-imported-docs` 도구를 실행할 수 있다. + +```shell +cd /update-imported-docs +./update-imported-docs +``` + +예를 들면 다음과 같다. + +```shell +./update-imported-docs reference.yml 1.17 +``` + + +## 링크 업데이트하기 {#Fixing-Links} + +`release.yml` 환경설정 파일은 상대경로 링크를 수정하는 방법을 포함하고 있다. +임포트하는 파일 안에 있는 상대경로 링크를 수정하려면, `gen-absolute-links` 필드를 +`true` 로 명시한다. 이에 대한 예시는 +[`release.yml`](https://github.com/kubernetes/website/blob/master/update-imported-docs/release.yml) 에서 볼 수 있다. + +## `kubernetes/website` 의 변경사항을 커밋하기 {#Adding-and-committing-changes-in-kubernetes-website} + +다음의 명령을 실행하여, 스크립트에 의해 생성된 뒤 `` 아래에 복사된 파일의 목록을 볼 수 있다. + +```shell +cd +git status +``` + +위의 명령을 실행하면 새로 추가된 파일과 수정된 파일의 목록을 볼 수 있다. 
+아래의 결과 예시는 업스트림 소스코드의 변경사항에 따라 다르게 나타날 수 있다. + +### 생성된 구성요소 도구 레퍼런스 {#Generated-component-tool-files} + +``` +content/en/docs/reference/command-line-tools-reference/cloud-controller-manager.md +content/en/docs/reference/command-line-tools-reference/kube-apiserver.md +content/en/docs/reference/command-line-tools-reference/kube-controller-manager.md +content/en/docs/reference/command-line-tools-reference/kube-proxy.md +content/en/docs/reference/command-line-tools-reference/kube-scheduler.md +content/en/docs/reference/setup-tools/kubeadm/generated/kubeadm.md +content/en/docs/reference/kubectl/kubectl.md +``` + +### 생성된 kubectl 명령어 레퍼런스 {#Generated-kubectl-command-reference-files} + +``` +static/docs/reference/generated/kubectl/kubectl-commands.html +static/docs/reference/generated/kubectl/navData.js +static/docs/reference/generated/kubectl/scroll.js +static/docs/reference/generated/kubectl/stylesheet.css +static/docs/reference/generated/kubectl/tabvisibility.js +static/docs/reference/generated/kubectl/node_modules/bootstrap/dist/css/bootstrap.min.css +static/docs/reference/generated/kubectl/node_modules/highlight.js/styles/default.css +static/docs/reference/generated/kubectl/node_modules/jquery.scrollto/jquery.scrollTo.min.js +static/docs/reference/generated/kubectl/node_modules/jquery/dist/jquery.min.js +static/docs/reference/generated/kubectl/css/font-awesome.min.css +``` + +### 생성된 쿠버네티스 API 레퍼런스 와 파일 {#Generated-Kubernetes-API-reference-directories-and-files} + +``` +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/index.html +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/navData.js +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/scroll.js +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/js/query.scrollTo.min.js +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/font-awesome.min.css +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/bootstrap.min.css +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/css/stylesheet.css +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/FontAwesome.otf +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.eot +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.svg +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.ttf +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff +static/docs/reference/generated/kubernetes-api/{{< param "version" >}}/fonts/fontawesome-webfont.woff2 +``` + +`git add` 와 `git commit` 명령을 실행하여 추가/변경된 파일을 커밋한다. + +## 풀 리퀘스트 만들기 {#Creating-a-pull-request} + +`kubernetes/website` 저장소에 풀 리퀘스트를 등록한다. +등록한 풀 리퀘스트를 모니터하고, 리뷰 커멘트가 달리면 그에 대해 대응을 한다. +풀 리퀘스트가 머지될 때 까지 계속 모니터한다. + +풀 리퀘스트가 머지된 뒤 몇 분이 지나면, +변경사항을 +[쿠버네티스 문서 홈페이지](/docs/home/)에서 확인할 수 있다. + + + +## {{% heading "whatsnext" %}} + + +수동으로 빌드 저장소를 설정하고 빌드 타겟을 실행하여 개별 레퍼런스 문서를 생성하려면, +다음의 가이드를 참고한다. 
+ +* [쿠버네티스 구성요소와 도구에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubernetes-components/) +* [kubectl 명령어에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubectl/) +* [쿠버네티스 API에 대한 레퍼런스 문서 생성하기](/docs/contribute/generate-ref-docs/kubernetes-api/) + + diff --git a/content/ko/docs/reference/_index.md b/content/ko/docs/reference/_index.md index ff1727ffee35b..4c0b0b8177664 100644 --- a/content/ko/docs/reference/_index.md +++ b/content/ko/docs/reference/_index.md @@ -21,8 +21,6 @@ no_list: true * [표준 용어집](/ko/docs/reference/glossary/) - 포괄적이고, 표준화 된 쿠버네티스 용어 목록 - - * [쿠버네티스 API 레퍼런스](/docs/reference/kubernetes-api/) * [쿠버네티스 {{< param "version" >}}용 원페이지(One-page) API 레퍼런스](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) * [쿠버네티스 API 사용](/ko/docs/reference/using-api/) - 쿠버네티스 API에 대한 개요 @@ -50,16 +48,35 @@ no_list: true ## 컴포넌트 -* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 노드에서 구동되는 주요한 *노드 에이전트*. kubelet은 PodSpecs 집합을 가지며 기술된 컨테이너가 구동되고 있는지, 정상 작동하는지를 보장한다. -* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - 파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 수행하는 REST API. +* [kubelet](/docs/reference/command-line-tools-reference/kubelet/) - 각 +노드에서 구동되는 주요한 에이전트. kubelet은 PodSpecs 집합을 가지며 +기술된 컨테이너가 구동되고 있는지, 정상 작동하는지를 보장한다. +* [kube-apiserver](/docs/reference/command-line-tools-reference/kube-apiserver/) - +파드, 서비스, 레플리케이션 컨트롤러와 같은 API 오브젝트에 대한 검증과 구성을 +수행하는 REST API. * [kube-controller-manager](/docs/reference/command-line-tools-reference/kube-controller-manager/) - 쿠버네티스에 탑재된 핵심 제어 루프를 포함하는 데몬. -* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 할 수 있다. +* [kube-proxy](/docs/reference/command-line-tools-reference/kube-proxy/) - 간단한 +TCP/UDP 스트림 포워딩이나 백-엔드 집합에 걸쳐서 라운드-로빈 TCP/UDP 포워딩을 +할 수 있다. * [kube-scheduler](/docs/reference/command-line-tools-reference/kube-scheduler/) - 가용성, 성능 및 용량을 관리하는 스케줄러. -## 스케줄링 + * [kube-scheduler 정책](/ko/docs/reference/scheduling/policies) + * [kube-scheduler 프로파일](/ko/docs/reference/scheduling/config/#여러-프로파일) + +## 환경설정 API + +이 섹션은 쿠버네티스 구성요소 또는 도구를 환경설정하는 데에 사용되는 +"미발표된" API를 다룬다. 이 API들은 사용자나 관리자가 클러스터를 +사용/관리하는 데에 중요하지만, 이들 API의 대부분은 아직 API 서버가 +제공하지 않는다. -* [kube-scheduler 정책](/ko/docs/reference/scheduling/policies) -* [kube-scheduler 프로파일](/docs/reference/scheduling/config#profiles) +* [kubelet 환경설정 (v1beta1)](/docs/reference/config-api/kubelet-config.v1beta1/) +* [kube-scheduler 환경설정 (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +* [kube-scheduler 정책 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) +* [kube-proxy 환경설정 (v1alpha1)](/docs/reference/config-api/kube-proxy-config.v1alpha1/) +* [`audit.k8s.io/v1` API](/docs/reference/config-api/apiserver-audit.v1/) +* [클라이언트 인증 API (v1beta1)](/docs/reference/config-api/client-authentication.v1beta1/) +* [WebhookAdmission 환경설정 (v1)](/docs/reference/config-api/apiserver-webhookadmission.v1/) ## 설계 문서 diff --git a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md index f9bc6a1112722..a64633ed52c80 100644 --- a/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md +++ b/content/ko/docs/reference/access-authn-authz/service-accounts-admin.md @@ -55,9 +55,9 @@ weight: 50 1. `/var/run/secrets/kubernetes.io/serviceaccount` 에 마운트된 파드의 각 컨테이너에 `volumeSource` 를 추가한다. 
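위에서 언급한 것처럼 서비스 어카운트 자격 증명은 파드의 `/var/run/secrets/kubernetes.io/serviceaccount` 경로에 마운트된다. 파드 안에서 이를 직접 확인해 보는 간단한 예시이며, `my-pod` 는 설명을 위한 가정의 파드 이름이다.

```shell
# 파드에 마운트된 서비스 어카운트 자격 증명을 확인한다. (my-pod 는 가정의 파드 이름)
kubectl exec my-pod -- ls /var/run/secrets/kubernetes.io/serviceaccount
# 일반적으로 ca.crt, namespace, token 파일이 보인다.
```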
#### 바인딩된 서비스 어카운트 토큰 볼륨 -{{< feature-state for_k8s_version="v1.13" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} -`BoundServiceAccountTokenVolume` 기능 게이트가 활성화되면, 서비스 어카운트 어드미션 컨트롤러가 +`BoundServiceAccountTokenVolume` [기능 게이트](/ko/docs/reference/command-line-tools-reference/feature-gates/)가 활성화되면, 서비스 어카운트 어드미션 컨트롤러가 시크릿 볼륨 대신 프로젝티드 서비스 어카운트 토큰 볼륨을 추가한다. 서비스 어카운트 토큰은 기본적으로 1시간 후에 만료되거나 파드가 삭제된다. [프로젝티드 볼륨](/docs/tasks/configure-pod-container/configure-projected-volume-storage/)에 대한 자세한 내용을 참고한다. 이 기능은 모든 네임스페이스에 "kube-root-ca.crt" 컨피그맵을 게시하는 활성화된 `RootCAConfigMap` 기능 게이트에 따라 다르다. 이 컨피그맵에는 kube-apiserver에 대한 연결을 확인하는 데 사용되는 CA 번들이 포함되어 있다. diff --git a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md index 62fb53396cf6e..a0f970d7c40b0 100644 --- a/content/ko/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/ko/docs/reference/command-line-tools-reference/feature-gates.md @@ -59,11 +59,10 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `AnyVolumeDataSource` | `false` | 알파 | 1.18 | | | `AppArmor` | `true` | 베타 | 1.4 | | | `BalanceAttachedNodeVolumes` | `false` | 알파 | 1.11 | | -| `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | | +| `BoundServiceAccountTokenVolume` | `false` | 알파 | 1.13 | 1.20 | +| `BoundServiceAccountTokenVolume` | `true` | 베타 | 1.21 | | | `CPUManager` | `false` | 알파 | 1.8 | 1.9 | | `CPUManager` | `true` | 베타 | 1.10 | | -| `CRIContainerLogRotation` | `false` | 알파 | 1.10 | 1.10 | -| `CRIContainerLogRotation` | `true` | 베타| 1.11 | | | `CSIInlineVolume` | `false` | 알파 | 1.15 | 1.15 | | `CSIInlineVolume` | `true` | 베타 | 1.16 | - | | `CSIMigration` | `false` | 알파 | 1.14 | 1.16 | @@ -74,7 +73,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CSIMigrationAzureDisk` | `false` | 알파 | 1.15 | 1.18 | | `CSIMigrationAzureDisk` | `false` | 베타 | 1.19 | | | `CSIMigrationAzureDiskComplete` | `false` | 알파 | 1.17 | | -| `CSIMigrationAzureFile` | `false` | 알파 | 1.15 | | +| `CSIMigrationAzureFile` | `false` | 알파 | 1.15 | 1.19 | +| `CSIMigrationAzureFile` | `false` | 베타 | 1.21 | | | `CSIMigrationAzureFileComplete` | `false` | 알파 | 1.17 | | | `CSIMigrationGCE` | `false` | 알파 | 1.14 | 1.16 | | `CSIMigrationGCE` | `false` | 베타 | 1.17 | | @@ -84,13 +84,16 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CSIMigrationOpenStackComplete` | `false` | 알파 | 1.17 | | | `CSIMigrationvSphere` | `false` | 베타 | 1.19 | | | `CSIMigrationvSphereComplete` | `false` | 베타 | 1.19 | | -| `CSIServiceAccountToken` | `false` | 알파 | 1.20 | | -| `CSIStorageCapacity` | `false` | 알파 | 1.19 | | +| `CSIServiceAccountToken` | `false` | 알파 | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | 베타 | 1.21 | | +| `CSIStorageCapacity` | `false` | 알파 | 1.19 | 1.20 | +| `CSIStorageCapacity` | `true` | 베타 | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | 알파 | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | 베타 | 1.20 | | | `ConfigurableFSGroupPolicy` | `false` | 알파 | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | 베타 | 1.20 | | -| `CronJobControllerV2` | `false` | 알파 | 1.20 | | +| `CronJobControllerV2` | `false` | 알파 | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | 베타 | 1.21 | | | `CustomCPUCFSQuotaPeriod` | `false` | 알파 | 1.12 | | | `DefaultPodTopologySpread` | `false` | 알파 | 1.19 | 1.19 | | `DefaultPodTopologySpread` | `true` | 베타 | 1.20 | | @@ -98,14 +101,11 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `DevicePlugins` | `true` | 베타 | 1.10 | | | `DisableAcceleratorUsageMetrics` | `false` | 
알파 | 1.19 | 1.19 | | `DisableAcceleratorUsageMetrics` | `true` | 베타 | 1.20 | | -| `DownwardAPIHugePages` | `false` | 알파 | 1.20 | | +| `DownwardAPIHugePages` | `false` | 알파 | 1.20 | 1.20 | +| `DownwardAPIHugePages` | `false` | 베타 | 1.21 | | | `DynamicKubeletConfig` | `false` | 알파 | 1.4 | 1.10 | | `DynamicKubeletConfig` | `true` | 베타 | 1.11 | | | `EfficientWatchResumption` | `false` | 알파 | 1.20 | | -| `EndpointSlice` | `false` | 알파 | 1.16 | 1.16 | -| `EndpointSlice` | `false` | 베타 | 1.17 | | -| `EndpointSlice` | `true` | 베타 | 1.18 | | -| `EndpointSliceNodeName` | `false` | 알파 | 1.20 | | | `EndpointSliceProxying` | `false` | 알파 | 1.18 | 1.18 | | `EndpointSliceProxying` | `true` | 베타 | 1.19 | | | `EndpointSliceTerminatingCondition` | `false` | 알파 | 1.20 | | @@ -117,15 +117,17 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `ExpandPersistentVolumes` | `false` | 알파 | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | 베타 | 1.11 | | | `ExperimentalHostUserNamespaceDefaulting` | `false` | 베타 | 1.5 | | -| `GenericEphemeralVolume` | `false` | 알파 | 1.19 | | -| `GracefulNodeShutdown` | `false` | 알파 | 1.20 | | +| `GenericEphemeralVolume` | `false` | 알파 | 1.19 | 1.20 | +| `GenericEphemeralVolume` | `true` | 베타 | 1.21 | | +| `GracefulNodeShutdown` | `false` | 알파 | 1.20 | 1.20 | +| `GracefulNodeShutdown` | `true` | 베타 | 1.21 | | | `HPAContainerMetrics` | `false` | 알파 | 1.20 | | | `HPAScaleToZero` | `false` | 알파 | 1.16 | | | `HugePageStorageMediumSize` | `false` | 알파 | 1.18 | 1.18 | | `HugePageStorageMediumSize` | `true` | 베타 | 1.19 | | -| `IPv6DualStack` | `false` | 알파 | 1.15 | | -| `ImmutableEphemeralVolumes` | `false` | 알파 | 1.18 | 1.18 | -| `ImmutableEphemeralVolumes` | `true` | 베타 | 1.19 | | +| `IngressClassNamespacedParams` | `false` | 알파 | 1.21 | | +| `IPv6DualStack` | `false` | 알파 | 1.15 | 1.20 | +| `IPv6DualStack` | `true` | 베타 | 1.21 | | | `KubeletCredentialProviders` | `false` | 알파 | 1.20 | | | `KubeletPodResources` | `true` | 알파 | 1.13 | 1.14 | | `KubeletPodResources` | `true` | 베타 | 1.15 | | @@ -134,22 +136,25 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `LocalStorageCapacityIsolation` | `false` | 알파 | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | 베타 | 1.10 | | | `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | 알파 | 1.15 | | +| `LogarithmicScaleDown` | `false` | 알파 | 1.21 | | +| `KubeletPodResourcesGetAllocatable` | `false` | 알파 | 1.21 | | | `MixedProtocolLBService` | `false` | 알파 | 1.20 | | +| `NamespaceDefaultLabelName` | `true` | 베타 | 1.21 | | +| `NetworkPolicyEndPort` | `false` | 알파 | 1.21 | | | `NodeDisruptionExclusion` | `false` | 알파 | 1.16 | 1.18 | | `NodeDisruptionExclusion` | `true` | 베타 | 1.19 | | | `NonPreemptingPriority` | `false` | 알파 | 1.15 | 1.18 | | `NonPreemptingPriority` | `true` | 베타 | 1.19 | | -| `PodDisruptionBudget` | `false` | 알파 | 1.3 | 1.4 | -| `PodDisruptionBudget` | `true` | 베타 | 1.5 | | +| `PodDeletionCost` | `false` | 알파 | 1.21 | | +| `PodAffinityNamespaceSelector` | `false` | 알파 | 1.21 | | | `PodOverhead` | `false` | 알파 | 1.16 | 1.17 | | `PodOverhead` | `true` | 베타 | 1.18 | | +| `ProbeTerminationGracePeriod` | `false` | 알파 | 1.21 | | | `ProcMountType` | `false` | 알파 | 1.12 | | | `QOSReserved` | `false` | 알파 | 1.11 | | | `RemainingItemCount` | `false` | 알파 | 1.15 | | | `RemoveSelfLink` | `false` | 알파 | 1.16 | 1.19 | | `RemoveSelfLink` | `true` | 베타 | 1.20 | | -| `RootCAConfigMap` | `false` | 알파 | 1.13 | 1.19 | -| `RootCAConfigMap` | `true` | 베타 | 1.20 | | | `RotateKubeletServerCertificate` | `false` | 알파 | 1.7 | 1.11 | | `RotateKubeletServerCertificate` 
| `true` | 베타 | 1.12 | | | `RunAsGroup` | `true` | 베타 | 1.14 | | @@ -157,9 +162,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `SCTPSupport` | `true` | 베타 | 1.19 | | | `ServerSideApply` | `false` | 알파 | 1.14 | 1.15 | | `ServerSideApply` | `true` | 베타 | 1.16 | | -| `ServiceAccountIssuerDiscovery` | `false` | 알파 | 1.18 | 1.19 | -| `ServiceAccountIssuerDiscovery` | `true` | 베타 | 1.20 | | +| `ServiceInternalTrafficPolicy` | `false` | 알파 | 1.21 | | | `ServiceLBNodePortControl` | `false` | 알파 | 1.20 | | +| `ServiceLoadBalancerClass` | `false` | 알파 | 1.21 | | | `ServiceNodeExclusion` | `false` | 알파 | 1.8 | 1.18 | | `ServiceNodeExclusion` | `true` | 베타 | 1.19 | | | `ServiceTopology` | `false` | 알파 | 1.17 | | @@ -169,8 +174,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `StorageVersionAPI` | `false` | 알파 | 1.20 | | | `StorageVersionHash` | `false` | 알파 | 1.14 | 1.14 | | `StorageVersionHash` | `true` | 베타 | 1.15 | | -| `Sysctls` | `true` | 베타 | 1.11 | | +| `SuspendJob` | `false` | 알파 | 1.21 | | | `TTLAfterFinished` | `false` | 알파 | 1.12 | | +| `TopologyAwareHints` | `false` | 알파 | 1.21 | | | `TopologyManager` | `false` | 알파 | 1.16 | 1.17 | | `TopologyManager` | `true` | 베타 | 1.18 | | | `ValidateProxyRedirects` | `false` | 알파 | 1.12 | 1.13 | @@ -179,7 +185,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `WinDSR` | `false` | 알파 | 1.14 | | | `WinOverlay` | `false` | 알파 | 1.14 | 1.19 | | `WinOverlay` | `true` | 베타 | 1.20 | | -| `WindowsEndpointSliceProxying` | `false` | 알파 | 1.19 | | +| `WindowsEndpointSliceProxying` | `false` | 알파 | 1.19 | 1.20 | +| `WindowsEndpointSliceProxying` | `true` | beta | 1.21 | | {{< /table >}} ### GA 또는 사용 중단된 기능을 위한 기능 게이트 @@ -200,6 +207,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `BlockVolume` | `false` | 알파 | 1.9 | 1.12 | | `BlockVolume` | `true` | 베타 | 1.13 | 1.17 | | `BlockVolume` | `true` | GA | 1.18 | - | +| `CRIContainerLogRotation` | `false` | 알파 | 1.10 | 1.10 | +| `CRIContainerLogRotation` | `true` | 베타 | 1.11 | 1.20 | +| `CRIContainerLogRotation` | `true` | GA | 1.21 | - | | `CSIBlockVolume` | `false` | 알파 | 1.11 | 1.13 | | `CSIBlockVolume` | `true` | 베타 | 1.14 | 1.17 | | `CSIBlockVolume` | `true` | GA | 1.18 | - | @@ -215,6 +225,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `CSIPersistentVolume` | `false` | 알파 | 1.9 | 1.9 | | `CSIPersistentVolume` | `true` | 베타 | 1.10 | 1.12 | | `CSIPersistentVolume` | `true` | GA | 1.13 | - | +| `CSIVolumeHealth` | `false` | 알파 | 1.21 | - | | `CustomPodDNS` | `false` | 알파 | 1.9 | 1.9 | | `CustomPodDNS` | `true` | 베타| 1.10 | 1.13 | | `CustomPodDNS` | `true` | GA | 1.14 | - | @@ -245,6 +256,12 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `EnableAggregatedDiscoveryTimeout` | `true` | 사용중단 | 1.16 | - | | `EnableEquivalenceClassCache` | `false` | 알파 | 1.8 | 1.14 | | `EnableEquivalenceClassCache` | - | 사용중단 | 1.15 | - | +| `EndpointSlice` | `false` | 알파 | 1.16 | 1.16 | +| `EndpointSlice` | `false` | 베타 | 1.17 | 1.17 | +| `EndpointSlice` | `true` | 베타 | 1.18 | 1.21 | +| `EndpointSlice` | `true` | GA | 1.21 | - | +| `EndpointSliceNodeName` | `false` | 알파 | 1.20 | 1.21 | +| `EndpointSliceNodeName` | `true` | GA | 1.21 | - | | `ExperimentalCriticalPodAnnotation` | `false` | 알파 | 1.5 | 1.12 | | `ExperimentalCriticalPodAnnotation` | `false` | 사용중단 | 1.13 | - | | `EvenPodsSpread` | `false` | 알파 | 1.16 | 1.17 | @@ -258,6 +275,10 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `HugePages` | `true` | GA | 1.14 | - | | `HyperVContainer` | `false` | 알파 | 1.10 | 1.19 | | `HyperVContainer` | `false` | 사용중단 | 1.20 | - | +| `ImmutableEphemeralVolumes` | `false` | 알파 | 1.18 | 
1.18 | +| `ImmutableEphemeralVolumes` | `true` | 베타 | 1.19 | 1.20 | +| `ImmutableEphemeralVolumes` | `true` | GA | 1.21 | | +| `IndexedJob` | `false` | 알파 | 1.21 | | | `Initializers` | `false` | 알파 | 1.7 | 1.13 | | `Initializers` | - | 사용중단 | 1.14 | - | | `KubeletConfigFile` | `false` | 알파 | 1.8 | 1.9 | @@ -281,6 +302,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `PersistentLocalVolumes` | `false` | 알파 | 1.7 | 1.9 | | `PersistentLocalVolumes` | `true` | 베타 | 1.10 | 1.13 | | `PersistentLocalVolumes` | `true` | GA | 1.14 | - | +| `PodDisruptionBudget` | `false` | 알파 | 1.3 | 1.4 | +| `PodDisruptionBudget` | `true` | 베타 | 1.5 | 1.20 | +| `PodDisruptionBudget` | `true` | GA | 1.21 | - | | `PodPriority` | `false` | 알파 | 1.8 | 1.10 | | `PodPriority` | `true` | 베타 | 1.11 | 1.13 | | `PodPriority` | `true` | GA | 1.14 | - | @@ -296,6 +320,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `ResourceQuotaScopeSelectors` | `false` | 알파 | 1.11 | 1.11 | | `ResourceQuotaScopeSelectors` | `true` | 베타 | 1.12 | 1.16 | | `ResourceQuotaScopeSelectors` | `true` | GA | 1.17 | - | +| `RootCAConfigMap` | `false` | 알파 | 1.13 | 1.19 | +| `RootCAConfigMap` | `true` | 베타 | 1.20 | 1.20 | +| `RootCAConfigMap` | `true` | GA | 1.21 | - | | `RotateKubeletClientCertificate` | `true` | 베타 | 1.8 | 1.18 | | `RotateKubeletClientCertificate` | `true` | GA | 1.19 | - | | `RuntimeClass` | `false` | 알파 | 1.12 | 1.13 | @@ -307,6 +334,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `SCTPSupport` | `false` | 알파 | 1.12 | 1.18 | | `SCTPSupport` | `true` | 베타 | 1.19 | 1.19 | | `SCTPSupport` | `true` | GA | 1.20 | - | +| `ServiceAccountIssuerDiscovery` | `false` | 알파 | 1.18 | 1.19 | +| `ServiceAccountIssuerDiscovery` | `true` | 베타 | 1.20 | 1.20 | +| `ServiceAccountIssuerDiscovery` | `true` | GA | 1.21 | - | | `ServiceAppProtocol` | `false` | 알파 | 1.18 | 1.18 | | `ServiceAppProtocol` | `true` | 베타 | 1.19 | | | `ServiceAppProtocol` | `true` | GA | 1.20 | - | @@ -331,6 +361,8 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `SupportPodPidsLimit` | `false` | 알파 | 1.10 | 1.13 | | `SupportPodPidsLimit` | `true` | 베타 | 1.14 | 1.19 | | `SupportPodPidsLimit` | `true` | GA | 1.20 | - | +| `Sysctls` | `true` | 베타 | 1.11 | 1.20 | +| `Sysctls` | `true` | GA | 1.21 | | | `TaintBasedEvictions` | `false` | 알파 | 1.6 | 1.12 | | `TaintBasedEvictions` | `true` | 베타 | 1.13 | 1.17 | | `TaintBasedEvictions` | `true` | GA | 1.18 | - | @@ -343,6 +375,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 | `TokenRequestProjection` | `false` | 알파 | 1.11 | 1.11 | | `TokenRequestProjection` | `true` | 베타 | 1.12 | 1.19 | | `TokenRequestProjection` | `true` | GA | 1.20 | - | +| `VolumeCapacityPriority` | `false` | 알파 | 1.21 | - | | `VolumeSnapshotDataSource` | `false` | 알파 | 1.12 | 1.16 | | `VolumeSnapshotDataSource` | `true` | 베타 | 1.17 | 1.19 | | `VolumeSnapshotDataSource` | `true` | GA | 1.20 | - | @@ -444,7 +477,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 확인한다. - `CPUManager`: 컨테이너 수준의 CPU 어피니티 지원을 활성화한다. [CPU 관리 정책](/docs/tasks/administer-cluster/cpu-management-policies/)을 참고한다. -- `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. +- `CRIContainerLogRotation`: cri 컨테이너 런타임에 컨테이너 로그 로테이션을 활성화한다. 로그 파일 사이즈 기본값은 10MB이며, +컨테이너 당 최대 로그 파일 수 기본값은 5이다. 이 값은 kubelet 환경설정으로 변경할 수 있다. +더 자세한 내용은 [노드 레벨에서의 로깅](/ko/docs/concepts/cluster-administration/logging/#노드-레벨에서의-로깅)을 참고한다. - `CSIBlockVolume`: 외부 CSI 볼륨 드라이버가 블록 스토리지를 지원할 수 있게 한다. 자세한 내용은 [`csi` 원시 블록 볼륨 지원](/ko/docs/concepts/storage/volumes/#csi-원시-raw-블록-볼륨-지원) 문서를 참고한다. 
@@ -525,6 +560,7 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `CSIVolumeFSGroupPolicy`: CSI드라이버가 `fsGroupPolicy` 필드를 사용하도록 허용한다. 이 필드는 CSI드라이버에서 생성된 볼륨이 마운트될 때 볼륨 소유권과 권한 수정을 지원하는지 여부를 제어한다. +- `CSIVolumeHealth`: 노드에서의 CSI 볼륨 상태 모니터링 기능을 활성화한다. - `ConfigurableFSGroupPolicy`: 사용자가 파드에 볼륨을 마운트할 때 fsGroups에 대한 볼륨 권한 변경 정책을 구성할 수 있다. 자세한 내용은 [파드의 볼륨 권한 및 소유권 변경 정책 구성](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)을 @@ -546,7 +582,6 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 생성된 리소스에서 스키마 기반 유효성 검사를 활성화한다. - `CustomResourceWebhookConversion`: [커스텀리소스데피니션](/ko/docs/concepts/extend-kubernetes/api-extension/custom-resources/)에서 생성된 리소스에 대해 웹 훅 기반의 변환을 활성화한다. - 실행 중인 파드 문제를 해결한다. - `DefaultPodTopologySpread`: `PodTopologySpread` 스케줄링 플러그인을 사용하여 [기본 분배](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/#내부-기본-제약)를 수행한다. - `DevicePlugins`: 노드에서 [장치 플러그인](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) @@ -624,10 +659,15 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `HyperVContainer`: 윈도우 컨테이너를 위한 [Hyper-V 격리](https://docs.microsoft.com/ko-kr/virtualization/windowscontainers/manage-containers/hyperv-container) 기능을 활성화한다. -- `IPv6DualStack`: IPv6에 대한 [듀얼 스택](/ko/docs/concepts/services-networking/dual-stack/) - 지원을 활성화한다. - `ImmutableEphemeralVolumes`: 안정성과 성능 향상을 위해 개별 시크릿(Secret)과 컨피그맵(ConfigMap)을 변경할 수 없는(immutable) 것으로 표시할 수 있다. +- `IndexedJob`: [잡](/ko/docs/concepts/workloads/controllers/job/) 컨트롤러가 + 완료 횟수를 기반으로 파드 완료를 관리할 수 있도록 한다. +- `IngressClassNamespacedParams`: `IngressClass` 리소스가 네임스페이스 범위로 + 한정된 파라미터를 이용할 수 있도록 한다. 이 기능은 `IngressClass.spec.parameters` 에 + `Scope` 와 `Namespace` 2개의 필드를 추가한다. +- `IPv6DualStack`: IPv6을 위한 [이중 스택](/ko/docs/concepts/services-networking/dual-stack/) + 기능을 활성화한다. - `KubeletConfigFile`: 구성 파일을 사용하여 지정된 파일에서 kubelet 구성을 로드할 수 있다. 자세한 내용은 [구성 파일을 통해 kubelet 파라미터 설정](/docs/tasks/administer-cluster/kubelet-config-file/)을 @@ -638,10 +678,14 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `KubeletPodResources`: kubelet의 파드 리소스 gPRC 엔드포인트를 활성화한다. 자세한 내용은 [장치 모니터링 지원](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/606-compute-device-assignment/README.md)을 참고한다. +- `KubeletPodResourcesGetAllocatable`: kubelet의 파드 리소스 `GetAllocatableResources` 기능을 활성화한다. + 이 API는 클라이언트가 노드의 여유 컴퓨팅 자원을 잘 파악할 수 있도록, 할당 가능 자원에 대한 정보를 + [자원 할당 보고](/ko/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#장치-플러그인-리소스-모니터링)한다. - `LegacyNodeRoleBehavior`: 비활성화되면, 서비스 로드 밸런서 및 노드 중단의 레거시 동작은 `NodeDisruptionExclusion` 과 `ServiceNodeExclusion` 에 의해 제공된 기능별 레이블을 대신하여 `node-role.kubernetes.io/master` 레이블을 무시한다. -- `LocalStorageCapacityIsolation`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)와 +- `LocalStorageCapacityIsolation`: + [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)와 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir)의 `sizeLimit` 속성을 사용할 수 있게 한다. - `LocalStorageCapacityIsolationFSQuotaMonitoring`: [로컬 임시 스토리지](/ko/docs/concepts/configuration/manage-resources-containers/)에 @@ -651,21 +695,30 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 프로젝트 쿼터를 사용하여 [emptyDir 볼륨](/ko/docs/concepts/storage/volumes/#emptydir) 스토리지 사용을 모니터링하여 성능과 정확성을 향상시킨다. +- `LogarithmicScaleDown`: 컨트롤러 스케일 다운 시에 파드 타임스탬프를 로그 스케일로 버켓화하여 + 축출할 파드를 반-랜덤하게 선택하는 기법을 활성화한다. - `MixedProtocolLBService`: 동일한 로드밸런서 유형 서비스 인스턴스에서 다른 프로토콜 사용을 활성화한다. - `MountContainers` (*사용 중단됨*): 호스트의 유틸리티 컨테이너를 볼륨 마운터로 사용할 수 있다. 
- `MountPropagation`: 한 컨테이너에서 다른 컨테이너 또는 파드로 마운트된 볼륨을 공유할 수 있다. 자세한 내용은 [마운트 전파(propagation)](/ko/docs/concepts/storage/volumes/#마운트-전파-propagation)을 참고한다. +- `NamespaceDefaultLabelName`: API 서버로 하여금 모든 네임스페이스에 대해 변경할 수 없는 (immutable) + {{< glossary_tooltip text="레이블" term_id="label" >}} `kubernetes.io/metadata.name`을 설정하도록 한다. (네임스페이스의 이름도 변경 불가) +- `NetworkPolicyEndPort`: 네트워크폴리시(NetworkPolicy) 오브젝트에서 단일 포트를 지정하는 것 대신에 포트 범위를 지정할 수 있도록, `endPort` 필드의 사용을 활성화한다. - `NodeDisruptionExclusion`: 영역(zone) 장애 시 노드가 제외되지 않도록 노드 레이블 `node.kubernetes.io/exclude-disruption` 사용을 활성화한다. - `NodeLease`: 새로운 리스(Lease) API가 노드 상태 신호로 사용될 수 있는 노드 하트비트(heartbeats)를 보고할 수 있게 한다. - `NonPreemptingPriority`: 프라이어리티클래스(PriorityClass)와 파드에 `preemptionPolicy` 필드를 활성화한다. - `PVCProtection`: 파드에서 사용 중일 때 퍼시스턴트볼륨클레임(PVC)이 삭제되지 않도록 한다. +- `PodDeletionCost`: 레플리카셋 다운스케일 시 삭제될 파드의 우선순위를 사용자가 조절할 수 있도록, + [파드 삭제 비용](/ko/docs/concepts/workloads/controllers/replicaset/#파드-삭제-비용) 기능을 활성화한다. - `PersistentLocalVolumes`: 파드에서 `local` 볼륨 유형의 사용을 활성화한다. `local` 볼륨을 요청하는 경우 파드 어피니티를 지정해야 한다. - `PodDisruptionBudget`: [PodDisruptionBudget](/docs/tasks/run-application/configure-pdb/) 기능을 활성화한다. +- `PodAffinityNamespaceSelector`: [파드 어피니티 네임스페이스 셀렉터](/ko/docs/concepts/scheduling-eviction/assign-pod-node/#네임스페이스-셀렉터) 기능과 + [CrossNamespacePodAffinity](/ko/docs/concepts/policy/resource-quotas/#네임스페이스-간-파드-어피니티-쿼터) 쿼터 범위 기능을 활성화한다. - `PodOverhead`: 파드 오버헤드를 판단하기 위해 [파드오버헤드(PodOverhead)](/ko/docs/concepts/scheduling-eviction/pod-overhead/) 기능을 활성화한다. - `PodPriority`: [우선 순위](/ko/docs/concepts/configuration/pod-priority-preemption/)를 @@ -676,6 +729,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `PodShareProcessNamespace`: 파드에서 실행되는 컨테이너 간에 단일 프로세스 네임스페이스를 공유하기 위해 파드에서 `shareProcessNamespace` 설정을 활성화한다. 자세한 내용은 [파드의 컨테이너 간 프로세스 네임스페이스 공유](/docs/tasks/configure-pod-container/share-process-namespace/)에서 확인할 수 있다. +- `ProbeTerminationGracePeriod`: 파드의 [프로브-수준 + `terminationGracePeriodSeconds` 설정하기](/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds) 기능을 활성화한다. + 더 자세한 사항은 [기능개선 제안](https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/2238-liveness-probe-grace-period)을 참고한다. - `ProcMountType`: SecurityContext의 `procMount` 필드를 설정하여 컨테이너의 proc 타입의 마운트를 제어할 수 있다. - `QOSReserved`: QoS 수준에서 리소스 예약을 허용하여 낮은 QoS 수준의 파드가 @@ -715,8 +771,10 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 [파드의 서비스 어카운트 구성](/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery)을 참고한다. - `ServiceAppProtocol`: 서비스와 엔드포인트에서 `AppProtocol` 필드를 활성화한다. +- `ServiceInternalTrafficPolicy`: 서비스에서 `InternalTrafficPolicy` 필드를 활성화한다. - `ServiceLBNodePortControl`: 서비스에서`spec.allocateLoadBalancerNodePorts` 필드를 활성화한다. +- `ServiceLoadBalancerClass`: 서비스에서 `LoadBalancerClass` 필드를 활성화한다. 자세한 내용은 [로드밸런서 구현체의 종류 확인하기](/ko/docs/concepts/services-networking/service/#load-balancer-class)를 참고한다. - `ServiceLoadBalancerFinalizer`: 서비스 로드 밸런서에 대한 Finalizer 보호를 활성화한다. - `ServiceNodeExclusion`: 클라우드 제공자가 생성한 로드 밸런서에서 노드를 제외할 수 있다. "`node.kubernetes.io/exclude-from-external-load-balancers`"로 @@ -725,8 +783,6 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 있도록 한다. 자세한 내용은 [서비스토폴로지(ServiceTopology)](/ko/docs/concepts/services-networking/service-topology/)를 참고한다. -- `SizeMemoryBackedVolumes`: kubelet 지원을 사용하여 메모리 백업 볼륨의 크기를 조정한다. - 자세한 내용은 [volumes](/ko/docs/concepts/storage/volumes)를 참조한다. - `SetHostnameAsFQDN`: 전체 주소 도메인 이름(FQDN)을 파드의 호스트 이름으로 설정하는 기능을 활성화한다. 
[파드의 `setHostnameAsFQDN` 필드](/ko/docs/concepts/services-networking/dns-pod-service/#파드의-sethostnameasfqdn-필드)를 참고한다. @@ -750,6 +806,9 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 파라미터를 지정하여 지정된 수의 프로세스 ID가 시스템 전체와 각각 쿠버네티스 시스템 데몬에 대해 예약되도록 할 수 있다. +- `SuspendJob`: 잡 중지/재시작 기능을 활성화한다. + 자세한 내용은 [잡 문서](/ko/docs/concepts/workloads/controllers/job/)를 + 참고한다. - `Sysctls`: 각 파드에 설정할 수 있는 네임스페이스 커널 파라미터(sysctl)를 지원한다. 자세한 내용은 [sysctl](/docs/tasks/administer-cluster/sysctl-cluster/)을 참고한다. @@ -765,9 +824,15 @@ kubelet과 같은 컴포넌트의 기능 게이트를 설정하려면, 기능 - `TokenRequest`: 서비스 어카운트 리소스에서 `TokenRequest` 엔드포인트를 활성화한다. - `TokenRequestProjection`: [`projected` 볼륨](/ko/docs/concepts/storage/volumes/#projected)을 통해 서비스 어카운트 토큰을 파드에 주입할 수 있다. +- `TopologyAwareHints`: 엔드포인트슬라이스(EndpointSlices)에서 토폴로지 힌트 기반 + 토폴로지-어웨어 라우팅을 활성화한다. 자세한 내용은 + [토폴로지 어웨어 힌트](/docs/concepts/services-networking/topology-aware-hints/) + 를 참고한다. - `TopologyManager`: 쿠버네티스의 다른 컴포넌트에 대한 세분화된 하드웨어 리소스 할당을 조정하는 메커니즘을 활성화한다. [노드의 토폴로지 관리 정책 제어](/docs/tasks/administer-cluster/topology-manager/)를 참고한다. +- `VolumeCapacityPriority`: 가용 PV 용량을 기반으로 + 여러 토폴로지에 있는 노드들의 우선순위를 정하는 기능을 활성화한다. - `VolumePVCDataSource`: 기존 PVC를 데이터 소스로 지정하는 기능을 지원한다. - `VolumeScheduling`: 볼륨 토폴로지 인식 스케줄링을 활성화하고 퍼시스턴트볼륨클레임(PVC) 바인딩이 스케줄링 결정을 인식하도록 한다. 또한 diff --git a/content/ko/docs/reference/glossary/cloud-controller-manager.md b/content/ko/docs/reference/glossary/cloud-controller-manager.md index ebfa3d926c188..d4eb23111b2a7 100644 --- a/content/ko/docs/reference/glossary/cloud-controller-manager.md +++ b/content/ko/docs/reference/glossary/cloud-controller-manager.md @@ -11,10 +11,10 @@ tags: - architecture - operation --- - 클라우드별 컨트롤 로직을 포함하는 쿠버네티스 +클라우드별 컨트롤 로직을 포함하는 쿠버네티스 {{< glossary_tooltip text="컨트롤 플레인" term_id="control-plane" >}} 컴포넌트이다. 클라우드 컨트롤러 매니저를 통해 클러스터를 클라우드 공급자의 API에 연결하고, -해당 클라우드 플랫폼과 상호 작용하는 컴포넌트와 클러스터와 상호 작용하는 컴포넌트를 분리할 수 있다. +해당 클라우드 플랫폼과 상호 작용하는 컴포넌트와 클러스터와만 상호 작용하는 컴포넌트를 구분할 수 있게 해 준다. diff --git a/content/ko/docs/reference/kubectl/cheatsheet.md b/content/ko/docs/reference/kubectl/cheatsheet.md index d5870bba30303..4ee9c5406e9e2 100644 --- a/content/ko/docs/reference/kubectl/cheatsheet.md +++ b/content/ko/docs/reference/kubectl/cheatsheet.md @@ -357,7 +357,7 @@ API 리소스를 탐색하기 위한 다른 작업: ```bash kubectl api-resources --namespaced=true # 네임스페이스를 가지는 모든 리소스 kubectl api-resources --namespaced=false # 네임스페이스를 가지지 않는 모든 리소스 -kubectl api-resources -o name # 모든 리소스의 단순한 (리소스 이름 만) 출력 +kubectl api-resources -o name # 모든 리소스의 단순한 (리소스 이름만) 출력 kubectl api-resources -o wide # 모든 리소스의 확장된 ("wide"로 알려진) 출력 kubectl api-resources --verbs=list,get # "list"와 "get"의 요청 동사를 지원하는 모든 리소스 출력 kubectl api-resources --api-group=extensions # "extensions" API 그룹의 모든 리소스 @@ -384,6 +384,9 @@ kubectl api-resources --api-group=extensions # "extensions" API 그룹의 모든 # 클러스터에서 실행 중인 모든 이미지 kubectl get pods -A -o=custom-columns='DATA:spec.containers[*].image' +# `default` 네임스페이스의 모든 이미지를 파드별로 그룹지어 출력 +kubectl get pods --namespace default --output=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image" + # "k8s.gcr.io/coredns:1.6.2" 를 제외한 모든 이미지 kubectl get pods -A -o=custom-columns='DATA:spec.containers[?(@.image!="k8s.gcr.io/coredns:1.6.2")].image' diff --git a/content/ko/docs/reference/scheduling/config.md b/content/ko/docs/reference/scheduling/config.md index 7b6942e119cfb..5da54ed813bba 100644 --- a/content/ko/docs/reference/scheduling/config.md +++ b/content/ko/docs/reference/scheduling/config.md @@ -18,12 +18,10 @@ weight: 20 각 단계는 익스텐션 포인트(extension point)를 통해 노출된다. 
플러그인은 이러한 익스텐션 포인트 중 하나 이상을 구현하여 스케줄링 동작을 제공한다. -컴포넌트 구성 API([`v1alpha1`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha1?tab=doc#KubeSchedulerConfiguration) -또는 [`v1alpha2`](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1alpha2?tab=doc#KubeSchedulerConfiguration))를 -사용하고, `kube-scheduler --config `을 실행하여 +[KubeSchedulerConfiguration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) +구조에 맞게 파일을 작성하고, +`kube-scheduler --config `을 실행하여 스케줄링 프로파일을 지정할 수 있다. -`v1alpha2` API를 사용하면 [여러 프로파일](#여러-프로파일)을 -실행하도록 kube-scheduler를 구성할 수 있다. 최소 구성은 다음과 같다. @@ -149,7 +147,12 @@ profiles: 익스텐션 포인트: `Score`. - `VolumeBinding`: 노드에 요청된 {{< glossary_tooltip text="볼륨" term_id="volume" >}}이 있는지 또는 바인딩할 수 있는지 확인한다. - 익스텐션 포인트: `PreFilter`, `Filter`, `Reserve`, `PreBind`. + 익스텐션 포인트: `PreFilter`, `Filter`, `Reserve`, `PreBind`, `Score`. + {{< note >}} + `Score` 익스텐션 포인트는 `VolumeCapacityPriority` 기능이 + 활성화되어 있어야 활성화되며, + 요청된 볼륨 사이즈를 만족하는 가장 작은 PV들을 우선순위 매긴다. + {{< /note >}} - `VolumeRestrictions`: 노드에 마운트된 볼륨이 볼륨 제공자에 특정한 제한 사항을 충족하는지 확인한다. 익스텐션 포인트: `Filter`. @@ -249,3 +252,4 @@ profiles: * [kube-scheduler 레퍼런스](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-scheduler/) 읽어보기 * [스케줄링](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 알아보기 +* [kube-scheduler configuration (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1/) 레퍼런스 읽어보기 diff --git a/content/ko/docs/reference/scheduling/policies.md b/content/ko/docs/reference/scheduling/policies.md index f2cae65b685e6..626e077784e82 100644 --- a/content/ko/docs/reference/scheduling/policies.md +++ b/content/ko/docs/reference/scheduling/policies.md @@ -8,9 +8,7 @@ weight: 10 스케줄링 정책을 사용하여 {{< glossary_tooltip text="kube-scheduler" term_id="kube-scheduler" >}}가 각각 노드를 필터링하고 스코어링(scoring)하기 위해 실행하는 *단정(predicates)* 및 *우선순위(priorities)* 를 지정할 수 있다. -`kube-scheduler --policy-config-file ` 또는 `kube-scheduler --policy-configmap `을 실행하고 [정책 유형](https://pkg.go.dev/k8s.io/kube-scheduler@v0.18.0/config/v1?tab=doc#Policy)을 사용하여 스케줄링 정책을 설정할 수 있다. - - +`kube-scheduler --policy-config-file ` 또는 `kube-scheduler --policy-configmap `을 실행하고 [정책 유형](/docs/reference/config-api/kube-scheduler-policy-config.v1/)을 사용하여 스케줄링 정책을 설정할 수 있다. @@ -110,9 +108,9 @@ weight: 10 - `EvenPodsSpreadPriority`: 선호된 [파드 토폴로지 분배 제약 조건](/ko/docs/concepts/workloads/pods/pod-topology-spread-constraints/)을 구현한다. - - ## {{% heading "whatsnext" %}} * [스케줄링](/ko/docs/concepts/scheduling-eviction/kube-scheduler/)에 대해 배우기 * [kube-scheduler 프로파일](/docs/reference/scheduling/profiles/)에 대해 배우기 +* [kube-scheduler configuration 레퍼런스 (v1beta1)](/docs/reference/config-api/kube-scheduler-config.v1beta1) 읽어보기 +* [kube-scheduler Policy 레퍼런스 (v1)](/docs/reference/config-api/kube-scheduler-policy-config.v1/) 읽어보기 diff --git a/content/ko/docs/reference/setup-tools/kubeadm/_index.md b/content/ko/docs/reference/setup-tools/kubeadm/_index.md index 9211da1e0fff2..ca7a08f5efefe 100644 --- a/content/ko/docs/reference/setup-tools/kubeadm/_index.md +++ b/content/ko/docs/reference/setup-tools/kubeadm/_index.md @@ -26,5 +26,7 @@ kubeadm을 설치하려면, [설치 가이드](/ko/docs/setup/production-environ * [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/): kubeadm v1.7.x 이하의 버전을 사용하여 클러스터를 초기화한 경우, `kubeadm upgrade` 를 위해 사용자의 클러스터를 구성한다. * [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/): `kubeadm join` 을 위한 토큰을 관리한다. 
* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/): `kubeadm init` 또는 `kubeadm join` 에 의한 호스트의 모든 변경 사항을 되돌린다. +* [kubeadm certs](/docs/reference/setup-tools/kubeadm/kubeadm-certs): 쿠버네티스 인증서를 관리한다. +* [kubeadm kubeconfig](/docs/reference/setup-tools/kubeadm/kubeadm-kubeconfig): kubeconfig 파일을 관리한다. * [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version/): kubeadm 버전을 출력한다. * [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/): 커뮤니티에서 피드백을 수집하기 위해서 기능 미리 보기를 제공한다. diff --git a/content/ko/docs/setup/best-practices/cluster-large.md b/content/ko/docs/setup/best-practices/cluster-large.md index 6f6dbbae0ef69..d67892e6dca11 100644 --- a/content/ko/docs/setup/best-practices/cluster-large.md +++ b/content/ko/docs/setup/best-practices/cluster-large.md @@ -67,9 +67,8 @@ _A_ 영역에 있는 컨트롤 플레인 호스트로만 전달한다. 단일 쿠버네티스 [리소스 제한](/ko/docs/concepts/configuration/manage-resources-containers/)은 파드와 컨테이너가 다른 컴포넌트에 영향을 줄 수 있는 메모리 누수 및 기타 방식의 영향을 -최소화하는 데 도움이 된다. 이러한 리소스 제한은 애플리케이션 워크로드에 적용되는 것과 마찬가지로 -{{< glossary_tooltip text="애드온" term_id="addons" >}}에도 적용될 수 있으며 -적용되어야 한다. +최소화하는 데 도움이 된다. 이러한 리소스 제한은 애플리케이션 워크로드에 적용될 수 있는 것처럼 +{{< glossary_tooltip text="애드온" term_id="addons" >}} 리소스에도 적용될 수 있다. 예를 들어, 로깅 컴포넌트에 대한 CPU 및 메모리 제한을 설정할 수 있다. diff --git a/content/ko/docs/setup/production-environment/container-runtimes.md b/content/ko/docs/setup/production-environment/container-runtimes.md index 52ab0fc94bcfe..d2638f4433624 100644 --- a/content/ko/docs/setup/production-environment/container-runtimes.md +++ b/content/ko/docs/setup/production-environment/container-runtimes.md @@ -48,7 +48,7 @@ Systemd는 cgroup과의 긴밀한 통합을 통해 프로세스당 cgroup을 할 시스템이 안정화된다. 도커에 대해 구성하려면, `native.cgroupdriver=systemd`를 설정한다. {{< caution >}} -클러스터에 결합되어 있는 노드의 cgroup 관리자를 변경하는 것은 강력하게 권장하지 *않는다*. +클러스터에 결합되어 있는 노드의 cgroup 관리자를 변경하는 것은 신중하게 수행해야 한다. 하나의 cgroup 드라이버의 의미를 사용하여 kubelet이 파드를 생성해왔다면, 컨테이너 런타임을 다른 cgroup 드라이버로 변경하는 것은 존재하는 기존 파드에 대해 파드 샌드박스 재생성을 시도할 때, 에러가 발생할 수 있다. kubelet을 재시작하는 것은 에러를 해결할 수 없을 것이다. @@ -57,6 +57,11 @@ kubelet을 재시작하는 것은 에러를 해결할 수 없을 것이다. 교체하거나, 자동화를 사용하여 다시 설치한다. {{< /caution >}} +### kubeadm으로 생성한 클러스터의 드라이버를 `systemd`로 변경하기 + +kubeadm으로 생성한 클러스터의 cgroup 드라이버를 `systemd`로 변경하려면 +[변경 가이드](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)를 참고한다. + ## 컨테이너 런타임 {{% thirdparty-content %}} diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index 12b48ad17e5b1..a7ce213fdadf1 100644 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -70,7 +70,7 @@ sudo sysctl --system | 프로토콜 | 방향 | 포트 범위 | 목적 | 사용자 | |----------|-----------|------------|-------------------------|---------------------------| -| TCP | 인바운드 | 6443* | 쿠버네티스 API 서버 | 모두 | +| TCP | 인바운드 | 6443\* | 쿠버네티스 API 서버 | 모두 | | TCP | 인바운드 | 2379-2380 | etcd 서버 클라이언트 API | kube-apiserver, etcd | | TCP | 인바운드 | 10250 | kubelet API | 자체, 컨트롤 플레인 | | TCP | 인바운드 | 10251 | kube-scheduler | 자체 | @@ -294,33 +294,17 @@ Flatcar Container Linux 배포판은 `/usr` 디렉터리를 읽기 전용 파일 kubelet은 이제 kubeadm이 수행할 작업을 알려 줄 때까지 크래시루프(crashloop) 상태로 기다려야 하므로 몇 초마다 다시 시작된다. -## 컨트롤 플레인 노드에서 kubelet이 사용하는 cgroup 드라이버 구성 +## cgroup 드라이버 구성 -도커를 사용할 때, kubeadm은 kubelet 용 cgroup 드라이버를 자동으로 감지하여 -런타임 중에 `/var/lib/kubelet/config.yaml` 파일에 설정한다. 
- -다른 CRI를 사용하는 경우, 다음과 같이 `cgroupDriver` 값을 `kubeadm init` 에 전달해야 한다. - -```yaml -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -cgroupDriver: -``` - -자세한 내용은 [구성 파일과 함께 kubeadm init 사용](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file)을 참고한다. - -`cgroupfs` 가 이미 kubelet의 기본값이기 때문에, 사용자의 -CRI cgroup 드라이버가 `cgroupfs` 가 아닌 **경우에만** 위와 같이 설정해야 한다. - -{{< note >}} -`--cgroup-driver` 플래그가 kubelet에 의해 사용 중단되었으므로, `/var/lib/kubelet/kubeadm-flags.env` -또는 `/etc/default/kubelet`(RPM에 대해서는 `/etc/sysconfig/kubelet`)에 있는 경우, 그것을 제거하고 대신 KubeletConfiguration을 -사용한다(기본적으로 `/var/lib/kubelet/config.yaml` 에 저장됨). -{{< /note >}} - -CRI-O 및 containerd와 같은 다른 컨테이너 런타임에 대한 cgroup 드라이버의 -자동 감지에 대한 작업이 진행 중이다. +컨테이너 런타임과 kubelet은 +["cgroup 드라이버"](/ko/docs/setup/production-environment/container-runtimes/)라는 속성을 갖고 있으며, +cgroup 드라이버는 리눅스 머신의 cgroup 관리 측면에 있어서 중요하다. +{{< warning >}} +컨테이너 런타임과 kubelet의 cgroup 드라이버를 일치시켜야 하며, 그렇지 않으면 kubelet 프로세스에 오류가 발생한다. + +더 자세한 사항은 [cgroup 드라이버 설정하기](/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)를 참고한다. +{{< /warning >}} ## 문제 해결 @@ -328,5 +312,4 @@ kubeadm에 문제가 있는 경우, [문제 해결 문서](/docs/setup/productio ## {{% heading "whatsnext" %}} - * [kubeadm을 사용하여 클러스터 생성](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) diff --git a/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md b/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md deleted file mode 100644 index cfa18135b1bdb..0000000000000 --- a/content/ko/docs/setup/production-environment/tools/kubeadm/self-hosting.md +++ /dev/null @@ -1,65 +0,0 @@ ---- -reviewers: -title: 컨트롤 플레인을 자체 호스팅하기 위해 쿠버네티스 클러스터 구성하기 -content_type: concept -weight: 100 ---- - - - -### 쿠버네티스 컨트롤 플레인 자체 호스팅하기 {#self-hosting} - -kubeadm은 실험적으로 _자체 호스팅_ 된 쿠버네티스 컨트롤 플레인을 만들 수 있도록 -해준다. API 서버, 컨트롤러 매니저 및 스케줄러와 같은 주요 구성 요소가 정적(static) 파일을 -통해 kubelet에 구성된 [스태틱(static) 파드](/ko/docs/tasks/configure-pod-container/static-pod/) -대신 쿠버네티스 API를 통해 구성된 [데몬셋(DaemonSet) 파드](/ko/docs/concepts/workloads/controllers/daemonset/) -로 실행된다. - -자체 호스팅된 클러스터를 만들려면 [kubeadm alpha selfhosting pivot](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-selfhosting) -명령어를 확인한다. - - - -#### 주의사항 - -{{< caution >}} -이 기능은 클러스터를 지원되지 않는 상태로 전환하여 더 이상 클러스터를 관리할 수 없게 만든다. -이것은 `kubeadm upgrade`를 포함한다. -{{< /caution >}} - -1. 1.8 이후 버전에서 자체 호스팅은 몇 가지 중요한 한계가 있다. - 특히 자체 호스팅된 클러스터는 수동 조정 없이는 - _컨트롤 플레인 노드를 재부팅하고 나서 복구할 수 없다._ - -1. 기본적으로 자체 호스팅된 컨트롤 플레인 파드는 - [`hostPath`](/ko/docs/concepts/storage/volumes/#hostpath) 볼륨에서 불러 온 - 자격 증명에 의존한다. 초기 생성을 제외하고, 이러한 자격 증명은 kubeadm에 의해 - 관리되지 않는다. - -1. 컨트롤 플레인의 자체 호스팅된 부분에는 스태틱 파드로 실행되는 etcd가 - 포함되지 않는다. - -#### 프로세스 - -자체 호스팅 부트스트랩 프로세스는 [kubeadm 설계 -문서](https://github.com/kubernetes/kubeadm/blob/master/docs/design/design_v1.9.md#optional-self-hosting)에 기록되어 있다. - -요약하면 `kubeadm alpha selfhosting`은 다음과 같이 작동한다. - - 1. 부트스트랩 스태틱 컨트롤 플레인이 실행되고 정상 상태가 될 때까지 기다린다. - 이것은 자체 호스팅이 없는 `kubeadm init` 프로세스와 동일하다. - - 1. 스태틱 컨트롤 플레인 파드 매니페스트를 사용하여 자체 호스팅된 컨트롤 - 플레인을 실행할 데몬셋 매니페스트 집합을 구성한다. 또한 필요한 경우 - 해당 매니페스트를 수정한다. 예를 들어, 시크릿을 위한 새로운 볼륨을 - 추가한다. - - 1. `kube-system` 네임스페이스에 데몬셋을 생성하고 결과 파드가 실행될 때까지 - 대기한다. - - 1. 일단 자체 호스팅된 파드가 동작하면 관련 스태틱 파드가 삭제되고 - kubeadm은 계속해서 다음 구성 요소를 설치한다. - 이것은 kubelet이 스태틱 파드를 멈추게 한다. - - 1. 기존의 컨트롤 플레인이 멈추면 새롭게 자체 호스팅된 컨트롤 플레인은 - 리스닝 포트에 바인딩하여 활성화할 수 있다. 
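위의 cgroup 드라이버 구성 설명과 관련하여, kubeadm으로 새 클러스터를 만들 때 kubelet의 cgroup 드라이버를 `systemd` 로 지정하는 경우를 가정한 개략적인 예시이다. `kubernetesVersion` 값은 예시를 위한 가정이며, 정확한 절차와 전체 필드는 위에 링크된 cgroup 드라이버 설정 가이드를 따른다.

```shell
# kubelet cgroup 드라이버를 systemd 로 지정하는 kubeadm 환경설정 파일을 만들어 전달한다.
cat <<EOF > kubeadm-config.yaml
kind: ClusterConfiguration
apiVersion: kubeadm.k8s.io/v1beta2
kubernetesVersion: v1.21.0
---
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
cgroupDriver: systemd
EOF

kubeadm init --config kubeadm-config.yaml
```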
diff --git a/content/ko/docs/setup/production-environment/tools/kubespray.md b/content/ko/docs/setup/production-environment/tools/kubespray.md index cb0caf175ea7e..301c7249270b1 100644 --- a/content/ko/docs/setup/production-environment/tools/kubespray.md +++ b/content/ko/docs/setup/production-environment/tools/kubespray.md @@ -68,7 +68,7 @@ Kubespray에서는 디플로이먼트의 많은 속성들을 사용자가 정의 * {{< glossary_tooltip term_id="cri-o" >}} * 인증서 생성 방법 -Kubespray의 [변수 파일들](https://docs.ansible.com/ansible/playbooks_variables.html)을 사용자가 정의할 수 있다. 만약 Kubespray를 막 시작한 경우, kubespray의 기본 설정값을 이용해 클러스터를 배포하고 Kubernetes를 탐색하는 것이 좋다. +Kubespray의 [변수 파일들](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html)을 사용자가 정의할 수 있다. 만약 Kubespray를 처음 접하는 경우, kubespray의 기본 설정값을 이용해 클러스터를 배포하고 Kubernetes를 탐색하는 것이 좋다. ### (4/5) 클러스터 배포하기 diff --git a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md index b488e6c04d724..cbe02c49e29a3 100644 --- a/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md +++ b/content/ko/docs/setup/production-environment/windows/intro-windows-in-kubernetes.md @@ -33,7 +33,7 @@ weight: 65 쿠버네티스의 윈도우 운영 체제 지원은 다음 표를 참조한다. 단일 이기종 쿠버네티스 클러스터에는 윈도우 및 리눅스 워커 노드가 모두 있을 수 있다. 윈도우 컨테이너는 윈도우 노드에서, 리눅스 컨테이너는 리눅스 노드에서 스케줄되어야 한다. | 쿠버네티스 버전 | 윈도우 서버 LTSC 릴리스 | 윈도우 서버 SAC 릴리스 | -| --- | --- | --- | --- | +| --- | --- | --- | | *Kubernetes v1.17* | Windows Server 2019 | Windows Server ver 1809 | | *Kubernetes v1.18* | Windows Server 2019 | Windows Server ver 1809, Windows Server ver 1903, Windows Server ver 1909 | | *Kubernetes v1.19* | Windows Server 2019 | Windows Server ver 1909, Windows Server ver 2004 | @@ -230,23 +230,32 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 ### 제한 -#### 컨트롤 플레인 - 윈도우는 쿠버네티스 아키텍처 및 컴포넌트 매트릭스에서 워커 노드로만 지원된다. 즉, 쿠버네티스 클러스터에는 항상 리눅스 마스터 노드가 반드시 포함되어야 하고, 0개 이상의 리눅스 워커 노드 및 0개 이상의 윈도우 워커 노드가 포함된다. -#### 컴퓨트 {#컴퓨트-제한} - -##### 리소스 관리 및 프로세스 격리 +#### 자원 관리 리눅스 cgroup은 리눅스에서 리소스 제어를 위한 파드 경계로 사용된다. 컨테이너는 네트워크, 프로세스 및 파일시스템 격리를 위해 해당 경계 내에 생성된다. cgroups API는 cpu/io/memory 통계를 수집하는 데 사용할 수 있다. 반대로 윈도우는 시스템 네임스페이스 필터가 있는 컨테이너별로 잡(Job) 오브젝트를 사용하여 컨테이너의 모든 프로세스를 포함하고 호스트와의 논리적 격리를 제공한다. 네임스페이스 필터링 없이 윈도우 컨테이너를 실행할 수 있는 방법은 없다. 즉, 시스템 권한은 호스트 컨텍스트에서 삽입 될(assert) 수 없으므로 권한이 있는(privileged) 컨테이너는 윈도우에서 사용할 수 없다. 보안 계정 매니져(Security Account Manager, SAM)가 분리되어 있으므로 컨테이너는 호스트의 ID를 가정할 수 없다. -##### 운영 체제 제한 +#### 자원 예약 -윈도우에는 호스트 OS 버전이 컨테이너 베이스 이미지 OS 버전과 일치해야 하는 엄격한 호환성 규칙이 있다. 윈도우 서버 2019의 컨테이너 운영 체제가 있는 윈도우 컨테이너만 지원된다. 윈도우 컨테이너 이미지 버전의 일부 이전 버전과의 호환성을 가능하게 하는 컨테이너의 Hyper-V 격리는 향후 릴리스로 계획되어 있다. +##### 메모리 예약 +윈도우에는 리눅스에는 있는 메모리 부족 프로세스 킬러가 없다. 윈도우는 모든 사용자-모드 메모리 할당을 항상 가상 메모리처럼 처리하며, 페이지파일이 필수이다. 결과적으로 윈도우에서는 리눅스에서 발생할 수 있는 메모리 부족 상태에 도달하지 않으며, 프로세스는 메모리 부족 (out of memory, OOM) 종료를 겪는 대신 디스크로 페이징한다. 메모리가 오버프로비저닝되고 모든 물리 메모리가 고갈되면 페이징으로 인해 성능이 저하될 수 있다. + +kubelet 파라미터 `--kubelet-reserve` 를 사용하여 메모리 사용량을 합리적인 범위 내로 유지할 수 있으며, `--system-reserve` 를 사용하여 노드 (컨테이너 외부) 의 메모리 사용량을 예약할 수 있다. 이들을 사용하면 그만큼 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)은 줄어든다. + +{{< note >}} +워크로드를 배포할 때, 컨테이너에 리소스 제한을 걸어라 (제한만 설정하거나, 제한이 요청과 같아야 함). 이 또한 NodeAllocatable 에서 차감되며, 메모리가 꽉 찬 노드에 스케줄러가 파드를 할당하지 않도록 제한한다. +{{< /note >}} + +오버프로비저닝을 방지하는 가장 좋은 방법은 윈도우, 도커, 그리고 쿠버네티스 프로세스를 위해 최소 2GB 이상의 시스템 예약 메모리로 kubelet을 설정하는 것이다. 
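위에서 설명한 자원 예약을 윈도우 노드의 kubelet에 적용하는 경우를 가정한 개략적인 예시이다. kubelet의 예약 플래그인 `--kube-reserved` 와 `--system-reserved` 를 사용하며, 아래 값(2Gi, 500m 등)은 노드 사양과 파드 밀도에 맞게 조정해야 하는 가정값이다.

```shell
# 쿠버네티스 프로세스와 노드 시스템 프로세스를 위해 CPU/메모리를 예약한다.
# 예약된 만큼 노드 할당(NodeAllocatable)이 줄어든다. (다른 필수 플래그는 생략)
kubelet --kube-reserved=cpu=500m,memory=2Gi --system-reserved=cpu=500m,memory=1Gi
```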
-##### 기능 제한 +##### CPU 예약 +윈도우, 도커, 그리고 다른 쿠버네티스 호스트 프로세스가 이벤트에 잘 응답할 수 있도록, CPU의 일정 비율을 예약하는 것이 좋다. 이 값은 윈도우 노드에 있는 CPU 코어 수에 따라 조정해야 한다. 이 비율을 결정하려면, 각 노드의 최대 파드 밀도(density)를 관찰하고, 시스템 서비스의 CPU 사용량을 모니터링하여 워크로드 요구사항을 충족하는 값을 선택해야 한다. -* TerminationGracePeriod: CRI-containerD 필요 +kubelet 파라미터 `--kubelet-reserve` 를 사용하여 CPU 사용량을 합리적인 범위 내로 유지할 수 있으며, `--system-reserve` 를 사용하여 노드 (컨테이너 외부) 의 CPU 사용량을 예약할 수 있다. 이들을 사용하면 그만큼 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)은 줄어든다. + +#### 기능 제한 +* TerminationGracePeriod: 구현되지 않음 * 단일 파일 매핑: CRI-ContainerD로 구현 예정 * 종료 메시지: CRI-ContainerD로 구현 예정 * 특권을 가진(Privileged) 컨테이너: 현재 윈도우 컨테이너에서 지원되지 않음 @@ -254,15 +263,8 @@ CSI 노드 플러그인(특히 블록 디바이스 또는 공유 파일시스템 * 기존 노드 문제 감지기는 리눅스 전용이며 특권을 가진 컨테이너가 필요하다. 윈도우에서 특권을 가진 컨테이너를 지원하지 않기 때문에 일반적으로 윈도우에서 이 기능이 사용될 것으로 예상하지 않는다. * 공유 네임스페이스의 모든 기능이 지원되는 것은 아니다. (자세한 내용은 API 섹션 참조). -##### 메모리 예약 및 처리 - -윈도우에는 리눅스처럼 out-of-memory 프로세스 킬러가 없다. 윈도우는 항상 모든 사용자 모드 메모리 할당을 가상으로 처리하며 페이지 파일은 필수이다. 결과적으로 윈도우는 리눅스와 같은 방식으로 메모리 부족 상태에 도달하지 않고, 메모리 부족(OOM)으로 인한 종료 대신 페이지를 디스크로 처리한다. 메모리가 과도하게 프로비저닝되고 모든 실제 메모리가 고갈되면, 페이징으로 인해 성능이 저하될 수 있다. - -2단계 프로세스를 통해 적절한 범위 내에서 메모리 사용량을 유지할 수 있다. 먼저, kubelet 파라미터 `--kubelet-reserve` 그리고/또는 `--system-reserve`를 사용하여 노드(컨테이너 외부)의 메모리 사용량을 고려한다. 이렇게 하면 [노드 할당(NodeAllocatable)](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)이 줄어든다. 워크로드를 배포할 때 컨테이너에 리소스 제한을 사용(limits만 설정하거나 limits이 requests과 같아야 함)한다. 또한 NodeAllocatable에서 빼고 노드가 가득차면 스케줄러가 더 많은 파드를 추가하지 못하도록 한다. - -오버 프로비저닝을 방지하는 모범 사례는 윈도우, 도커 및 쿠버네티스 프로세스를 고려하여 최소 2GB의 시스템 예약 메모리로 kubelet을 구성하는 것이다. - -플래그의 동작은 아래에 설명된 대로 다르게 동작한다. +#### 각 플래그의 리눅스와의 차이점 +윈도우 노드에서의 kubelet 플래그의 동작은 아래에 설명된 대로 다르게 동작한다. * `--kubelet-reserve`, `--system-reserve`, `--eviction-hard` 플래그는 Node Allocatable 업데이트 * `--enforce-node-allocable`을 사용한 축출(Eviction)은 구현되지 않았다. @@ -362,7 +364,7 @@ SELinux, AppArmor, Seccomp, 기능(POSIX 기능)과 같은 리눅스 특유의 * ID - 리눅스는 정수형으로 표시되는 userID(UID) 및 groupID(GID)를 사용한다. 사용자와 그룹 이름은 정식 이름이 아니다. UID+GID에 대한 `/etc/groups` 또는 `/etc/passwd`의 별칭일 뿐이다. 윈도우는 윈도우 보안 계정 관리자(Security Account Manager, SAM) 데이터베이스에 저장된 더 큰 이진 보안 식별자(SID)를 사용한다. 이 데이터베이스는 호스트와 컨테이너 간에 또는 컨테이너들 간에 공유되지 않는다. * 파일 퍼미션 - 윈도우는 권한 및 UUID+GID의 비트 마스크(bitmask) 대신 SID를 기반으로 하는 접근 제어 목록을 사용한다. -* 파일 경로 - 윈도우의 규칙은 `/` 대신 `\`를 사용하는 것이다. Go IO 라이브러리는 일반적으로 두 가지를 모두 허용하고 작동하도록 하지만, 컨테이너 내부에서 해석되는 경로 또는 커맨드 라인을 설정할 때 `\`가 필요할 수 있다. +* 파일 경로 - 윈도우의 규칙은 `/` 대신 `\`를 사용하는 것이다. Go IO 라이브러리는 두 가지 파일 경로 분리자를 모두 허용한다. 하지만, 컨테이너 내부에서 해석되는 경로 또는 커맨드 라인을 설정할 때 `\`가 필요할 수 있다. * 신호(Signals) - 윈도우 대화형(interactive) 앱은 종료를 다르게 처리하며, 다음 중 하나 이상을 구현할 수 있다. * UI 스레드는 WM_CLOSE를 포함하여 잘 정의된(well-defined) 메시지를 처리한다. * 콘솔 앱은 컨트롤 핸들러(Control Handler)를 사용하여 ctrl-c 또는 ctrl-break를 처리한다. @@ -410,6 +412,10 @@ PodSecurityContext 필드는 윈도우에서 작동하지 않는다. 참조를 * V1.PodSecurityContext.SupplementalGroups - 윈도우에서는 사용할 수 없는 GID를 제공한다. * V1.PodSecurityContext.Sysctls - 이것들은 리눅스 sysctl 인터페이스의 일부이다. 윈도우에는 이에 상응하는 것이 없다. +#### 운영 체제 버전 제한 + +윈도우에는 호스트 OS 버전이 컨테이너 베이스 이미지 OS 버전과 일치해야 하는 엄격한 호환성 규칙이 있다. 윈도우 서버 2019의 컨테이너 운영 체제가 있는 윈도우 컨테이너만 지원된다. 윈도우 컨테이너 이미지 버전의 일부 이전 버전과의 호환성을 가능하게 하는 컨테이너의 Hyper-V 격리는 향후 릴리스로 계획되어 있다. + ## 도움 받기 및 트러블슈팅 {#troubleshooting} 쿠버네티스 클러스터 트러블슈팅을 위한 기본 도움말은 이 [섹션](/docs/tasks/debug-application-cluster/troubleshooting/)에서 먼저 찾아야 한다. 이 섹션에는 몇 가지 추가 윈도우 관련 트러블슈팅 도움말이 포함되어 있다. 로그는 쿠버네티스에서 트러블슈팅하는데 중요한 요소이다. 다른 기여자로부터 트러블슈팅 지원을 구할 때마다 이를 포함해야 한다. 
SIG-Windows [로그 수집에 대한 기여 가이드](https://github.com/kubernetes/community/blob/master/sig-windows/CONTRIBUTING.md#gathering-logs)의 지침을 따른다. diff --git a/content/ko/docs/setup/release/notes.md b/content/ko/docs/setup/release/notes.md index 8cb17b2ec00d7..05de7fb44086a 100644 --- a/content/ko/docs/setup/release/notes.md +++ b/content/ko/docs/setup/release/notes.md @@ -1,5 +1,5 @@ --- -title: v1.20 릴리스 노트 +title: v1.21 릴리스 노트 weight: 10 card: name: release-notes @@ -13,953 +13,760 @@ card: -# v1.20.0 +# v1.21.0 [문서](https://docs.k8s.io) -## v1.20.0 다운로드 +## v1.21.0 다운로드 파일명 | sha512 해시 -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes.tar.gz) | `ebfe49552bbda02807034488967b3b62bf9e3e507d56245e298c4c19090387136572c1fca789e772a5e8a19535531d01dcedb61980e42ca7b0461d3864df2c14` -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-src.tar.gz) | `bcbd67ed0bb77840828c08c6118ad0c9bf2bcda16763afaafd8731fd6ce735be654feef61e554bcc34c77c65b02a25dae565adc5e1dc49a2daaa0d115bf1efe6` +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes.tar.gz) | `19bb76a3fa5ce4b9f043b2a3a77c32365ab1fcb902d8dd6678427fb8be8f49f64a5a03dc46aaef9c7dadee05501cf83412eda46f0edacbb8fc1ed0bf5fb79142` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-src.tar.gz) | `f942e6d6c10007a6e9ce21e94df597015ae646a7bc3e515caf1a3b79f1354efb9aff59c40f2553a8e3d43fe4a01742241f5af18b69666244906ed11a22e3bc49` ### 클라이언트 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-darwin-amd64.tar.gz) | `3609f6483f4244676162232b3294d7a2dc40ae5bdd86a842a05aa768f5223b8f50e1d6420fd8afb2d0ce19de06e1d38e5e5b10154ba0cb71a74233e6dc94d5a0` -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-386.tar.gz) | `e06c08016a08137d39804383fdc33a40bb2567aa77d88a5c3fd5b9d93f5b581c635b2c4faaa718ed3bb2d120cb14fe91649ed4469ba72c3a3dda1e343db545ed` -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-amd64.tar.gz) | `081472833601aa4fa78e79239f67833aa4efcb4efe714426cd01d4ddf6f36fbf304ef7e1f5373bff0fdff44a845f7560165c093c108bd359b5ab4189f36b1f2f` -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm.tar.gz) | `037f84a2f29fe62d266cab38ac5600d058cce12cbc4851bcf062fafba796c1fbe23a0c2939cd15784854ca7cd92383e5b96a11474fc71fb614b47dbf98a477d9` -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-arm64.tar.gz) | `275727e1796791ca3cbe52aaa713a2660404eab6209466fdc1cfa8559c9b361fe55c64c6bcecbdeba536b6d56213ddf726e58adc60f959b6f77e4017834c5622` -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-ppc64le.tar.gz) | `7a9965293029e9fcdb2b7387467f022d2026953b8461e6c84182abf35c28b7822d2389a6d8e4d8e532d2ea5d5d67c6fee5fb6c351363cb44c599dc8800649b04` -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-linux-s390x.tar.gz) | `85fc449ce1980f5f030cc32e8c8e2198c1cc91a448e04b15d27debc3ca56aa85d283f44b4f4e5fed26ac96904cc12808fa3e9af3d8bf823fc928befb9950d6f5` -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-386.tar.gz) | `4c0a27dba1077aaee943e0eb7a787239dd697e1d968e78d1933c1e60b02d5d233d58541d5beec59807a4ffe3351d5152359e11da120bf64cacb3ee29fbc242e6` -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-client-windows-amd64.tar.gz) | 
`29336faf7c596539b8329afbbdceeddc843162501de4afee44a40616278fa1f284d8fc48c241fc7d52c65dab70f76280cc33cec419c8c5dbc2625d9175534af8` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-amd64.tar.gz) | `be9d1440e418e5253fb8a3d8aba705ca8160746a9bd17325ad626a986b6da9f733af864155a651a32b7bca94b533b8d596005ddbe5248bdeea85db47a1b957ed` +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-darwin-arm64.tar.gz) | `eed0ddc81d104bb2d41ace13f737c490423d5df4ebddc7376e45c18ed66af35933c9376b912c1c3da105945b04056f6ca0870c156bee8a307cf4189ca5eb1dd1` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-386.tar.gz) | `8a2f30c4434199762f2a96141dab4241c1cce2711bea9ea39cc63c2c5e7d31719ed7f076efac1931604e3a94578d3bbf0cfa454965708c96f3cfb91789868746` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-amd64.tar.gz) | `cd3cfa645fa31de3716f1f63506e31b73d2aa8d37bb558bb3b3e8c151f35b3d74d44e03cbd05be67e380f9a5d015aba460222afdac6677815cd99a85c2325cf0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm.tar.gz) | `936042aa11cea0f6dfd2c30fc5dbe655420b34799bede036b1299a92d6831f589ca10290b73b9c9741560b603ae31e450ad024e273f2b4df5354bfac272691d8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-arm64.tar.gz) | `42beb75364d7bf4bf526804b8a35bd0ab3e124b712e9d1f45c1b914e6be0166619b30695feb24b3eecef134991dacb9ab3597e788bd9e45cf35addddf20dd7f6` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-ppc64le.tar.gz) | `4baba2ed7046b28370eccc22e2378ae79e3ce58220d6f4f1b6791e8233bec8379e30200bb20b971456b83f2b791ea166fdfcf1ea56908bc1eea03590c0eda468` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-linux-s390x.tar.gz) | `37fa0c4d703aef09ce68c10ef3e7362b0313c8f251ce38eea579cd18fae4023d3d2b70e0f31577cabe6958ab9cfc30e98d25a7c64e69048b423057c3cf728339` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-386.tar.gz) | `6900db36c1e3340edfd6dfd8d720575a904c932d39a8a7fa36401595e971a0235bd42111dbcc1cbb77e7374e47f1380a68c637997c18f96a0d9cdc9f3714c4c9` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-client-windows-amd64.tar.gz) | `90de67f6f79fc63bcfdf35066e3d84501cc85433265ffad36fd1a7a428a31b446249f0644a1e97495ea8b2a08e6944df6ef30363003750339edaa2aceffe937c` ### 서버 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-amd64.tar.gz) | `fb56486a55dbf7dbacb53b1aaa690bae18d33d244c72a1e2dc95fb0fcce45108c44ba79f8fa04f12383801c46813dc33d2d0eb2203035cdce1078871595e446e` -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm.tar.gz) | `735ed9993071fe35b292bf06930ee3c0f889e3c7edb983195b1c8e4d7113047c12c0f8281fe71879fc2fcd871e1ee587f03b695a03c8512c873abad444997a19` -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-arm64.tar.gz) | `ffab155531d5a9b82487ee1abf4f6ef49626ea58b2de340656a762e46cf3e0f470bdbe7821210901fe1114224957c44c1d9cc1e32efb5ee24e51fe63990785b2` -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-ppc64le.tar.gz) | `9d5730d35c4ddfb4c5483173629fe55df35d1e535d96f02459468220ac2c97dc01b995f577432a6e4d1548b6edbfdc90828dc9c1f7cf7464481af6ae10aaf118` 
-[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-server-linux-s390x.tar.gz) | `6e4c165306940e8b99dd6e590f8542e31aed23d2c7a6808af0357fa425cec1a57016dd66169cf2a95f8eb8ef70e1f29e2d500533aae889e2e3d9290d04ab8721` +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-amd64.tar.gz) | `3941dcc2309ac19ec185603a79f5a086d8a198f98c04efa23f15a177e5e1f34946ea9392ba9f5d24d0d727839438f067fef1001fc6e88b27b8b01e35bbd962ca` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm.tar.gz) | `6507abf6c2ec2b336901dc23269f6c577ec0049b8bad3c9dd6ad63f21aa10f09bfbbfa6e064c2466d250411d3e10f8672791a9e10942e38de7bfbaf7a8bcc9da` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-arm64.tar.gz) | `5abe76f867ca6865344e957bf166b81766c049ec4eb183a8a5580c22a7f8474db1edf90fd901a5833e56128b6825811653a1d27f72fd34ce5b1287a8c10da05c` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-ppc64le.tar.gz) | `62507b182ca25396a285d91241536860e58f54fac937e97cbdf91948c83bb41be97d33277400489bf50e85164d560205540b76e94e5d519892312bdc63df1067` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-server-linux-s390x.tar.gz) | `04f2a1f7d1388e4a7d7d9f597f872a3da36f26839cfed16aad6df07021c03f4dca1df06b19cfda56df09d1c2d9a13ebd0af40ca1b9b6aecfaf427ab7712d88f3` ### 노드 바이너리 파일명 | sha512 해시 -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-amd64.tar.gz) | `3e6c90561dd1c27fa1dff6953c503251c36001f7e0f8eff3ec918c74ae2d9aa25917d8ac87d5b4224b8229f620b1830442e6dce3b2a497043f8497eee3705696` -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm.tar.gz) | `26db385d9ae9a97a1051a638e7e3de22c4bbff389d5a419fe40d5893f9e4fa85c8b60a2bd1d370fd381b60c3ca33c5d72d4767c90898caa9dbd4df6bd116a247` -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-arm64.tar.gz) | `5b8b63f617e248432b7eb913285a8ef8ba028255216332c05db949666c3f9e9cb9f4c393bbd68d00369bda77abf9bfa2da254a5c9fe0d79ffdad855a77a9d8ed` -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-ppc64le.tar.gz) | `60da7715996b4865e390640525d6e98593ba3cd45c6caeea763aa5355a7f989926da54f58cc5f657f614c8134f97cd3894b899f8b467d100dca48bc22dd4ff63` -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-linux-s390x.tar.gz) | `9407dc55412bd04633f84fcefe3a1074f3eaa772a7cb9302242b8768d6189b75d37677a959f91130e8ad9dc590f9ba8408ba6700a0ceff6827315226dd5ee1e6` -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0/kubernetes-node-windows-amd64.tar.gz) | `9d4261af343cc330e6359582f80dbd6efb57d41f882747a94bbf47b4f93292d43dd19a86214d4944d268941622dfbc96847585e6fec15fddc4dbd93d17015fa8` +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-amd64.tar.gz) | `c1831c708109c31b3878e5a9327ea4b9e546504d0b6b00f3d43db78b5dd7d5114d32ac24a9a505f9cadbe61521f0419933348d2cd309ed8cfe3987d9ca8a7e2c` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm.tar.gz) | `b68dd5bcfc7f9ce2781952df40c8c3a64c29701beff6ac22f042d6f31d4de220e9200b7e8272ddf608114327770acdaf3cb9a34a0a5206e784bda717ea080e0f` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-arm64.tar.gz) | 
`7fa84fc500c28774ed25ca34b6f7b208a2bea29d6e8379f84b9f57bd024aa8fe574418cee7ee26edd55310716d43d65ae7b9cbe11e40c995fe2eac7f66bdb423` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-ppc64le.tar.gz) | `a4278b3f8e458e9581e01f0c5ba8443303c987988ee136075a8f2f25515d70ca549fbd2e4d10eefca816c75c381d62d71494bd70c47034ab47f8315bbef4ae37` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-linux-s390x.tar.gz) | `8de2bc6f22f232ff534b45012986eac23893581ccb6c45bd637e40dbe808ce31d5a92375c00dc578bdbadec342b6e5b70c1b9f3d3a7bb26ccfde97d71f9bf84a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0/kubernetes-node-windows-amd64.tar.gz) | `b82e94663d330cff7a117f99a7544f27d0bc92b36b5a283b3c23725d5b33e6f15e0ebf784627638f22f2d58c58c0c2b618ddfd226a64ae779693a0861475d355` -## v1.19.0 이후 변경로그(Changelog) +## v1.20.0 이후 변경로그 (Changelog) -## 새로운 소식(주요 테마) +# v1.21.0-rc.0 릴리스 노트 -### Dockershim 사용 중단(deprecation) +[문서](https://docs.k8s.io/docs/home) -Docker as an underlying runtime is being deprecated. Docker-produced images will continue to work in your cluster with all runtimes, as they always have. -The Kubernetes community [has written a blog post about this in detail](https://blog.k8s.io/2020/12/02/dont-panic-kubernetes-and-docker/) with [a dedicated FAQ page for it](https://blog.k8s.io/2020/12/02/dockershim-faq/). +# v1.20.0 이후 변경로그 (Changelog) -### client-go를 위한 외부 자격증명(credential) 제공자 +## 새로운 소식 (주요 테마) -The client-go credential plugins can now be passed in the current cluster information via the `KUBERNETES_EXEC_INFO` environment variable. Learn more about this on [client-go credential plugins documentation](https://docs.k8s.io/reference/access-authn-authz/authentication/#client-go-credential-plugins/). +### Deprecation of PodSecurityPolicy -### 기능 게이트(feature gate)를 통해 크론잡(CronJob) 컨트롤러 v2 활성화 가능 +PSP as an admission controller resource is being deprecated. Deployed PodSecurityPolicy's will keep working until version 1.25, their target removal from the codebase. A new feature, with a working title of "PSP replacement policy", is being developed in [KEP-2579](https://features.k8s.io/2579). To learn more, read [PodSecurityPolicy Deprecation: Past, Present, and Future](https://blog.k8s.io/2021/04/06/podsecuritypolicy-deprecation-past-present-and-future/). -An alternative implementation of `CronJob` controller is now available as an alpha feature in this release, which has experimental performance improvement by using informers instead of polling. While this will be the default behavior in the future, you can [try them in this release through a feature gate](https://docs.k8s.io/concepts/workloads/controllers/cron-jobs/). +### Kubernetes API Reference Documentation -### PID 제한(PID Limits)이 안정 기능(General Availability)으로 전환 +The API reference is now generated with [`gen-resourcesdocs`](https://github.com/kubernetes-sigs/reference-docs/tree/c96658d89fb21037b7d00d27e6dbbe6b32375837/gen-resourcesdocs) and it is moving to [Kubernetes API](https://docs.k8s.io/reference/kubernetes-api/) -PID Limits features are now generally available on both `SupportNodePidsLimit` (node-to-pod PID isolation) and `SupportPodPidsLimit` (ability to limit PIDs per pod), after being enabled-by-default in beta stage for a year. 
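As a rough illustration of the PID limits described above (not taken from the release notes themselves), a kubelet configuration that caps PIDs per pod and reserves PIDs for node-level daemons might look like the sketch below; the field names are the standard KubeletConfiguration fields, and the numbers are placeholders only:

```yaml
# Illustrative KubeletConfiguration sketch; the values are placeholders.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
# Per-pod PID limit (SupportPodPidsLimit): no single pod may use more PIDs than this.
podPidsLimit: 1024
# Node-level PID reservation (SupportNodePidsLimit): keep PIDs back for system
# daemons so pods cannot exhaust the node's PID table.
systemReserved:
  pid: "1000"
```

The equivalent command-line form uses the `--pod-max-pids` and `--system-reserved` (with a `pid=` entry) kubelet flags.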
+### Kustomize Updates in Kubectl -### API 우선순위 및 공정성(API Priority and Fairness)이 베타 단계로 전환 +[Kustomize](https://github.com/kubernetes-sigs/kustomize) version in kubectl had a jump from v2.0.3 to [v4.0.5](https://github.com/kubernetes/kubernetes/pull/98946). Kustomize is now treated as a library and future updates will be less sporadic. -Initially introduced in 1.18, Kubernetes 1.20 now enables API Priority and Fairness (APF) by default. This allows `kube-apiserver` to [categorize incoming requests by priority levels](https://docs.k8s.io/concepts/cluster-administration/flow-control/). +### Default Container Labels -### IPv4/IPv6이 작동 +Pods with multiple containers can use the `kubectl.kubernetes.io/default-container` label to have a container preselected for kubectl commands. More can be read in [KEP-2227](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cli/2227-kubectl-default-container/README.md). -IPv4/IPv6 dual-stack has been reimplemented for 1.20 to support dual-stack Services, based on user and community feedback. If your cluster has dual-stack enabled, you can create Services which can use IPv4, IPv6, or both, and you can change this setting for existing Services. Details are available in updated [IPv4/IPv6 dual-stack docs](https://docs.k8s.io/concepts/services-networking/dual-stack/), which cover the nuanced array of options. +### Immutable Secrets and ConfigMaps -We expect this implementation to progress from alpha to beta and GA in coming releases, so we’re eager to have you comment about your dual-stack experiences in [#k8s-dual-stack](https://kubernetes.slack.com/messages/k8s-dual-stack) or in [enhancements #563](https://features.k8s.io/563). +Immutable Secrets and ConfigMaps graduate to GA. This feature allows users to specify that the contents of a particular Secret or ConfigMap are immutable for its object lifetime. For such instances, Kubelet will not watch/poll for changes, thereby reducing apiserver load. -### go1.15.5 +### Structured Logging in Kubelet -go1.15.5 has been integrated to Kubernetes project as of this release, [including other infrastructure related updates on this effort](https://github.com/kubernetes/kubernetes/pull/95776). +Kubelet has adopted structured logging, thanks to community effort in accomplishing this within the release timeline. Structured logging in the project remains an ongoing effort -- for folks interested in participating, [keep an eye / chime in to the mailing list discussion](https://groups.google.com/g/kubernetes-dev/c/y4WIw-ntUR8). -### CSI 볼륨 스냅샷(CSI Volume Snapshot)이 안정 기능으로 전환 +### Storage Capacity Tracking -CSI Volume Snapshot moves to GA in the 1.20 release. This feature provides a standard way to trigger volume snapshot operations in Kubernetes and allows Kubernetes users to incorporate snapshot operations in a portable manner on any Kubernetes environment regardless of supporting underlying storage providers. -Additionally, these Kubernetes snapshot primitives act as basic building blocks that unlock the ability to develop advanced, enterprise grade, storage administration features for Kubernetes: including application or cluster level backup solutions. -Note that snapshot support will require Kubernetes distributors to bundle the Snapshot controller, Snapshot CRDs, and validation webhook. In addition, a CSI driver supporting the snapshot functionality must also be deployed on the cluster.
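To make the snapshot primitives mentioned above concrete, a minimal `VolumeSnapshot` manifest referencing an existing PersistentVolumeClaim could look like the following sketch; the class and claim names are placeholders and assume a CSI driver with snapshot support (plus the external snapshot controller and CRDs) is installed in the cluster:

```yaml
# Illustrative VolumeSnapshot sketch; names are placeholders.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: data-snapshot                     # hypothetical snapshot name
spec:
  volumeSnapshotClassName: csi-snapclass  # assumes this VolumeSnapshotClass exists
  source:
    persistentVolumeClaimName: data-pvc   # assumes this PVC exists in the same namespace
```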
+Traditionally, the Kubernetes scheduler was based on the assumptions that additional persistent storage is available everywhere in the cluster and has infinite capacity. Topology constraints addressed the first point, but up to now pod scheduling was still done without considering that the remaining storage capacity may not be enough to start a new pod. [Storage capacity tracking](https://docs.k8s.io/concepts/storage/storage-capacity/) addresses that by adding an API for a CSI driver to report storage capacity and uses that information in the Kubernetes scheduler when choosing a node for a pod. This feature serves as a stepping stone for supporting dynamic provisioning for local volumes and other volume types that are more capacity constrained. -### 비재귀적 볼륨 소유(Non-recursive Volume Ownership (FSGroup))가 베타 단계로 전환 +### Generic Ephemeral Volumes -By default, the `fsgroup` setting, if specified, recursively updates permissions for every file in a volume on every mount. This can make mount, and pod startup, very slow if the volume has many files. -This setting enables a pod to specify a `PodFSGroupChangePolicy` that indicates that volume ownership and permissions will be changed only when permission and ownership of the root directory does not match with expected permissions on the volume. +[Generic ephemeral volumes](https://docs.k8s.io/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes) feature allows any existing storage driver that supports dynamic provisioning to be used as an ephemeral volume with the volume’s lifecycle bound to the Pod. It can be used to provide scratch storage that is different from the root disk, for example persistent memory, or a separate local disk on that node. All StorageClass parameters for volume provisioning are supported. All features supported with PersistentVolumeClaims are supported, such as storage capacity tracking, snapshots and restore, and volume resizing. -### FSGroup를 위한 CSIDriver 정책이 베타 단계로 전환 +### CSI Service Account Token -The FSGroup's CSIDriver Policy is now beta in 1.20. This allows CSIDrivers to explicitly indicate if they want Kubernetes to manage permissions and ownership for their volumes via `fsgroup`. +CSI Service Account Token feature moves to Beta in 1.21. This feature improves the security posture and allows CSI drivers to receive pods' [bound service account tokens](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/1205-bound-service-account-tokens/README.md). This feature also provides a knob to re-publish volumes so that short-lived volumes can be refreshed. -### CSI 드라이버의 보안성 향상(알파) +### CSI Health Monitoring -In 1.20, we introduce a new alpha feature `CSIServiceAccountToken`. This feature allows CSI drivers to impersonate the pods that they mount the volumes for. This improves the security posture in the mounting process where the volumes are ACL’ed on the pods’ service account without handing out unnecessary permissions to the CSI drivers’ service account. This feature is especially important for secret-handling CSI drivers, such as the secrets-store-csi-driver. Since these tokens can be rotated and short-lived, this feature also provides a knob for CSI drivers to receive `NodePublishVolume` RPC calls periodically with the new token. This knob is also useful when volumes are short-lived, e.g. certificates. - -### 그레이스풀 노드 종료(Graceful Node Shutdown) 기능 소개(알파) - -The `GracefulNodeShutdown` feature is now in Alpha.
This allows kubelet to be aware of node system shutdowns, enabling graceful termination of pods during a system shutdown. This feature can be [enabled through feature gate](https://docs.k8s.io/concepts/architecture/nodes/#graceful-node-shutdown). - -### 런타임 로그 관리(sanitation) - -Logs can now be configured to use runtime protection from leaking sensitive data. [Details for this experimental feature is available in documentation](https://docs.k8s.io/concepts/cluster-administration/system-logs/#log-sanitization). - -### 파드 리소스 메트릭 - -On-demand metrics calculation is now available through `/metrics/resources`. [When enabled]( -https://docs.k8s.io/concepts/cluster-administration/system-metrics#kube-scheduler-metrics), the endpoint will report the requested resources and the desired limits of all running pods. - -### `RootCAConfigMap` 소개 - -`RootCAConfigMap` graduates to Beta, seperating from `BoundServiceAccountTokenVolume`. The `kube-root-ca.crt` ConfigMap is now available to every namespace, by default. It contains the Certificate Authority bundle for verify kube-apiserver connections. - -### `kubectl debug` 이 베타 단계로 전환 - -`kubectl alpha debug` graduates from alpha to beta in 1.20, becoming `kubectl debug`. -`kubectl debug` provides support for common debugging workflows directly from kubectl. Troubleshooting scenarios supported in this release of `kubectl` include: -Troubleshoot workloads that crash on startup by creating a copy of the pod that uses a different container image or command. -Troubleshoot distroless containers by adding a new container with debugging tools, either in a new copy of the pod or using an ephemeral container. (Ephemeral containers are an alpha feature that are not enabled by default.) -Troubleshoot on a node by creating a container running in the host namespaces and with access to the host’s filesystem. -Note that as a new builtin command, `kubectl debug` takes priority over any `kubectl` plugin named “debug”. You will need to rename the affected plugin. -Invocations using `kubectl alpha debug` are now deprecated and will be removed in a subsequent release. Update your scripts to use `kubectl debug` instead of `kubectl alpha debug`! -For more information about kubectl debug, see Debugging Running Pods on the Kubernetes website, kubectl help debug, or reach out to SIG CLI by visiting #sig-cli or commenting on [enhancement #1441](https://features.k8s.io/1441). - -### kubeadm에서 사용 중단된 플래그 삭제 - -`kubeadm` applies a number of deprecations and removals of deprecated features in this release. More details are available in the Urgent Upgrade Notes and Kind / Deprecation sections. - -### 파드의 호스트네임을 FQDN으로 사용하는 것이 베타 단계로 전환 - -Previously introduced in 1.19 behind a feature gate, `SetHostnameAsFQDN` is now enabled by default. More details on this behavior is available in [documentation for DNS for Services and Pods](https://docs.k8s.io/concepts/services-networking/dns-pod-service/#pod-sethostnameasfqdn-field) - -### `TokenRequest` / `TokenRequestProjection` 이 안정 기능으로 전환 - -Service account tokens bound to pod is now a stable feature. The feature gates will be removed in 1.21 release. For more information, refer to notes below on the changelogs. - -### 런타임클래스(RuntimeClass)가 안정 기능으로 전환 - -The `node.k8s.io` API groups are promoted from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. 
([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] - -### 클라우드 컨트롤러 관리자(Cloud Controller Manager)가 이제 각 클라우드 공급자를 통해서만 제공 - -Kubernetes will no longer ship an instance of the Cloud Controller Manager binary. Each Cloud Provider is expected to ship their own instance of this binary. Details for a Cloud Provider to create an instance of such a binary can be found under [here](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/cloud-provider/sample). Anyone with questions on building a Cloud Controller Manager should reach out to SIG Cloud Provider. Questions about the Cloud Controller Manager on a Managed Kubernetes solution should go to the relevant Cloud Provider. Questions about the Cloud Controller Manager on a non managed solution can be brought up with SIG Cloud Provider. +The CSI health monitoring feature is being released as a second Alpha in Kubernetes 1.21. This feature enables CSI Drivers to share abnormal volume conditions from the underlying storage systems with Kubernetes so that they can be reported as events on PVCs or Pods. This feature serves as a stepping stone towards programmatic detection and resolution of individual volume health issues by Kubernetes. ## 알려진 이슈 -### kubelet의 요약(Summary) API는 가속기(accelerator) 메트릭을 가지고 있지 않음 -Currently, cadvisor_stats_provider provides AcceleratorStats but cri_stats_provider does not. As a result, when using cri_stats_provider, kubelet's Summary API does not have accelerator metrics. [There is an open work in progress to fix this](https://github.com/kubernetes/kubernetes/pull/96873). +### `TopologyAwareHints` feature falls back to default behavior + +The feature gate currently falls back to the default behavior in most cases. Enabling the feature gate will add hints to `EndpointSlices`, but functional differences are only observed in non-dual stack kube-proxy implementation. [The fix will be available in coming releases](https://github.com/kubernetes/kubernetes/pull/100804). ## 긴급 업그레이드 노트 ### (주의. 업그레이드 전에 반드시 읽어야 함) -- A bug was fixed in kubelet where exec probe timeouts were not respected. This may result in unexpected behavior since the default timeout (if not specified) is `1s` which may be too small for some exec probes. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. See [configure probe](https://docs.k8s.io/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) section of the documentation for more details. - - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the `ExecProbeTimeout` feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] -- RuntimeClass feature graduates to General Availability. Promote `node.k8s.io` API groups from `v1beta1` to `v1`. `v1beta1` is now deprecated and will be removed in a future release, please start using `v1`. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- API priority and fairness graduated to beta. 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. 
([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Kubeadm: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - -- Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] -- The deprecated flag --experimental-kustomize is now removed from kubeadm commands. Use --experimental-patches instead, which was introduced in 1.19. Migration information available in --help description for --experimental-patches. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) -- Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. 
The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42), [SIG API Machinery, Node, Testing]) -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- `TokenRequest` and `TokenRequestProjection` are now GA features. The following flags are required by the API server: - - `--service-account-issuer`, should be set to a URL identifying the API server that will be stable over the cluster lifetime. - - `--service-account-key-file`, set to one or more files containing one or more public keys used to verify tokens. - - `--service-account-signing-key-file`, set to a file containing a private key to use to sign service account tokens. Can be the same file given to `kube-controller-manager` with `--service-account-private-key-file`. ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle] -- kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] - +- Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. 
Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1 but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must ensure it is enabled as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release] + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] + - Newly provisioned PVs by EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing] + - Newly provisioned PVs by OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage] + - Newly provisioned PVs by gce-pd will no longer have the beta FailureDomain label. gce-pd volume plugin will start to have GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - OpenStack Cinder CSI migration is on by default, the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage] + - Remove alpha `CSIMigrationXXComplete` flag and add alpha `InTreePluginXXUnregister` flag. Deprecate `CSIMigrationvSphereComplete` flag and it will be removed in v1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) + - Remove storage metrics `storage_operation_errors_total`, since we already have `storage_operation_status_count`. And add a new field `status` for `storage_operation_duration_seconds`, so that the latency of storage operations can be tracked for every status. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated.
In both cases the `storage_operation_duration_seconds` metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operations_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) + - `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior` features have been promoted to GA. `ServiceNodeExclusion` and `NodeDisruptionExclusion` are now unconditionally enabled, while `LegacyNodeRoleBehavior` is unconditionally disabled. To prevent control plane nodes from being added to load balancers automatically, upgrade users need to add "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) ## 종류(Kind)별 변경 사항 ### 사용 중단 -- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node] -- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. ([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability] -- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed, please adopt `metrics/resource`. 
([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] -- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing] - +- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more nodes failed during the drain. For now, users can try this behavior by enabling the --ignore-errors flag. ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) +- Delete deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed protocol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Kube-proxy: remove deprecated --cleanup-ipvs flag of kube-proxy, and make the --cleanup flag always flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- Kubeadm: deprecated command "alpha selfhosting pivot" is now removed. ([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) +- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is deprecated now. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the "kubeadm alpha certs" command is removed now, please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the deprecated kube-dns is no longer supported as an option.
If "ClusterConfiguration.dns.type" is set to "kube-dns" kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Remove deprecated `--generator, --replicas, --service-generator, --service-overrides, --schedule` from `kubectl run` + Deprecate `--serviceaccount, --hostport, --requests, --limits` in `kubectl run` ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds", suggest to use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- Removing experimental windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metrics name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- The GA TokenRequest and TokenRequestProjection feature gates have been removed and are unconditionally enabled. Remove explicit use of those feature gates in CLI invocations. ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. 
([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `discovery.k8s.io/v1beta1` EndpointSlices are deprecated in favor of `discovery.k8s.io/v1`, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) +- `diskformat` storage class parameter for in-tree vSphere volume plugin is deprecated as of v1.21 release. Please consider updating storageclass and remove `diskformat` parameter. vSphere CSI Driver does not support diskformat storageclass parameter. + + vSphere releases less than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM Hardware version less than 15 is deprecated as of v1.21. Please consider upgrading the Node VM Hardware version to 15 or above. vSphere CSI Driver recommends Node VM's Hardware version set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployment spanning across multiple vCenter servers. + + Support for these deprecations will be available till Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) ### API 변경 -- `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - - kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a `serving` and `terminating` condition to the EndpointSlice API. - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. ([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. 
- It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. ([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- External facing API podresources is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fewer candidates are enumerated for preemption to improve performance in large clusters. ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) -- Fix conversions for custom metrics. 
([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- GPU metrics provided by kubelet are now disabled by default. ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. --topology-manager-scope=container|pod. - The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- NodeAffinity plugin can be configured with AddedAffinity. 
([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing] -- Promote RuntimeClass feature to GA. - Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing] -- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. ([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The status of v1beta1 CRDs without "preserveUnknownFields:false" now shows a violation, "spec.preserveUnknownFields: Invalid value: true: must be false". ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) -- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBService is enabled. The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps] -- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. 
([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing] -- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps] -- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] - -### 기능(feature) - -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. ([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. - This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add foreground cascading deletion to kubectl with the new `kubectl delete foreground|background|orphan` option. ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) -- Add metrics for azure service operations (route and loadbalancer). 
([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation. ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. ([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Adds a headless service on node-local-cache addon. ([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. 
([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- Configure AWS LoadBalancer health check protocol via service annotations. ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. - - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. 
([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] -- Kubeadm now prints warnings instead of throwing errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes E2E test image manifest lists now contain Windows images. 
([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. - - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: [@mikedanese](https://github.com/mikedanese): - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. - - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. - - - make kube::util::find-binary not dependent on bazel-out/ structure - - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. ([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node-to-pod PID isolation. - Promote SupportPodPidsLimit to GA to provide ability to limit PIDs per pod. ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. 
([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Send GCE node startup scripts logs to console and journal. ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) -- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. ([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric buckets were set to 0.005 0.0125 0.03125 0.078125 0.1953125 0.48828125 1.220703125 3.0517578125 7.62939453125 19.073486328125 47.6837158203125 119.20928955078125 298.0232238769531 and 745.0580596923828 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. 
([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing]
-- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. ([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI]
+- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels.
+  2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace references via the namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing]
+- Add Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing]
+- Added `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed`. This is an alpha field and is only honored by servers with the `IndexedJob` feature gate enabled. ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- Adds support for endPort field in NetworkPolicy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network]
+- CSIServiceAccountToken graduates to Beta and is enabled by default. ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang))
+- Cluster admins can now turn off the `/debug/pprof` and `/debug/flags/v` endpoints in the kubelet by setting `enableProfilingHandler` and `enableDebugFlagsHandler` to `false` in the Kubelet configuration file. Options `enableProfilingHandler` and `enableDebugFlagsHandler` can be set to `true` only when `enableDebuggingHandlers` is also set to `true`. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90))
+- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing]
+- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI]
+- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing]
+- Fixes server-side apply for APIService resources. ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado))
+- Generic ephemeral volumes are beta.
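The PodAffinityTerm `namespaceSelector` entry above is easiest to read next to a manifest. The following is a minimal, illustrative sketch only; the names (`web`, `app: cache`, `team: payments`) are placeholders and not from the release notes, and the field is gated behind an alpha feature gate in this release.

```yaml
# Illustrative only: co-locate this Pod with "app: cache" Pods that live in
# any namespace carrying the label "team: payments".
apiVersion: v1
kind: Pod
metadata:
  name: web                      # hypothetical name
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            app: cache
        namespaceSelector:       # new in v1.21; an empty ({}) selector matches all namespaces
          matchLabels:
            team: payments
  containers:
  - name: web
    image: nginx
```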
([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing] +- Hugepages request values are limited to integer multiples of the page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps] +- Implement the GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing] +- IngressClass resource can now reference a resource in a specific namespace + for implementation-specific configuration (previously only Cluster-level resources were allowed). + This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi)) +- Jobs API has a new `.spec.suspend` field that can be used to suspend and resume Jobs. This is an alpha field which is only honored by servers with the `SuspendJob` feature gate enabled. ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac)) +- Kubelet Graceful Node Shutdown feature graduates to Beta and enabled by default. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage)) +- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing] +- Namespace API objects now have a `kubernetes.io/metadata.name` label matching their metadata.name field to allow selecting any namespace by its name using a label selector. ([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing] +- One new field "InternalTrafficPolicy" in Service is added. + It specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. + "Cluster" routes internal traffic to a Service to all endpoints. + "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. + The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network] +- PodDisruptionBudget API objects can now contain conditions in status. ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] +- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security] +- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- Promote Immutable Secrets/ConfigMaps feature to Stable. This allows to set `immutable` field in Secret or ConfigMap object to mark their contents as immutable. 
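For the new Service `InternalTrafficPolicy` field described above, a hedged sketch of where the field sits in a Service manifest may help; the Service name, selector, and port are invented for illustration, and the field is alpha in this release behind a feature gate.

```yaml
# Illustrative only: keep in-cluster traffic for this Service on the local node.
apiVersion: v1
kind: Service
metadata:
  name: node-local-cache         # hypothetical name
spec:
  selector:
    app: cache
  internalTrafficPolicy: Local   # "Cluster" (default) routes to all endpoints; "Local" to node-local endpoints only
  ports:
  - port: 53
    protocol: UDP
    targetPort: 53
```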
([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing] +- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows] +- Scheduler extender filter interface now can report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling] +- Services can specify loadBalancerClass to use a custom load balancer ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold)) +- Storage capacity tracking (= the CSIStorageCapacity feature) graduates to Beta and enabled by default, storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly)) +- Support for Indexed Job: a Job that is considered completed when Pods associated to indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default. + - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted. + - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` version v11.0.0+ and v0.15.0+ reload tokens automatically. + - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] +- The EndpointSlice Controllers are now GA. The `EndpointSliceController` will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula)) +- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support significantly larger number of addresses. 
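The Indexed Job entry above is clearer with a manifest. This is a minimal sketch, not a definitive example: the Job name, image, and command are placeholders, and `completionMode: Indexed` requires the `IndexedJob` feature gate mentioned earlier in this section.

```yaml
# Illustrative only: five completion indexes (0..4), at most two Pods at a time.
apiVersion: batch/v1
kind: Job
metadata:
  name: indexed-demo             # hypothetical name
spec:
  completions: 5
  parallelism: 2
  completionMode: Indexed        # alpha in this release, behind the IndexedJob feature gate
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: worker
        image: busybox
        # The assigned index is published on each Pod as the
        # batch.kubernetes.io/job-completion-index annotation.
        command: ["sh", "-c", "echo processing one completion index"]
```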
([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. ([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- The `EndpointSlice` API is now GA. The `EndpointSlice` topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The `discovery.k8s.io/v1alpha1` API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) +- The `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a `Pod` compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) +- The kube-apiserver now resets `managedFields` that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) +- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect container for kubectl commands. ([#97099](https://github.com/kubernetes/kubernetes/pull/97099), [@pacoxu](https://github.com/pacoxu)) [SIG CLI] + +### 기능 (Feature) + +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- A new histogram metric to track the time it took to delete a job by the `TTLAfterFinished` controller ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) +- AWS cloud provider supports auto-discovering subnets without any `kubernetes.io/cluster/` tags. It also supports additional service annotation `service.beta.kubernetes.io/aws-load-balancer-subnets` to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) +- Aborting the drain command in a list of nodes will be deprecated. The new behavior will make the drain command go through all nodes even if one or more nodes failed during the drain. For now, users can try such experience by enabling --ignore-errors flag. 
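The policy/v1 PodDisruptionBudget promotion above changes only the empty-selector behavior; a short, illustrative manifest (the name and labels are placeholders) shows what an existing PDB looks like against the promoted API.

```yaml
# Illustrative only: the same PDB expressed against the promoted policy/v1 API.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: web-pdb                  # hypothetical name
spec:
  minAvailable: 2
  selector:                      # note: in policy/v1 an empty selector ({}) selects
    matchLabels:                 # every Pod in the namespace, unlike policy/v1beta1
      app: web
```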
([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) +- Add --permit-address-sharing flag to `kube-apiserver` to listen with `SO_REUSEADDR`. While allowing to listen on wildcard IPs like 0.0.0.0 and specific IPs in parallel, it avoids waiting for the kernel to release socket in `TIME_WAIT` state, and hence, considerably reducing `kube-apiserver` restart times under certain conditions. ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add flag --lease-reuse-duration-seconds for kube-apiserver to config etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Add metric etcd_lease_object_counts for kube-apiserver to observe max objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Added support for installing `arm64` node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Adds two new metrics to cronjobs, a histogram to track the time difference when a job is created and the expected time when it should be created, as well as a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859 enabled when KUBECTL_COMMAND_HEADERS environment variable set on the client command line. 
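The two new kube-apiserver flags above (`--permit-address-sharing` and `--strict-transport-security-directives`) are typically wired into a kubeadm-style static Pod manifest. The fragment below is a sketch under that assumption; the image tag, file path, and the HSTS directive string are illustrative values, not taken from the release notes.

```yaml
# Illustrative fragment of a kube-apiserver static Pod manifest
# (e.g. /etc/kubernetes/manifests/kube-apiserver.yaml on a kubeadm node).
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - name: kube-apiserver
    image: k8s.gcr.io/kube-apiserver:v1.21.0        # hypothetical tag
    command:
    - kube-apiserver
    - --permit-address-sharing=true                 # listen with SO_REUSEADDR to avoid TIME_WAIT delays on restart
    - --strict-transport-security-directives=max-age=31536000,includeSubDomains   # example HSTS directives
    # ... remaining flags unchanged ...
```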
([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3))
+- Base-images: Update to debian-iptables:buster-v1.4.0
+  - Uses iptables 1.8.5
+  - base-images: Update to debian-base:buster-v1.3.0
+  - cluster/images/etcd: Build etcd:3.4.13-2 image
+  - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing]
+- CRIContainerLogRotation graduates to GA and is unconditionally enabled. ([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8))
+- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99385](https://github.com/kubernetes/kubernetes/pull/99385), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Release]
+- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation]
+- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]
+- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage]
+- EndpointSliceNodeName graduates to GA and thus will be unconditionally enabled -- NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula))
+- Export `NewDebuggingRoundTripper` function and `DebugLevel` options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto))
+- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network]
+- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling]
+- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle]
+- Kubeadm: IPv6DualStack feature gate graduates to Beta and is enabled by default ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu))
+- Kubeadm: warn the user that IPv6 site-local addresses are deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network]
+- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly.
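The memory-backed EmptyDir sizing entry above is easiest to picture in a Pod spec. A minimal sketch follows; the Pod name, image, and the 128Mi limit are illustrative, and the sizing behavior applies only when the relevant feature gate is enabled.

```yaml
# Illustrative only: a memory-backed emptyDir capped at 128Mi.
apiVersion: v1
kind: Pod
metadata:
  name: scratch-demo             # hypothetical name
spec:
  containers:
  - name: app
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: scratch
      mountPath: /scratch
  volumes:
  - name: scratch
    emptyDir:
      medium: Memory
      sizeLimit: 128Mi           # tmpfs sized as the minimum of pod allocatable memory and this value
```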
([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle]
+- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node]
+- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+- Kubeadm: perform pre-flight validation on host/node name upon `kubeadm init` and `kubeadm join`, showing warnings on non-compliant names ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu))
+- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI]
+- Kubectl: Add `--use-protocol-buffers` flag to kubectl top pods and nodes. ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius))
+- Kubectl: `kubectl get` will now omit managed fields by default. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`.
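The kubeadm custom image repository entry above refers to the `imageRepository` setting in the kubeadm configuration. The snippet below is a sketch of that configuration under the assumption of the v1beta2 kubeadm config API used in this release; the registry hostname and version are placeholders. With this set, kubeadm now also points the kubelet at the matching pause image via `--pod-infra-container-image` for every container runtime.

```yaml
# Illustrative only: a custom image repository in the kubeadm configuration.
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.21.0                        # hypothetical version
imageRepository: registry.example.com/kubernetes  # hypothetical mirror
```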
([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Kubectl: a Pod can be preselected as default container using `kubectl.kubernetes.io/default-container` annotation ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) +- Kubectl: add bash-completion for comma separated list on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Metrics can now be disabled explicitly via a command line flag (i.e. '--disabled-metrics=metric1,metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) +- New admission controller `DenyServiceExternalIPs` is available. Clusters which do not *need* the Service `externalIPs` feature should enable this controller and be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) +- Overall, enable the feature of `PreferNominatedNode` will improve the performance of scheduling where preemption might frequently happen, but in theory, enable the feature of `PreferNominatedNode`, the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Port the devicemanager to Windows node to allow device plugins like directx ([#93285](https://github.com/kubernetes/kubernetes/pull/93285), [@aarnaud](https://github.com/aarnaud)) [SIG Node, Testing and Windows] +- Removes cAdvisor JSON metrics (/stats/container, /stats//, /stats////) from the kubelet. ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) +- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metrics name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing] +- Sysctls graduates to General Availability and thus unconditionally enabled. ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. 
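The `kubectl.kubernetes.io/default-container` annotation entry above is simple to demonstrate; the Pod and container names below are placeholders for illustration only.

```yaml
# Illustrative only: kubectl exec/logs will target the "app" container by default.
apiVersion: v1
kind: Pod
metadata:
  name: web                                        # hypothetical name
  annotations:
    kubectl.kubernetes.io/default-container: app
spec:
  containers:
  - name: app
    image: nginx
  - name: log-shipper
    image: busybox
    command: ["sleep", "infinity"]
```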
([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The `CronJobControllerV2` feature flag graduates to Beta and set to be enabled by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) +- The `EndpointSlice` mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, it also ensures that updates on any of these fields are mirrored. + The well-known annotation `endpoints.kubernetes.io/last-change-trigger-time` is skipped and not mirrored. ([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing] +- The `TTLAfterFinished` feature flag is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) +- The apimachinery util/net function used to detect the bind address `ResolveBindAddress()` takes into consideration global IP addresses on loopback interfaces when 1) the host has default routes, or 2) there are no global IPs on those interfaces in order to support more complex network scenarios like BGP Unnumbered RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network] +- The feature gate `RootCAConfigMap` graduated to GA in v1.21 and therefore will be unconditionally enabled. This flag will be removed in v1.22 release. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) +- The pause image upgraded to `v3.4.1` in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) +- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. ([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. 
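The `RunAsGroup` GA entry above concerns the pod-level security context. A minimal sketch follows; the UID/GID values and Pod name are illustrative, not prescribed by the release notes.

```yaml
# Illustrative only: run the Pod's processes with an explicit primary group.
apiVersion: v1
kind: Pod
metadata:
  name: rungroup-demo            # hypothetical name
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 3000             # honored now that RunAsGroup is GA
    fsGroup: 2000
  containers:
  - name: app
    image: busybox
    command: ["sh", "-c", "id && sleep 3600"]
```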
New or existing clusters are not affected until an actor starts adding the secondary Pod and Service CIDR CLI flags as described here: [IPv4/IPv6 Dual-stack](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack) ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak))
+- Users might specify the `kubectl.kubernetes.io/default-container` annotation in a Pod to preselect the container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI]
+- When downscaling ReplicaSets, ready and creation timestamps are compared on a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing]
+- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables
+  for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU
+  and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing]
+- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network]
+- `kubectl wait` ensures that observedGeneration >= generation to prevent stale state reporting. An example scenario can be found on CRD updates. ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic))

### 문서

-- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery]
-- Kubelet: remove alpha warnings for CNI flags. ([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node]
-- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers:
-  - removes experimental warning for InstancesV2
-  - document that implementation of InstancesV2 will disable calls to Zones
-  - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider]
+- Azure file migration graduates to beta, with the CSIMigrationAzureFile flag off by default
+  as it requires installation of the AzureFile CSI Driver. Users should enable the CSIMigration and
+  CSIMigrationAzureFile features and install the [AzureFile CSI Driver](https://github.com/kubernetes-sigs/azurefile-csi-driver)
+  to avoid disruption to existing Pod and PVC objects at that time. The Azure File CSI driver does not support using the same persistent
+  volume with different fsgroups; when CSI migration is enabled for the azurefile driver, such a case is not supported.
+  (There is a case we support where the volume is mounted with 0777 and is then readable/writable by everyone.) ([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx))
+- Official support to build kubernetes with docker-machine / remote docker is removed.
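For the IPv6 dual-stack graduation discussed above, a Service on a dual-stack cluster can request both address families. The sketch below is illustrative only; the Service name, selector, and port are placeholders, and it assumes the cluster has already been configured with the secondary Pod and Service CIDRs described in the linked KEP.

```yaml
# Illustrative only: request both address families on a dual-stack cluster.
apiVersion: v1
kind: Service
metadata:
  name: dual-stack-demo          # hypothetical name
spec:
  ipFamilyPolicy: PreferDualStack   # or RequireDualStack / SingleStack
  ipFamilies:
  - IPv4
  - IPv6
  selector:
    app: demo
  ports:
  - port: 80
```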
This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set kubelet option `--volume-stats-agg-period` to negative value to disable volume calculations. ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### 실패 테스트 -- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing] -- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing] +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP `NodePort` Services where stale connection tracking entries may blackhole the traffic directed to the `NodePort` ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) +- Kubelet: fixes a bug in the HostPort dockershim implementation that caused the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### 버그 또는 회귀(regression) -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure armclient backoff step defaults to 1 (no retry). 
([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) -- Azure: fix a bug that kube-controller-manager would panic if wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Both apiserver_request_duration_seconds metrics and RequestReceivedTimestamp fields of an audit event now take into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) -- Build/lib/release: Explicitly use '--platform' in building server images - - When we switched to go-runner for building the apiserver, - controller-manager, and scheduler server components, we no longer - reference the individual architectures in the image names, specifically - in the 'FROM' directive of the server image Dockerfiles. - - As a result, server images for non-amd64 images copy in the go-runner - amd64 binary instead of the go-runner that matches that architecture. - - This commit explicitly sets the '--platform=linux/${arch}' to ensure - we're pulling the correct go-runner arch from the manifest list. - - Before: - `FROM ${base_image}` - - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- Ceph RBD volume expansion now works even when ceph.conf was not provided. ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Change the calculation of pod UIDs so that static pods get a unique value - will cause all containers to be killed and recreated after in-place upgrade. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node] -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. 
([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. ([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Do not fail sorting empty elements. ([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when dual-stack feature gate become enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Endpoint slice controller now mirrors parent's service label to its corresponding endpoint slices. ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) -- Ensure getPrimaryInterfaceID not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix a bug where loadbalancer deletion gets stuck because of missing resource group. 
([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a panic in `kubectl debug` when a pod has multiple init or ephemeral containers. ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. ([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix missing csi annotations on node during parallel csinode update. 
([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix race condition on timeCache locks. ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) -- Fix regression on `kubectl portforward` when TCP and UCP services were configured on the same port. ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. ([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. 
([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. ([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed bug where kubectl top pod output is not sorted when --sort-by and --containers flags are used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. 
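
As a rough illustration of the HTTP/2 health check entry above: the two environment variables are read by client-go based tools, so they can simply be exported before invoking one. The timeout values shown here are arbitrary examples, not recommended settings.

```shell
# Tune the client-side HTTP/2 health check (values are illustrative).
export HTTP2_READ_IDLE_TIMEOUT_SECONDS=30
export HTTP2_PING_TIMEOUT_SECONDS=15
kubectl get nodes

# Per the entry above, a read idle timeout of 0 disables the health check entirely.
export HTTP2_READ_IDLE_TIMEOUT_SECONDS=0
```
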
([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Increase maximum IOPS of AWS EBS io1 volume to current maximum (64,000). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. ([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network] -- Kubeadm ensures "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user. ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) -- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Kubeadm now warns (instead of error out) on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. 
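
To make the kubeadm entry above concrete, here is a sketch of an external-CA style control-plane join in which only the CA certificates (not their `ca.key` files) are present. The `/etc/kubernetes/pki` layout is the usual kubeadm default, and the endpoint, token, and discovery hash are placeholders.

```shell
# CA certificates exist, but the matching *.key files do not; all leaf
# certificates and kubeconfig files were signed out of band.
ls /etc/kubernetes/pki/ca.crt \
   /etc/kubernetes/pki/front-proxy-ca.crt \
   /etc/kubernetes/pki/etcd/ca.crt

# kubeadm now warns about the missing CA keys instead of failing outright.
kubeadm join 10.0.0.10:6443 --control-plane \
  --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:<hash>
```
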
([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle] -- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CR ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more reseliant to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). ([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- New Azure instance types do now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Port mapping now allows the same `containerPort` of different containers to different `hostPort` without naming the mapping explicitly. ([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Recreate EndpointSlices on rapid Service creation. 
([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. ([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. The [kubectl-check-ownerreferences](https://github.com/kubernetes-sigs/kubectl-check-ownerreferences) tool can be run prior to upgrading to locate existing objects with invalid ownerReferences. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. - - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- StatefulSet Controller now waits for PersistentVolumeClaim deletion before creating pods. ([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) -- StreamWatcher now calls HandleCrash at appropriate sequence. 
([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. ([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] -- Users will see increase in time for deletion of pods and also guarantee that removal of pod from api server would mean deletion of all the resources from container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node] -- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. 
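
A brief sketch of the `--patch-file` flag from the `kubectl patch` entry above; the Deployment name, container name, and image are hypothetical.

```shell
# Write a (potentially very large) patch to a file...
cat > patch.yaml <<'EOF'
spec:
  template:
    spec:
      containers:
      - name: app                      # hypothetical container name
        image: example.com/app:1.21    # hypothetical image
EOF

# ...and reference it instead of passing the patch inline with --patch.
kubectl patch deployment my-app --patch-file=patch.yaml
```
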
([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- All data is no longer automatically deleted when a failure is detected during creation of the volume data file on a CSI volume. Now only the data file and volume path is removed. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) +- Clean ReplicaSet by revision instead of creation timestamp in deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Client-go exec credential plugins will pass stdin only when interactive terminal is detected on stdin. This fixes a bug where previously it was checking if **stdout** is an interactive terminal. ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) +- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing] +- Cluster Autoscaler version bump to v1.20.0 ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Ensure all vSphere nodes are are tracked by volume attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) +- Ensure empty string annotations are copied over in rollbacks. ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) +- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] +- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery] +- Explicitly pass `KUBE_BUILD_CONFORMANCE=y` in `package-tarballs` to reenable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) +- Fix Azure file migration e2e test failure when CSIMigration is turned on. ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) +- Fix CSI-migrated inline EBS volumes failing to mount if their volumeID is prefixed by aws:// ([#96821](https://github.com/kubernetes/kubernetes/pull/96821), [@wongma7](https://github.com/wongma7)) [SIG Storage] +- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fix PermissionDenied issue on SMB mount for Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fix kube-proxy container image architecture for non amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) +- Fix missing cadvisor machine metrics. ([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node] +- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix privileged config of Pod Sandbox which was previously ignored. 
([#96877](https://github.com/kubernetes/kubernetes/pull/96877), [@xeniumlee](https://github.com/xeniumlee)) +- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node] +- Fix the regression with the slow pods termination. Before this fix pods may take an additional time to terminate - up to one minute. Reversing the change that ensured that CNI resources cleaned up when the pod is removed on API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node] +- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage] +- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider] +- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed a bug in kubelet that will saturate CPU utilization after containerd got restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node] +- Fixed a bug that causes smaller number of conntrack-max being used under CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed a bug that on k8s nodes, when the policy of INPUT chain in filter table is not ACCEPT, healthcheck nodeport would not work. + Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network] +- Fixed a bug that the kubelet cannot start on BtrfS. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] +- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixed authentication_duration_seconds metric scope. Previously, it included whole apiserver request duration which yields inaccurate results. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) +- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. 
([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixed no effect namespace when exposing deployment with --dry-run=client. ([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI] +- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixes a bug of identifying the correct containerd process. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) +- Fixes add-on manager leader election to use leases instead of endpoints, similar to what kube-controller-manager does in 1.20 ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) +- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage] +- Fixes problem where invalid selector on `PodDisruptionBudget` leads to a nil pointer dereference that causes the Controller manager to crash loop. ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) +- Fixes spurious errors about IPv6 in `kube-proxy` logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) +- Fixing a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node] +- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network] +- Ignore update pod with no new images in alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node] +- Improve speed of vSphere PV provisioning and reduce number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- KUBECTL_EXTERNAL_DIFF now accepts equal sign for additional parameters. 
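
To illustrate the `KUBECTL_EXTERNAL_DIFF` entry above: the variable can now carry additional parameters for the external diff program, including ones written with an equal sign. The diff flags and manifest name below are only examples.

```shell
# Extra flags for the external differ, note the '=' in --color=always.
KUBECTL_EXTERNAL_DIFF="diff -u -N --color=always" kubectl diff -f deployment.yaml
```
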
([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kube-proxy: remove deprecated --cleanup-ipvs flag of kube-proxy, and make --cleanup flag always to flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- Kubeadm installs etcd v3.4.13 when creating cluster v1.19 ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) +- Kubeadm: Fixes a kubeadm upgrade bug that could cause a custom CoreDNS configuration to be replaced with the default. ([#97016](https://github.com/kubernetes/kubernetes/pull/97016), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug in the host memory detection code on 32bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. ([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate to be written in the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle] +- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to be false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl logs: `--ignore-errors` is now honored by all containers, maintaining consistency with parallelConsumeRequest behavior. 
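
A quick sketch of the `kubectl logs` entry above; the Deployment name is hypothetical.

```shell
# With --all-containers, --ignore-errors now applies to every container's
# log stream, per the entry above.
kubectl logs deployment/my-app --all-containers --ignore-errors --follow
```
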
([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) +- Kubectl-convert: Fix `no kind "Ingress" is registered for version` error ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) +- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- Kubelet.exe on Windows now checks that the process running as administrator and the executing user account is listed in the built-in administrators group. This is the equivalent to checking the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows] +- Kubelet: Fix kubelet from panic after getting the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Kubelet: Fix repeatedly acquiring the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Kubelet: Fixed the bug of getting the number of cpu when the number of cpu logical processors is more than 64 in windows ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows] +- Limits lease to have 1000 maximum attached objects. ([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) +- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback folume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Performance regression #97685 has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] +- Pod Log stats for windows now reports metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows] +- Pod status updates faster when reacting on probe results. The first readiness probe will be called faster when startup probes succeeded, which will make Pod status as ready faster. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) +- Readjust `kubelet_containers_per_pod_count` buckets to only show metrics greater than 1. ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) +- Remove CSI topology from migrated in-tree gcepd volume. 
([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage]
+- Requests with invalid timeout parameters in the request URL now appear in the audit log correctly. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing]
+- Resolve a "concurrent map read and map write" crashing error in the kubelet ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node]
+- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node]
+- ResourceQuota of an entity now inclusively calculates Pod overhead ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42))
+- Return zero time (midnight on Jan. 1, 1970) instead of a negative number when reporting startedAt and finishedAt of a Pod that has not started or is still running, when using `dockershim` as a runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber))
+- Reverts breaking change to inline AzureFile volumes; referenced secrets are now searched for in the same namespace as the pod as in previous releases. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42))
+- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling]
+- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing]
+- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node]
+- Sync node status during kubelet node shutdown.
+  Adds a pod admission handler that rejects new pods while the node is shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node]
+- The calculation of pod UIDs for static pods has changed to ensure each static pod gets a unique value - this will cause all static pod containers to be recreated/restarted if an in-place kubelet upgrade from 1.20 to 1.21 is performed. Note that draining pods before upgrading the kubelet across minor versions is the supported upgrade path. ([#87461](https://github.com/kubernetes/kubernetes/pull/87461), [@bboreham](https://github.com/bboreham)) [SIG Node]
+- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network]
+- Truncates a message if it hits the `NoteLengthLimit` when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory))
+- Updated k8s.gcr.io/ingress-gce-404-server-with-metrics-amd64 to a version that serves /metrics endpoint on a non-default port.
([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider] +- Updates the commands ` + - kubectl kustomize {arg} + - kubectl apply -k {arg} + `to use same code as kustomize CLI [v4.0.5](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5) ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) +- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage] +- Use network.Interface.VirtualMachine.ID to get the binded VM + Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. ([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] +- When using `Containerd` on Windows, the `C:\Windows\System32\drivers\etc\hosts` file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) +- `VolumeBindingArgs` now allow `BindTimeoutSeconds` to be set as zero, while the value zero indicates no waiting for the checking of volume binding operation. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, print inline with exec and attach the list of alternate containers when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### 기타 (정리 또는 플레이크(flake)) -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: - - ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. 
The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`. ([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Add fine grained debugging to intra-pod conformance test to troubleshoot networking issues for potentially unhealthy nodes when running conformance or sonobuoy tests. ([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) -- Add the following metrics: - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. ([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Decrease warning message frequency on setting volume ownership for configmap/secret. 
([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) -- Enhance log information of verifyRunAsNonRoot, add pod, container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Fix kubelet to properly log when a container is started. Previously, kubelet may log that container is dead and was restarted when it was actually started for the first time. This behavior only happened on pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- Lock ExternalPolicyForExternalIP to default, this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Mask ceph RBD adminSecrets in logs when logLevel >= 4. ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) -- Remove offensive words from kubectl cluster-info command. ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) -- Remove support for "ci/k8s-master" version label in kubeadm, use "ci/latest" instead. See [kubernetes/test-infra#18517](https://github.com/kubernetes/test-infra/pull/18517). 
([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) -- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To ensure the code be strong, add unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug. - Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). 
The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- APIs for kubelet annotations and labels from `k8s.io/kubernetes/pkg/kubelet/apis` are now moved under `k8s.io/kubelet/pkg/apis/` ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) +- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Clients required to use go1.15.8+ or go1.16+ if kube-apiserver has the goaway feature enabled to avoid unexpected data race condition. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) +- Delete deprecated `service.beta.kubernetes.io/azure-load-balancer-mixed-protocols` mixed procotol annotation in favor of the MixedProtocolLBService feature ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- EndpointSlice generation is now incremented when labels change. ([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Featuregate AllowInsecureBackendProxy graduates to GA and unconditionally enabled. ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) +- Increase timeout for pod lifecycle test to reach pod status=ready ([#96691](https://github.com/kubernetes/kubernetes/pull/96691), [@hh](https://github.com/hh)) +- Increased `CSINodeIDMaxLength` from 128 bytes to 192 bytes. ([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) +- Kube-apiserver: The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubectl: The deprecated `kubectl alpha debug` command is removed. Use `kubectl debug` instead. 
([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Kubelet command line flags related to dockershim are now showing deprecation message as they will be removed along with dockershim in future release. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The `AttachVolumeLimit` feature gate (GA since v1.17) has been removed and now unconditionally enabled. ([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) +- The `CSINodeInfo` feature gate that is GA since v1.17 is unconditionally enabled, and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The `apiserver_request_total` metric is promoted to stable status and no longer has a content-type dimensions, so any alerts/charts which presume the existence of this will fail. This is however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. ([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. 
([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that are GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi zone clusters is preserved. ([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts`) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) + +### 분류되지 않음 + +- GCE L4 Loadbalancers now handle > 5 ports in service spec correctly. ([#99595](https://github.com/kubernetes/kubernetes/pull/99595), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are min 1.20 version. The feature will be enabled by default in all installations in 1.22. 
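For illustration only, and not part of the upstream release notes: the DownwardAPIHugePages feature described in the entry above lets a container read its own hugepages requests and limits through the downward API. The following minimal Go sketch builds such a `resourceFieldRef` with the `k8s.io/api/core/v1` types; the container name `app`, the 2Mi page size, and the variable name `HUGEPAGES_2MI_REQUEST` are assumptions made for the example.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hugepagesEnv builds an environment variable that surfaces the container's
// 2Mi hugepages request via the downward API (behavior gated by the
// DownwardAPIHugePages feature described above).
func hugepagesEnv() corev1.EnvVar {
	return corev1.EnvVar{
		Name: "HUGEPAGES_2MI_REQUEST", // illustrative variable name
		ValueFrom: &corev1.EnvVarSource{
			ResourceFieldRef: &corev1.ResourceFieldSelector{
				ContainerName: "app",                    // hypothetical container name
				Resource:      "requests.hugepages-2Mi", // hugepages resource exposed by the feature
				Divisor:       resource.MustParse("1Mi"),
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", hugepagesEnv())
}
```

The same resource names can also be surfaced through a downwardAPI volume instead of an environment variable, depending on how the workload prefers to consume them.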
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## 의존성 ### 추가 -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- 
github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### 변경 -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) -- github.com/google/go-cmp: [v0.4.0 → 
v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/container-storage-interface/spec: [v1.2.0 → 
v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/creack/pty: [v1.1.7 → v1.1.11](https://github.com/creack/pty/compare/v1.1.7...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.5...v0.39.0) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) 
+- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: c1934b7 → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/gengo: 83324d8 → b6c5ce2 +- k8s.io/klog/v2: v2.4.0 → v2.8.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.2.0 → v1.4.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.1.0 ### 제거 -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 +- sigs.k8s.io/kustomize: v2.0.3+incompatible ## 의존성 ### 추가 -- cloud.google.com/go/firestore: v1.1.0 -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: 
[v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: [a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### 변경 -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.5) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.1) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → 
v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.5](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.5) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/go-digest: [v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: [v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) 
-- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.etcd.io/etcd: 17cef6e → dd1b699 -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/crypto: 75b2880 → 7f63de1 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 6aeccd4 → d219536 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/creack/pty: [v1.1.7 → v1.1.11](https://github.com/creack/pty/compare/v1.1.7...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.5...v0.39.0) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- 
github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: c1934b7 → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/gengo: 83324d8 → b6c5ce2 +- k8s.io/klog/v2: v2.4.0 → v2.8.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.2.0 → v1.4.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.1.0 ### 제거 -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) -- vbom.ml/util: db5cfe1 +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 +- sigs.k8s.io/kustomize: 
v2.0.3+incompatible -# v1.20.0-rc.0 +# v1.21.0-rc.0 -## Downloads for v1.20.0-rc.0 +## Downloads for v1.21.0-rc.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes.tar.gz) | acfee8658831f9503fccda0904798405434f17be7064a361a9f34c6ed04f1c0f685e79ca40cef5fcf34e3193bacbf467665e8dc277e0562ebdc929170034b5ae -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-src.tar.gz) | 9d962f8845e1fa221649cf0c0e178f0f03808486c49ea15ab5ec67861ec5aa948cf18bc0ee9b2067643c8332227973dd592e6a4457456a9d9d80e8ef28d5f7c3 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes.tar.gz) | ef53a41955d6f8a8d2a94636af98b55d633fb8a5081517559039e019b3dd65c9d10d4e7fa297ab88a7865d772f3eecf72e7b0eeba5e87accb4000c91da33e148 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-src.tar.gz) | 9335a01b50d351776d3b8d00c07a5233844c51d307e361fa7e55a0620c1cb8b699e43eacf45ae9cafd8cbc44752e6987450c528a5bede8204706b7673000b5fc ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 062b57f1a450fe01d6184f104d81d376bdf5720010412821e315fd9b1b622a400ac91f996540daa66cee172006f3efade4eccc19265494f1a1d7cc9450f0b50a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-386.tar.gz) | 86e96d2c2046c5e62e02bef30a6643f25e01f1b3eba256cab7dd61252908540c26cb058490e9cecc5a9bad97d2b577f5968884e9f1a90237e302419f39e068bc -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 619d3afb9ce902368390e71633396010e88e87c5fd848e3adc71571d1d4a25be002588415e5f83afee82460f8a7c9e0bd968335277cb8f8cb51e58d4bb43e64e -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 60965150a60ab3d05a248339786e0c7da4b89a04539c3719737b13d71302bac1dd9bcaa427d8a1f84a7b42d0c67801dce2de0005e9e47d21122868b32ac3d40f -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | 688e064f4ef6a17189dbb5af468c279b9de35e215c40500fb97b1d46692d222747023f9e07a7f7ba006400f9532a8912e69d7c5143f956b1dadca144c67ee711 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | 47b8abc02b42b3b1de67da184921b5801d7e3cb09befac840c85913193fc5ac4e5e3ecfcb57da6b686ff21af9a3bd42ae6949d4744dbe6ad976794340e328b83 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 971b41d3169f30e6c412e0254c180636abb7ccc8dcee6641b0e9877b69752fc61aa30b76c19c108969df654fe385da3cb3a44dd59d3c28dc45561392d7e08874 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-386.tar.gz) | 2d34e8387e31531d9aca5655f2f0d18e75b01825dc1c39b7beb73a7b7b610e2ba429e5ca97d5c41a71b67e75e7096c86ab63fda9baab4c0878c1ccb3a1aefac8 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | f909640f4140693bb871936f10a40e79b43502105d0adb318b35bb7a64a770ad9d05a3a732368ccd3d15d496d75454789165bd1f5c2571da9a00569b3e6c007c +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-amd64.tar.gz) | 964135e43234cee275c452f5f06fb6d2bcd3cff3211a0d50fa35fff1cc4446bc5a0ac5125405dadcfb6596cb152afe29fabf7aad5b35b100e1288db890b70f8e 
+[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-darwin-arm64.tar.gz) | 50d782abaa4ded5e706b3192d87effa953ceabbd7d91e3d48b0c1fa2206a1963a909c14b923560f5d09cac2c7392edc5f38a13fbf1e9a40bc94e3afe8de10622 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-386.tar.gz) | 72af5562f24184a2d7c27f95fa260470da979fbdcacce39a372f8f3add2991d7af8bc78f4e1dbe7a0f97e3f559b149b72a51491d3b13008da81872ee50f02f37 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-amd64.tar.gz) | 1eddb8f6b51e005bc6f7b519d036cbe3d2f6d97dbf7d212dd933fb56354c29f222d050519115a9bcf94555aef095db7cf763469e47bb4ae3c6c07f97edf437cb +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm.tar.gz) | 670f8ca60ea3cf0bb3262a772715e0ea735fccda6a92f3186299361dc455b304ae177d4017e0b67bbfa4a95e36f4cc3f7eb335e2a5130c93ac3fba2aff4519bf +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-arm64.tar.gz) | a69a47907cff138ba393d8c87044fd95d97f3ca8f35d301b50742e2801ad7c229d99d6667971091f65825eb51854d585be0dd7421670110b1aa567e67e7ab4b3 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-ppc64le.tar.gz) | b929feade94b71c81908abdcd4343b1e1e20098fd65e10d4d02585ad649d292d06f52c7ddc349efa188ce5b093e703c7aa9582c6ae5a69699adb87bbf5350243 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-linux-s390x.tar.gz) | 899d1470e412282cf289d8e24806d1a08c62ec0151f345ae3c9e497cc7bc0feab76498de4dd897d6adcdfa0c422e6b1a37e25d928669030f53457fd69d6e7df7 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-386.tar.gz) | 9f0bc90a269eabd06fe4f637b5172a3a6a7d3de26de0d66504c2e1f2093083c584ea39031db6075a7da7a86b98c48bed25aa88d4ac09060b38692c6a5b637078 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-client-windows-amd64.tar.gz) | 05c8cc10188a1294b0d51d052942742a9b26411a08ec73494bf0e728a8a167e0a7863bdfc8864e76a371b584380098381805341e18b4b283b5d0cf298d5f7c7c ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 0ea4458ae34108c633b4d48f1f128c6274dbc82b613492e78b3e0a2f656ac0df0bb9a75124e15d67c8e81850adcecf19f4ab0234c17247ee7ddf84f2df3e5eaa -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm.tar.gz) | aef6a4d457faa29936603370f29a8523bb274211c3cb5101bd31aaf469c91ba6bd149ea99a4ccdd83352cf37e4d6508c5ee475ec10292bccd2f77ceea31e1c28 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | 4829f473e9d60f9929ad17c70fdc2b6b6509ed75418be0b23a75b28580949736cb5b0bd6382070f93aa0a2a8863f0b1596daf965186ca749996c29d03ef7d8b8 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 9ab0790d382a3e28df1c013762c09da0085449cfd09d176d80be932806c24a715ea829be0075c3e221a2ad9cf06e726b4c39ab41987c1fb0fee2563e48206763 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 98670b587e299856dd9821b7517a35f9a65835b915b153de08b66c54d82160438b66f774bf5306c07bc956d70ff709860bc23162225da5e89f995d3fdc1f0122 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-amd64.tar.gz) | 
355f278728ef7ac7eb2f5568c99c1429543c6302bbd0ed3bd0378c08116075e56ae850a49241313f078e2392702672ec6c9b70c8d97b4f2f5f4bee36828a63ba +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm.tar.gz) | 9ac02c2825e2fd4e92f0c0f67180c67c24e32841ccbabc82284bf6293727ffecfae65e8a42b527c2a7ca482752384928eb65c2a1706144ae7819a6b3a1ab291c +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-arm64.tar.gz) | eb412453da03c82a9248412c8ccf4d4baa1fbfa81edd8d4f81d28969b40a3727e18934accc68f643d253446c58ffd2623292402495480b3d4b2a837b5318b957 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-ppc64le.tar.gz) | 07da2812c35bbc427ee5b4a0b601c3ae271e0d50ab0dd4c5c25399f43506fa2a187642eb9d4d2085df7b90264d48ea2f31088af87d9efa7eb2e87f91e1fdbde4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-server-linux-s390x.tar.gz) | 3b79442a3d6e389c4ff105922a8e49994c0b6c088d2c501bd8c78d9f9e814902f5bb72c8f9c89380b750fda9b3a336759b9b68f11d70bef4f0e984564a95c29e ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | 699e9c8d1837198312eade8eb6fec390f6a2fea9e08207d2f58e8bb6e3e799028aca69e4670aac0a4ba7cf0af683aee2c158bf78cc520c80edc876c8d94d521a -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm.tar.gz) | f3b5eab0669490e3cd7e802693daf3555d08323dfff6e73a881fce00fed4690e8bdaf1610278d9de74036ca37631016075e5695a02158b7d3e7582b20ef7fa35 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | e5012f77363561a609aaf791baaa17d09009819c4085a57132e5feb5366275a54640094e6ed1cba527f42b586c6d62999c2a5435edf5665ff0e114db4423c2ae -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 2a6d6501620b1a9838dff05c66a40260cc22154a28027813346eb16e18c386bc3865298a46a0f08da71cd55149c5e7d07c4c4c431b4fd231486dd9d716548adb -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 5eca02777519e31428a1e5842fe540b813fb8c929c341bbc71dcfd60d98deb89060f8f37352e8977020e21e053379eead6478eb2d54ced66fb9d38d5f3142bf0 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | 8ace02e7623dff894e863a2e0fa7dfb916368431d1723170713fe82e334c0ae0481b370855b71e2561de0fb64fed124281be604761ec08607230b66fb9ed1c03 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-amd64.tar.gz) | f12edf1faf5f07de1ebc5a8626601c12927902e10aca3f11e398637382fdf55365dbd9a0ef38858553fb7569495ae2cf68f155dd2e49b85b27d76fb599bb92e4 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm.tar.gz) | 4fba8fc4e2102f07fb778aab597ec7231ea65c35e1aa618fe98b707b64a931237bd842c173e9120326e4d9deb983bb3917176762bba2212612bbc09d6e2105c4 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-arm64.tar.gz) | a2e1be5459a8346839970faf4e7ebdb8ab9f3273e02babf1f3199b06bdb67434a2d18fcd1628cf1b989756e99d8dad6624a455b9db11d50f51f509f4df5c27da +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-ppc64le.tar.gz) | 16d2c1cc295474fc49fe9a827ddd73e81bdd6b76af7074987b90250023f99b6d70bf474e204c7d556802111984fcb3a330740b150bdc7970d0e3634eb94a1665 
+[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-linux-s390x.tar.gz) | 9dc6faa6cd007b13dfce703f3e271f80adcc4e029c90a4a9b4f2f143b9756f2893f8af3d7c2cf813f2bd6731cffd87d15d4229456c1685939f65bf467820ec6e +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-rc.0/kubernetes-node-windows-amd64.tar.gz) | f8bac2974c9142bfb80cd5eadeda79f79f27b78899a4e6e71809b795c708824ba442be83fdbadb98e01c3823dd8350776358258a205e851ed045572923cacba7 -## Changelog since v1.20.0-beta.2 +## Changelog since v1.21.0-beta.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + + - Migrated pkg/kubelet/cm/cpuset/cpuset.go to structured logging. Exit code changed from 255 to 1. ([#100007](https://github.com/kubernetes/kubernetes/pull/100007), [@utsavoza](https://github.com/utsavoza)) [SIG Instrumentation and Node] + ## Changes by Kind -### Feature +### API Change -- Kubernetes is now built using go1.15.5 - - build: Update to k/repo-infra@v0.1.2 (supports go1.15.5) ([#95776](https://github.com/kubernetes/kubernetes/pull/95776), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- Add Probe-level terminationGracePeriodSeconds field ([#99375](https://github.com/kubernetes/kubernetes/pull/99375), [@ehashman](https://github.com/ehashman)) [SIG API Machinery, Apps, Node and Testing] +- CSIServiceAccountToken is Beta now ([#99298](https://github.com/kubernetes/kubernetes/pull/99298), [@zshihang](https://github.com/zshihang)) [SIG Auth, Storage and Testing] +- Discovery.k8s.io/v1beta1 EndpointSlices are deprecated in favor of discovery.k8s.io/v1, and will no longer be served in Kubernetes v1.25. ([#100472](https://github.com/kubernetes/kubernetes/pull/100472), [@liggitt](https://github.com/liggitt)) [SIG Network] +- FieldManager no longer owns fields that get reset before the object is persisted (e.g. "status wiping"). ([#99661](https://github.com/kubernetes/kubernetes/pull/99661), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Auth and Testing] +- Generic ephemeral volumes are beta. ([#99643](https://github.com/kubernetes/kubernetes/pull/99643), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Node, Storage and Testing] +- Implement the GetAvailableResources in the podresources API. ([#95734](https://github.com/kubernetes/kubernetes/pull/95734), [@fromanirh](https://github.com/fromanirh)) [SIG Instrumentation, Node and Testing] +- The Endpoints controller will now set the `endpoints.kubernetes.io/over-capacity` annotation to "warning" when an Endpoints resource contains more than 1000 addresses. In a future release, the controller will truncate Endpoints that exceed this limit. The EndpointSlice API can be used to support significantly larger number of addresses. ([#99975](https://github.com/kubernetes/kubernetes/pull/99975), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- The PodDisruptionBudget API has been promoted to policy/v1 with no schema changes. The only functional change is that an empty selector (`{}`) written to a policy/v1 PodDisruptionBudget now selects all pods in the namespace. The behavior of the policy/v1beta1 API remains unchanged. The policy/v1beta1 PodDisruptionBudget API is deprecated and will no longer be served in 1.25+. 
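For illustration only, and not part of the upstream release notes: the policy/v1 promotion described in the entry above changes what an empty selector means. A minimal Go sketch of a PodDisruptionBudget built with the `k8s.io/api/policy/v1` types follows; the object name `all-pods`, the `default` namespace, and the `minAvailable` value are assumptions made for the example.

```go
package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	minAvailable := intstr.FromInt(1)
	pdb := policyv1.PodDisruptionBudget{
		TypeMeta:   metav1.TypeMeta{APIVersion: "policy/v1", Kind: "PodDisruptionBudget"},
		ObjectMeta: metav1.ObjectMeta{Name: "all-pods", Namespace: "default"}, // illustrative names
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			// In policy/v1 an empty selector matches every pod in the namespace;
			// in policy/v1beta1 the same selector matched none.
			Selector: &metav1.LabelSelector{},
		},
	}
	fmt.Printf("%+v\n", pdb)
}
```

Manifests that relied on the old policy/v1beta1 behavior of an empty selector selecting no pods need an explicit selector after moving to policy/v1.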
([#99290](https://github.com/kubernetes/kubernetes/pull/99290), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Scheduling and Testing]
+- Topology Aware Hints are now available in alpha and can be enabled with the `TopologyAwareHints` feature gate. ([#99522](https://github.com/kubernetes/kubernetes/pull/99522), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Instrumentation, Network and Testing]
 
-### Failing Test
+### Feature
 
-- Resolves an issue running Ingress conformance tests on clusters which use finalizers on Ingress objects to manage releasing load balancer resources ([#96742](https://github.com/kubernetes/kubernetes/pull/96742), [@spencerhance](https://github.com/spencerhance)) [SIG Network and Testing]
-- The Conformance test "validates that there is no conflict between pods with same hostPort but different hostIP and protocol" now validates the connectivity to each hostPort, in addition to the functionality. ([#96627](https://github.com/kubernetes/kubernetes/pull/96627), [@aojea](https://github.com/aojea)) [SIG Scheduling and Testing]
+- Add e2e test to validate performance metrics of volume lifecycle operations ([#94334](https://github.com/kubernetes/kubernetes/pull/94334), [@RaunakShah](https://github.com/RaunakShah)) [SIG Storage and Testing]
+- EmptyDir memory backed volumes are sized as the minimum of pod allocatable memory on a host and an optional explicit user provided value. ([#100319](https://github.com/kubernetes/kubernetes/pull/100319), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node]
+- Enables Kubelet to check volume condition and log events to corresponding pods. ([#99284](https://github.com/kubernetes/kubernetes/pull/99284), [@fengzixu](https://github.com/fengzixu)) [SIG Apps, Instrumentation, Node and Storage]
+- Introduce a churn operator to scheduler perf testing framework. ([#98900](https://github.com/kubernetes/kubernetes/pull/98900), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing]
+- Kubernetes is now built with Golang 1.16.1 ([#100106](https://github.com/kubernetes/kubernetes/pull/100106), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Instrumentation, Release and Testing]
+- Migrated pkg/kubelet/cm/devicemanager to structured logging ([#99976](https://github.com/kubernetes/kubernetes/pull/99976), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Migrated pkg/kubelet/cm/memorymanager to structured logging ([#99974](https://github.com/kubernetes/kubernetes/pull/99974), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Migrated pkg/kubelet/cm/topologymanager to structured logging ([#99969](https://github.com/kubernetes/kubernetes/pull/99969), [@knabben](https://github.com/knabben)) [SIG Instrumentation and Node]
+- Rename metrics `etcd_object_counts` to `apiserver_storage_object_counts` and mark it as stable. The original `etcd_object_counts` metrics name is marked as "Deprecated" and will be removed in the future. ([#99785](https://github.com/kubernetes/kubernetes/pull/99785), [@erain](https://github.com/erain)) [SIG API Machinery, Instrumentation and Testing]
+- Update pause container to run as pseudo user and group `65535:65535`. This implies the release of version 3.5 of the container images. 
([#97963](https://github.com/kubernetes/kubernetes/pull/97963), [@saschagrunert](https://github.com/saschagrunert)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Release, Security and Testing] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect container for kubectl commands. ([#99833](https://github.com/kubernetes/kubernetes/pull/99833), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] ### Bug or Regression -- Bump node-problem-detector version to v0.8.5 to fix OOM detection in with Linux kernels 5.1+ ([#96716](https://github.com/kubernetes/kubernetes/pull/96716), [@tosi3k](https://github.com/tosi3k)) [SIG Cloud Provider, Scalability and Testing] -- Changes to timeout parameter handling in 1.20.0-beta.2 have been reverted to avoid breaking backwards compatibility with existing clients. ([#96727](https://github.com/kubernetes/kubernetes/pull/96727), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Testing] -- Duplicate owner reference entries in create/update/patch requests now get deduplicated by the API server. The client sending the request now receives a warning header in the API response. Clients should stop sending requests with duplicate owner references. The API server may reject such requests as early as 1.24. ([#96185](https://github.com/kubernetes/kubernetes/pull/96185), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery and Testing] -- Fix: resize Azure disk issue when it's in attached state ([#96705](https://github.com/kubernetes/kubernetes/pull/96705), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixed a bug where aggregator_unavailable_apiservice metrics were reported for deleted apiservices. ([#96421](https://github.com/kubernetes/kubernetes/pull/96421), [@dgrisonnet](https://github.com/dgrisonnet)) [SIG API Machinery and Instrumentation] -- Fixes code generation for non-namespaced create subresources fake client test. ([#96586](https://github.com/kubernetes/kubernetes/pull/96586), [@Doude](https://github.com/Doude)) [SIG API Machinery] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Kubeadm: fix coredns migration should be triggered when there are newdefault configs during kubeadm upgrade ([#96907](https://github.com/kubernetes/kubernetes/pull/96907), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] -- Reduce volume name length for vsphere volumes ([#96533](https://github.com/kubernetes/kubernetes/pull/96533), [@gnufied](https://github.com/gnufied)) [SIG Storage] -- Resolves a regression in 1.19+ with workloads targeting deprecated beta os/arch labels getting stuck in NodeAffinity status on node startup. 
([#96810](https://github.com/kubernetes/kubernetes/pull/96810), [@liggitt](https://github.com/liggitt)) [SIG Node]
+- Add ability to skip OpenAPI handler installation to the GenericAPIServer ([#100341](https://github.com/kubernetes/kubernetes/pull/100341), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery]
+- Count pod overhead against an entity's ResourceQuota ([#99600](https://github.com/kubernetes/kubernetes/pull/99600), [@gjkim42](https://github.com/gjkim42)) [SIG API Machinery and Node]
+- EndpointSlice controllers are less likely to create duplicate EndpointSlices. ([#100103](https://github.com/kubernetes/kubernetes/pull/100103), [@robscott](https://github.com/robscott)) [SIG Apps and Network]
+- Ensure only one LoadBalancer rule is created when HA mode is enabled ([#99825](https://github.com/kubernetes/kubernetes/pull/99825), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider]
+- Fixed a race condition on API server startup ensuring previously created webhook configurations are effective before the first write request is admitted. ([#95783](https://github.com/kubernetes/kubernetes/pull/95783), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery]
+- Fixed authentication_duration_seconds metric. Previously it included whole apiserver request duration. ([#99944](https://github.com/kubernetes/kubernetes/pull/99944), [@marseel](https://github.com/marseel)) [SIG API Machinery, Instrumentation and Scalability]
+- Fixes issue where inline AzureFile secrets could not be accessed from the pod's namespace. ([#100563](https://github.com/kubernetes/kubernetes/pull/100563), [@msau42](https://github.com/msau42)) [SIG Storage]
+- Improve speed of vSphere PV provisioning and reduce number of API calls ([#100054](https://github.com/kubernetes/kubernetes/pull/100054), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage]
+- Kubectl: Fixed panic when describing an ingress backend without an API Group ([#100505](https://github.com/kubernetes/kubernetes/pull/100505), [@lauchokyip](https://github.com/lauchokyip)) [SIG CLI]
+- Kubectl: fix case of age column in describe node (#96963, @bl-ue) ([#96963](https://github.com/kubernetes/kubernetes/pull/96963), [@bl-ue](https://github.com/bl-ue)) [SIG CLI]
+- Kubelet.exe on Windows now checks that the process is running as administrator and that the executing user account is listed in the built-in administrators group. This is the equivalent of checking that the process is running as uid 0. ([#96616](https://github.com/kubernetes/kubernetes/pull/96616), [@perithompson](https://github.com/perithompson)) [SIG Node and Windows]
+- Kubelet: Fixed a bug in getting the number of CPUs on Windows when the number of logical processors is more than 64 ([#97378](https://github.com/kubernetes/kubernetes/pull/97378), [@hwdef](https://github.com/hwdef)) [SIG Node and Windows]
+- Pass `KUBE_BUILD_CONFORMANCE=y` to the package-tarballs to reenable building the conformance tarballs. ([#100571](https://github.com/kubernetes/kubernetes/pull/100571), [@puerco](https://github.com/puerco)) [SIG Release]
+- Pod Log stats for Windows now report metrics ([#99221](https://github.com/kubernetes/kubernetes/pull/99221), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node, Storage, Testing and Windows]
+
+### Other (Cleanup or Flake)
+
+- A new storage E2E testsuite covers CSIStorageCapacity publishing if a driver opts into the test. 
([#100537](https://github.com/kubernetes/kubernetes/pull/100537), [@pohly](https://github.com/pohly)) [SIG Storage and Testing]
+- Convert cmd/kubelet/app/server.go to structured logging ([#98334](https://github.com/kubernetes/kubernetes/pull/98334), [@wawa0210](https://github.com/wawa0210)) [SIG Node]
+- If the kube-apiserver has the goaway feature enabled, clients require golang 1.15.8 or 1.16+ to avoid an unexpected data race issue. ([#98809](https://github.com/kubernetes/kubernetes/pull/98809), [@answer1991](https://github.com/answer1991)) [SIG API Machinery]
+- Increased CSINodeIDMaxLength from 128 bytes to 192 bytes. ([#98753](https://github.com/kubernetes/kubernetes/pull/98753), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps and Storage]
+- Migrate `pkg/kubelet/pluginmanager` to structured logging ([#99885](https://github.com/kubernetes/kubernetes/pull/99885), [@qingwave](https://github.com/qingwave)) [SIG Node]
+- Migrate `pkg/kubelet/preemption/preemption.go` and `pkg/kubelet/logs/container_log_manager.go` to structured logging ([#99848](https://github.com/kubernetes/kubernetes/pull/99848), [@qingwave](https://github.com/qingwave)) [SIG Node]
+- Migrate `pkg/kubelet/(cri)` to structured logging ([#99006](https://github.com/kubernetes/kubernetes/pull/99006), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/(node, pod)` to structured logging ([#98847](https://github.com/kubernetes/kubernetes/pull/98847), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/(volume,container)` to structured logging ([#98850](https://github.com/kubernetes/kubernetes/pull/98850), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node]
+- Migrate `pkg/kubelet/kubelet_node_status.go` to structured logging ([#98154](https://github.com/kubernetes/kubernetes/pull/98154), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node and Release]
+- Migrate `pkg/kubelet/lifecycle,oom` to structured logging ([#99479](https://github.com/kubernetes/kubernetes/pull/99479), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG Instrumentation and Node]
+- Migrate cmd/kubelet/ + pkg/kubelet/cadvisor/cadvisor_linux.go + pkg/kubelet/cri/remote/util/util_unix.go + pkg/kubelet/images/image_manager.go to structured logging ([#99994](https://github.com/kubernetes/kubernetes/pull/99994), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/container_manager_linux.go and pkg/kubelet/cm/container_manager_stub.go to structured logging ([#100001](https://github.com/kubernetes/kubernetes/pull/100001), [@shiyajuan123](https://github.com/shiyajuan123)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/cpumanager/{topology/topology.go, policy_none.go, cpu_assignment.go} to structured logging ([#100163](https://github.com/kubernetes/kubernetes/pull/100163), [@lala123912](https://github.com/lala123912)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/cm/cpumanager/state to structured logging ([#99563](https://github.com/kubernetes/kubernetes/pull/99563), [@jmguzik](https://github.com/jmguzik)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/config to structured logging ([#100002](https://github.com/kubernetes/kubernetes/pull/100002), [@AfrouzMashayekhi](https://github.com/AfrouzMashayekhi)) [SIG Instrumentation and Node]
+- Migrate pkg/kubelet/kubelet.go to structured logging 
([#99861](https://github.com/kubernetes/kubernetes/pull/99861), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/kubeletconfig to structured logging ([#100265](https://github.com/kubernetes/kubernetes/pull/100265), [@ehashman](https://github.com/ehashman)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime to structured logging ([#99970](https://github.com/kubernetes/kubernetes/pull/99970), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/prober to structured logging ([#99830](https://github.com/kubernetes/kubernetes/pull/99830), [@krzysiekg](https://github.com/krzysiekg)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/winstats to structured logging ([#99855](https://github.com/kubernetes/kubernetes/pull/99855), [@hexxdump](https://github.com/hexxdump)) [SIG Instrumentation and Node] +- Migrate probe log messages to structured logging ([#97093](https://github.com/kubernetes/kubernetes/pull/97093), [@aldudko](https://github.com/aldudko)) [SIG Instrumentation and Node] +- Migrate remaining kubelet files to structured logging ([#100196](https://github.com/kubernetes/kubernetes/pull/100196), [@ehashman](https://github.com/ehashman)) [SIG Instrumentation and Node] +- `apiserver_storage_objects` (a newer version of `etcd_object_counts) is promoted and marked as stable. ([#100082](https://github.com/kubernetes/kubernetes/pull/100082), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] ## Dependencies @@ -967,411 +774,411 @@ filename | sha512 hash _Nothing has changed._ ### Changed -- github.com/google/cadvisor: [v0.38.4 → v0.38.5](https://github.com/google/cadvisor/compare/v0.38.4...v0.38.5) +- github.com/cilium/ebpf: [1c8d4c9 → v0.2.0](https://github.com/cilium/ebpf/compare/1c8d4c9...v0.2.0) +- github.com/containerd/console: [v1.0.0 → v1.0.1](https://github.com/containerd/console/compare/v1.0.0...v1.0.1) +- github.com/containerd/containerd: [v1.4.1 → v1.4.4](https://github.com/containerd/containerd/compare/v1.4.1...v1.4.4) +- github.com/creack/pty: [v1.1.9 → v1.1.11](https://github.com/creack/pty/compare/v1.1.9...v1.1.11) +- github.com/docker/docker: [bd33bbf → v20.10.2+incompatible](https://github.com/docker/docker/compare/bd33bbf...v20.10.2) +- github.com/google/cadvisor: [v0.38.8 → v0.39.0](https://github.com/google/cadvisor/compare/v0.38.8...v0.39.0) +- github.com/konsorten/go-windows-terminal-sequences: [v1.0.3 → v1.0.2](https://github.com/konsorten/go-windows-terminal-sequences/compare/v1.0.3...v1.0.2) +- github.com/moby/sys/mountinfo: [v0.1.3 → v0.4.0](https://github.com/moby/sys/mountinfo/compare/v0.1.3...v0.4.0) +- github.com/moby/term: [672ec06 → df9cb8a](https://github.com/moby/term/compare/672ec06...df9cb8a) +- github.com/mrunalp/fileutils: [abd8a0e → v0.5.0](https://github.com/mrunalp/fileutils/compare/abd8a0e...v0.5.0) +- github.com/opencontainers/runc: [v1.0.0-rc92 → v1.0.0-rc93](https://github.com/opencontainers/runc/compare/v1.0.0-rc92...v1.0.0-rc93) +- github.com/opencontainers/runtime-spec: [4d89ac9 → e6143ca](https://github.com/opencontainers/runtime-spec/compare/4d89ac9...e6143ca) +- github.com/opencontainers/selinux: [v1.6.0 → v1.8.0](https://github.com/opencontainers/selinux/compare/v1.6.0...v1.8.0) +- github.com/sirupsen/logrus: [v1.6.0 → v1.7.0](https://github.com/sirupsen/logrus/compare/v1.6.0...v1.7.0) +- github.com/syndtr/gocapability: [d983527 → 42c35b4](https://github.com/syndtr/gocapability/compare/d983527...42c35b4) +- 
github.com/willf/bitset: [d5bec33 → v1.1.11](https://github.com/willf/bitset/compare/d5bec33...v1.1.11) +- gotest.tools/v3: v3.0.2 → v3.0.3 +- k8s.io/klog/v2: v2.5.0 → v2.8.0 +- sigs.k8s.io/structured-merge-diff/v4: v4.0.3 → v4.1.0 ### Removed _Nothing has changed._ -# v1.20.0-beta.2 +# v1.21.0-beta.1 -## Downloads for v1.20.0-beta.2 +## Downloads for v1.21.0-beta.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes.tar.gz) | fe769280aa623802a949b6a35fbddadbba1d6f9933a54132a35625683719595ecf58096a9aa0f7456f8d4931774df21bfa98e148bc3d85913f1da915134f77bd -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-src.tar.gz) | ce1c8d97c52e5189af335d673bd7e99c564816f6adebf249838f7e3f0e920f323b4e398a5d163ea767091497012ec38843c59ff14e6fdd07683b682135eed645 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes.tar.gz) | c9f4f25242e319e5d90f49d26f239a930aad69677c0f3c2387c56bb13482648a26ed234be2bfe2352508f35010e3eb6d3b127c31a9f24fa1e53ac99c38520fe4 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-src.tar.gz) | 255357db8fa160cab2187658906b674a8b0d9b9a5b5f688cc7b69dc124f5da00362c6cc18ae9b80f7ddb3da6f64c2ab2f12fb9b63a4e063c7366a5375b175cda ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-darwin-amd64.tar.gz) | d6c14bd0f6702f4bbdf14a6abdfa4e5936de5b4efee38aa86c2bd7272967ec6d7868b88fc00ad4a7c3a20717a35e6be2b84e56dec04154fd702315f641409f7c -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-386.tar.gz) | b923c44cb0acb91a8f6fd442c2168aa6166c848f5d037ce50a7cb11502be3698db65836b373c916f75b648d6ac8d9158807a050eecc4e1c77cffa25b386c8cdb -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-amd64.tar.gz) | 8cae14146a9034dcd4e9d69d5d700f195a77aac35f629a148960ae028ed8b4fe12213993fe3e6e464b4b3e111adebe6f3dd7ca0accc70c738ed5cfd8993edd7c -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm.tar.gz) | 1f54e5262a0432945ead57fcb924e6bfedd9ea76db1dd9ebd946787a2923c247cf16e10505307b47e365905a1b398678dac5af0f433c439c158a33e08362d97b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-arm64.tar.gz) | 31cf79c01e4878a231b4881fe3ed5ef790bd5fb5419388438d3f8c6a2129e655aba9e00b8e1d77e0bc5d05ecc75cf4ae02cf8266788822d0306c49c85ee584ed -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-ppc64le.tar.gz) | 2527948c40be2e16724d939316ad5363f15aa22ebf42d59359d8b6f757d30cfef6447434cc93bc5caa5a23a6a00a2da8d8191b6441e06bba469d9d4375989a97 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-linux-s390x.tar.gz) | b777ad764b3a46651ecb0846e5b7f860bb2c1c4bd4d0fcc468c6ccffb7d3b8dcb6dcdd73b13c16ded7219f91bba9f1e92f9258527fd3bb162b54d7901ac303ff -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-386.tar.gz) | 8a2f58aaab01be9fe298e4d01456536047cbdd39a37d3e325c1f69ceab3a0504998be41a9f41a894735dfc4ed22bed02591eea5f3c75ce12d9e95ba134e72ec5 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-client-windows-amd64.tar.gz) | 2f69cda177a178df149f5de66b7dba7f5ce14c1ffeb7c8d7dc4130c701b47d89bb2fbe74e7a262f573e4d21dee2c92414d050d7829e7c6fc3637a9d6b0b9c5c1 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | 02efd389c8126456416fd2c7ea25c3cc30f612649ad91f631f068d6c0e5e539484d3763cb9a8645ad6b8077e4fcd1552a659d7516ebc4ce6828cf823b65c3016 +[kubernetes-client-darwin-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-darwin-arm64.tar.gz) | ac90dcd1699d1d7ff9c8342d481f6d0d97ccdc3ec501a56dc7c9e1898a8f77f712bf66942d304bfe581b5494f13e3efa211865de88f89749780e9e26e673dbdb +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-386.tar.gz) | cce5fb84cc7a1ee664f89d8ad3064307c51c044e9ddd2ae5a004939b69d3b3ef6f29acc5782e27d0c8f0d6d3d9c96e922f5d1b99d210ca3e754666d775df9f0c +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 2e93bbd2e60ad7cd8fe495115e96c55b1dc8facd100a827ef9c197a732679b60cceb9ea7bf92a1f5e328c3b8adfa8d3922cbc5d8370e374f3381b83f5b877b4f +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm.tar.gz) | 23f03b6a8fa9decce9b89a2c1bd3dae6d0b2f9e533e35a79e2c5a29326a165259677594ae83c877219a21bdb95557a284e55f4eec12954742794579c89a7d7e5 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3acf3101b46568b0ded6b90f13df0e918870d6812dc1a584903ddb8ba146484a204b9e442f863df47c7d4dab043fd9f7294c5510d3eb09004993d6d3b1e9e13c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | f749198df69577f62872d3096138a1b8969ec6b1636eb68eb56640bf33cf5f97a11df4363462749a1c0dc3ccbb8ae76c5d66864bf1c5cf7e52599caaf498e504 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | 3f6c0189d59fca22cdded3a02c672ef703d17e6ab0831e173a870e14ccec436c142600e9fc35b403571b6906f2be8d18d38d33330f7caada971bbe1187b388f6 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-386.tar.gz) | 03d92371c425cf331c80807c0ac56f953be304fc6719057258a363d527d186d610e1d4b4d401b34128062983265c2e21f2d2389231aa66a6f5787eee78142cf6 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | 489ece0c886a025ca3a25d28518637a5a824ea6544e7ef8778321036f13c8909a978ad4ceca966cec1e1cda99f25ca78bfd37460d1231c77436d216d43c872ad ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-amd64.tar.gz) | 3ecaac0213d369eab691ac55376821a80df5013cb12e1263f18d1c236a9e49d42b3cea422175556d8f929cdf3109b22c0b6212ac0f2e80cc7a5f4afa3aba5f24 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm.tar.gz) | 580030b57ff207e177208fec0801a43389cae10cc2c9306327d354e7be6a055390184531d54b6742e0983550b7a76693cc4a705c2d2f4ac30495cf63cef26b9b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-arm64.tar.gz) | 3e3286bd54671549fbef0dfdaaf1da99bc5c3efb32cc8d1e1985d9926520cea0c43bcf7cbcbbc8b1c1a95eab961255693008af3bb1ba743362998b5f0017d6d7 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-ppc64le.tar.gz) | 9fa051e7e97648e97e26b09ab6d26be247b41b1a5938d2189204c9e6688e455afe76612bbcdd994ed5692935d0d960bd96dc222bce4b83f61d62557752b9d75b 
-[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-server-linux-s390x.tar.gz) | fa85d432eff586f30975c95664ac130b9f5ae02dc52b97613ed7a41324496631ea11d1a267daba564cf2485a9e49707814d86bbd3175486c7efc8b58a9314af5 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 2e95cb31d5afcb6842c41d25b7d0c18dd7e65693b2d93c8aa44e5275f9c6201e1a67685c7a8ddefa334babb04cb559d26e39b6a18497695a07dc270568cae108 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 2927e82b98404c077196ce3968f3afd51a7576aa56d516019bd3976771c0213ba01e78da5b77478528e770da0d334e9457995fafb98820ed68b2ee34beb68856 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | e0f7aea3ea598214a9817bc04949389cb7e4e7b9503141a590ef48c0b681fe44a4243ebc6280752fa41aa1093149b3ee1bcef7664edb746097a342281825430b +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | c011f7eb01294e9ba5d5ced719068466f88ed595dcb8d554a36a4dd5118fb6b3d6bafe8bf89aa2d42988e69793ed777ba77b8876c6ec74f898a43cfce1f61bf4 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 15f6683e7f16caab7eebead2b7c15799460abbf035a43de0b75f96b0be19908f58add98a777a0cca916230d60cf6bfe3fee92b9dcff50274b1e37c243c157969 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-amd64.tar.gz) | 86e631f95fe670b467ead2b88d34e0364eaa275935af433d27cc378d82dcaa22041ccce40f5fa9561b9656dadaa578dc018ad458a59b1690d35f86dca4776b5c -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm.tar.gz) | a8754ff58a0e902397056b8615ab49af07aca347ba7cc4a812c238e3812234862270f25106b6a94753b157bb153b8eae8b39a01ed67384774d798598c243583b -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-arm64.tar.gz) | 28d727d7d08e2c856c9b4a574ef2dbf9e37236a0555f7ec5258b4284fa0582fb94b06783aaf50bf661f7503d101fbd70808aba6de02a2f0af94db7d065d25947 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-ppc64le.tar.gz) | a1283449f1a0b155c11449275e9371add544d0bdd4609d6dc737ed5f7dd228e84e24ff249613a2a153691627368dd894ad64f4e6c0010eecc6efd2c13d4fb133 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-linux-s390x.tar.gz) | 5806028ba15a6a9c54a34f90117bc3181428dbb0e7ced30874c9f4a953ea5a0e9b2c73e6b1e2545e1b4e5253e9c7691588538b44cdfa666ce6865964b92d2fa8 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.2/kubernetes-node-windows-amd64.tar.gz) | d5327e3b7916c78777b9b69ba0f3758c3a8645c67af80114a0ae52babd7af27bb504febbaf51b1bfe5bd2d74c8c5c573471e1cb449f2429453f4b1be9d5e682a +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | ed58679561197110f366b9109f7afd62c227bfc271918ccf3eea203bb2ab6428eb5db4dd6c965f202a8a636f66da199470269b863815809b99d53d2fa47af2ea +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 7e6c7f1957fcdecec8fef689c5019edbc0d0c11d22dafbfef0a07121d10d8f6273644f73511bd06a9a88b04d81a940bd6645ffb5711422af64af547a45c76273 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 
a3618f29967e7a1574917a67f0296e65780321eda484b99aa32bfd4dc9b35acdefce33da952ac52dfb509fbac5bf700cf177431fad2ab4adcab0544538939faa +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 326d3eb521b41bdf489912177f70b8cdd7cd828bb9b3d847ed3694eb27e457f24e0a88b8e51b726eee39800a3c5a40c1b30e3a8ec4a34d8041b3d8ef05d1b749 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | 022d05ebaa66a0332c4fe18cdaf23d14c2c7e4d1f2af7f27baaf1eb042e6890dc3434b4ac8ba58c35d590717956f8c3458112685aff4938b94b18e263c3f4256 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | fa691ed93f07af6bc1cf57e20a30580d6c528f88e5fea3c14f39c1820969dc5a0eb476c5b87b288593d0c086c4dd93aff6165082393283c3f46c210f9bb66d61 -## Changelog since v1.20.0-beta.1 +## Changelog since v1.21.0-beta.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - A bug was fixed in kubelet where exec probe timeouts were not respected. Ensure that pods relying on this behavior are updated to correctly handle probe timeouts. - - This change in behavior may be unexpected for some clusters and can be disabled by turning off the ExecProbeTimeout feature gate. This gate will be locked and removed in future releases so that exec probe timeouts are always respected. ([#94115](https://github.com/kubernetes/kubernetes/pull/94115), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Testing] - - For CSI drivers, kubelet no longer creates the target_path for NodePublishVolume in accordance with the CSI spec. Kubelet also no longer checks if staging and target paths are mounts or corrupted. CSI drivers need to be idempotent and do any necessary mount verification. ([#88759](https://github.com/kubernetes/kubernetes/pull/88759), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] - - Kubeadm: - - The label applied to control-plane nodes "node-role.kubernetes.io/master" is now deprecated and will be removed in a future release after a GA deprecation period. - - Introduce a new label "node-role.kubernetes.io/control-plane" that will be applied in parallel to "node-role.kubernetes.io/master" until the removal of the "node-role.kubernetes.io/master" label. - - Make "kubeadm upgrade apply" add the "node-role.kubernetes.io/control-plane" label on existing nodes that only have the "node-role.kubernetes.io/master" label during upgrade. - - Please adapt your tooling built on top of kubeadm to use the "node-role.kubernetes.io/control-plane" label. - - - The taint applied to control-plane nodes "node-role.kubernetes.io/master:NoSchedule" is now deprecated and will be removed in a future release after a GA deprecation period. - - Apply toleration for a new, future taint "node-role.kubernetes.io/control-plane:NoSchedule" to the kubeadm CoreDNS / kube-dns managed manifests. Note that this taint is not yet applied to kubeadm control-plane nodes. - - Please adapt your workloads to tolerate the same future taint preemptively. - - For more details see: http://git.k8s.io/enhancements/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md ([#95382](https://github.com/kubernetes/kubernetes/pull/95382), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] - + - Kubeadm: during "init" an empty cgroupDriver value in the KubeletConfiguration is now always set to "systemd" unless the user is explicit about it. 
This requires existing machine setups to configure the container runtime to use the "systemd" driver. Documentation on this topic can be found here: https://kubernetes.io/docs/setup/production-environment/container-runtimes/. When upgrading existing clusters / nodes using "kubeadm upgrade" the old cgroupDriver value is preserved, but in 1.22 this change will also apply to "upgrade". For more information on migrating to the "systemd" driver or remaining on the "cgroupfs" driver see: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/. ([#99471](https://github.com/kubernetes/kubernetes/pull/99471), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
+ - Migrate `pkg/kubelet/(dockershim, network)` to structured logging
+ Exit code changed from 255 to 1 ([#98939](https://github.com/kubernetes/kubernetes/pull/98939), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Network and Node]
+ - Migrate `pkg/kubelet/certificate` to structured logging
+ Exit code changed from 255 to 1 ([#98993](https://github.com/kubernetes/kubernetes/pull/98993), [@SataQiu](https://github.com/SataQiu)) [SIG Auth and Node]
+ - Newly provisioned PVs by EBS plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99130](https://github.com/kubernetes/kubernetes/pull/99130), [@ayberk](https://github.com/ayberk)) [SIG Cloud Provider, Storage and Testing]
+ - Newly provisioned PVs by OpenStack Cinder plugin will no longer use the deprecated "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" labels. It will use "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" labels instead. ([#99719](https://github.com/kubernetes/kubernetes/pull/99719), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider and Storage]
+ - OpenStack Cinder CSI migration is on by default, the Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. ([#98538](https://github.com/kubernetes/kubernetes/pull/98538), [@dims](https://github.com/dims)) [SIG Storage]
+ - Package pkg/kubelet/server migrated to structured logging
+ Exit code changed from 255 to 1 ([#99838](https://github.com/kubernetes/kubernetes/pull/99838), [@adisky](https://github.com/adisky)) [SIG Node]
+ - Pkg/kubelet/kuberuntime/kuberuntime_manager.go migrated to structured logging
+ Exit code changed from 255 to 1 ([#99841](https://github.com/kubernetes/kubernetes/pull/99841), [@adisky](https://github.com/adisky)) [SIG Instrumentation and Node]
+
 ## Changes by Kind
 
 ### Deprecation
 
-- Docker support in the kubelet is now deprecated and will be removed in a future release. The kubelet uses a module called "dockershim" which implements CRI support for Docker and it has seen maintenance issues in the Kubernetes community. We encourage you to evaluate moving to a container runtime that is a full-fledged implementation of CRI (v1alpha1 or v1 compliant) as they become available. ([#94624](https://github.com/kubernetes/kubernetes/pull/94624), [@dims](https://github.com/dims)) [SIG Node]
-- Kubectl: deprecate --delete-local-data ([#95076](https://github.com/kubernetes/kubernetes/pull/95076), [@dougsland](https://github.com/dougsland)) [SIG CLI, Cloud Provider and Scalability]
+- Kubeadm: the deprecated kube-dns is no longer supported as an option. 
If "ClusterConfiguration.dns.type" is set to "kube-dns" kubeadm will now throw an error. ([#99646](https://github.com/kubernetes/kubernetes/pull/99646), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Remove deprecated --generator --replicas --service-generator --service-overrides --schedule from kubectl run + Deprecate --serviceaccount --hostport --requests --limits in kubectl run ([#99732](https://github.com/kubernetes/kubernetes/pull/99732), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] +- `audit.k8s.io/v1beta1` and `audit.k8s.io/v1alpha1` audit policy configuration and audit events are deprecated in favor of `audit.k8s.io/v1`, available since v1.13. kube-apiserver invocations that specify alpha or beta policy configurations with `--audit-policy-file`, or explicitly request alpha or beta audit events with `--audit-log-version` / `--audit-webhook-version` must update to use `audit.k8s.io/v1` and accept `audit.k8s.io/v1` events prior to v1.24. ([#98858](https://github.com/kubernetes/kubernetes/pull/98858), [@carlory](https://github.com/carlory)) [SIG Auth] +- `diskformat` stroage class parameter for in-tree vSphere volume plugin is deprecated as of v1.21 release. Please consider updating storageclass and remove `diskformat` parameter. vSphere CSI Driver does not support diskformat storageclass parameter. + + vSphere releases less than 67u3 are deprecated as of v1.21. Please consider upgrading vSphere to 67u3 or above. vSphere CSI Driver requires minimum vSphere 67u3. + + VM Hardware version less than 15 is deprecated as of v1.21. Please consider upgrading the Node VM Hardware version to 15 or above. vSphere CSI Driver recommends Node VM's Hardware version set to at least vmx-15. + + Multi vCenter support is deprecated as of v1.21. If you have a Kubernetes cluster spanning across multiple vCenter servers, please consider moving all k8s nodes to a single vCenter Server. vSphere CSI Driver does not support Kubernetes deployment spanning across multiple vCenter servers. + + Support for these deprecations will be available till Kubernetes v1.24. ([#98546](https://github.com/kubernetes/kubernetes/pull/98546), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] ### API Change -- API priority and fairness graduated to beta - 1.19 servers with APF turned on should not be run in a multi-server cluster with 1.20+ servers. ([#96527](https://github.com/kubernetes/kubernetes/pull/96527), [@adtac](https://github.com/adtac)) [SIG API Machinery and Testing] -- Add LoadBalancerIPMode feature gate ([#92312](https://github.com/kubernetes/kubernetes/pull/92312), [@Sh4d1](https://github.com/Sh4d1)) [SIG Apps, CLI, Cloud Provider and Network] -- Add WindowsContainerResources and Annotations to CRI-API UpdateContainerResourcesRequest ([#95741](https://github.com/kubernetes/kubernetes/pull/95741), [@katiewasnothere](https://github.com/katiewasnothere)) [SIG Node] -- Add a 'serving' and `terminating` condition to the EndpointSlice API. - - `serving` tracks the readiness of endpoints regardless of their terminating state. This is distinct from `ready` since `ready` is only true when pods are not terminating. - `terminating` is true when an endpoint is terminating. For pods this is any endpoint with a deletion timestamp. 
([#92968](https://github.com/kubernetes/kubernetes/pull/92968), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps and Network] -- Add support for hugepages to downward API ([#86102](https://github.com/kubernetes/kubernetes/pull/86102), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Adds kubelet alpha feature, `GracefulNodeShutdown` which makes kubelet aware of node system shutdowns and result in graceful termination of pods during a system shutdown. ([#96129](https://github.com/kubernetes/kubernetes/pull/96129), [@bobbypage](https://github.com/bobbypage)) [SIG Node] -- AppProtocol is now GA for Endpoints and Services. The ServiceAppProtocol feature gate will be deprecated in 1.21. ([#96327](https://github.com/kubernetes/kubernetes/pull/96327), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Automatic allocation of NodePorts for services with type LoadBalancer can now be disabled by setting the (new) parameter - Service.spec.allocateLoadBalancerNodePorts=false. The default is to allocate NodePorts for services with type LoadBalancer which is the existing behavior. ([#92744](https://github.com/kubernetes/kubernetes/pull/92744), [@uablrek](https://github.com/uablrek)) [SIG Apps and Network] -- Document that ServiceTopology feature is required to use `service.spec.topologyKeys`. ([#96528](https://github.com/kubernetes/kubernetes/pull/96528), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] -- EndpointSlice has a new NodeName field guarded by the EndpointSliceNodeName feature gate. - - EndpointSlice topology field will be deprecated in an upcoming release. - - EndpointSlice "IP" address type is formally removed after being deprecated in Kubernetes 1.17. - - The discovery.k8s.io/v1alpha1 API is deprecated and will be removed in Kubernetes 1.21. ([#96440](https://github.com/kubernetes/kubernetes/pull/96440), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps and Network] -- Fewer candidates are enumerated for preemption to improve performance in large clusters ([#94814](https://github.com/kubernetes/kubernetes/pull/94814), [@adtac](https://github.com/adtac)) [SIG Scheduling] -- If BoundServiceAccountTokenVolume is enabled, cluster admins can use metric `serviceaccount_stale_tokens_total` to monitor workloads that are depending on the extended tokens. If there are no such workloads, turn off extended tokens by starting `kube-apiserver` with flag `--service-account-extend-token-expiration=false` ([#96273](https://github.com/kubernetes/kubernetes/pull/96273), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] -- Introduce alpha support for exec-based container registry credential provider plugins in the kubelet. ([#94196](https://github.com/kubernetes/kubernetes/pull/94196), [@andrewsykim](https://github.com/andrewsykim)) [SIG Node and Release] -- Kube-apiserver now deletes expired kube-apiserver Lease objects: - - The feature is under feature gate `APIServerIdentity`. - - A flag is added to kube-apiserver: `identity-lease-garbage-collection-check-period-seconds` ([#95895](https://github.com/kubernetes/kubernetes/pull/95895), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Apps, Auth and Testing] -- Move configurable fsgroup change policy for pods to beta ([#96376](https://github.com/kubernetes/kubernetes/pull/96376), [@gnufied](https://github.com/gnufied)) [SIG Apps and Storage] -- New flag is introduced, i.e. 
--topology-manager-scope=container|pod.
- The default value is the "container" scope. ([#92967](https://github.com/kubernetes/kubernetes/pull/92967), [@cezaryzukowski](https://github.com/cezaryzukowski)) [SIG Instrumentation, Node and Testing]
-- NodeAffinity plugin can be configured with AddedAffinity. ([#96202](https://github.com/kubernetes/kubernetes/pull/96202), [@alculquicondor](https://github.com/alculquicondor)) [SIG Node, Scheduling and Testing]
-- Promote RuntimeClass feature to GA.
- Promote node.k8s.io API groups from v1beta1 to v1. ([#95718](https://github.com/kubernetes/kubernetes/pull/95718), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Apps, Auth, Node, Scheduling and Testing]
-- Reminder: The labels "failure-domain.beta.kubernetes.io/zone" and "failure-domain.beta.kubernetes.io/region" are deprecated in favor of "topology.kubernetes.io/zone" and "topology.kubernetes.io/region" respectively. All users of the "failure-domain.beta..." labels should switch to the "topology..." equivalents. ([#96033](https://github.com/kubernetes/kubernetes/pull/96033), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, CLI, Cloud Provider, Network, Node, Scheduling, Storage and Testing]
-- The usage of mixed protocol values in the same LoadBalancer Service is possible if the new feature gate MixedProtocolLBSVC is enabled.
- "action required"
- The feature gate is disabled by default. The user has to enable it for the API Server. ([#94028](https://github.com/kubernetes/kubernetes/pull/94028), [@janosi](https://github.com/janosi)) [SIG API Machinery and Apps]
-- This PR will introduce a feature gate CSIServiceAccountToken with two additional fields in `CSIDriverSpec`. ([#93130](https://github.com/kubernetes/kubernetes/pull/93130), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing]
-- Users can try the cronjob controller v2 using the feature gate. This will be the default controller in future releases. ([#93370](https://github.com/kubernetes/kubernetes/pull/93370), [@alaypatel07](https://github.com/alaypatel07)) [SIG API Machinery, Apps, Auth and Testing]
-- VolumeSnapshotDataSource moves to GA in 1.20 release ([#95282](https://github.com/kubernetes/kubernetes/pull/95282), [@xing-yang](https://github.com/xing-yang)) [SIG Apps]
+- 1. PodAffinityTerm includes a namespaceSelector field to allow selecting eligible namespaces based on their labels.
+ 2. A new CrossNamespacePodAffinity quota scope API that allows restricting which namespaces are allowed to use PodAffinityTerm with cross-namespace reference via namespaceSelector or namespaces fields. ([#98582](https://github.com/kubernetes/kubernetes/pull/98582), [@ahg-g](https://github.com/ahg-g)) [SIG API Machinery, Apps, Auth and Testing]
+- Add a default metadata name label for selecting any namespace by its name. 
([#96968](https://github.com/kubernetes/kubernetes/pull/96968), [@jayunit100](https://github.com/jayunit100)) [SIG API Machinery, Apps, Cloud Provider, Storage and Testing]
+- Added `.spec.completionMode` field to Job, with accepted values `NonIndexed` (default) and `Indexed` ([#98441](https://github.com/kubernetes/kubernetes/pull/98441), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI]
+- Clarified NetworkPolicy policyTypes documentation ([#97216](https://github.com/kubernetes/kubernetes/pull/97216), [@joejulian](https://github.com/joejulian)) [SIG Network]
+- DaemonSets accept a MaxSurge integer or percent on their rolling update strategy that will launch the updated pod on nodes and wait for those pods to go ready before marking the old out-of-date pods as deleted. This allows workloads to avoid downtime during upgrades when deployed using DaemonSets. This feature is alpha and is behind the DaemonSetUpdateSurge feature gate. ([#96441](https://github.com/kubernetes/kubernetes/pull/96441), [@smarterclayton](https://github.com/smarterclayton)) [SIG Apps and Testing]
+- EndpointSlice API is now GA. The EndpointSlice topology field has been removed from the GA API and will be replaced by a new per Endpoint Zone field. If the topology field was previously used, it will be converted into an annotation in the v1 Resource. The discovery.k8s.io/v1alpha1 API is removed. ([#99662](https://github.com/kubernetes/kubernetes/pull/99662), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network and Testing]
+- EndpointSlice Controllers are now GA. The EndpointSlice Controller will not populate the `deprecatedTopology` field and will only provide topology information through the `zone` and `nodeName` fields. ([#99870](https://github.com/kubernetes/kubernetes/pull/99870), [@swetharepakula](https://github.com/swetharepakula)) [SIG API Machinery, Apps, Auth, Network and Testing]
+- IngressClass resource can now reference a resource in a specific namespace
+ for implementation-specific configuration (previously only Cluster-level resources were allowed).
+ This feature can be enabled using the IngressClassNamespacedParams feature gate. ([#99275](https://github.com/kubernetes/kubernetes/pull/99275), [@hbagdi](https://github.com/hbagdi)) [SIG API Machinery, CLI and Network]
+- Introduce conditions for PodDisruptionBudget ([#98127](https://github.com/kubernetes/kubernetes/pull/98127), [@mortent](https://github.com/mortent)) [SIG API Machinery, Apps, Auth, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation]
+- Jobs API has a new .spec.suspend field that can be used to suspend and resume Jobs ([#98727](https://github.com/kubernetes/kubernetes/pull/98727), [@adtac](https://github.com/adtac)) [SIG API Machinery, Apps, Node, Scheduling and Testing]
+- Kubelet Graceful Node Shutdown feature is now beta. ([#99735](https://github.com/kubernetes/kubernetes/pull/99735), [@bobbypage](https://github.com/bobbypage)) [SIG Node]
+- Limit the request value of hugepages to an integer multiple of page size. ([#98515](https://github.com/kubernetes/kubernetes/pull/98515), [@lala123912](https://github.com/lala123912)) [SIG Apps]
+- One new field "InternalTrafficPolicy" in Service is added.
+ It specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only.
+ "Cluster" routes internal traffic to a Service to all endpoints. 
+ "Local" routes traffic to node-local endpoints only, and traffic is dropped if no node-local endpoints are ready. + The default value is "Cluster". ([#96600](https://github.com/kubernetes/kubernetes/pull/96600), [@maplain](https://github.com/maplain)) [SIG API Machinery, Apps and Network] +- PodSecurityPolicy only stores "generic" as allowed volume type if the GenericEphemeralVolume feature gate is enabled ([#98918](https://github.com/kubernetes/kubernetes/pull/98918), [@pohly](https://github.com/pohly)) [SIG Auth and Security] +- Promote CronJobs to batch/v1 ([#99423](https://github.com/kubernetes/kubernetes/pull/99423), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] +- Remove support for building Kubernetes with bazel. ([#99561](https://github.com/kubernetes/kubernetes/pull/99561), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery, Apps, Architecture, Auth, Autoscaling, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Scheduling, Storage, Testing and Windows] +- Setting loadBalancerClass in load balancer type of service is available with this PR. + Users who want to use a custom load balancer can specify loadBalancerClass to achieve it. ([#98277](https://github.com/kubernetes/kubernetes/pull/98277), [@XudongLiuHarold](https://github.com/XudongLiuHarold)) [SIG API Machinery, Apps, Cloud Provider and Network] +- Storage capacity tracking (= the CSIStorageCapacity feature) is beta, storage.k8s.io/v1alpha1/VolumeAttachment and storage.k8s.io/v1alpha1/CSIStorageCapacity objects are deprecated ([#99641](https://github.com/kubernetes/kubernetes/pull/99641), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, Scheduling, Storage and Testing] +- Support for Indexed Job: a Job that is considered completed when Pods associated to indexes from 0 to (.spec.completions-1) have succeeded. ([#98812](https://github.com/kubernetes/kubernetes/pull/98812), [@alculquicondor](https://github.com/alculquicondor)) [SIG Apps and CLI] +- The apiserver now resets managedFields that got corrupted by a mutating admission controller. ([#98074](https://github.com/kubernetes/kubernetes/pull/98074), [@kwiesmueller](https://github.com/kwiesmueller)) [SIG API Machinery and Testing] +- `controller.kubernetes.io/pod-deletion-cost` annotation can be set to offer a hint on the cost of deleting a pod compared to other pods belonging to the same ReplicaSet. Pods with lower deletion cost are deleted first. This is an alpha feature. ([#99163](https://github.com/kubernetes/kubernetes/pull/99163), [@ahg-g](https://github.com/ahg-g)) [SIG Apps] ### Feature -- **Additional documentation e.g., KEPs (Kubernetes Enhancement Proposals), usage docs, etc.**: ([#95896](https://github.com/kubernetes/kubernetes/pull/95896), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Cluster Lifecycle] -- A new set of alpha metrics are reported by the Kubernetes scheduler under the `/metrics/resources` endpoint that allow administrators to easily see the resource consumption (requests and limits for all resources on the pods) and compare it to actual pod usage or node capacity. 
([#94866](https://github.com/kubernetes/kubernetes/pull/94866), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Instrumentation, Node and Scheduling] -- Add --experimental-logging-sanitization flag enabling runtime protection from leaking sensitive data in logs ([#96370](https://github.com/kubernetes/kubernetes/pull/96370), [@serathius](https://github.com/serathius)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] -- Add a StorageVersionAPI feature gate that makes API server update storageversions before serving certain write requests. - This feature allows the storage migrator to manage storage migration for built-in resources. - Enabling internal.apiserver.k8s.io/v1alpha1 API and APIServerIdentity feature gate are required to use this feature. ([#93873](https://github.com/kubernetes/kubernetes/pull/93873), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery, Auth and Testing] -- Add a new `vSphere` metric: `cloudprovider_vsphere_vcenter_versions`. It's content show `vCenter` hostnames with the associated server version. ([#94526](https://github.com/kubernetes/kubernetes/pull/94526), [@Danil-Grigorev](https://github.com/Danil-Grigorev)) [SIG Cloud Provider and Instrumentation] -- Add feature to size memory backed volumes ([#94444](https://github.com/kubernetes/kubernetes/pull/94444), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Storage and Testing] -- Add node_authorizer_actions_duration_seconds metric that can be used to estimate load to node authorizer. ([#92466](https://github.com/kubernetes/kubernetes/pull/92466), [@mborsz](https://github.com/mborsz)) [SIG API Machinery, Auth and Instrumentation] -- Add pod_ based CPU and memory metrics to Kubelet's /metrics/resource endpoint ([#95839](https://github.com/kubernetes/kubernetes/pull/95839), [@egernst](https://github.com/egernst)) [SIG Instrumentation, Node and Testing] -- Adds a headless service on node-local-cache addon. ([#88412](https://github.com/kubernetes/kubernetes/pull/88412), [@stafot](https://github.com/stafot)) [SIG Cloud Provider and Network] -- CRDs: For structural schemas, non-nullable null map fields will now be dropped and defaulted if a default is available. null items in list will continue being preserved, and fail validation if not nullable. ([#95423](https://github.com/kubernetes/kubernetes/pull/95423), [@apelisse](https://github.com/apelisse)) [SIG API Machinery] -- E2e test for PodFsGroupChangePolicy ([#96247](https://github.com/kubernetes/kubernetes/pull/96247), [@saikat-royc](https://github.com/saikat-royc)) [SIG Storage and Testing] -- Gradudate the Pod Resources API to G.A - Introduces the pod_resources_endpoint_requests_total metric which tracks the total number of requests to the pod resources API ([#92165](https://github.com/kubernetes/kubernetes/pull/92165), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Instrumentation, Node and Testing] -- Introduce api-extensions category which will return: mutating admission configs, validating admission configs, CRDs and APIServices when used in kubectl get, for example. ([#95603](https://github.com/kubernetes/kubernetes/pull/95603), [@soltysh](https://github.com/soltysh)) [SIG API Machinery] -- Kube-apiserver now maintains a Lease object to identify itself: - - The feature is under feature gate `APIServerIdentity`. 
- - Two flags are added to kube-apiserver: `identity-lease-duration-seconds`, `identity-lease-renew-interval-seconds` ([#95533](https://github.com/kubernetes/kubernetes/pull/95533), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Kube-apiserver: The timeout used when making health check calls to etcd can now be configured with `--etcd-healthcheck-timeout`. The default timeout is 2 seconds, matching the previous behavior. ([#93244](https://github.com/kubernetes/kubernetes/pull/93244), [@Sh4d1](https://github.com/Sh4d1)) [SIG API Machinery] -- Kubectl: Previously users cannot provide arguments to a external diff tool via KUBECTL_EXTERNAL_DIFF env. This release now allow users to specify args to KUBECTL_EXTERNAL_DIFF env. ([#95292](https://github.com/kubernetes/kubernetes/pull/95292), [@dougsland](https://github.com/dougsland)) [SIG CLI] -- Scheduler now ignores Pod update events if the resourceVersion of old and new Pods are identical. ([#96071](https://github.com/kubernetes/kubernetes/pull/96071), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Support custom tags for cloud provider managed resources ([#96450](https://github.com/kubernetes/kubernetes/pull/96450), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support customize load balancer health probe protocol and request path ([#96338](https://github.com/kubernetes/kubernetes/pull/96338), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Support multiple standard load balancers in one cluster ([#96111](https://github.com/kubernetes/kubernetes/pull/96111), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- The beta `RootCAConfigMap` feature gate is enabled by default and causes kube-controller-manager to publish a "kube-root-ca.crt" ConfigMap to every namespace. This ConfigMap contains a CA bundle used for verifying connections to the kube-apiserver. ([#96197](https://github.com/kubernetes/kubernetes/pull/96197), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Apps, Auth and Testing] -- The kubelet_runtime_operations_duration_seconds metric got additional buckets of 60, 300, 600, 900 and 1200 seconds ([#96054](https://github.com/kubernetes/kubernetes/pull/96054), [@alvaroaleman](https://github.com/alvaroaleman)) [SIG Instrumentation and Node] -- There is a new pv_collector_total_pv_count metric that counts persistent volumes by the volume plugin name and volume mode. ([#95719](https://github.com/kubernetes/kubernetes/pull/95719), [@tsmetana](https://github.com/tsmetana)) [SIG Apps, Instrumentation, Storage and Testing] -- Volume snapshot e2e test to validate PVC and VolumeSnapshotContent finalizer ([#95863](https://github.com/kubernetes/kubernetes/pull/95863), [@RaunakShah](https://github.com/RaunakShah)) [SIG Cloud Provider, Storage and Testing] -- Warns user when executing kubectl apply/diff to resource currently being deleted. ([#95544](https://github.com/kubernetes/kubernetes/pull/95544), [@SaiHarshaK](https://github.com/SaiHarshaK)) [SIG CLI] -- `kubectl alpha debug` has graduated to beta and is now `kubectl debug`. ([#96138](https://github.com/kubernetes/kubernetes/pull/96138), [@verb](https://github.com/verb)) [SIG CLI and Testing] -- `kubectl debug` gains support for changing container images when copying a pod for debugging, similar to how `kubectl set image` works. See `kubectl help debug` for more information. 
([#96058](https://github.com/kubernetes/kubernetes/pull/96058), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Updates docs and guidance on cloud provider InstancesV2 and Zones interface for external cloud providers: - - removes experimental warning for InstancesV2 - - document that implementation of InstancesV2 will disable calls to Zones - - deprecate Zones in favor of InstancesV2 ([#96397](https://github.com/kubernetes/kubernetes/pull/96397), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] +- A client-go metric, rest_client_exec_plugin_call_total, has been added to track total calls to client-go credential plugins. ([#98892](https://github.com/kubernetes/kubernetes/pull/98892), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery, Auth, Cluster Lifecycle and Instrumentation] +- Add --use-protocol-buffers flag to kubectl top pods and nodes ([#96655](https://github.com/kubernetes/kubernetes/pull/96655), [@serathius](https://github.com/serathius)) [SIG CLI] +- Add support to generate client-side binaries for new darwin/arm64 platform ([#97743](https://github.com/kubernetes/kubernetes/pull/97743), [@dims](https://github.com/dims)) [SIG Release and Testing] +- Added `ephemeral_volume_controller_create[_failures]_total` counters to kube-controller-manager metrics ([#99115](https://github.com/kubernetes/kubernetes/pull/99115), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Cluster Lifecycle, Instrumentation and Storage] +- Adds alpha feature `VolumeCapacityPriority` which makes the scheduler prioritize nodes based on the best matching size of statically provisioned PVs across multiple topologies. ([#96347](https://github.com/kubernetes/kubernetes/pull/96347), [@cofyc](https://github.com/cofyc)) [SIG Apps, Network, Scheduling, Storage and Testing] +- Adds two new metrics to cronjobs: a histogram to track the difference between the time a job is created and the time it was expected to be created, and a gauge for the missed schedules of a cronjob ([#99341](https://github.com/kubernetes/kubernetes/pull/99341), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps and Instrumentation] +- Alpha implementation of Kubectl Command Headers: SIG CLI KEP 859 is enabled when the KUBECTL_COMMAND_HEADERS environment variable is set on the client command line. + - To enable: export KUBECTL_COMMAND_HEADERS=1; kubectl ... ([#98952](https://github.com/kubernetes/kubernetes/pull/98952), [@seans3](https://github.com/seans3)) [SIG API Machinery and CLI] +- Component owners can configure the allowlist of metric labels with the flag '--allow-metric-labels'. ([#99738](https://github.com/kubernetes/kubernetes/pull/99738), [@YoyinZyc](https://github.com/YoyinZyc)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- Disruption controller only sends one event per PodDisruptionBudget if scale can't be computed ([#98128](https://github.com/kubernetes/kubernetes/pull/98128), [@mortent](https://github.com/mortent)) [SIG Apps] +- EndpointSliceNodeName will always be enabled, so NodeName will always be available in the v1beta1 API. ([#99746](https://github.com/kubernetes/kubernetes/pull/99746), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] +- Graduate CRIContainerLogRotation feature gate to GA.
([#99651](https://github.com/kubernetes/kubernetes/pull/99651), [@umohnani8](https://github.com/umohnani8)) [SIG Node and Testing] +- Kube-proxy iptables: new metric sync_proxy_rules_iptables_total that exposes the number of rules programmed per table in each iteration ([#99653](https://github.com/kubernetes/kubernetes/pull/99653), [@aojea](https://github.com/aojea)) [SIG Instrumentation and Network] +- Kube-scheduler now logs plugin scoring summaries at --v=4 ([#99411](https://github.com/kubernetes/kubernetes/pull/99411), [@damemi](https://github.com/damemi)) [SIG Scheduling] +- Kubeadm: print a warning to the user that ipv6 site-local addresses are deprecated ([#99574](https://github.com/kubernetes/kubernetes/pull/99574), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle and Network] +- Kubeadm: apply the "node.kubernetes.io/exclude-from-external-load-balancers" label on control plane nodes during "init", "join" and "upgrade" to preserve backwards compatibility with the legacy LB mode where nodes labeled as "master" were excluded. To opt out, you can remove the label from a node. See #97543 and the linked KEP for more details. ([#98269](https://github.com/kubernetes/kubernetes/pull/98269), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: if the user has customized their image repository via the kubeadm configuration, pass the custom pause image repository and tag to the kubelet via --pod-infra-container-image not only for Docker but for all container runtimes. This flag tells the kubelet that it should not garbage collect the image. ([#99476](https://github.com/kubernetes/kubernetes/pull/99476), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Kubeadm: promote IPv6DualStack feature gate to Beta ([#99294](https://github.com/kubernetes/kubernetes/pull/99294), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl version changed to write a warning message to stderr if the client and server version difference exceeds the supported version skew of +/-1 minor version. ([#98250](https://github.com/kubernetes/kubernetes/pull/98250), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] +- Kubernetes is now built with Golang 1.16 ([#98572](https://github.com/kubernetes/kubernetes/pull/98572), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] +- Persistent Volumes formatted with the btrfs filesystem will now automatically resize when expanded. ([#99361](https://github.com/kubernetes/kubernetes/pull/99361), [@Novex](https://github.com/Novex)) [SIG Storage] +- Remove cAdvisor json metrics api collected by Kubelet ([#99236](https://github.com/kubernetes/kubernetes/pull/99236), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Sysctls is now GA and locked to default ([#99158](https://github.com/kubernetes/kubernetes/pull/99158), [@wgahnagl](https://github.com/wgahnagl)) [SIG Node] +- The NodeAffinity plugin implements the PreFilter extension, offering enhanced performance for Filter. ([#99213](https://github.com/kubernetes/kubernetes/pull/99213), [@AliceZhang2016](https://github.com/AliceZhang2016)) [SIG Scheduling] +- The endpointslice mirroring controller mirrors endpoints annotations and labels to the generated endpoint slices, and it also ensures that updates on any of these fields are mirrored. + The well-known annotation endpoints.kubernetes.io/last-change-trigger-time is skipped and not mirrored.
([#98116](https://github.com/kubernetes/kubernetes/pull/98116), [@aojea](https://github.com/aojea)) [SIG Apps, Network and Testing] +- Update the latest validated version of Docker to 20.10 ([#98977](https://github.com/kubernetes/kubernetes/pull/98977), [@neolit123](https://github.com/neolit123)) [SIG CLI, Cluster Lifecycle and Node] +- Upgrade node local dns to 1.17.0 for better IPv6 support ([#99749](https://github.com/kubernetes/kubernetes/pull/99749), [@pacoxu](https://github.com/pacoxu)) [SIG Cloud Provider and Network] +- Users might specify the `kubectl.kubernetes.io/default-exec-container` annotation in a Pod to preselect container for kubectl commands. ([#99581](https://github.com/kubernetes/kubernetes/pull/99581), [@mengjiao-liu](https://github.com/mengjiao-liu)) [SIG CLI] +- When downscaling ReplicaSets, ready and creation timestamps are compared in a logarithmic scale. ([#99212](https://github.com/kubernetes/kubernetes/pull/99212), [@damemi](https://github.com/damemi)) [SIG Apps and Testing] +- When the kubelet is watching a ConfigMap or Secret purely in the context of setting environment variables + for containers, only hold that watch for a defined duration before cancelling it. This change reduces the CPU + and memory usage of the kube-apiserver in large clusters. ([#99393](https://github.com/kubernetes/kubernetes/pull/99393), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery, Node and Testing] +- WindowsEndpointSliceProxying feature gate has graduated to beta and is enabled by default. This means kube-proxy will read from EndpointSlices instead of Endpoints on Windows by default. ([#99794](https://github.com/kubernetes/kubernetes/pull/99794), [@robscott](https://github.com/robscott)) [SIG Network] ### Bug or Regression -- Change plugin name in fsgroupapplymetrics of csi and flexvolume to distinguish different driver ([#95892](https://github.com/kubernetes/kubernetes/pull/95892), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation, Storage and Testing] -- Clear UDP conntrack entry on endpoint changes when using nodeport ([#71573](https://github.com/kubernetes/kubernetes/pull/71573), [@JacobTanenbaum](https://github.com/JacobTanenbaum)) [SIG Network] -- Exposes and sets a default timeout for the TokenReview client for DelegatingAuthenticationOptions ([#96217](https://github.com/kubernetes/kubernetes/pull/96217), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Fix CVE-2020-8555 for Quobyte client connections. ([#95206](https://github.com/kubernetes/kubernetes/pull/95206), [@misterikkit](https://github.com/misterikkit)) [SIG Storage] -- Fix IP fragmentation of UDP and TCP packets not supported issues on LoadBalancer rules ([#96464](https://github.com/kubernetes/kubernetes/pull/96464), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Fix a bug that DefaultPreemption plugin is disabled when using (legacy) scheduler policy. ([#96439](https://github.com/kubernetes/kubernetes/pull/96439), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling and Testing] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix client-go prometheus metrics to correctly present the API path accessed in some environments. 
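The kubelet entry above about holding ConfigMap/Secret watches only "for a defined duration" when they back container environment variables can be pictured with a small client-go sketch. This is an illustrative pattern only, not the kubelet's actual code; the one-minute duration, the `default` namespace, and the ConfigMap name `app-env` are assumptions made for the example.

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// watchConfigMapBounded opens a watch on a single ConfigMap and gives it up
// after maxAge, instead of holding the watch open indefinitely.
func watchConfigMapBounded(cs kubernetes.Interface, namespace, name string, maxAge time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), maxAge)
	defer cancel()

	w, err := cs.CoreV1().ConfigMaps(namespace).Watch(ctx, metav1.ListOptions{
		FieldSelector: "metadata.name=" + name,
	})
	if err != nil {
		return err
	}
	defer w.Stop()

	for {
		select {
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil // server closed the watch
			}
			fmt.Printf("observed %s event\n", ev.Type)
		case <-ctx.Done():
			return nil // bounded duration elapsed; give the watch up
		}
	}
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Hold the watch for at most one minute (illustrative duration).
	if err := watchConfigMapBounded(cs, "default", "app-env", time.Minute); err != nil {
		panic(err)
	}
}
```

The point is simply that the context cancels the watch once the bounded duration elapses, which is what reduces long-lived watch load on the kube-apiserver in large clusters.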
([#74363](https://github.com/kubernetes/kubernetes/pull/74363), [@aanm](https://github.com/aanm)) [SIG API Machinery] -- Fix memory leak in kube-apiserver when underlying time goes forth and back. ([#96266](https://github.com/kubernetes/kubernetes/pull/96266), [@chenyw1990](https://github.com/chenyw1990)) [SIG API Machinery] -- Fix paging issues when Azure API returns empty values with non-empty nextLink ([#96211](https://github.com/kubernetes/kubernetes/pull/96211), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix pull image error from multiple ACRs using azure managed identity ([#96355](https://github.com/kubernetes/kubernetes/pull/96355), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix vSphere volumes that could be erroneously attached to wrong node ([#96224](https://github.com/kubernetes/kubernetes/pull/96224), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents kubectl to validate CRDs with schema using x-kubernetes-preserve-unknown-fields on object fields. ([#96369](https://github.com/kubernetes/kubernetes/pull/96369), [@gautierdelorme](https://github.com/gautierdelorme)) [SIG API Machinery and Testing] -- For vSphere Cloud Provider, If VM of worker node is deleted, the node will also be deleted by node controller ([#92608](https://github.com/kubernetes/kubernetes/pull/92608), [@lubronzhan](https://github.com/lubronzhan)) [SIG Cloud Provider] -- HTTP/2 connection health check is enabled by default in all Kubernetes clients. The feature should work out-of-the-box. If needed, users can tune the feature via the HTTP2_READ_IDLE_TIMEOUT_SECONDS and HTTP2_PING_TIMEOUT_SECONDS environment variables. The feature is disabled if HTTP2_READ_IDLE_TIMEOUT_SECONDS is set to 0. ([#95981](https://github.com/kubernetes/kubernetes/pull/95981), [@caesarxuchao](https://github.com/caesarxuchao)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. - - If the user specifies a timeout in the request URL that exceeds the maximum request deadline allowed by the apiserver, the request will be aborted with an HTTP 400. ([#96061](https://github.com/kubernetes/kubernetes/pull/96061), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Network and Testing] -- Improve error messages related to nodePort endpoint changes conntrack entries cleanup. ([#96251](https://github.com/kubernetes/kubernetes/pull/96251), [@ravens](https://github.com/ravens)) [SIG Network] -- Print go stack traces at -v=4 and not -v=2 ([#94663](https://github.com/kubernetes/kubernetes/pull/94663), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Remove ready file and its directory (which is created during volume SetUp) during emptyDir volume TearDown. ([#95770](https://github.com/kubernetes/kubernetes/pull/95770), [@jingxu97](https://github.com/jingxu97)) [SIG Storage] -- Resolves non-deterministic behavior of the garbage collection controller when ownerReferences with incorrect data are encountered. Events with a reason of `OwnerRefInvalidNamespace` are recorded when namespace mismatches between child and owner objects are detected. - - A namespaced object with an ownerReference referencing a uid of a namespaced kind which does not exist in the same namespace is now consistently treated as though that owner does not exist, and the child object is deleted. 
- - A cluster-scoped object with an ownerReference referencing a uid of a namespaced kind is now consistently treated as though that owner is not resolvable, and the child object is ignored by the garbage collector. ([#92743](https://github.com/kubernetes/kubernetes/pull/92743), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Testing] -- Skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:162]: Driver azure-disk doesn't support snapshot type DynamicSnapshot -- skipping - skip [k8s.io/kubernetes@v1.19.0/test/e2e/storage/testsuites/base.go:185]: Driver azure-disk doesn't support ntfs -- skipping ([#96144](https://github.com/kubernetes/kubernetes/pull/96144), [@qinpingli](https://github.com/qinpingli)) [SIG Storage and Testing] -- The AWS network load balancer attributes can now be specified during service creation ([#95247](https://github.com/kubernetes/kubernetes/pull/95247), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- The kube-apiserver will no longer serve APIs that should have been deleted in GA non-alpha levels. Alpha levels will continue to serve the removed APIs so that CI doesn't immediately break. ([#96525](https://github.com/kubernetes/kubernetes/pull/96525), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] -- Update max azure data disk count map ([#96308](https://github.com/kubernetes/kubernetes/pull/96308), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Update the route table tag in the route reconcile loop ([#96545](https://github.com/kubernetes/kubernetes/pull/96545), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Volume binding: report UnschedulableAndUnresolvable status instead of an error when bound PVs not found ([#95541](https://github.com/kubernetes/kubernetes/pull/95541), [@cofyc](https://github.com/cofyc)) [SIG Apps, Scheduling and Storage] -- [kubectl] Fail when local source file doesn't exist ([#90333](https://github.com/kubernetes/kubernetes/pull/90333), [@bamarni](https://github.com/bamarni)) [SIG CLI] +- Creating a PVC with DataSource should fail for non-CSI plugins. ([#97086](https://github.com/kubernetes/kubernetes/pull/97086), [@xing-yang](https://github.com/xing-yang)) [SIG Apps and Storage] +- EndpointSlice controller is now less likely to emit FailedToUpdateEndpointSlices events. ([#99345](https://github.com/kubernetes/kubernetes/pull/99345), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- EndpointSliceMirroring controller is now less likely to emit FailedToUpdateEndpointSlices events. 
([#99756](https://github.com/kubernetes/kubernetes/pull/99756), [@robscott](https://github.com/robscott)) [SIG Apps and Network] +- Fix --ignore-errors does not take effect if multiple logs are printed and unfollowed ([#97686](https://github.com/kubernetes/kubernetes/pull/97686), [@wzshiming](https://github.com/wzshiming)) [SIG CLI] +- Fix bug that would let the Horizontal Pod Autoscaler scale down despite at least one metric being unavailable/invalid ([#99514](https://github.com/kubernetes/kubernetes/pull/99514), [@mikkeloscar](https://github.com/mikkeloscar)) [SIG Apps and Autoscaling] +- Fix cgroup handling for systemd with cgroup v2 ([#98365](https://github.com/kubernetes/kubernetes/pull/98365), [@odinuge](https://github.com/odinuge)) [SIG Node] +- Fix smb mount PermissionDenied issue on Windows ([#99550](https://github.com/kubernetes/kubernetes/pull/99550), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider, Storage and Windows] +- Fixed a bug that causes smaller number of conntrack-max being used under CPU static policy. (#99225, @xh4n3) ([#99613](https://github.com/kubernetes/kubernetes/pull/99613), [@xh4n3](https://github.com/xh4n3)) [SIG Network] +- Fixed bug that caused cAdvisor to incorrectly detect single-socket multi-NUMA topology. ([#99315](https://github.com/kubernetes/kubernetes/pull/99315), [@iwankgb](https://github.com/iwankgb)) [SIG Node] +- Fixes add-on manager leader election ([#98968](https://github.com/kubernetes/kubernetes/pull/98968), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider] +- Improved update time of pod statuses following new probe results. ([#98376](https://github.com/kubernetes/kubernetes/pull/98376), [@matthyx](https://github.com/matthyx)) [SIG Node and Testing] +- Kube-apiserver: an update of a pod with a generic ephemeral volume dropped that volume if the feature had been disabled since creating the pod with such a volume ([#99446](https://github.com/kubernetes/kubernetes/pull/99446), [@pohly](https://github.com/pohly)) [SIG Apps, Node and Storage] +- Kubeadm: skip validating pod subnet against node-cidr-mask when allocate-node-cidrs is set to be false ([#98984](https://github.com/kubernetes/kubernetes/pull/98984), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- On single-stack configured (IPv4 or IPv6, but not both) clusters, Services which are both headless (no clusterIP) and selectorless (empty or undefined selector) will report `ipFamilyPolicy RequireDualStack` and will have entries in `ipFamilies[]` for both IPv4 and IPv6. This is a change from alpha, but does not have any impact on the manually-specified Endpoints and EndpointSlices for the Service. ([#99555](https://github.com/kubernetes/kubernetes/pull/99555), [@thockin](https://github.com/thockin)) [SIG Apps and Network] +- Resolves spurious `Failed to list *v1.Secret` or `Failed to list *v1.ConfigMap` messages in kubelet logs. ([#99538](https://github.com/kubernetes/kubernetes/pull/99538), [@liggitt](https://github.com/liggitt)) [SIG Auth and Node] +- Return zero time (midnight on Jan. 1, 1970) instead of negative number when reporting startedAt and finishedAt of the not started or a running Pod when using dockershim as a runtime. ([#99585](https://github.com/kubernetes/kubernetes/pull/99585), [@Iceber](https://github.com/Iceber)) [SIG Node] +- Stdin is now only passed to client-go exec credential plugins when it is detected to be an interactive terminal. 
Previously, it was passed to client-go exec plugins when **stdout** was detected to be an interactive terminal. (See the terminal-detection sketch below.) ([#99654](https://github.com/kubernetes/kubernetes/pull/99654), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] +- The maximum number of ports allowed in EndpointSlices has been increased from 100 to 20,000 ([#99795](https://github.com/kubernetes/kubernetes/pull/99795), [@robscott](https://github.com/robscott)) [SIG Network] +- Updates the commands + - kubectl kustomize {arg} + - kubectl apply -k {arg} + to use the same code as kustomize CLI v4.0.5 + - [v4.0.5]: https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv4.0.5 ([#98946](https://github.com/kubernetes/kubernetes/pull/98946), [@monopole](https://github.com/monopole)) [SIG API Machinery, Architecture, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node and Storage] +- When a CNI plugin returns dual-stack pod IPs, kubelet will now try to respect the + "primary IP family" of the cluster by picking a primary pod IP of the same family + as the (primary) node IP, rather than assuming that the CNI plugin returned the IPs + in the order the administrator wanted (since some CNI plugins don't allow + configuring this). ([#97979](https://github.com/kubernetes/kubernetes/pull/97979), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- When using Containerd on Windows, the "C:\Windows\System32\drivers\etc\hosts" file will now be managed by kubelet. ([#83730](https://github.com/kubernetes/kubernetes/pull/83730), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node and Windows] +- `VolumeBindingArgs` now allows `BindTimeoutSeconds` to be set to zero; the value zero indicates no waiting for the volume binding check. ([#99835](https://github.com/kubernetes/kubernetes/pull/99835), [@chendave](https://github.com/chendave)) [SIG Scheduling and Storage] +- `kubectl exec` and `kubectl attach` now honor the `--quiet` flag which suppresses output from the local binary that could be confused by a script with the remote command output (all non-failure output is hidden). In addition, print inline with exec and attach the list of alternate containers when we default to the first spec.container. ([#99004](https://github.com/kubernetes/kubernetes/pull/99004), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] ### Other (Cleanup or Flake) -- Handle slow cronjob lister in cronjob controller v2 and improve memory footprint. ([#96443](https://github.com/kubernetes/kubernetes/pull/96443), [@alaypatel07](https://github.com/alaypatel07)) [SIG Apps] -- --redirect-container-streaming is no longer functional. The flag will be removed in v1.22 ([#95935](https://github.com/kubernetes/kubernetes/pull/95935), [@tallclair](https://github.com/tallclair)) [SIG Node] -- A new metric `requestAbortsTotal` has been introduced that counts aborted requests for each `group`, `version`, `verb`, `resource`, `subresource` and `scope`.
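Returning to the stdin handling fix for client-go exec credential plugins noted above: "detected to be an interactive terminal" is, at bottom, a file-descriptor check. A minimal sketch using `golang.org/x/term` (which appears in the added dependencies below); the printed messages are illustrative only, not the actual client-go behaviour.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	// Only an interactive terminal should be handed through to a credential
	// plugin; a pipe or redirected stdin carries data the plugin must not consume.
	if term.IsTerminal(int(os.Stdin.Fd())) {
		fmt.Println("stdin is an interactive terminal")
	} else {
		fmt.Println("stdin is a pipe or redirect; do not pass it to the plugin")
	}
}
```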
([#95002](https://github.com/kubernetes/kubernetes/pull/95002), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery, Cloud Provider, Instrumentation and Scheduling] -- API priority and fairness metrics use snake_case in label names ([#96236](https://github.com/kubernetes/kubernetes/pull/96236), [@adtac](https://github.com/adtac)) [SIG API Machinery, Cluster Lifecycle, Instrumentation and Testing] -- Applies translations on all command descriptions ([#95439](https://github.com/kubernetes/kubernetes/pull/95439), [@HerrNaN](https://github.com/HerrNaN)) [SIG CLI] -- Changed: default "Accept-Encoding" header removed from HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes ([#96127](https://github.com/kubernetes/kubernetes/pull/96127), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Generators for services are removed from kubectl ([#95256](https://github.com/kubernetes/kubernetes/pull/95256), [@Git-Jiro](https://github.com/Git-Jiro)) [SIG CLI] -- Introduce kubectl-convert plugin. ([#96190](https://github.com/kubernetes/kubernetes/pull/96190), [@soltysh](https://github.com/soltysh)) [SIG CLI and Testing] -- Kube-scheduler now logs processed component config at startup ([#96426](https://github.com/kubernetes/kubernetes/pull/96426), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- NONE ([#96179](https://github.com/kubernetes/kubernetes/pull/96179), [@bbyrne5](https://github.com/bbyrne5)) [SIG Network] -- Users will now be able to configure all supported values for AWS NLB health check interval and thresholds for new resources. ([#96312](https://github.com/kubernetes/kubernetes/pull/96312), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Apiserver_request_duration_seconds is promoted to stable status. ([#99925](https://github.com/kubernetes/kubernetes/pull/99925), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- Apiserver_request_total is promoted to stable status and no longer has a content-type dimension, so any alerts/charts which presume the existence of this will fail. This is, however, unlikely to be the case since it was effectively an unbounded dimension in the first place. ([#99788](https://github.com/kubernetes/kubernetes/pull/99788), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Instrumentation and Testing] +- EndpointSlice generation is now incremented when labels change.
([#99750](https://github.com/kubernetes/kubernetes/pull/99750), [@robscott](https://github.com/robscott)) [SIG Network] +- Featuregate AllowInsecureBackendProxy is promoted to GA ([#99658](https://github.com/kubernetes/kubernetes/pull/99658), [@deads2k](https://github.com/deads2k)) [SIG API Machinery] +- Migrate `pkg/kubelet/(eviction)` to structured logging ([#99032](https://github.com/kubernetes/kubernetes/pull/99032), [@yangjunmyfm192085](https://github.com/yangjunmyfm192085)) [SIG Node] +- Migrate deployment controller log messages to structured logging ([#97507](https://github.com/kubernetes/kubernetes/pull/97507), [@aldudko](https://github.com/aldudko)) [SIG Apps] +- Migrate pkg/kubelet/cloudresource to structured logging ([#98999](https://github.com/kubernetes/kubernetes/pull/98999), [@sladyn98](https://github.com/sladyn98)) [SIG Node] +- Migrate pkg/kubelet/cri/remote logs to structured logging ([#98589](https://github.com/kubernetes/kubernetes/pull/98589), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate pkg/kubelet/kuberuntime/kuberuntime_container.go logs to structured logging ([#96973](https://github.com/kubernetes/kubernetes/pull/96973), [@chenyw1990](https://github.com/chenyw1990)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/status to structured logging ([#99836](https://github.com/kubernetes/kubernetes/pull/99836), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/token to structured logging ([#99264](https://github.com/kubernetes/kubernetes/pull/99264), [@palnabarun](https://github.com/palnabarun)) [SIG Auth, Instrumentation and Node] +- Migrate pkg/kubelet/util to structured logging ([#99823](https://github.com/kubernetes/kubernetes/pull/99823), [@navidshaikh](https://github.com/navidshaikh)) [SIG Instrumentation and Node] +- Migrate proxy/userspace/proxier.go logs to structured logging ([#97837](https://github.com/kubernetes/kubernetes/pull/97837), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some kubelet/metrics log messages to structured logging ([#98627](https://github.com/kubernetes/kubernetes/pull/98627), [@jialaijun](https://github.com/jialaijun)) [SIG Instrumentation and Node] +- Process start time on Windows now uses current process information ([#97491](https://github.com/kubernetes/kubernetes/pull/97491), [@jsturtevant](https://github.com/jsturtevant)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Windows] + +### Uncategorized + +- Migrate pkg/kubelet/stats to structured logging ([#99607](https://github.com/kubernetes/kubernetes/pull/99607), [@krzysiekg](https://github.com/krzysiekg)) [SIG Node] +- The DownwardAPIHugePages feature is beta. Users may use the feature if all workers in their cluster are min 1.20 version. The feature will be enabled by default in all installations in 1.22. 
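The many "Migrate ... to structured logging" entries above all follow the same mechanical pattern: printf-style `klog` calls are replaced by key/value calls from `k8s.io/klog/v2`. A rough before/after sketch; the pod and node names and the log messages are invented for illustration and are not taken from the migrated packages.

```go
package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	podName, nodeName := "web-0", "node-1" // illustrative values only
	err := errors.New("image pull backoff")

	// Before: printf-style, unstructured log lines.
	klog.Infof("Syncing pod %s on node %s", podName, nodeName)
	klog.Errorf("Failed to sync pod %s: %v", podName, err)

	// After: structured key/value logging, the style the migrations above adopt.
	klog.InfoS("Syncing pod", "pod", podName, "node", nodeName)
	klog.ErrorS(err, "Failed to sync pod", "pod", podName)

	klog.Flush()
}
```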
([#99610](https://github.com/kubernetes/kubernetes/pull/99610), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node] ## Dependencies ### Added -- cloud.google.com/go/firestore: v1.1.0 -- github.com/armon/go-metrics: [f0300d1](https://github.com/armon/go-metrics/tree/f0300d1) -- github.com/armon/go-radix: [7fddfc3](https://github.com/armon/go-radix/tree/7fddfc3) -- github.com/bketelsen/crypt: [5cbc8cc](https://github.com/bketelsen/crypt/tree/5cbc8cc) -- github.com/hashicorp/consul/api: [v1.1.0](https://github.com/hashicorp/consul/api/tree/v1.1.0) -- github.com/hashicorp/consul/sdk: [v0.1.1](https://github.com/hashicorp/consul/sdk/tree/v0.1.1) -- github.com/hashicorp/errwrap: [v1.0.0](https://github.com/hashicorp/errwrap/tree/v1.0.0) -- github.com/hashicorp/go-cleanhttp: [v0.5.1](https://github.com/hashicorp/go-cleanhttp/tree/v0.5.1) -- github.com/hashicorp/go-immutable-radix: [v1.0.0](https://github.com/hashicorp/go-immutable-radix/tree/v1.0.0) -- github.com/hashicorp/go-msgpack: [v0.5.3](https://github.com/hashicorp/go-msgpack/tree/v0.5.3) -- github.com/hashicorp/go-multierror: [v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -- github.com/hashicorp/go-rootcerts: [v1.0.0](https://github.com/hashicorp/go-rootcerts/tree/v1.0.0) -- github.com/hashicorp/go-sockaddr: [v1.0.0](https://github.com/hashicorp/go-sockaddr/tree/v1.0.0) -- github.com/hashicorp/go-uuid: [v1.0.1](https://github.com/hashicorp/go-uuid/tree/v1.0.1) -- github.com/hashicorp/go.net: [v0.0.1](https://github.com/hashicorp/go.net/tree/v0.0.1) -- github.com/hashicorp/logutils: [v1.0.0](https://github.com/hashicorp/logutils/tree/v1.0.0) -- github.com/hashicorp/mdns: [v1.0.0](https://github.com/hashicorp/mdns/tree/v1.0.0) -- github.com/hashicorp/memberlist: [v0.1.3](https://github.com/hashicorp/memberlist/tree/v0.1.3) -- github.com/hashicorp/serf: [v0.8.2](https://github.com/hashicorp/serf/tree/v0.8.2) -- github.com/mitchellh/cli: [v1.0.0](https://github.com/mitchellh/cli/tree/v1.0.0) -- github.com/mitchellh/go-testing-interface: [v1.0.0](https://github.com/mitchellh/go-testing-interface/tree/v1.0.0) -- github.com/mitchellh/gox: [v0.4.0](https://github.com/mitchellh/gox/tree/v0.4.0) -- github.com/mitchellh/iochan: [v1.0.0](https://github.com/mitchellh/iochan/tree/v1.0.0) -- github.com/pascaldekloe/goe: [57f6aae](https://github.com/pascaldekloe/goe/tree/57f6aae) -- github.com/posener/complete: [v1.1.1](https://github.com/posener/complete/tree/v1.1.1) -- github.com/ryanuber/columnize: [9b3edd6](https://github.com/ryanuber/columnize/tree/9b3edd6) -- github.com/sean-/seed: [e2103e2](https://github.com/sean-/seed/tree/e2103e2) -- github.com/subosito/gotenv: [v1.2.0](https://github.com/subosito/gotenv/tree/v1.2.0) -- github.com/willf/bitset: [d5bec33](https://github.com/willf/bitset/tree/d5bec33) -- gopkg.in/ini.v1: v1.51.0 -- gopkg.in/yaml.v3: 9f266ea -- rsc.io/quote/v3: v3.1.0 -- rsc.io/sampler: v1.3.0 +- github.com/go-errors/errors: [v1.0.1](https://github.com/go-errors/errors/tree/v1.0.1) +- github.com/gobuffalo/here: [v0.6.0](https://github.com/gobuffalo/here/tree/v0.6.0) +- github.com/google/shlex: [e7afc7f](https://github.com/google/shlex/tree/e7afc7f) +- github.com/markbates/pkger: [v0.17.1](https://github.com/markbates/pkger/tree/v0.17.1) +- github.com/monochromegane/go-gitignore: [205db1a](https://github.com/monochromegane/go-gitignore/tree/205db1a) +- github.com/niemeyer/pretty: [a10e7ca](https://github.com/niemeyer/pretty/tree/a10e7ca) +- github.com/xlab/treeprint: 
[a009c39](https://github.com/xlab/treeprint/tree/a009c39) +- go.starlark.net: 8dd3e2e +- golang.org/x/term: 6a3ed07 +- sigs.k8s.io/kustomize/api: v0.8.5 +- sigs.k8s.io/kustomize/cmd/config: v0.9.7 +- sigs.k8s.io/kustomize/kustomize/v4: v4.0.5 +- sigs.k8s.io/kustomize/kyaml: v0.10.15 ### Changed -- cloud.google.com/go/bigquery: v1.0.1 → v1.4.0 -- cloud.google.com/go/datastore: v1.0.0 → v1.1.0 -- cloud.google.com/go/pubsub: v1.0.1 → v1.2.0 -- cloud.google.com/go/storage: v1.0.0 → v1.6.0 -- cloud.google.com/go: v0.51.0 → v0.54.0 -- github.com/Microsoft/go-winio: [fc70bd9 → v0.4.15](https://github.com/Microsoft/go-winio/compare/fc70bd9...v0.4.15) -- github.com/aws/aws-sdk-go: [v1.35.5 → v1.35.24](https://github.com/aws/aws-sdk-go/compare/v1.35.5...v1.35.24) -- github.com/blang/semver: [v3.5.0+incompatible → v3.5.1+incompatible](https://github.com/blang/semver/compare/v3.5.0...v3.5.1) -- github.com/checkpoint-restore/go-criu/v4: [v4.0.2 → v4.1.0](https://github.com/checkpoint-restore/go-criu/v4/compare/v4.0.2...v4.1.0) -- github.com/containerd/containerd: [v1.3.3 → v1.4.1](https://github.com/containerd/containerd/compare/v1.3.3...v1.4.1) -- github.com/containerd/ttrpc: [v1.0.0 → v1.0.2](https://github.com/containerd/ttrpc/compare/v1.0.0...v1.0.2) -- github.com/containerd/typeurl: [v1.0.0 → v1.0.1](https://github.com/containerd/typeurl/compare/v1.0.0...v1.0.1) -- github.com/coreos/etcd: [v3.3.10+incompatible → v3.3.13+incompatible](https://github.com/coreos/etcd/compare/v3.3.10...v3.3.13) -- github.com/docker/docker: [aa6a989 → bd33bbf](https://github.com/docker/docker/compare/aa6a989...bd33bbf) -- github.com/go-gl/glfw/v3.3/glfw: [12ad95a → 6f7a984](https://github.com/go-gl/glfw/v3.3/glfw/compare/12ad95a...6f7a984) -- github.com/golang/groupcache: [215e871 → 8c9f03a](https://github.com/golang/groupcache/compare/215e871...8c9f03a) -- github.com/golang/mock: [v1.3.1 → v1.4.1](https://github.com/golang/mock/compare/v1.3.1...v1.4.1) -- github.com/golang/protobuf: [v1.4.2 → v1.4.3](https://github.com/golang/protobuf/compare/v1.4.2...v1.4.3) -- github.com/google/cadvisor: [v0.37.0 → v0.38.4](https://github.com/google/cadvisor/compare/v0.37.0...v0.38.4) -- github.com/google/go-cmp: [v0.4.0 → v0.5.2](https://github.com/google/go-cmp/compare/v0.4.0...v0.5.2) -- github.com/google/pprof: [d4f498a → 1ebb73c](https://github.com/google/pprof/compare/d4f498a...1ebb73c) -- github.com/google/uuid: [v1.1.1 → v1.1.2](https://github.com/google/uuid/compare/v1.1.1...v1.1.2) -- github.com/gorilla/mux: [v1.7.3 → v1.8.0](https://github.com/gorilla/mux/compare/v1.7.3...v1.8.0) -- github.com/gorilla/websocket: [v1.4.0 → v1.4.2](https://github.com/gorilla/websocket/compare/v1.4.0...v1.4.2) -- github.com/karrick/godirwalk: [v1.7.5 → v1.16.1](https://github.com/karrick/godirwalk/compare/v1.7.5...v1.16.1) -- github.com/opencontainers/runc: [819fcc6 → v1.0.0-rc92](https://github.com/opencontainers/runc/compare/819fcc6...v1.0.0-rc92) -- github.com/opencontainers/runtime-spec: [237cc4f → 4d89ac9](https://github.com/opencontainers/runtime-spec/compare/237cc4f...4d89ac9) -- github.com/opencontainers/selinux: [v1.5.2 → v1.6.0](https://github.com/opencontainers/selinux/compare/v1.5.2...v1.6.0) -- github.com/prometheus/procfs: [v0.1.3 → v0.2.0](https://github.com/prometheus/procfs/compare/v0.1.3...v0.2.0) -- github.com/quobyte/api: [v0.1.2 → v0.1.8](https://github.com/quobyte/api/compare/v0.1.2...v0.1.8) -- github.com/spf13/cobra: [v1.0.0 → v1.1.1](https://github.com/spf13/cobra/compare/v1.0.0...v1.1.1) -- github.com/spf13/viper: 
[v1.4.0 → v1.7.0](https://github.com/spf13/viper/compare/v1.4.0...v1.7.0) -- github.com/stretchr/testify: [v1.4.0 → v1.6.1](https://github.com/stretchr/testify/compare/v1.4.0...v1.6.1) -- github.com/vishvananda/netns: [52d707b → db3c7e5](https://github.com/vishvananda/netns/compare/52d707b...db3c7e5) -- go.opencensus.io: v0.22.2 → v0.22.3 -- golang.org/x/exp: da58074 → 6cc2880 -- golang.org/x/lint: fdd1cda → 738671d -- golang.org/x/net: ab34263 → 69a7880 -- golang.org/x/oauth2: 858c2ad → bf48bf1 -- golang.org/x/sys: ed371f2 → 5cba982 -- golang.org/x/text: v0.3.3 → v0.3.4 -- golang.org/x/time: 555d28b → 3af7569 -- golang.org/x/xerrors: 9bdfabe → 5ec99f8 -- google.golang.org/api: v0.15.1 → v0.20.0 -- google.golang.org/genproto: cb27e3a → 8816d57 -- google.golang.org/grpc: v1.27.0 → v1.27.1 -- google.golang.org/protobuf: v1.24.0 → v1.25.0 -- honnef.co/go/tools: v0.0.1-2019.2.3 → v0.0.1-2020.1.3 -- k8s.io/gengo: 8167cfd → 83324d8 -- k8s.io/klog/v2: v2.2.0 → v2.4.0 -- k8s.io/kube-openapi: 8b50664 → d219536 -- k8s.io/utils: d5654de → 67b214c -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.12 → v0.0.14 -- sigs.k8s.io/structured-merge-diff/v4: b3cf1e8 → v4.0.2 +- dmitri.shuralyov.com/gpu/mtl: 666a987 → 28db891 +- github.com/creack/pty: [v1.1.7 → v1.1.9](https://github.com/creack/pty/compare/v1.1.7...v1.1.9) +- github.com/go-openapi/spec: [v0.19.3 → v0.19.5](https://github.com/go-openapi/spec/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/strfmt: [v0.19.3 → v0.19.5](https://github.com/go-openapi/strfmt/compare/v0.19.3...v0.19.5) +- github.com/go-openapi/validate: [v0.19.5 → v0.19.8](https://github.com/go-openapi/validate/compare/v0.19.5...v0.19.8) +- github.com/google/cadvisor: [v0.38.7 → v0.38.8](https://github.com/google/cadvisor/compare/v0.38.7...v0.38.8) +- github.com/kr/text: [v0.1.0 → v0.2.0](https://github.com/kr/text/compare/v0.1.0...v0.2.0) +- github.com/mattn/go-runewidth: [v0.0.2 → v0.0.7](https://github.com/mattn/go-runewidth/compare/v0.0.2...v0.0.7) +- github.com/olekukonko/tablewriter: [a0225b3 → v0.0.4](https://github.com/olekukonko/tablewriter/compare/a0225b3...v0.0.4) +- github.com/sergi/go-diff: [v1.0.0 → v1.1.0](https://github.com/sergi/go-diff/compare/v1.0.0...v1.1.0) +- golang.org/x/crypto: 7f63de1 → 5ea612d +- golang.org/x/exp: 6cc2880 → 85be41e +- golang.org/x/mobile: d2bd2a2 → e6ae53a +- golang.org/x/mod: v0.3.0 → ce943fd +- golang.org/x/net: 69a7880 → 3d97a24 +- golang.org/x/sys: 5cba982 → a50acf3 +- golang.org/x/time: 3af7569 → f8bda1e +- golang.org/x/tools: 113979e → v0.1.0 +- gopkg.in/check.v1: 41f04d3 → 8fa4692 +- gopkg.in/yaml.v2: v2.2.8 → v2.4.0 +- k8s.io/kube-openapi: d219536 → 591a79e +- k8s.io/system-validators: v1.3.0 → v1.4.0 ### Removed -- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) -- github.com/go-ini/ini: [v1.9.0](https://github.com/go-ini/ini/tree/v1.9.0) -- github.com/ugorji/go: [v1.1.4](https://github.com/ugorji/go/tree/v1.1.4) -- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) +- github.com/codegangsta/negroni: [v1.0.0](https://github.com/codegangsta/negroni/tree/v1.0.0) +- github.com/golangplus/bytes: [45c989f](https://github.com/golangplus/bytes/tree/45c989f) +- github.com/golangplus/fmt: [2a5d6d7](https://github.com/golangplus/fmt/tree/2a5d6d7) +- github.com/gorilla/context: [v1.1.1](https://github.com/gorilla/context/tree/v1.1.1) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- sigs.k8s.io/kustomize: 
v2.0.3+incompatible -# v1.20.0-beta.1 +# v1.21.0-beta.0 -## Downloads for v1.20.0-beta.1 +## Downloads for v1.21.0-beta.0 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes.tar.gz) | 4eddf4850c2d57751696f352d0667309339090aeb30ff93e8db8a22c6cdebf74cb2d5dc78d4ae384c4e25491efc39413e2e420a804b76b421a9ad934e56b0667 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-src.tar.gz) | 59de5221162e9b6d88f5abbdb99765cb2b2e501498ea853fb65f2abe390211e28d9f21e0d87be3ade550a5ea6395d04552cf093d2ce2f99fd45ad46545dd13cb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes.tar.gz) | 69b73a03b70b0ed006e9fef3f5b9bc68f0eb8dc40db6cc04777c03a2cb83a008c783012ca186b1c48357fb192403dbcf6960f120924785e2076e215b9012d546 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-src.tar.gz) | 9620fb6d37634271bdd423c09f33f3bd29e74298aa82c47dffc8cb6bd2ff44fa8987a53c53bc529db4ca96ec41503aa81cc8d0c3ac106f3b06c4720de933a8e6 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-darwin-amd64.tar.gz) | d69ffed19b034a4221fc084e43ac293cf392e98febf5bf580f8d92307a8421d8b3aab18f9ca70608937e836b42c7a34e829f88eba6e040218a4486986e2fca21 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-386.tar.gz) | 1b542e165860c4adcd4550adc19b86c3db8cd75d2a1b8db17becc752da78b730ee48f1b0aaf8068d7bfbb1d8e023741ec293543bc3dd0f4037172a6917db8169 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-amd64.tar.gz) | 90ad52785eecb43a6f9035b92b6ba39fc84e67f8bc91cf098e70f8cfdd405c4b9d5c02dccb21022f21bb5b6ce92fdef304def1da0a7255c308e2c5fb3a9cdaab -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm.tar.gz) | d0cb3322b056e1821679afa70728ffc0d3375e8f3326dabbe8185be2e60f665ab8985b13a1a432e10281b84a929e0f036960253ac0dd6e0b44677d539e98e61b -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-arm64.tar.gz) | 3aecc8197e0aa368408624add28a2dd5e73f0d8a48e5e33c19edf91d5323071d16a27353a6f3e22df4f66ed7bfbae8e56e0a9050f7bbdf927ce6aeb29bba6374 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-ppc64le.tar.gz) | 6ff145058f62d478b98f1e418e272555bfb5c7861834fbbf10a8fb334cc7ff09b32f2666a54b230932ba71d2fc7d3b1c1f5e99e6fe6d6ec83926a9b931cd2474 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-linux-s390x.tar.gz) | ff7b8bb894076e05a3524f6327a4a6353b990466f3292e84c92826cb64b5c82b3855f48b8e297ccadc8bcc15552bc056419ff6ff8725fc4e640828af9cc1331b -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-386.tar.gz) | 6c6dcac9c725605763a130b5a975f2b560aa976a5c809d4e0887900701b707baccb9ca1aebc10a03cfa7338a6f42922bbf838ccf6800fc2a3e231686a72568b6 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-client-windows-amd64.tar.gz) | d12e3a29c960f0ddd1b9aabf5426ac1259863ac6c8f2be1736ebeb57ddca6b1c747ee2c363be19e059e38cf71488c5ea3509ad4d0e67fd5087282a5ad0ae9a48 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | 2a6f3fcd6b571f5ccde56b91e6e179a01899244be496dae16a2a16e0405c9437b75c6dc853b56f9a4876a7c0a60ec624ccd28400bf8fb960258263172f6860ba 
+[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-386.tar.gz) | 78fe9ad9f9a9bc043293327223f0038a2c087ca65e87187a6dcae7a24aef9565fe498d295a4639b0b90524469a04930022fcecd815d0afc742eb87ddd8eb7ef5 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | c025f5e5bd132355e7dd1296cf2ec752264e7f754c4d95fc34b076bd75bef2f571d30872bcb3d138ce95c592111353d275a80eb31f82c07000874b4c56282dbd +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm.tar.gz) | 9975cd2f08fbc202575fb15ba6fc51dab23155ca4d294ebb48516a81efa51f58bab3a87d41c865103756189b554c020371d729ad42880ba788f25047ffc46910 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 56a6836e24471e42e9d9a8488453f2d55598d70c8aca0a307d5116139c930c25c469fd0d1ab5060fbe88dad75a9b5209a08dc11d644af5f3ebebfbcb6c16266c +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | b6a6cc9baad0ad85ed079ee80e6d6acc905095cfb440998bbc0f553b94fa80077bd58b8692754de477517663d51161705e6e89a1b6d04aa74819800db3517722 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 7b743481b340f510bf9ae28ea8ea91150aa1e8c37fe104b66d7b3aff62f5e6db3c590d2c13d14dbb5c928de31c7613372def2496075853611d10d6b5fa5b60bd +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-386.tar.gz) | df06c7a524ce84c1f8d7836aa960c550c88dbca0ec4854df4dd0a85b3c84b8ecbc41b54e8c4669ce28ac670659ff0fad795deb1bc539f3c3b3aa885381265f5a +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 4568497b684564f2a94fbea6cbfd778b891231470d9a6956c3b7a3268643d13b855c0fc5ebea5f769300cc0c7719c2c331c387f468816f182f63e515adeaa7a0 ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-amd64.tar.gz) | 904e8c049179e071c6caa65f525f465260bb4d4318a6dd9cc05be2172f39f7cfc69d1672736e01d926045764fe8872e806444e3af77ffef823ede769537b7d20 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm.tar.gz) | 5934959374868aed8d4294de84411972660bca7b2e952201a9403f37e40c60a5c53eaea8001344d0bf4a00c8cd27de6324d88161388de27f263a5761357cb82b -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-arm64.tar.gz) | 4c884585970f80dc5462d9a734d7d5be9558b36c6e326a8a3139423efbd7284fa9f53fb077983647e17e19f03f5cb9bf26201450c78daecf10afa5a1ab5f9efc -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-ppc64le.tar.gz) | 235b78b08440350dcb9f13b63f7722bd090c672d8e724ca5d409256e5a5d4f46d431652a1aa908c3affc5b1e162318471de443d38b93286113e79e7f90501a9b -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-server-linux-s390x.tar.gz) | 220fc9351702b3ecdcf79089892ceb26753a8a1deaf46922ffb3d3b62b999c93fef89440e779ca6043372b963081891b3a966d1a5df0cf261bdd44395fd28dce +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 42883cca2d312153baf693fc6024a295359a421e74fd70eefc927413be4e0353debe634e7cca6b9a8f7d8a0cee3717e03ba5d29a306e93139b1c2f3027535a6d 
+[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm.tar.gz) | e0042215e84c769ba4fc4d159ccf67b2c4a26206bfffb0ec5152723dc813ff9c1426aa0e9b963d7bfa2efb266ca43561b596b459152882ebb42102ccf60bd8eb +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | bfad29d43e14152cb9bc7c4df6aa77929c6eca64a294bb832215bdba9fa0ee2195a2b709c0267dc7426bb371b547ee80bb8461a8c678c9bffa0819aa7db96289 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | ca67674c01c6cebdc8160c85b449eab1a23bb0557418665246e0208543fa2eaaf97679685c7b49bee3a4300904c0399c3d762ae34dc3e279fd69ce792c4b07ff +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 285352b628ec754b01b8ad4ef1427223a142d58ebcb46f6861df14d68643133b32330460b213b1ba5bc5362ff2b6dacd8e0c2d20cce6e760fa1954af8a60df8b ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-amd64.tar.gz) | fe59d3a1f21c47bab126f689687657f77fbcb46a2caeef48eecd073b2b22879f997a466911b5c5c829e9cf27e68a36ecdf18686d42714839d4b97d6c7281578d -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm.tar.gz) | 93e545aa963cfd11e0b2c6d47669b5ef70c5a86ef80c3353c1a074396bff1e8e7371dda25c39d78c7a9e761f2607b8b5ab843fa0c10b8ff9663098fae8d25725 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-arm64.tar.gz) | 5e0f177f9bec406a668d4b37e69b191208551fdf289c82b5ec898959da4f8a00a2b0695cbf1d2de5acb809321c6e5604f5483d33556543d92b96dcf80e814dd3 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-ppc64le.tar.gz) | 574412059e4d257eb904cd4892a075b6a2cde27adfa4976ee64c46d6768facece338475f1b652ad94c8df7cfcbb70ebdf0113be109c7099ab76ffdb6f023eefd -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-linux-s390x.tar.gz) | b1ffaa6d7f77d89885c642663cb14a86f3e2ec2afd223e3bb2000962758cf0f15320969ffc4be93b5826ff22d54fdbae0dbea09f9d8228eda6da50b6fdc88758 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.1/kubernetes-node-windows-amd64.tar.gz) | 388983765213cf3bdc1f8b27103ed79e39028767e5f1571e35ed1f91ed100e49f3027f7b7ff19b53fab7fbb6d723c0439f21fc6ed62be64532c25f5bfa7ee265 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | d92d9b30e7e44134a0cd9db4c01924d365991ea16b3131200b02a82cff89c8701f618cd90e7f1c65427bd4bb5f78b10d540b2262de2c143b401fa44e5b25627b +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 551092f23c27fdea4bb2d0547f6075892534892a96fc2be7786f82b58c93bffdb5e1c20f8f11beb8bed46c24f36d4c18ec5ac9755435489efa28e6ae775739bd +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 26ae7f4163e527349b8818ee38b9ee062314ab417f307afa49c146df8f5a2bd689509b128bd4a1efd3896fd89571149a9955ada91f8ca0c2f599cd863d613c86 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 821fa953f6cebc69d2d481e489f3e90899813d20e2eefbabbcadd019d004108e7540f741fabe60e8e7c6adbb1053ac97898bbdddec3ca19f34a71aa3312e0d4e +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | 
22197d4f66205d5aa9de83dfddcc4f2bb3195fd7067cdb5c21e61dbeae217bc112fb7ecff8a539579b60ad92298c2b4c87b9b7c7e6ec1ee1ffa0c6e4bc4412c1 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | 7e22e0d9603562a04dee16a513579f06b1ff6354d97d669bd68f8777ec7f89f6ef027fb23ab0445d7bba0bb689352f0cc748ce90e3f597c6ebe495464a96b860 -## Changelog since v1.20.0-beta.0 +## Changelog since v1.21.0-alpha.3 + +## Urgent Upgrade Notes +### (No, really, you MUST read this before you upgrade) + + - The metric `storage_operation_errors_total` is not removed, but is marked deprecated, and the metric `storage_operation_status_count` is marked deprecated. In both cases the storage_operation_duration_seconds metric can be used to recover equivalent counts (using `status=fail-unknown` in the case of `storage_operations_errors_total`). ([#99045](https://github.com/kubernetes/kubernetes/pull/99045), [@mattcary](https://github.com/mattcary)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- ACTION REQUIRED: The kube-apiserver ability to serve on an insecure port, deprecated since v1.10, has been removed. The insecure address flags `--address` and `--insecure-bind-address` have no effect in kube-apiserver and will be removed in v1.24. The insecure port flags `--port` and `--insecure-port` may only be set to 0 and will be removed in v1.24. ([#95856](https://github.com/kubernetes/kubernetes/pull/95856), [@knight42](https://github.com/knight42)) [SIG API Machinery, Node and Testing] +- The `batch/v2alpha1` CronJob type definitions and clients are deprecated and removed. ([#96987](https://github.com/kubernetes/kubernetes/pull/96987), [@soltysh](https://github.com/soltysh)) [SIG API Machinery, Apps, CLI and Testing] ### API Change -- + `TokenRequest` and `TokenRequestProjection` features have been promoted to GA. This feature allows generating service account tokens that are not visible in Secret objects and are tied to the lifetime of a Pod object. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection for details on configuring and using this feature. The `TokenRequest` and `TokenRequestProjection` feature gates will be removed in v1.21. - + kubeadm's kube-apiserver Pod manifest now includes the following flags by default "--service-account-key-file", "--service-account-signing-key-file", "--service-account-issuer". ([#93258](https://github.com/kubernetes/kubernetes/pull/93258), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle, Storage and Testing] -- Certain fields on Service objects will be automatically cleared when changing the service's `type` to a mode that does not need those fields. For example, changing from type=LoadBalancer to type=ClusterIP will clear the NodePort assignments, rather than forcing the user to clear them. ([#95196](https://github.com/kubernetes/kubernetes/pull/95196), [@thockin](https://github.com/thockin)) [SIG API Machinery, Apps, Network and Testing] -- Services will now have a `clusterIPs` field to go with `clusterIP`. `clusterIPs[0]` is a synonym for `clusterIP` and will be syncronized on create and update operations. 
([#95894](https://github.com/kubernetes/kubernetes/pull/95894), [@thockin](https://github.com/thockin)) [SIG Network] +- Cluster admins can now turn off /debug/pprof and /debug/flags/v endpoint in kubelet by setting enableProfilingHandler and enableDebugFlagsHandler to false in their kubelet configuration file. enableProfilingHandler and enableDebugFlagsHandler can be set to true only when enableDebuggingHandlers is also set to true. ([#98458](https://github.com/kubernetes/kubernetes/pull/98458), [@SaranBalaji90](https://github.com/SaranBalaji90)) [SIG Node] +- The BoundServiceAccountTokenVolume feature has been promoted to beta, and enabled by default. + - This changes the tokens provided to containers at `/var/run/secrets/kubernetes.io/serviceaccount/token` to be time-limited, auto-refreshed, and invalidated when the containing pod is deleted. + - Clients should reload the token from disk periodically (once per minute is recommended) to ensure they continue to use a valid token. `k8s.io/client-go` version v11.0.0+ and v0.15.0+ reload tokens automatically. + - By default, injected tokens are given an extended lifetime so they remain valid even after a new refreshed token is provided. The metric `serviceaccount_stale_tokens_total` can be used to monitor for workloads that are depending on the extended lifetime and are continuing to use tokens even after a refreshed token is provided to the container. If that metric indicates no existing workloads are depending on extended lifetimes, injected token lifetime can be shortened to 1 hour by starting `kube-apiserver` with `--service-account-extend-token-expiration=false`. ([#95667](https://github.com/kubernetes/kubernetes/pull/95667), [@zshihang](https://github.com/zshihang)) [SIG API Machinery, Auth, Cluster Lifecycle and Testing] ### Feature -- A new metric `apiserver_request_filter_duration_seconds` has been introduced that - measures request filter latency in seconds. ([#95207](https://github.com/kubernetes/kubernetes/pull/95207), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Instrumentation] -- Add a new flag to set priority for the kubelet on Windows nodes so that workloads cannot overwhelm the node there by disrupting kubelet process. ([#96051](https://github.com/kubernetes/kubernetes/pull/96051), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node and Windows] -- Changed: default "Accept: */*" header added to HTTP probes. See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes (https://github.com/kubernetes/website/pull/24756) ([#95641](https://github.com/kubernetes/kubernetes/pull/95641), [@fonsecas72](https://github.com/fonsecas72)) [SIG Network and Node] -- Client-go credential plugins can now be passed in the current cluster information via the KUBERNETES_EXEC_INFO environment variable. 
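For the BoundServiceAccountTokenVolume note above: clients that read the projected token themselves, rather than going through a `k8s.io/client-go` version that reloads it automatically, can follow the once-per-minute reload recommendation by simply re-reading the projected file. A minimal sketch; only the token path and the one-minute interval come from the release note, everything else is illustrative.

```go
package main

import (
	"fmt"
	"os"
	"sync"
	"time"
)

const tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"

// tokenSource re-reads the projected service account token from disk so that
// requests never keep using an expired, time-limited token.
type tokenSource struct {
	mu    sync.RWMutex
	token string
}

func (t *tokenSource) Token() string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.token
}

func (t *tokenSource) refresh() error {
	b, err := os.ReadFile(tokenPath)
	if err != nil {
		return err
	}
	t.mu.Lock()
	t.token = string(b)
	t.mu.Unlock()
	return nil
}

func main() {
	ts := &tokenSource{}
	if err := ts.refresh(); err != nil {
		panic(err)
	}

	// Reload once per minute, per the recommendation in the release note.
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		if err := ts.refresh(); err != nil {
			fmt.Fprintln(os.Stderr, "token refresh failed:", err)
			continue
		}
		// Requests built after this point should call ts.Token() again so they
		// pick up the refreshed, still-valid token as their Bearer credential.
	}
}
```

Keeping the read behind a small accessor like this makes it easy for request-building code to pick up the refreshed token every time.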
([#95489](https://github.com/kubernetes/kubernetes/pull/95489), [@ankeesler](https://github.com/ankeesler)) [SIG API Machinery and Auth] -- Kube-apiserver: added support for compressing rotated audit log files with `--audit-log-compress` ([#94066](https://github.com/kubernetes/kubernetes/pull/94066), [@lojies](https://github.com/lojies)) [SIG API Machinery and Auth] +- A new histogram metric to track the time it takes the ttl-after-finished controller to delete a job ([#98676](https://github.com/kubernetes/kubernetes/pull/98676), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Instrumentation] +- AWS cloudprovider supports auto-discovering subnets without any kubernetes.io/cluster/ tags. It also supports additional service annotation service.beta.kubernetes.io/aws-load-balancer-subnets to manually configure the subnets. ([#97431](https://github.com/kubernetes/kubernetes/pull/97431), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] +- Add --permit-address-sharing flag to kube-apiserver to listen with SO_REUSEADDR. While allowing listening on wildcard IPs like 0.0.0.0 and on specific IPs in parallel, it avoids waiting for the kernel to release sockets in TIME_WAIT state, and hence considerably reduces kube-apiserver restart times under certain conditions (see the socket-level sketch below). ([#93861](https://github.com/kubernetes/kubernetes/pull/93861), [@sttts](https://github.com/sttts)) [SIG API Machinery] +- Add `csi_operations_seconds` metric on kubelet that exposes CSI operations duration and status for node CSI operations. ([#98979](https://github.com/kubernetes/kubernetes/pull/98979), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Instrumentation and Storage] +- Add `migrated` field into `storage_operation_duration_seconds` metric ([#99050](https://github.com/kubernetes/kubernetes/pull/99050), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Instrumentation and Storage] +- Add bash-completion for comma separated list on `kubectl get` ([#98301](https://github.com/kubernetes/kubernetes/pull/98301), [@phil9909](https://github.com/phil9909)) [SIG CLI] +- Added support for installing arm64 node artifacts. ([#99242](https://github.com/kubernetes/kubernetes/pull/99242), [@liu-cong](https://github.com/liu-cong)) [SIG Cloud Provider] +- Feature gate RootCAConfigMap is graduated to GA in 1.21 and will be removed in 1.22. ([#98033](https://github.com/kubernetes/kubernetes/pull/98033), [@zshihang](https://github.com/zshihang)) [SIG API Machinery and Auth] +- Kubeadm: during "init" and "join" perform preflight validation on the host / node name and throw warnings if a name is not compliant ([#99194](https://github.com/kubernetes/kubernetes/pull/99194), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle] +- Kubectl: `kubectl get` will omit managed fields by default now. Users can set `--show-managed-fields` to true to show managedFields when the output format is either `json` or `yaml`. ([#96878](https://github.com/kubernetes/kubernetes/pull/96878), [@knight42](https://github.com/knight42)) [SIG CLI and Testing] +- Metrics can now be disabled explicitly via a command line flag (i.e.
'--disabled-metrics=bad_metric1,bad_metric2') ([#99217](https://github.com/kubernetes/kubernetes/pull/99217), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Cluster Lifecycle and Instrumentation] +- TTLAfterFinished is now beta and enabled by default ([#98678](https://github.com/kubernetes/kubernetes/pull/98678), [@ahg-g](https://github.com/ahg-g)) [SIG Apps and Auth] +- The `RunAsGroup` feature has been promoted to GA in this release. ([#94641](https://github.com/kubernetes/kubernetes/pull/94641), [@krmayankk](https://github.com/krmayankk)) [SIG Auth and Node] +- Turn CronJobControllerV2 on by default. ([#98878](https://github.com/kubernetes/kubernetes/pull/98878), [@soltysh](https://github.com/soltysh)) [SIG Apps] +- UDP protocol support for Agnhost connect subcommand ([#98639](https://github.com/kubernetes/kubernetes/pull/98639), [@knabben](https://github.com/knabben)) [SIG Testing] +- Upgrades `IPv6Dualstack` to `Beta` and turns it on by default. New and existing clusters will not be affected until users start adding secondary pod and service CIDR CLI flags as described here: https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/563-dual-stack ([#98969](https://github.com/kubernetes/kubernetes/pull/98969), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, Cloud Provider, Network and Node] ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Fix ALPHA stability level reference link ([#98641](https://github.com/kubernetes/kubernetes/pull/98641), [@Jeffwan](https://github.com/Jeffwan)) [SIG Auth, Cloud Provider, Instrumentation and Storage] + +### Failing Test + +- Escape the special characters like `[`, `]` and ` ` that exist in vsphere windows path ([#98830](https://github.com/kubernetes/kubernetes/pull/98830), [@liyanhui1228](https://github.com/liyanhui1228)) [SIG Storage and Windows] +- Kube-proxy: fix a bug on UDP NodePort Services where stale conntrack entries may blackhole the traffic directed to the NodePort. ([#98305](https://github.com/kubernetes/kubernetes/pull/98305), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- Added support to kube-proxy for externalTrafficPolicy=Local setting via Direct Server Return (DSR) load balancers on Windows. ([#93166](https://github.com/kubernetes/kubernetes/pull/93166), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Disable watchcache for events ([#96052](https://github.com/kubernetes/kubernetes/pull/96052), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Disabled `LocalStorageCapacityIsolation` feature gate is honored during scheduling. 
([#96092](https://github.com/kubernetes/kubernetes/pull/96092), [@Huang-Wei](https://github.com/Huang-Wei)) [SIG Scheduling] -- Fix bug in JSON path parser where an error occurs when a range is empty ([#95933](https://github.com/kubernetes/kubernetes/pull/95933), [@brianpursley](https://github.com/brianpursley)) [SIG API Machinery] -- Fix k8s.io/apimachinery/pkg/api/meta.SetStatusCondition to update ObservedGeneration ([#95961](https://github.com/kubernetes/kubernetes/pull/95961), [@KnicKnic](https://github.com/KnicKnic)) [SIG API Machinery] -- Fixed a regression which prevented pods with `docker/default` seccomp annotations from being created in 1.19 if a PodSecurityPolicy was in place which did not allow `runtime/default` seccomp profiles. ([#95985](https://github.com/kubernetes/kubernetes/pull/95985), [@saschagrunert](https://github.com/saschagrunert)) [SIG Auth] -- Kubectl: print error if users place flags before plugin name ([#92343](https://github.com/kubernetes/kubernetes/pull/92343), [@knight42](https://github.com/knight42)) [SIG CLI] -- When creating a PVC with the volume.beta.kubernetes.io/storage-provisioner annotation already set, the PV controller might have incorrectly deleted the newly provisioned PV instead of binding it to the PVC, depending on timing and system load. ([#95909](https://github.com/kubernetes/kubernetes/pull/95909), [@pohly](https://github.com/pohly)) [SIG Apps and Storage] +- Add missing --kube-api-content-type in kubemark hollow template ([#98911](https://github.com/kubernetes/kubernetes/pull/98911), [@Jeffwan](https://github.com/Jeffwan)) [SIG Scalability and Testing] +- Avoid duplicate error messages when running kubectl edit quota ([#98201](https://github.com/kubernetes/kubernetes/pull/98201), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery and Apps] +- Cleanup subnet in frontend IP configs to prevent huge subnet request bodies in some scenarios. ([#98133](https://github.com/kubernetes/kubernetes/pull/98133), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix errors when accessing Windows container stats for Dockershim ([#98510](https://github.com/kubernetes/kubernetes/pull/98510), [@jsturtevant](https://github.com/jsturtevant)) [SIG Node and Windows] +- Fixes spurious errors about IPv6 in kube-proxy logs on nodes with IPv6 disabled. ([#99127](https://github.com/kubernetes/kubernetes/pull/99127), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] +- Fixed a bug in identifying the containerd process in the method that ensures docker and containerd are in the correct containers with the proper OOM score set up. ([#97888](https://github.com/kubernetes/kubernetes/pull/97888), [@pacoxu](https://github.com/pacoxu)) [SIG Node] +- Kubelet now cleans up orphaned volume directories automatically ([#95301](https://github.com/kubernetes/kubernetes/pull/95301), [@lorenz](https://github.com/lorenz)) [SIG Node and Storage] +- When dynamically provisioning Azure File volumes for a premium account, the requested size will be set to 100GB if the request is initially lower than this value to accommodate Azure File requirements. 
([#99122](https://github.com/kubernetes/kubernetes/pull/99122), [@huffmanca](https://github.com/huffmanca)) [SIG Cloud Provider and Storage] ### Other (Cleanup or Flake) -- Kubectl: the `generator` flag of `kubectl autoscale` has been deprecated and has no effect, it will be removed in a feature release ([#92998](https://github.com/kubernetes/kubernetes/pull/92998), [@SataQiu](https://github.com/SataQiu)) [SIG CLI] -- V1helpers.MatchNodeSelectorTerms now accepts just a Node and a list of Terms ([#95871](https://github.com/kubernetes/kubernetes/pull/95871), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] -- `MatchNodeSelectorTerms` function moved to `k8s.io/component-helpers` ([#95531](https://github.com/kubernetes/kubernetes/pull/95531), [@damemi](https://github.com/damemi)) [SIG Apps, Scheduling and Storage] +- APIs for kubelet annotations and labels from k8s.io/kubernetes/pkg/kubelet/apis are now available under k8s.io/kubelet/pkg/apis/ ([#98931](https://github.com/kubernetes/kubernetes/pull/98931), [@michaelbeaumont](https://github.com/michaelbeaumont)) [SIG Apps, Auth and Node] +- Migrate `pkg/kubelet/(pod, pleg)` to structured logging ([#98990](https://github.com/kubernetes/kubernetes/pull/98990), [@gjkim42](https://github.com/gjkim42)) [SIG Instrumentation and Node] +- Migrate pkg/kubelet/nodestatus to structured logging ([#99001](https://github.com/kubernetes/kubernetes/pull/99001), [@QiWang19](https://github.com/QiWang19)) [SIG Node] +- Migrate pkg/kubelet/server logs to structured logging ([#98643](https://github.com/kubernetes/kubernetes/pull/98643), [@chenyw1990](https://github.com/chenyw1990)) [SIG Node] +- Migrate proxy/winkernel/proxier.go logs to structured logging ([#98001](https://github.com/kubernetes/kubernetes/pull/98001), [@JornShen](https://github.com/JornShen)) [SIG Network and Windows] +- Migrate scheduling_queue.go to structured logging ([#98358](https://github.com/kubernetes/kubernetes/pull/98358), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Several flags related to the deprecated dockershim which are present in the kubelet command line are now deprecated. ([#98730](https://github.com/kubernetes/kubernetes/pull/98730), [@dims](https://github.com/dims)) [SIG Node] +- The deprecated feature gates `CSIDriverRegistry`, `BlockVolume` and `CSIBlockVolume` are now unconditionally enabled and can no longer be specified in component invocations. 
([#98021](https://github.com/kubernetes/kubernetes/pull/98021), [@gavinfish](https://github.com/gavinfish)) [SIG Storage] ## Dependencies @@ -1379,763 +1186,530 @@ filename | sha512 hash _Nothing has changed._ ### Changed -_Nothing has changed._ +- sigs.k8s.io/structured-merge-diff/v4: v4.0.2 → v4.0.3 ### Removed _Nothing has changed._ -# v1.20.0-beta.0 +# v1.21.0-alpha.3 -## Downloads for v1.20.0-beta.0 +## Downloads for v1.21.0-alpha.3 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes.tar.gz) | 385e49e32bbd6996f07bcadbf42285755b8a8ef9826ee1ba42bd82c65827cf13f63e5634b834451b263a93b708299cbb4b4b0b8ddbc688433deaf6bec240aa67 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-src.tar.gz) | 842e80f6dcad461426fb699de8a55fde8621d76a94e54288fe9939cc1a3bbd0f4799abadac2c59bcf3f91d743726dbd17e1755312ae7fec482ef560f336dbcbb +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes.tar.gz) | 704ec916a1dbd134c54184d2652671f80ae09274f9d23dbbed312944ebeccbc173e2e6b6949b38bdbbfdaf8aa032844deead5efeda1b3150f9751386d9184bc8 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-src.tar.gz) | 57db9e7560cfc9c10e7059cb5faf9c4bd5eb8f9b7964f44f000a417021cf80873184b774e7c66c80d4aba84c14080c6bc335618db3d2e5f276436ae065e25408 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-darwin-amd64.tar.gz) | bde5e7d9ee3e79d1e69465a3ddb4bb36819a4f281b5c01a7976816d7c784410812dde133cdf941c47e5434e9520701b9c5e8b94d61dca77c172f87488dfaeb26 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-386.tar.gz) | 721bb8444c9e0d7a9f8461e3f5428882d76fcb3def6eb11b8e8e08fae7f7383630699248660d69d4f6a774124d6437888666e1fa81298d5b5518bc4a6a6b2c92 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-amd64.tar.gz) | 71e4edc41afbd65f813e7ecbc22b27c95f248446f005e288d758138dc4cc708735be7218af51bcf15e8b9893a3598c45d6a685f605b46f50af3762b02c32ed76 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm.tar.gz) | bbefc749156f63898973f2f7c7a6f1467481329fb430d641fe659b497e64d679886482d557ebdddb95932b93de8d1e3e365c91d4bf9f110b68bd94b0ba702ded -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-arm64.tar.gz) | 9803190685058b4b64d002c2fbfb313308bcea4734ed53a8c340cfdae4894d8cb13b3e819ae64051bafe0fbf8b6ecab53a6c1dcf661c57640c75b0eb60041113 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-ppc64le.tar.gz) | bcdceea64cba1ae38ea2bab50d8fd77c53f6d673de12566050b0e3c204334610e6c19e4ace763e68b5e48ab9e811521208b852b1741627be30a2b17324fc1daf -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-linux-s390x.tar.gz) | 41e36d00867e90012d5d5adfabfaae8d9f5a9fd32f290811e3c368e11822916b973afaaf43961081197f2cbab234090d97d89774e674aeadc1da61f7a64708a9 -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-386.tar.gz) | c50fec5aec2d0e742f851f25c236cb73e76f8fc73b0908049a10ae736c0205b8fff83eb3d29b1748412edd942da00dd738195d9003f25b577d6af8359d84fb2f -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-client-windows-amd64.tar.gz) | 
0fd6777c349908b6d627e849ea2d34c048b8de41f7df8a19898623f597e6debd35b7bcbf8e1d43a1be3a9abb45e4810bc498a0963cf780b109e93211659e9c7e +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | e2706efda92d5cf4f8b69503bb2f7703a8754407eff7f199bb77847838070e720e5f572126c14daa4c0c03b59bb1a63c1dfdeb6e936a40eff1d5497e871e3409 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 007bb23c576356ed0890bdfd25a0f98d552599e0ffec19fb982591183c7c1f216d8a3ffa3abf15216be12ae5c4b91fdcd48a7306a2d26b007b86a6abd553fc61 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | 39504b0c610348beba60e8866fff265bad58034f74504951cd894c151a248db718d10f77ebc83f2c38b2d517f8513a46325b38889eefa261ca6dbffeceba50ff +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | 30bc2c40d0c759365422ad1651a6fb35909be771f463c5b971caf401f9209525d05256ab70c807e88628dd357c2896745eecf13eda0b748464da97d0a5ef2066 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 085cdf574dc8fd33ece667130b8c45830b522a07860e03a2384283b1adea73a9652ef3dfaa566e69ee00aea1a6461608814b3ce7a3f703e4a934304f7ae12f97 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b34b845037d83ea7b3e2d80a9ede4f889b71b17b93b1445f0d936a36e98c13ed6ada125630a68d9243a5fcd311ee37cdcc0c05da484da8488ea5060bc529dbfc +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | c4758adc7a404b776556efaa79655db2a70777c562145d6ea6887f3335988367a0c2fcd4383e469340f2a768b22e786951de212805ca1cb91104d41c21e0c9ce +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-386.tar.gz) | f51edc79702bbd1d9cb3a672852a405e11b20feeab64c5411a7e85c9af304960663eb6b23ef96e0f8c44a722fecf58cb6d700ea2c42c05b3269d8efd5ad803f2 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 6a3507ce4ac40a0dc7e4720538863fa15f8faf025085a032f34b8fa0f6fa4e8c26849baf649b5b32829b9182e04f82721b13950d31cf218c35be6bf1c05d6abf ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-amd64.tar.gz) | 30d982424ca64bf0923503ae8195b2e2a59497096b2d9e58dfd491cd6639633027acfa9750bc7bccf34e1dc116d29d2f87cbd7ae713db4210ce9ac16182f0576 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm.tar.gz) | f08b62be9bc6f0745f820b0083c7a31eedb2ce370a037c768459a59192107b944c8f4345d0bb88fc975f2e7a803ac692c9ac3e16d4a659249d4600e84ff75d9e -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-arm64.tar.gz) | e3472b5b3dfae0a56e5363d52062b1e4a9fc227a05e0cf5ece38233b2c442f427970aab94a52377fb87e583663c120760d154bc1c4ac22dca1f4d0d1ebb96088 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-ppc64le.tar.gz) | 06c254e0a62f755d31bc40093d86c44974f0a60308716cc3214a6b3c249a4d74534d909b82f8a3dd3a3c9720e61465b45d2bb3a327ef85d3caba865750020dfb -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-server-linux-s390x.tar.gz) | 
2edeb4411c26a0de057a66787091ab1044f71774a464aed898ffee26634a40127181c2edddb38e786b6757cca878fd0c3a885880eec6c3448b93c645770abb12 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 19181d162dfb0b30236e2bf1111000e037eece87c037ca2b24622ca94cb88db86aa4da4ca533522518b209bc9983bbfd6b880a7898e0da96b33f3f6c4690539b +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 42a02f9e08a78ad5da6e5fa1ab12bf1e3c967c472fdbdadbd8746586da74dc8093682ba8513ff2a5301393c47ee9021b860e88ada56b13da386ef485708e46ca +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 3c8ba8eb02f70061689bd7fab7813542005efe2edc6cfc6b7aecd03ffedf0b81819ad91d69fff588e83023d595eefbfe636aa55e1856add8733bf42fff3c748f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | cd9e6537450411c39a06fd0b5819db3d16b668d403fb3627ec32c0e32dd1c4860e942934578ca0e1d1b8e6f21f450ff81e37e0cd46ff5c5faf7847ab074aefc5 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | ada3f65e53bc0e0c0229694dd48c425388089d6d77111a62476d1b08f6ad1d8ab3d60b9ed7d95ac1b42c2c6be8dc0618f40679717160769743c43583d8452362 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-amd64.tar.gz) | cc1d5b94b86070b5e7746d7aaeaeac3b3a5e5ebbff1ec33885f7eeab270a6177d593cb1975b2e56f4430b7859ad42da76f266629f9313e0f688571691ac448ed -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm.tar.gz) | 75e82c7c9122add3b24695b94dcb0723c52420c3956abf47511e37785aa48a1fa8257db090c6601010c4475a325ccfff13eb3352b65e3aa1774f104b09b766b0 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-arm64.tar.gz) | 16ef27c40bf4d678a55fcd3d3f7d09f1597eec2cc58f9950946f0901e52b82287be397ad7f65e8d162d8a9cdb4a34a610b6db8b5d0462be8e27c4b6eb5d6e5e7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-ppc64le.tar.gz) | 939865f2c4cb6a8934f22a06223e416dec5f768ffc1010314586149470420a1d62aef97527c34d8a636621c9669d6489908ce1caf96f109e8d073cee1c030b50 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-linux-s390x.tar.gz) | bbfdd844075fb816079af7b73d99bc1a78f41717cdbadb043f6f5872b4dc47bc619f7f95e2680d4b516146db492c630c17424e36879edb45e40c91bc2ae4493c -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-beta.0/kubernetes-node-windows-amd64.tar.gz) | a2b3ea40086fd71aed71a4858fd3fc79fd1907bc9ea8048ff3c82ec56477b0a791b724e5a52d79b3b36338c7fbd93dfd3d03b00ccea9042bda0d270fc891e4ec +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | ae0fec6aa59e49624b55d9a11c12fdf717ddfe04bdfd4f69965d03004a34e52ee4a3e83f7b61d0c6a86f43b72c99f3decb195b39ae529ef30526d18ec5f58f83 +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | 9a48c140ab53b7ed8ecec6903988a1a474efc16d2538e5974bc9a12f0c9190be78c4f9e326bf4e982d0b7045a80b99dd0fda7e9b650663be5b89bfd991596746 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | 6912adbc9300344bea470d6435f7b387bfce59767078c11728ce59faf47cd3f72b41b9604fcc5cda45e9816fe939fbe2fb33e52a773e6ff2dfa9a615b4df6141 
+[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | d66dccfe3e6ed6d81567c70703f15375a53992b3a5e2814b98c32e581b861ad95912e03ed2562415d087624c008038bb4a816611fa255442ae752968ea15856b +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | ad8c69a28f1fbafa3f1cb54909bfd3fc22b104bed63d7ca2b296208c9d43eb5f2943a0ff267da4c185186cdd9f7f77b315cd7f5f1bf9858c0bf42eceb9ac3c58 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 91d723aa848a9cb028f5bcb41090ca346fb973961521d025c4399164de2c8029b57ca2c4daca560d3c782c05265d2eb0edb0abcce6f23d3efbecf2316a54d650 -## Changelog since v1.20.0-alpha.3 +## Changelog since v1.21.0-alpha.2 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Kubeadm: improve the validation of serviceSubnet and podSubnet. - ServiceSubnet has to be limited in size, due to implementation details, and the mask can not allocate more than 20 bits. - PodSubnet validates against the corresponding cluster "--node-cidr-mask-size" of the kube-controller-manager, it fail if the values are not compatible. - kubeadm no longer sets the node-mask automatically on IPv6 deployments, you must check that your IPv6 service subnet mask is compatible with the default node mask /64 or set it accordenly. - Previously, for IPv6, if the podSubnet had a mask lower than /112, kubeadm calculated a node-mask to be multiple of eight and splitting the available bits to maximise the number used for nodes. ([#95723](https://github.com/kubernetes/kubernetes/pull/95723), [@aojea](https://github.com/aojea)) [SIG Cluster Lifecycle] - - Windows hyper-v container featuregate is deprecated in 1.20 and will be removed in 1.21 ([#95505](https://github.com/kubernetes/kubernetes/pull/95505), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] - + - Newly provisioned PVs by gce-pd will no longer have the beta FailureDomain label. gce-pd volume plugin will start to have GA topology label instead. ([#98700](https://github.com/kubernetes/kubernetes/pull/98700), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider, Storage and Testing] + - Remove alpha CSIMigrationXXComplete flag and add alpha InTreePluginXXUnregister flag. Deprecate CSIMigrationvSphereComplete flag and it will be removed in 1.22. ([#98243](https://github.com/kubernetes/kubernetes/pull/98243), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Node and Storage] + ## Changes by Kind -### Deprecation - -- Support 'controlplane' as a valid EgressSelection type in the EgressSelectorConfiguration API. 'Master' is deprecated and will be removed in v1.22. ([#95235](https://github.com/kubernetes/kubernetes/pull/95235), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] - ### API Change -- Add dual-stack Services (alpha). This is a BREAKING CHANGE to an alpha API. - It changes the dual-stack API wrt Service from a single ipFamily field to 3 - fields: ipFamilyPolicy (SingleStack, PreferDualStack, RequireDualStack), - ipFamilies (a list of families assigned), and clusterIPs (inclusive of - clusterIP). Most users do not need to set anything at all, defaulting will - handle it for them. Services are single-stack unless the user asks for - dual-stack. This is all gated by the "IPv6DualStack" feature gate. 
([#91824](https://github.com/kubernetes/kubernetes/pull/91824), [@khenidak](https://github.com/khenidak)) [SIG API Machinery, Apps, CLI, Network, Node, Scheduling and Testing] -- Introduces a metric source for HPAs which allows scaling based on container resource usage. ([#90691](https://github.com/kubernetes/kubernetes/pull/90691), [@arjunrn](https://github.com/arjunrn)) [SIG API Machinery, Apps, Autoscaling and CLI] +- Adds support for portRange / EndPort in Network Policy ([#97058](https://github.com/kubernetes/kubernetes/pull/97058), [@rikatz](https://github.com/rikatz)) [SIG Apps and Network] +- Fixes using server-side apply with APIService resources ([#98576](https://github.com/kubernetes/kubernetes/pull/98576), [@kevindelgado](https://github.com/kevindelgado)) [SIG API Machinery, Apps and Testing] +- Kubernetes is now built using go1.15.7 ([#98363](https://github.com/kubernetes/kubernetes/pull/98363), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Node, Release and Testing] +- Scheduler extender filter interface now can report unresolvable failed nodes in the new field `FailedAndUnresolvableNodes` of `ExtenderFilterResult` struct. Nodes in this map will be skipped in the preemption phase. ([#92866](https://github.com/kubernetes/kubernetes/pull/92866), [@cofyc](https://github.com/cofyc)) [SIG Scheduling] ### Feature -- Add a metric for time taken to perform recursive permission change ([#95866](https://github.com/kubernetes/kubernetes/pull/95866), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] -- Allow cross compilation of kubernetes on different platforms. ([#94403](https://github.com/kubernetes/kubernetes/pull/94403), [@bnrjee](https://github.com/bnrjee)) [SIG Release] -- Command to start network proxy changes from 'KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE ./cluster/kube-up.sh' to 'KUBE_ENABLE_KONNECTIVITY_SERVICE=true ./hack/kube-up.sh' ([#92669](https://github.com/kubernetes/kubernetes/pull/92669), [@Jefftree](https://github.com/Jefftree)) [SIG Cloud Provider] -- DefaultPodTopologySpread graduated to Beta. The feature gate is enabled by default. ([#95631](https://github.com/kubernetes/kubernetes/pull/95631), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling and Testing] -- Kubernetes E2E test image manifest lists now contain Windows images. ([#77398](https://github.com/kubernetes/kubernetes/pull/77398), [@claudiubelu](https://github.com/claudiubelu)) [SIG Testing and Windows] -- Support for Windows container images (OS Versions: 1809, 1903, 1909, 2004) was added the pause:3.4 image. ([#91452](https://github.com/kubernetes/kubernetes/pull/91452), [@claudiubelu](https://github.com/claudiubelu)) [SIG Node, Release and Windows] +- A lease can only attach up to 10k objects. 
([#98257](https://github.com/kubernetes/kubernetes/pull/98257), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery] +- Add ignore-errors flag for drain, support non-breaking drain in a group ([#98203](https://github.com/kubernetes/kubernetes/pull/98203), [@yuzhiquan](https://github.com/yuzhiquan)) [SIG CLI] +- Base-images: Update to debian-iptables:buster-v1.4.0 + - Uses iptables 1.8.5 + - base-images: Update to debian-base:buster-v1.3.0 + - cluster/images/etcd: Build etcd:3.4.13-2 image + - Uses debian-base:buster-v1.3.0 ([#98401](https://github.com/kubernetes/kubernetes/pull/98401), [@pacoxu](https://github.com/pacoxu)) [SIG Testing] +- Export NewDebuggingRoundTripper function and DebugLevel options in the k8s.io/client-go/transport package. ([#98324](https://github.com/kubernetes/kubernetes/pull/98324), [@atosatto](https://github.com/atosatto)) [SIG API Machinery] +- Kubectl wait ensures that observedGeneration >= generation if applicable ([#97408](https://github.com/kubernetes/kubernetes/pull/97408), [@KnicKnic](https://github.com/KnicKnic)) [SIG CLI] +- Kubernetes is now built using go1.15.8 ([#98834](https://github.com/kubernetes/kubernetes/pull/98834), [@cpanato](https://github.com/cpanato)) [SIG Cloud Provider, Instrumentation, Release and Testing] +- New admission controller "denyserviceexternalips" is available. Clusters which do not *need* the Service "externalIPs" feature should enable this controller and be more secure. ([#97395](https://github.com/kubernetes/kubernetes/pull/97395), [@thockin](https://github.com/thockin)) [SIG API Machinery] +- Overall, enabling the `PreferNominatedNode` feature will improve scheduling performance where preemption happens frequently, but in theory, with `PreferNominatedNode` enabled, the pod might not be scheduled to the best candidate node in the cluster. ([#93179](https://github.com/kubernetes/kubernetes/pull/93179), [@chendave](https://github.com/chendave)) [SIG Scheduling and Testing] +- Pause image upgraded to 3.4.1 in kubelet and kubeadm for both Linux and Windows. ([#98205](https://github.com/kubernetes/kubernetes/pull/98205), [@pacoxu](https://github.com/pacoxu)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Node, Testing and Windows] +- The `ServiceAccountIssuerDiscovery` feature has graduated to GA, and is unconditionally enabled. The `ServiceAccountIssuerDiscovery` feature-gate will be removed in 1.22. ([#98553](https://github.com/kubernetes/kubernetes/pull/98553), [@mtaufen](https://github.com/mtaufen)) [SIG API Machinery, Auth and Testing] ### Documentation -- Fake dynamic client: document that List does not preserve TypeMeta in UnstructuredList ([#95117](https://github.com/kubernetes/kubernetes/pull/95117), [@andrewsykim](https://github.com/andrewsykim)) [SIG API Machinery] +- Feat: azure file migration goes beta in 1.21. Feature gates CSIMigration to Beta (on by default) and CSIMigrationAzureFile to Beta (off by default since it requires installation of the AzureFile CSI Driver) + The in-tree AzureFile plugin "kubernetes.io/azure-file" is now deprecated and will be removed in 1.23. Users should enable CSIMigration + CSIMigrationAzureFile features and install the AzureFile CSI Driver (https://github.com/kubernetes-sigs/azurefile-csi-driver) to avoid disruption to existing Pod and PVC objects at that time. + Users should start using the AzureFile CSI Driver directly for any new volumes. 
([#96293](https://github.com/kubernetes/kubernetes/pull/96293), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -### Bug or Regression - -- Exposes and sets a default timeout for the SubjectAccessReview client for DelegatingAuthorizationOptions. ([#95725](https://github.com/kubernetes/kubernetes/pull/95725), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery and Cloud Provider] -- Alter wording to describe pods using a pvc ([#95635](https://github.com/kubernetes/kubernetes/pull/95635), [@RaunakShah](https://github.com/RaunakShah)) [SIG CLI] -- If we set SelectPolicy MinPolicySelect on scaleUp behavior or scaleDown behavior,Horizontal Pod Autoscaler doesn`t automatically scale the number of pods correctly ([#95647](https://github.com/kubernetes/kubernetes/pull/95647), [@JoshuaAndrew](https://github.com/JoshuaAndrew)) [SIG Apps and Autoscaling] -- Ignore apparmor for non-linux operating systems ([#93220](https://github.com/kubernetes/kubernetes/pull/93220), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Ipvs: ensure selected scheduler kernel modules are loaded ([#93040](https://github.com/kubernetes/kubernetes/pull/93040), [@cmluciano](https://github.com/cmluciano)) [SIG Network] -- Kubeadm: add missing "--experimental-patches" flag to "kubeadm init phase control-plane" ([#95786](https://github.com/kubernetes/kubernetes/pull/95786), [@Sh4d1](https://github.com/Sh4d1)) [SIG Cluster Lifecycle] -- Reorganized iptables rules to fix a performance issue ([#95252](https://github.com/kubernetes/kubernetes/pull/95252), [@tssurya](https://github.com/tssurya)) [SIG Network] -- Unhealthy pods covered by PDBs can be successfully evicted if enough healthy pods are available. ([#94381](https://github.com/kubernetes/kubernetes/pull/94381), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Update the PIP when it is not in the Succeeded provisioning state during the LB update. 
([#95748](https://github.com/kubernetes/kubernetes/pull/95748), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Update the frontend IP config when the service's `pipName` annotation is changed ([#95813](https://github.com/kubernetes/kubernetes/pull/95813), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] - -### Other (Cleanup or Flake) - -- NO ([#95690](https://github.com/kubernetes/kubernetes/pull/95690), [@nikhita](https://github.com/nikhita)) [SIG Release] - -## Dependencies - -### Added -- github.com/form3tech-oss/jwt-go: [v3.2.2+incompatible](https://github.com/form3tech-oss/jwt-go/tree/v3.2.2) - -### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.9.0 → v0.9.5](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.9.0...v0.9.5) -- github.com/Azure/go-autorest/autorest/mocks: [v0.4.0 → v0.4.1](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.4.0...v0.4.1) -- golang.org/x/crypto: 75b2880 → 7f63de1 - -### Removed -_Nothing has changed._ - - - -# v1.20.0-alpha.3 - - -## Downloads for v1.20.0-alpha.3 - -### Source Code - -filename | sha512 hash --------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes.tar.gz) | 542cc9e0cd97732020491456402b6e2b4f54f2714007ee1374a7d363663a1b41e82b50886176a5313aaccfbfd4df2bc611d6b32d19961cdc98b5821b75d6b17c -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-src.tar.gz) | 5e5d725294e552fd1d14fd6716d013222827ac2d4e2d11a7a1fdefb77b3459bbeb69931f38e1597de205dd32a1c9763ab524c2af1551faef4f502ef0890f7fbf - -### Client binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-darwin-amd64.tar.gz) | 60004939727c75d0f06adc4449e16b43303941937c0e9ea9aca7d947e93a5aed5d11e53d1fc94caeb988be66d39acab118d406dc2d6cead61181e1ced6d2be1a -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-386.tar.gz) | 7edba9c4f1bf38fdf1fa5bff2856c05c0e127333ce19b17edf3119dc9b80462c027404a1f58a5eabf1de73a8f2f20aced043dda1fafd893619db1a188cda550c -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-amd64.tar.gz) | db1818aa82d072cb3e32a2a988e66d76ecf7cebc6b8a29845fa2d6ec27f14a36e4b9839b1b7ed8c43d2da9cde00215eb672a7e8ee235d2e3107bc93c22e58d38 -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm.tar.gz) | d2922e70d22364b1f5a1e94a0c115f849fe2575b231b1ba268f73a9d86fc0a9fbb78dc713446839a2593acf1341cb5a115992f350870f13c1a472bb107b75af7 -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-arm64.tar.gz) | 2e3ae20e554c7d4fc3a8afdfcafe6bbc81d4c5e9aea036357baac7a3fdc2e8098aa8a8c3dded3951667d57f667ce3fbf37ec5ae5ceb2009a569dc9002d3a92f9 -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-ppc64le.tar.gz) | b54a34e572e6a86221577de376e6f7f9fcd82327f7fe94f2fc8d21f35d302db8a0f3d51e60dc89693999f5df37c96d0c3649a29f07f095efcdd59923ae285c95 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-linux-s390x.tar.gz) | 5be1b70dc437d3ba88cb0b89cd1bc555f79896c3f5b5f4fa0fb046a0d09d758b994d622ebe5cef8e65bba938c5ae945b81dc297f9dfa0d98f82ea75f344a3a0d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-386.tar.gz) | 
88cf3f66168ef3bf9a5d3d2275b7f33799406e8205f2c202997ebec23d449aa4bb48b010356ab1cf52ff7b527b8df7c8b9947a43a82ebe060df83c3d21b7223a -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-client-windows-amd64.tar.gz) | 87d2d4ea1829da8cfa1a705a03ea26c759a03bd1c4d8b96f2c93264c4d172bb63a91d9ddda65cdc5478b627c30ae8993db5baf8be262c157d83bffcebe85474e - -### Server binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-amd64.tar.gz) | 7af691fc0b13a937797912374e3b3eeb88d5262e4eb7d4ebe92a3b64b3c226cb049aedfd7e39f639f6990444f7bcf2fe58699cf0c29039daebe100d7eebf60de -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm.tar.gz) | 557c47870ecf5c2090b2694c8f0c8e3b4ca23df5455a37945bd037bc6fb5b8f417bf737bb66e6336b285112cb52de0345240fdb2f3ce1c4fb335ca7ef1197f99 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-arm64.tar.gz) | 981de6cf7679d743cdeef1e894314357b68090133814801870504ef30564e32b5675e270db20961e9a731e35241ad9b037bdaf749da87b6c4ce8889eeb1c5855 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-ppc64le.tar.gz) | 506578a21601ccff609ae757a55e68634c15cbfecbf13de972c96b32a155ded29bd71aee069c77f5f721416672c7a7ac0b8274de22bfd28e1ecae306313d96c5 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-server-linux-s390x.tar.gz) | af0cdcd4a77a7cc8060a076641615730a802f1f02dab084e41926023489efec6102d37681c70ab0dbe7440cd3e72ea0443719a365467985360152b9aae657375 - -### Node binaries - -filename | sha512 hash --------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-amd64.tar.gz) | 2d92c61596296279de1efae23b2b707415565d9d50cd61a7231b8d10325732b059bcb90f3afb36bef2575d203938c265572721e38df408e8792d3949523bd5d9 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm.tar.gz) | c298de9b5ac1b8778729a2d8e2793ff86743033254fbc27014333880b03c519de81691caf03aa418c729297ee8942ce9ec89d11b0e34a80576b9936015dc1519 -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-arm64.tar.gz) | daa3c65afda6d7aff206c1494390bbcc205c2c6f8db04c10ca967a690578a01c49d49c6902b85e7158f79fd4d2a87c5d397d56524a75991c9d7db85ac53059a7 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-ppc64le.tar.gz) | 05661908bb73bfcaf9c2eae96e9a6a793db5a7a100bce6df9e057985dd53a7a5248d72e81b6d13496bd38b9326c17cdb2edaf0e982b6437507245fb846e1efc6 -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-linux-s390x.tar.gz) | 845e518e2c4ef0cef2c3b58f0b9ea5b5fe9b8a249717f789607752484c424c26ae854b263b7c0a004a8426feb9aa3683c177a9ed2567e6c3521f4835ea08c24a -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.3/kubernetes-node-windows-amd64.tar.gz) | 530e536574ed2c3e5973d3c0f0fdd2b4d48ef681a7a7c02db13e605001669eeb4f4b8a856fc08fc21436658c27b377f5d04dbcb3aae438098abc953b6eaf5712 - -## Changelog since v1.20.0-alpha.2 - -## Changes by Kind - -### API Change - -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] - -### Feature +### Failing Test -- 
Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Kubectl create now supports creating ingress objects. ([#94327](https://github.com/kubernetes/kubernetes/pull/94327), [@rikatz](https://github.com/rikatz)) [SIG CLI and Network] -- New default scheduling plugins order reduces scheduling and preemption latency when taints and node affinity are used ([#95539](https://github.com/kubernetes/kubernetes/pull/95539), [@soulxu](https://github.com/soulxu)) [SIG Scheduling] -- SCTP support in API objects (Pod, Service, NetworkPolicy) is now GA. - Note that this has no effect on whether SCTP is enabled on nodes at the kernel level, - and note that some cloud platforms and network plugins do not support SCTP traffic. ([#95566](https://github.com/kubernetes/kubernetes/pull/95566), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. ([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- SelectorSpreadPriority maps to PodTopologySpread plugin when DefaultPodTopologySpread feature is enabled ([#95448](https://github.com/kubernetes/kubernetes/pull/95448), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- SetHostnameAsFQDN has been graduated to Beta and therefore it is enabled by default. ([#95267](https://github.com/kubernetes/kubernetes/pull/95267), [@javidiaz](https://github.com/javidiaz)) [SIG Node] +- Kubelet: the HostPort implementation in dockershim was not taking into consideration the HostIP field, causing that the same HostPort can not be used with different IP addresses. + This bug causes the conformance test "HostPort validates that there is no conflict between pods with same hostPort but different hostIP and protocol" to fail. ([#98755](https://github.com/kubernetes/kubernetes/pull/98755), [@aojea](https://github.com/aojea)) [SIG Cloud Provider, Network and Node] ### Bug or Regression -- An issues preventing volume expand controller to annotate the PVC with `volume.kubernetes.io/storage-resizer` when the PVC StorageClass is already updated to the out-of-tree provisioner is now fixed. ([#94489](https://github.com/kubernetes/kubernetes/pull/94489), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery, Apps and Storage] -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. 
([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix verb & scope reporting for kube-apiserver metrics (LIST reported instead of GET) ([#95562](https://github.com/kubernetes/kubernetes/pull/95562), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fix: smb valid path error ([#95583](https://github.com/kubernetes/kubernetes/pull/95583), [@andyzhangx](https://github.com/andyzhangx)) [SIG Storage] -- Fixed a bug causing incorrect formatting of `kubectl describe ingress`. ([#94985](https://github.com/kubernetes/kubernetes/pull/94985), [@howardjohn](https://github.com/howardjohn)) [SIG CLI and Network] -- Fixed a bug in client-go where new clients with customized `Dial`, `Proxy`, `GetCert` config may get stale HTTP transports. ([#95427](https://github.com/kubernetes/kubernetes/pull/95427), [@roycaihw](https://github.com/roycaihw)) [SIG API Machinery] -- Fixes high CPU usage in kubectl drain ([#95260](https://github.com/kubernetes/kubernetes/pull/95260), [@amandahla](https://github.com/amandahla)) [SIG CLI] -- Support the node label `node.kubernetes.io/exclude-from-external-load-balancers` ([#95542](https://github.com/kubernetes/kubernetes/pull/95542), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Fix NPE in ephemeral storage eviction ([#98261](https://github.com/kubernetes/kubernetes/pull/98261), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug that on k8s nodes, when the policy of INPUT chain in filter table is not ACCEPT, healthcheck nodeport would not work. + Added iptables rules to allow healthcheck nodeport traffic. ([#97824](https://github.com/kubernetes/kubernetes/pull/97824), [@hanlins](https://github.com/hanlins)) [SIG Network] +- Fixed kube-proxy container image architecture for non amd64 images. ([#98526](https://github.com/kubernetes/kubernetes/pull/98526), [@saschagrunert](https://github.com/saschagrunert)) [SIG API Machinery, Release and Testing] +- Fixed provisioning of Cinder volumes migrated to CSI when StorageClass with AllowedTopologies was used. ([#98311](https://github.com/kubernetes/kubernetes/pull/98311), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] +- Fixes a panic in the disruption budget controller for PDB objects with invalid selectors ([#98750](https://github.com/kubernetes/kubernetes/pull/98750), [@mortent](https://github.com/mortent)) [SIG Apps] +- Fixes connection errors when using `--volume-host-cidr-denylist` or `--volume-host-allow-local-loopback` ([#98436](https://github.com/kubernetes/kubernetes/pull/98436), [@liggitt](https://github.com/liggitt)) [SIG Network and Storage] +- If the user specifies an invalid timeout in the request URL, the request will be aborted with an HTTP 400. 
+ - in cases where the client specifies a timeout in the request URL, the overall request deadline is shortened now since the deadline is set up as soon as the request is received by the apiserver. ([#96901](https://github.com/kubernetes/kubernetes/pull/96901), [@tkashem](https://github.com/tkashem)) [SIG API Machinery and Testing] +- Kubeadm: Some text in the `kubeadm upgrade plan` output has changed. If you have scripts or other automation that parses this output, please review these changes and update your scripts to account for the new output. ([#98728](https://github.com/kubernetes/kubernetes/pull/98728), [@stmcginnis](https://github.com/stmcginnis)) [SIG Cluster Lifecycle] +- Kubeadm: fix a bug where external credentials in an existing admin.conf prevented the CA certificate from being written in the cluster-info ConfigMap. ([#98882](https://github.com/kubernetes/kubernetes/pull/98882), [@kvaps](https://github.com/kvaps)) [SIG Cluster Lifecycle] +- Kubeadm: fix bad token placeholder text in "config print *-defaults --help" ([#98839](https://github.com/kubernetes/kubernetes/pull/98839), [@Mattias-](https://github.com/Mattias-)) [SIG Cluster Lifecycle] +- Kubeadm: get k8s CI version markers from k8s infra bucket ([#98836](https://github.com/kubernetes/kubernetes/pull/98836), [@hasheddan](https://github.com/hasheddan)) [SIG Cluster Lifecycle and Release] +- Mitigate CVE-2020-8555 for kube-up using GCE by preventing local loopback volume hosts. ([#97934](https://github.com/kubernetes/kubernetes/pull/97934), [@mattcary](https://github.com/mattcary)) [SIG Cloud Provider and Storage] +- Remove CSI topology from migrated in-tree gcepd volume. ([#97823](https://github.com/kubernetes/kubernetes/pull/97823), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Cloud Provider and Storage] +- Sync node status during kubelet node shutdown. + Adds a pod admission handler that rejects new pods when the node is in the process of shutting down. ([#98005](https://github.com/kubernetes/kubernetes/pull/98005), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Truncates a message if it hits the NoteLengthLimit when the scheduler records an event for the pod that indicates the pod has failed to schedule. ([#98715](https://github.com/kubernetes/kubernetes/pull/98715), [@carlory](https://github.com/carlory)) [SIG Scheduling] +- We will no longer automatically delete all data when a failure is detected during creation of the volume data file on a CSI volume. Now we will only remove the data file and volume path. ([#96021](https://github.com/kubernetes/kubernetes/pull/96021), [@huffmanca](https://github.com/huffmanca)) [SIG Storage] ### Other (Cleanup or Flake) -- Fix func name NewCreateCreateDeploymentOptions ([#91931](https://github.com/kubernetes/kubernetes/pull/91931), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG CLI] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. 
With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove the dependency of csi-translation-lib module on apiserver/cloud-provider/controller-manager ([#95543](https://github.com/kubernetes/kubernetes/pull/95543), [@wawa0210](https://github.com/wawa0210)) [SIG Release] -- Scheduler framework interface moved from pkg/scheduler/framework/v1alpha to pkg/scheduler/framework ([#95069](https://github.com/kubernetes/kubernetes/pull/95069), [@farah](https://github.com/farah)) [SIG Scheduling, Storage and Testing] -- UDP and SCTP protocols can left stale connections that need to be cleared to avoid services disruption, but they can cause problems that are hard to debug. - Kubernetes components using a loglevel greater or equal than 4 will log the conntrack operations and its output, to show the entries that were deleted. ([#95694](https://github.com/kubernetes/kubernetes/pull/95694), [@aojea](https://github.com/aojea)) [SIG Network] +- Fix the description of command line flags that can override --config ([#98254](https://github.com/kubernetes/kubernetes/pull/98254), [@changshuchao](https://github.com/changshuchao)) [SIG Scheduling] +- Migrate scheduler/taint_manager.go structured logging ([#98259](https://github.com/kubernetes/kubernetes/pull/98259), [@tanjing2020](https://github.com/tanjing2020)) [SIG Apps] +- Migrate staging/src/k8s.io/apiserver/pkg/admission logs to structured logging ([#98138](https://github.com/kubernetes/kubernetes/pull/98138), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Resolves flakes in the Ingress conformance tests due to conflicts with controllers updating the Ingress object ([#98430](https://github.com/kubernetes/kubernetes/pull/98430), [@liggitt](https://github.com/liggitt)) [SIG Network and Testing] +- The default delegating authorization options now allow unauthenticated access to healthz, readyz, and livez. A system:masters user connecting to an authz delegator will not perform an authz check. ([#98325](https://github.com/kubernetes/kubernetes/pull/98325), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth, Cloud Provider and Scheduling] +- The e2e suite can be instructed not to wait for pods in kube-system to be ready or for all nodes to be ready by passing `--allowed-not-ready-nodes=-1` when invoking the e2e.test program. This allows callers to run subsets of the e2e suite in scenarios other than perfectly healthy clusters. ([#98781](https://github.com/kubernetes/kubernetes/pull/98781), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- The feature gates `WindowsGMSA` and `WindowsRunAsUserName` that are GA since v1.18 are now removed. ([#96531](https://github.com/kubernetes/kubernetes/pull/96531), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Node and Windows] +- The new `-gce-zones` flag on the `e2e.test` binary instructs tests that check for information about how the cluster interacts with the cloud to limit their queries to the provided zone list. If not specified, the current behavior of asking the cloud provider for all available zones in multi zone clusters is preserved. 
([#98787](https://github.com/kubernetes/kubernetes/pull/98787), [@smarterclayton](https://github.com/smarterclayton)) [SIG API Machinery, Cluster Lifecycle and Testing] ## Dependencies ### Added -_Nothing has changed._ +- github.com/moby/spdystream: [v0.2.0](https://github.com/moby/spdystream/tree/v0.2.0) ### Changed -_Nothing has changed._ +- github.com/NYTimes/gziphandler: [56545f4 → v1.1.1](https://github.com/NYTimes/gziphandler/compare/56545f4...v1.1.1) +- github.com/container-storage-interface/spec: [v1.2.0 → v1.3.0](https://github.com/container-storage-interface/spec/compare/v1.2.0...v1.3.0) +- github.com/go-logr/logr: [v0.2.0 → v0.4.0](https://github.com/go-logr/logr/compare/v0.2.0...v0.4.0) +- github.com/gogo/protobuf: [v1.3.1 → v1.3.2](https://github.com/gogo/protobuf/compare/v1.3.1...v1.3.2) +- github.com/kisielk/errcheck: [v1.2.0 → v1.5.0](https://github.com/kisielk/errcheck/compare/v1.2.0...v1.5.0) +- github.com/yuin/goldmark: [v1.1.27 → v1.2.1](https://github.com/yuin/goldmark/compare/v1.1.27...v1.2.1) +- golang.org/x/sync: cd5d95a → 67f06af +- golang.org/x/tools: c1934b7 → 113979e +- k8s.io/klog/v2: v2.4.0 → v2.5.0 +- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.14 → v0.0.15 ### Removed -_Nothing has changed._ +- github.com/docker/spdystream: [449fdfc](https://github.com/docker/spdystream/tree/449fdfc) -# v1.20.0-alpha.2 +# v1.21.0-alpha.2 -## Downloads for v1.20.0-alpha.2 +## Downloads for v1.21.0-alpha.2 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes.tar.gz) | 45089a4d26d56a5d613ecbea64e356869ac738eca3cc71d16b74ea8ae1b4527bcc32f1dc35ff7aa8927e138083c7936603faf063121d965a2f0f8ba28fa128d8 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-src.tar.gz) | 646edd890d6df5858b90aaf68cc6e1b4589b8db09396ae921b5c400f2188234999e6c9633906692add08c6e8b4b09f12b2099132b0a7533443fb2a01cfc2bf81 +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes.tar.gz) | 6836f6c8514253fe0831fd171fc4ed92eb6d9a773491c8dc82b90d171a1b10076bd6bfaea56ec1e199c5f46c273265bdb9f174f0b2d99c5af1de4c99b862329e +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-src.tar.gz) | d137694804741a05ab09e5f9a418448b66aba0146c028eafce61bcd9d7c276521e345ce9223ffbc703e8172041d58dfc56a3242a4df3686f24905a4541fcd306 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | c136273883e24a2a50b5093b9654f01cdfe57b97461d34885af4a68c2c4d108c07583c02b1cdf7f57f82e91306e542ce8f3bddb12fcce72b744458bc4796f8eb -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 6ec59f1ed30569fa64ddb2d0de32b1ae04cda4ffe13f339050a7c9d7c63d425ee6f6d963dcf82c17281c4474da3eaf32c08117669052872a8c81bdce2c8a5415 -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | 7b40a4c087e2ea7f8d055f297fcd39a3f1cb6c866e7a3981a9408c3c3eb5363c648613491aad11bc7d44d5530b20832f8f96f6ceff43deede911fb74aafad35f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | cda9955feebea5acb8f2b5b87895d24894bbbbde47041453b1f926ebdf47a258ce0496aa27d06bcbf365b5615ce68a20d659b64410c54227216726e2ee432fca -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 
f65bd9241c7eb88a4886a285330f732448570aea4ededaebeabcf70d17ea185f51bf8a7218f146ee09fb1adceca7ee71fb3c3683834f2c415163add820fba96e -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 1e377599af100a81d027d9199365fb8208d443a8e0a97affff1a79dc18796e14b78cb53d6e245c1c1e8defd0e050e37bf5f2a23c8a3ff45a6d18d03619709bf5 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 1cdee81478246aa7e7b80ae4efc7f070a5b058083ae278f59fad088b75a8052761b0e15ab261a6e667ddafd6a69fb424fc307072ed47941cad89a85af7aee93d -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-386.tar.gz) | d8774167c87b6844c348aa15e92d5033c528d6ab9e95d08a7cb22da68bafd8e46d442cf57a5f6affad62f674c10ae6947d524b94108b5e450ca78f92656d63c0 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | f664b47d8daa6036f8154c1dc1f881bfe683bf57c39d9b491de3848c03d051c50c6644d681baf7f9685eae45f9ce62e4c6dfea2853763cfe8256a61bdd59d894 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-darwin-amd64.tar.gz) | 9478b047a97717953f365c13a098feb7e3cb30a3df22e1b82aa945f2208dcc5cb90afc441ba059a3ae7aafb4ee000ec3a52dc65a8c043a5ac7255a391c875330 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-386.tar.gz) | 44c8dd4b1ddfc256d35786c8abf45b0eb5f0794f5e310d2efc865748adddc50e8bf38aa71295ae8a82884cb65f2e0b9b0737b000f96fd8f2d5c19971d7c4d8e8 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-amd64.tar.gz) | e1291989892769de6b978c17b8612b94da6f3b735a4d895100af622ca9ebb968c75548afea7ab00445869625dd0da3afec979e333afbb445805f5d31c1c13cc7 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm.tar.gz) | 3c4bcb8cbe73822d68a2f62553a364e20bec56b638c71d0f58679b4f4b277d809142346f18506914e694f6122a3e0f767eab20b7b1c4dbb79e4c5089981ae0f1 +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-arm64.tar.gz) | 9389974a790268522e187f5ba5237f3ee4684118c7db76bc3d4164de71d8208702747ec333b204c7a78073ab42553cbbce13a1883fab4fec617e093b05fab332 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-ppc64le.tar.gz) | 63399e53a083b5af3816c28ff162c9de6b64c75da4647f0d6bbaf97afdf896823cb1e556f2abac75c6516072293026d3ff9f30676fd75143ac6ca3f4d21f4327 +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-linux-s390x.tar.gz) | 50898f197a9d923971ff9046c9f02779b57f7b3cea7da02f3ea9bab8c08d65a9c4a7531a2470fa14783460f52111a52b96ebf916c0a1d8215b4070e4e861c1b0 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-386.tar.gz) | a7743e839e1aa19f5ee20b6ee5000ac8ef9e624ac5be63bb574fad6992e4b9167193ed07e03c9bc524e88bfeed66c95341a38a03bff1b10bc9910345f33019f0 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-client-windows-amd64.tar.gz) | 5f1d19c230bd3542866d16051808d184e9dd3e2f8c001ed4cee7b5df91f872380c2bf56a3add8c9413ead9d8c369efce2bcab4412174df9b823d3592677bf74e ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | 
d6fcb4600be0beb9de222a8da64c35fe22798a0da82d41401d34d0f0fc7e2817512169524c281423d8f4a007cd77452d966317d5a1b67d2717a05ff346e8aa7d -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | 022a76cf10801f8afbabb509572479b68fdb4e683526fa0799cdbd9bab4d3f6ecb76d1d63d0eafee93e3edf6c12892d84b9c771ef2325663b95347728fa3d6c0 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 0679aadd60bbf6f607e5befad74b5267eb2d4c1b55985cc25a97e0f4c5efb7acbb3ede91bfa6a5a5713dae4d7a302f6faaf678fd6b359284c33d9a6aca2a08bb -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 9f2cfeed543b515eafb60d9765a3afff4f3d323c0a5c8a0d75e3de25985b2627817bfcbe59a9a61d969e026e2b861adb974a09eae75b58372ed736ceaaed2a82 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | 937258704d7b9dcd91f35f2d34ee9dd38c18d9d4e867408c05281bfbbb919ad012c95880bee84d2674761aa44cc617fb2fae1124cf63b689289286d6eac1c407 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-amd64.tar.gz) | ef2cac10febde231aeb6f131e589450c560eeaab8046b49504127a091cddc17bc518c2ad56894a6a033033ab6fc6e121b1cc23691683bc36f45fe6b1dd8e0510 +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm.tar.gz) | d11c9730307f08e80b2b8a7c64c3e9a9e43c622002e377dfe3a386f4541e24adc79a199a6f280f40298bb36793194fd44ed45defe8a3ee54a9cb1386bc26e905 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-arm64.tar.gz) | 28f8c32bf98ee1add7edf5d341c3bac1afc0085f90dcbbfb8b27a92087f13e2b53c327c8935ee29bf1dc3160655b32bbe3e29d5741a8124a3848a777e7d42933 +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-ppc64le.tar.gz) | 99ae8d44b0de3518c27fa8bbddd2ecf053dfb789fb9d65f8a4ecf4c8331cf63d2f09a41c2bcd5573247d5f66a1b2e51944379df1715017d920d521b98589508a +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-server-linux-s390x.tar.gz) | f8c0e954a2dfc6845614488dadeed069cc7f3f08e33c351d7a77c6ef97867af590932e8576d12998a820a0e4d35d2eee797c764e2810f09ab1e90a5acaeaad33 ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | 076165d745d47879de68f4404eaf432920884be48277eb409e84bf2c61759633bf3575f46b0995f1fc693023d76c0921ed22a01432e756d7f8d9e246a243b126 -[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 1ff2e2e3e43af41118cdfb70c778e15035bbb1aca833ffd2db83c4bcd44f55693e956deb9e65017ebf3c553f2820ad5cd05f5baa33f3d63f3e00ed980ea4dfed -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | b232c7359b8c635126899beee76998078eec7a1ef6758d92bcdebe8013b0b1e4d7b33ecbf35e3f82824fe29493400845257e70ed63c1635bfa36c8b3b4969f6f -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 51d415a068f554840f4c78d11a4fedebd7cb03c686b0ec864509b24f7a8667ebf54bb0a25debcf2b70f38be1e345e743f520695b11806539a55a3620ce21946f -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 
b51c082d8af358233a088b632cf2f6c8cfe5421471c27f5dc9ba4839ae6ea75df25d84298f2042770097554c01742bb7686694b331ad9bafc93c86317b867728 -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | 91b9d26620a2dde67a0edead0039814efccbdfd54594dda3597aaced6d89140dc92612ed0727bc21d63468efeef77c845e640153b09e39d8b736062e6eee0c76 +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-amd64.tar.gz) | c5456d50bfbe0d75fb150b3662ed7468a0abd3970792c447824f326894382c47bbd3a2cc5a290f691c8c09585ff6fe505ab86b4aff2b7e5ccee11b5e6354ae6c +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm.tar.gz) | 335b5cd8672e053302fd94d932fb2fa2e48eeeb1799650b3f93acdfa635e03a8453637569ab710c46885c8317759f4c60aaaf24dca9817d9fa47500fe4a3ca53 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-arm64.tar.gz) | 3ee87dbeed8ace9351ac89bdaf7274dd10b4faec3ceba0825f690ec7a2bb7eb7c634274a1065a0939eec8ff3e43f72385f058f4ec141841550109e775bc5eff9 +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-ppc64le.tar.gz) | 6956f965b8d719b164214ec9195fdb2c776b907fe6d2c524082f00c27872a73475927fd7d2a994045ce78f6ad2aa5aeaf1eb5514df1810d2cfe342fd4e5ce4a1 +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-linux-s390x.tar.gz) | 3b643aa905c709c57083c28dd9e8ffd88cb64466cda1499da7fc54176b775003e08b9c7a07b0964064df67c8142f6f1e6c13bfc261bd65fb064049920bfa57d0 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.2/kubernetes-node-windows-amd64.tar.gz) | b2e6d6fb0091f2541f9925018c2bdbb0138a95bab06b4c6b38abf4b7144b2575422263b78fb3c6fd09e76d90a25a8d35a6d4720dc169794d42c95aa22ecc6d5f + +## Changelog since v1.21.0-alpha.1 -## Changelog since v1.20.0-alpha.1 +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + - Remove storage metrics `storage_operation_errors_total`, since we already have `storage_operation_status_count`.And add new field `status` for `storage_operation_duration_seconds`, so that we can know about all status storage operation latency. ([#98332](https://github.com/kubernetes/kubernetes/pull/98332), [@JornShen](https://github.com/JornShen)) [SIG Instrumentation and Storage] + ## Changes by Kind ### Deprecation -- Action-required: kubeadm: graduate the "kubeadm alpha certs" command to a parent command "kubeadm certs". The command "kubeadm alpha certs" is deprecated and will be removed in a future release. Please migrate. ([#94938](https://github.com/kubernetes/kubernetes/pull/94938), [@yagonobre](https://github.com/yagonobre)) [SIG Cluster Lifecycle] -- Action-required: kubeadm: remove the deprecated feature --experimental-kustomize from kubeadm commands. The feature was replaced with --experimental-patches in 1.19. To migrate see the --help description for the --experimental-patches flag. ([#94871](https://github.com/kubernetes/kubernetes/pull/94871), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate self-hosting support. The experimental command "kubeadm alpha self-hosting" is now deprecated and will be removed in a future release. 
([#95125](https://github.com/kubernetes/kubernetes/pull/95125), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Removes deprecated scheduler metrics DeprecatedSchedulingDuration, DeprecatedSchedulingAlgorithmPredicateEvaluationSecondsDuration, DeprecatedSchedulingAlgorithmPriorityEvaluationSecondsDuration ([#94884](https://github.com/kubernetes/kubernetes/pull/94884), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] -- Scheduler alpha metrics binding_duration_seconds and scheduling_algorithm_preemption_evaluation_seconds are deprecated, Both of those metrics are now covered as part of framework_extension_point_duration_seconds, the former as a PostFilter the latter and a Bind plugin. The plan is to remove both in 1.21 ([#95001](https://github.com/kubernetes/kubernetes/pull/95001), [@arghya88](https://github.com/arghya88)) [SIG Instrumentation and Scheduling] +- Remove the TokenRequest and TokenRequestProjection feature gates ([#97148](https://github.com/kubernetes/kubernetes/pull/97148), [@wawa0210](https://github.com/wawa0210)) [SIG Node] +- Removing experimental windows container hyper-v support with Docker ([#97141](https://github.com/kubernetes/kubernetes/pull/97141), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] +- The `export` query parameter (inconsistently supported by API resources and deprecated in v1.14) is fully removed. Requests setting this query parameter will now receive a 400 status response. ([#98312](https://github.com/kubernetes/kubernetes/pull/98312), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Auth and Testing] ### API Change -- GPU metrics provided by kubelet are now disabled by default ([#95184](https://github.com/kubernetes/kubernetes/pull/95184), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- New parameter `defaultingType` for `PodTopologySpread` plugin allows to use k8s defined or user provided default constraints ([#95048](https://github.com/kubernetes/kubernetes/pull/95048), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Server Side Apply now treats LabelSelector fields as atomic (meaning the entire selector is managed by a single writer and updated together), since they contain interrelated and inseparable fields that do not merge in intuitive ways. ([#93901](https://github.com/kubernetes/kubernetes/pull/93901), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Storage and Testing] -- Status of v1beta1 CRDs without "preserveUnknownFields:false" will show violation "spec.preserveUnknownFields: Invalid value: true: must be false" ([#93078](https://github.com/kubernetes/kubernetes/pull/93078), [@vareti](https://github.com/vareti)) [SIG API Machinery] +- Enable SPDY pings to keep connections alive, so that `kubectl exec` and `kubectl port-forward` won't be interrupted. ([#97083](https://github.com/kubernetes/kubernetes/pull/97083), [@knight42](https://github.com/knight42)) [SIG API Machinery and CLI] -### Feature +### Documentation -- Added `get-users` and `delete-user` to the `kubectl config` subcommand ([#89840](https://github.com/kubernetes/kubernetes/pull/89840), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] -- Added counter metric "apiserver_request_self" to count API server self-requests with labels for verb, resource, and subresource. 
([#94288](https://github.com/kubernetes/kubernetes/pull/94288), [@LogicalShark](https://github.com/LogicalShark)) [SIG API Machinery, Auth, Instrumentation and Scheduling] -- Added new k8s.io/component-helpers repository providing shared helper code for (core) components. ([#92507](https://github.com/kubernetes/kubernetes/pull/92507), [@ingvagabund](https://github.com/ingvagabund)) [SIG Apps, Node, Release and Scheduling] -- Adds `create ingress` command to `kubectl` ([#78153](https://github.com/kubernetes/kubernetes/pull/78153), [@amimof](https://github.com/amimof)) [SIG CLI and Network] -- Allow configuring AWS LoadBalancer health check protocol via service annotations ([#94546](https://github.com/kubernetes/kubernetes/pull/94546), [@kishorj](https://github.com/kishorj)) [SIG Cloud Provider] -- Azure: Support multiple services sharing one IP address ([#94991](https://github.com/kubernetes/kubernetes/pull/94991), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] -- Ephemeral containers now apply the same API defaults as initContainers and containers ([#94896](https://github.com/kubernetes/kubernetes/pull/94896), [@wawa0210](https://github.com/wawa0210)) [SIG Apps and CLI] -- In dual-stack bare-metal clusters, you can now pass dual-stack IPs to `kubelet --node-ip`. - eg: `kubelet --node-ip 10.1.0.5,fd01::0005`. This is not yet supported for non-bare-metal - clusters. - - In dual-stack clusters where nodes have dual-stack addresses, hostNetwork pods - will now get dual-stack PodIPs. ([#95239](https://github.com/kubernetes/kubernetes/pull/95239), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Introduces a new GCE specific cluster creation variable KUBE_PROXY_DISABLE. When set to true, this will skip over the creation of kube-proxy (whether the daemonset or static pod). This can be used to control the lifecycle of kube-proxy separately from the lifecycle of the nodes. ([#91977](https://github.com/kubernetes/kubernetes/pull/91977), [@varunmar](https://github.com/varunmar)) [SIG Cloud Provider] -- Kubeadm: do not throw errors if the current system time is outside of the NotBefore and NotAfter bounds of a loaded certificate. Print warnings instead. ([#94504](https://github.com/kubernetes/kubernetes/pull/94504), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: make the command "kubeadm alpha kubeconfig user" accept a "--config" flag and remove the following flags: - - apiserver-advertise-address / apiserver-bind-port: use either localAPIEndpoint from InitConfiguration or controlPlaneEndpoint from ClusterConfiguration. - - cluster-name: use clusterName from ClusterConfiguration - - cert-dir: use certificatesDir from ClusterConfiguration ([#94879](https://github.com/kubernetes/kubernetes/pull/94879), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubectl rollout history sts/sts-name --revision=some-revision will start showing the detailed view of the sts on that specified revision ([#86506](https://github.com/kubernetes/kubernetes/pull/86506), [@dineshba](https://github.com/dineshba)) [SIG CLI] -- Scheduling Framework: expose Run[Pre]ScorePlugins functions to PreemptionHandle which can be used in PostFilter extention point. 
([#93534](https://github.com/kubernetes/kubernetes/pull/93534), [@everpeace](https://github.com/everpeace)) [SIG Scheduling and Testing] -- Send gce node startup scripts logs to console and journal ([#95311](https://github.com/kubernetes/kubernetes/pull/95311), [@karan](https://github.com/karan)) [SIG Cloud Provider and Node] -- Support kubectl delete orphan/foreground/background options ([#93384](https://github.com/kubernetes/kubernetes/pull/93384), [@zhouya0](https://github.com/zhouya0)) [SIG CLI and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97935](https://github.com/kubernetes/kubernetes/pull/97935), [@adeniyistephen](https://github.com/adeniyistephen)) [SIG Release and Testing] +- Set kubelet option `--volume-stats-agg-period` to negative value to disable volume calculations. ([#96675](https://github.com/kubernetes/kubernetes/pull/96675), [@pacoxu](https://github.com/pacoxu)) [SIG Node] ### Bug or Regression -- Change the mount way from systemd to normal mount except ceph and glusterfs intree-volume. ([#94916](https://github.com/kubernetes/kubernetes/pull/94916), [@smileusd](https://github.com/smileusd)) [SIG Apps, Cloud Provider, Network, Node, Storage and Testing] -- Cloud node controller: handle empty providerID from getProviderID ([#95342](https://github.com/kubernetes/kubernetes/pull/95342), [@nicolehanjing](https://github.com/nicolehanjing)) [SIG Cloud Provider] -- Fix a bug where the endpoint slice controller was not mirroring the parent service labels to its corresponding endpoint slices ([#94443](https://github.com/kubernetes/kubernetes/pull/94443), [@aojea](https://github.com/aojea)) [SIG Apps and Network] -- Fix azure disk attach failure for disk size bigger than 4TB ([#95463](https://github.com/kubernetes/kubernetes/pull/95463), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix azure disk data loss issue on Windows when unmount disk ([#95456](https://github.com/kubernetes/kubernetes/pull/95456), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix detach azure disk issue when vm not exist ([#95177](https://github.com/kubernetes/kubernetes/pull/95177), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix network_programming_latency metric reporting for Endpoints/EndpointSlice deletions, where we don't have correct timestamp ([#95363](https://github.com/kubernetes/kubernetes/pull/95363), [@wojtek-t](https://github.com/wojtek-t)) [SIG Network and Scalability] -- Fix scheduler cache snapshot when a Node is deleted before its Pods ([#95130](https://github.com/kubernetes/kubernetes/pull/95130), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scheduling] -- Fix vsphere detach failure for static PVs ([#95447](https://github.com/kubernetes/kubernetes/pull/95447), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] -- Fixed a bug that prevents the use of ephemeral containers in the presence of a validating admission webhook. 
([#94685](https://github.com/kubernetes/kubernetes/pull/94685), [@verb](https://github.com/verb)) [SIG Node and Testing] -- Gracefully delete nodes when their parent scale set went missing ([#95289](https://github.com/kubernetes/kubernetes/pull/95289), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- In dual-stack clusters, kubelet will now set up both IPv4 and IPv6 iptables rules, which may - fix some problems, eg with HostPorts. ([#94474](https://github.com/kubernetes/kubernetes/pull/94474), [@danwinship](https://github.com/danwinship)) [SIG Network and Node] -- Kubeadm: for Docker as the container runtime, make the "kubeadm reset" command stop containers before removing them ([#94586](https://github.com/kubernetes/kubernetes/pull/94586), [@BedivereZero](https://github.com/BedivereZero)) [SIG Cluster Lifecycle] -- Kubeadm: warn but do not error out on missing "ca.key" files for root CA, front-proxy CA and etcd CA, during "kubeadm join --control-plane" if the user has provided all certificates, keys and kubeconfig files which require signing with the given CA keys. ([#94988](https://github.com/kubernetes/kubernetes/pull/94988), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Port mapping allows to map the same `containerPort` to multiple `hostPort` without naming the mapping explicitly. ([#94494](https://github.com/kubernetes/kubernetes/pull/94494), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Network and Node] -- Warn instead of fail when creating Roles and ClusterRoles with custom verbs via kubectl ([#92492](https://github.com/kubernetes/kubernetes/pull/92492), [@eddiezane](https://github.com/eddiezane)) [SIG CLI] +- Clean ReplicaSet by revision instead of creation timestamp in deployment controller ([#97407](https://github.com/kubernetes/kubernetes/pull/97407), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Ensure that client-go's EventBroadcaster is safe (non-racy) during shutdown. ([#95664](https://github.com/kubernetes/kubernetes/pull/95664), [@DirectXMan12](https://github.com/DirectXMan12)) [SIG API Machinery] +- Fix azure file migration issue ([#97877](https://github.com/kubernetes/kubernetes/pull/97877), [@andyzhangx](https://github.com/andyzhangx)) [SIG Auth, Cloud Provider and Storage] +- Fix kubelet from panic after getting the wrong signal ([#98200](https://github.com/kubernetes/kubernetes/pull/98200), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fix repeatedly acquire the inhibit lock ([#98088](https://github.com/kubernetes/kubernetes/pull/98088), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Fixed a bug that the kubelet cannot start on BtrfS. ([#98042](https://github.com/kubernetes/kubernetes/pull/98042), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Fixed an issue with garbage collection failing to clean up namespaced children of an object also referenced incorrectly by cluster-scoped children ([#98068](https://github.com/kubernetes/kubernetes/pull/98068), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] +- Fixed no effect namespace when exposing deployment with --dry-run=client. 
([#97492](https://github.com/kubernetes/kubernetes/pull/97492), [@masap](https://github.com/masap)) [SIG CLI] +- Fixing a bug where a failed node may not have the NoExecute taint set correctly ([#96876](https://github.com/kubernetes/kubernetes/pull/96876), [@howieyuen](https://github.com/howieyuen)) [SIG Apps and Node] +- Indentation of `Resource Quota` block in kubectl describe namespaces output gets correct. ([#97946](https://github.com/kubernetes/kubernetes/pull/97946), [@dty1er](https://github.com/dty1er)) [SIG CLI] +- KUBECTL_EXTERNAL_DIFF now accepts equal sign for additional parameters. ([#98158](https://github.com/kubernetes/kubernetes/pull/98158), [@dougsland](https://github.com/dougsland)) [SIG CLI] +- Kubeadm: fix a bug where "kubeadm join" would not properly handle missing names for existing etcd members. ([#97372](https://github.com/kubernetes/kubernetes/pull/97372), [@ihgann](https://github.com/ihgann)) [SIG Cluster Lifecycle] +- Kubelet should ignore cgroup driver check on Windows node. ([#97764](https://github.com/kubernetes/kubernetes/pull/97764), [@pacoxu](https://github.com/pacoxu)) [SIG Node and Windows] +- Make podTopologyHints protected by lock ([#95111](https://github.com/kubernetes/kubernetes/pull/95111), [@choury](https://github.com/choury)) [SIG Node] +- Readjust kubelet_containers_per_pod_count bucket ([#98169](https://github.com/kubernetes/kubernetes/pull/98169), [@wawa0210](https://github.com/wawa0210)) [SIG Instrumentation and Node] +- Scores from InterPodAffinity have stronger differentiation. ([#98096](https://github.com/kubernetes/kubernetes/pull/98096), [@leileiwan](https://github.com/leileiwan)) [SIG Scheduling] +- Specifying the KUBE_TEST_REPO environment variable when e2e tests are executed will instruct the test infrastructure to load that image from a location within the specified repo, using a predefined pattern. ([#93510](https://github.com/kubernetes/kubernetes/pull/93510), [@smarterclayton](https://github.com/smarterclayton)) [SIG Testing] +- Static pods will be deleted gracefully. ([#98103](https://github.com/kubernetes/kubernetes/pull/98103), [@gjkim42](https://github.com/gjkim42)) [SIG Node] +- Use network.Interface.VirtualMachine.ID to get the binded VM + Skip standalone VM when reconciling LoadBalancer ([#97635](https://github.com/kubernetes/kubernetes/pull/97635), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] ### Other (Cleanup or Flake) -- Added fine grained debugging to the intra-pod conformance test for helping easily resolve networking issues for nodes that might be unhealthy when running conformance or sonobuoy tests. 
([#93837](https://github.com/kubernetes/kubernetes/pull/93837), [@jayunit100](https://github.com/jayunit100)) [SIG Network and Testing] -- AdmissionReview objects sent for the creation of Namespace API objects now populate the `namespace` attribute consistently (previously the `namespace` attribute was empty for Namespace creation via POST requests, and populated for Namespace creation via server-side-apply PATCH requests) ([#95012](https://github.com/kubernetes/kubernetes/pull/95012), [@nodo](https://github.com/nodo)) [SIG API Machinery and Testing] -- Client-go header logging (at verbosity levels >= 9) now masks `Authorization` header contents ([#95316](https://github.com/kubernetes/kubernetes/pull/95316), [@sfowl](https://github.com/sfowl)) [SIG API Machinery] -- Enhance log information of verifyRunAsNonRoot, add pod, container information ([#94911](https://github.com/kubernetes/kubernetes/pull/94911), [@wawa0210](https://github.com/wawa0210)) [SIG Node] -- Errors from staticcheck: - vendor/k8s.io/client-go/discovery/cached/memory/memcache_test.go:94:2: this value of g is never used (SA4006) ([#95098](https://github.com/kubernetes/kubernetes/pull/95098), [@phunziker](https://github.com/phunziker)) [SIG API Machinery] -- Kubeadm: update the default pause image version to 1.4.0 on Windows. With this update the image supports Windows versions 1809 (2019LTS), 1903, 1909, 2004 ([#95419](https://github.com/kubernetes/kubernetes/pull/95419), [@jsturtevant](https://github.com/jsturtevant)) [SIG Cluster Lifecycle and Windows] -- Masks ceph RBD adminSecrets in logs when logLevel >= 4 ([#95245](https://github.com/kubernetes/kubernetes/pull/95245), [@sfowl](https://github.com/sfowl)) [SIG Storage] -- Upgrade snapshot controller to 3.0.0 ([#95412](https://github.com/kubernetes/kubernetes/pull/95412), [@saikat-royc](https://github.com/saikat-royc)) [SIG Cloud Provider] -- Remove offensive words from kubectl cluster-info command ([#95202](https://github.com/kubernetes/kubernetes/pull/95202), [@rikatz](https://github.com/rikatz)) [SIG Architecture, CLI and Testing] -- The following new metrics are available. - - network_plugin_operations_total - - network_plugin_operations_errors_total ([#93066](https://github.com/kubernetes/kubernetes/pull/93066), [@AnishShah](https://github.com/AnishShah)) [SIG Instrumentation, Network and Node] -- Vsphere: improve logging message on node cache refresh event ([#95236](https://github.com/kubernetes/kubernetes/pull/95236), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider] -- `kubectl api-resources` now prints the API version (as 'API group/version', same as output of `kubectl api-versions`). The column APIGROUP is now APIVERSION ([#95253](https://github.com/kubernetes/kubernetes/pull/95253), [@sallyom](https://github.com/sallyom)) [SIG CLI] +- Kubeadm: change the default image repository for CI images from 'gcr.io/kubernetes-ci-images' to 'gcr.io/k8s-staging-ci-images' ([#97087](https://github.com/kubernetes/kubernetes/pull/97087), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Migrate generic_scheduler.go and types.go to structured logging. 
([#98134](https://github.com/kubernetes/kubernetes/pull/98134), [@tanjing2020](https://github.com/tanjing2020)) [SIG Scheduling] +- Migrate proxy/winuserspace/proxier.go logs to structured logging ([#97941](https://github.com/kubernetes/kubernetes/pull/97941), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate staging/src/k8s.io/apiserver/pkg/audit/policy/reader.go logs to structured logging. ([#98252](https://github.com/kubernetes/kubernetes/pull/98252), [@lala123912](https://github.com/lala123912)) [SIG API Machinery and Auth] +- Migrate staging\src\k8s.io\apiserver\pkg\endpoints logs to structured logging ([#98093](https://github.com/kubernetes/kubernetes/pull/98093), [@lala123912](https://github.com/lala123912)) [SIG API Machinery] +- Node ([#96552](https://github.com/kubernetes/kubernetes/pull/96552), [@pandaamanda](https://github.com/pandaamanda)) [SIG Apps, Cloud Provider, Node and Scheduling] +- The kubectl alpha debug command was scheduled to be removed in v1.21. ([#98111](https://github.com/kubernetes/kubernetes/pull/98111), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Update cri-tools to [v1.20.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.20.0) ([#97967](https://github.com/kubernetes/kubernetes/pull/97967), [@rajibmitra](https://github.com/rajibmitra)) [SIG Cloud Provider] +- Windows nodes on GCE will take longer to start due to dependencies installed at node creation time. ([#98284](https://github.com/kubernetes/kubernetes/pull/98284), [@pjh](https://github.com/pjh)) [SIG Cloud Provider] ## Dependencies ### Added -- github.com/jmespath/go-jmespath/internal/testify: [v1.5.1](https://github.com/jmespath/go-jmespath/internal/testify/tree/v1.5.1) +_Nothing has changed._ ### Changed -- github.com/aws/aws-sdk-go: [v1.28.2 → v1.35.5](https://github.com/aws/aws-sdk-go/compare/v1.28.2...v1.35.5) -- github.com/jmespath/go-jmespath: [c2b33e8 → v0.4.0](https://github.com/jmespath/go-jmespath/compare/c2b33e8...v0.4.0) -- k8s.io/kube-openapi: 6aeccd4 → 8b50664 -- sigs.k8s.io/apiserver-network-proxy/konnectivity-client: v0.0.9 → v0.0.12 -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 → b3cf1e8 +- github.com/google/cadvisor: [v0.38.6 → v0.38.7](https://github.com/google/cadvisor/compare/v0.38.6...v0.38.7) +- k8s.io/gengo: 83324d8 → b6c5ce2 ### Removed _Nothing has changed._ -# v1.20.0-alpha.1 +# v1.21.0-alpha.1 -## Downloads for v1.20.0-alpha.1 +## Downloads for v1.21.0-alpha.1 ### Source Code filename | sha512 hash -------- | ----------- -[kubernetes.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes.tar.gz) | e7daed6502ea07816274f2371f96fe1a446d0d7917df4454b722d9eb3b5ff6163bfbbd5b92dfe7a0c1d07328b8c09c4ae966e482310d6b36de8813aaf87380b5 -[kubernetes-src.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-src.tar.gz) | e91213a0919647a1215d4691a63b12d89a3e74055463a8ebd71dc1a4cabf4006b3660881067af0189960c8dab74f4a7faf86f594df69021901213ee5b56550ea +[kubernetes.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes.tar.gz) | b2bacd5c3fc9f829e6269b7d2006b0c6e464ff848bb0a2a8f2fe52ad2d7c4438f099bd8be847d8d49ac6e4087f4d74d5c3a967acd798e0b0cb4d7a2bdb122997 +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-src.tar.gz) | 518ac5acbcf23902fb1b902b69dbf3e86deca5d8a9b5f57488a15f185176d5a109558f3e4df062366af874eca1bcd61751ee8098b0beb9bcdc025d9a1c9be693 ### Client binaries filename | sha512 hash -------- | ----------- -[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | 
1f3add5f826fa989820d715ca38e8864b66f30b59c1abeacbb4bfb96b4e9c694eac6b3f4c1c81e0ee3451082d44828cb7515315d91ad68116959a5efbdaef1e1 -[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-386.tar.gz) | c62acdc8993b0a950d4b0ce0b45473bf96373d501ce61c88adf4007afb15c1d53da8d53b778a7eccac6c1624f7fdda322be9f3a8bc2d80aaad7b4237c39f5eaf -[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1203ababfe00f9bc5be5c059324c17160a96530c1379a152db33564bbe644ccdb94b30eea15a0655bd652efb17895a46c31bbba19d4f5f473c2a0ff62f6e551f -[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | 31860088596e12d739c7aed94556c2d1e217971699b950c8417a3cea1bed4e78c9ff1717b9f3943354b75b4641d4b906cd910890dbf4278287c0d224837d9a7d -[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8d469f37fe20d6e15b5debc13cce4c22e8b7a4f6a4ac787006b96507a85ce761f63b28140d692c54b5f7deb08697f8d5ddb9bbfa8f5ac0d9241fc7de3a3fe3cd -[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 0d62ee1729cd5884946b6c73701ad3a570fa4d642190ca0fe5c1db0fb0cba9da3ac86a948788d915b9432d28ab8cc499e28aadc64530b7d549ee752a6ed93ec1 -[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 0fc0420e134ec0b8e0ab2654e1e102cebec47b48179703f1e1b79d51ee0d6da55a4e7304d8773d3cf830341ac2fe3cede1e6b0460fd88f7595534e0730422d5a -[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-386.tar.gz) | 3fb53b5260f4888c77c0e4ff602bbcf6bf38c364d2769850afe2b8d8e8b95f7024807c15e2b0d5603e787c46af8ac53492be9e88c530f578b8a389e3bd50c099 -[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 2f44c93463d6b5244ce0c82f147e7f32ec2233d0e29c64c3c5759e23533aebd12671bf63e986c0861e9736f9b5259bb8d138574a7c8c8efc822e35cd637416c0 +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-darwin-amd64.tar.gz) | eaa7aea84a5ed954df5ec710cbeb6ec88b46465f43cb3d09aabe2f714b84a050a50bf5736089f09dbf1090f2e19b44823d656c917e3c8c877630756c3026f2b6 +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-386.tar.gz) | 47f74b8d46ad1779c5b0b5f15aa15d5513a504eeb6f53db4201fbe9ff8956cb986b7c1b0e9d50a99f78e9e2a7f304f3fc1cc2fa239296d9a0dd408eb6069e975 +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-amd64.tar.gz) | 1a148e282628b008c8abd03dd12ec177ced17584b5115d92cd33dd251e607097d42e9da8c7089bd947134b900f85eb75a4740b6a5dd580c105455b843559df39 +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm.tar.gz) | d13d2feb73bd032dc01f7e2955b98d8215a39fe1107d037a73fa1f7d06c3b93ebaa53ed4952d845c64454ef3cca533edb97132d234d50b6fb3bcbd8a8ad990eb +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-arm64.tar.gz) | 8252105a17b09a78e9ad2c024e4e401a69764ac869708a071aaa06f81714c17b9e7c5b2eb8efde33f24d0b59f75c5da607d5e1e72bdf12adfbb8c829205cd1c1 +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-ppc64le.tar.gz) | 297a9082df4988389dc4be30eb636dff49f36f5d87047bab44745884e610f46a17ae3a08401e2cab155b7c439f38057bfd8288418215f7dd3bf6a49dbe61ea0e 
+[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-linux-s390x.tar.gz) | 04c06490dd17cd5dccfd92bafa14acf64280ceaea370d9635f23aeb6984d1beae6d0d1d1506edc6f30f927deeb149b989d3e482b47fbe74008b371f629656e79 +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-386.tar.gz) | ec6e9e87a7d685f8751d7e58f24f417753cff5554a7229218cb3a08195d461b2e12409344950228e9fbbc92a8a06d35dd86242da6ff1e6652ec1fae0365a88c1 +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-client-windows-amd64.tar.gz) | 51039e6221d3126b5d15e797002ae01d4f0b10789c5d2056532f27ef13f35c5a2e51be27764fda68e8303219963126559023aed9421313bec275c0827fbcaf8a ### Server binaries filename | sha512 hash -------- | ----------- -[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | ae82d14b1214e4100f0cc2c988308b3e1edd040a65267d0eddb9082409f79644e55387889e3c0904a12c710f91206e9383edf510990bee8c9ea2e297b6472551 -[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | 9a2a5828b7d1ddb16cc19d573e99a4af642f84129408e6203eeeb0558e7b8db77f3269593b5770b6a976fe9df4a64240ed27ad05a4bd43719e55fce1db0abf58 -[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | ed700dd226c999354ce05b73927388d36d08474c15333ae689427de15de27c84feb6b23c463afd9dd81993315f31eb8265938cfc7ecf6f750247aa42b9b33fa9 -[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | abb7a9d726538be3ccf5057a0c63ff9732b616e213c6ebb81363f0c49f1e168ce8068b870061ad7cba7ba1d49252f94cf00a5f68cec0f38dc8fce4e24edc5ca6 -[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 3a51888af1bfdd2d5b0101d173ee589c1f39240e4428165f5f85c610344db219625faa42f00a49a83ce943fb079be873b1a114a62003fae2f328f9bf9d1227a4 +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-amd64.tar.gz) | 4edf820930c88716263560275e3bd7fadb8dc3700b9f8e1d266562e356e0abeb1a913f536377dab91218e3940b447d6bf1da343b85da25c2256dc4dcde5798dd +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm.tar.gz) | b15213e53a8ab4ba512ce6ef9ad42dd197d419c61615cd23de344227fd846c90448d8f3d98e555b63ba5b565afa627cca6b7e3990ebbbba359c96f2391302df1 +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-arm64.tar.gz) | 5be29cca9a9358fc68351ee63e99d57dc2ffce6e42fc3345753dbbf7542ff2d770c4852424158540435fa6e097ce3afa9b13affc40c8b3b69fe8406798f8068f +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-ppc64le.tar.gz) | 89fd99ab9ce85db0b94b86709932105efc883cc93959cf7ea9a39e79a4acea23064d7010eeb577450cccabe521c04b7ba47bbec212ed37edeed7cb04bad34518 +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-server-linux-s390x.tar.gz) | 2fbc30862c77d247aa8d96ab9d1a144599505287b0033a3a2d0988958e7bb2f2e8b67f52c1fec74b4ec47d74ba22cd0f6cb5c4228acbaa72b1678d5fece0254d ### Node binaries filename | sha512 hash -------- | ----------- -[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | d0f28e3c38ca59a7ff1bfecb48a1ce97116520355d9286afdca1200d346c10018f5bbdf890f130a388654635a2e83e908b263ed45f8a88defca52a7c1d0a7984 
-[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | ed9d3f13028beb3be39bce980c966f82c4b39dc73beaae38cc075fea5be30b0309e555cb2af8196014f2cc9f0df823354213c314b4d6545ff6e30dd2d00ec90e -[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | ad5b3268db365dcdded9a9a4bffc90c7df0f844000349accdf2b8fb5f1081e553de9b9e9fb25d5e8a4ef7252d51fa94ef94d36d2ab31d157854e164136f662c2 -[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | c4de2524e513996def5eeba7b83f7b406f17eaf89d4d557833a93bd035348c81fa9375dcd5c27cfcc55d73995449fc8ee504be1b3bd7b9f108b0b2f153cb05ae -[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 9157b44e3e7bd5478af9f72014e54d1afa5cd19b984b4cd8b348b312c385016bb77f29db47f44aea08b58abf47d8a396b92a2d0e03f2fe8acdd30f4f9466cbdb -[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.20.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 8b40a43c5e6447379ad2ee8aac06e8028555e1b370a995f6001018a62411abe5fbbca6060b3d1682c5cadc07a27d49edd3204e797af46368800d55f4ca8aa1de +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-amd64.tar.gz) | 95658d321a0a371c0900b401d1469d96915310afbc4e4b9b11f031438bb188513b57d5a60b5316c3b0c18f541cda6f0ac42f59a76495f8abc743a067115da23a +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm.tar.gz) | f375acfb42aad6c65b833c270e7e3acfe9cd1d6b2441c33874e77faae263957f7acfe86f1b71f14298118595e4cc6952c7dea0c832f7f2e72428336f13034362 +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-arm64.tar.gz) | 43b4baccd58d74e7f48d096ab92f2bbbcdf47e30e7a3d2b56c6cc9f90002cfd4fefaac894f69bd5f9f4dbdb09a4749a77eb76b1b97d91746bd96fe94457879ab +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-ppc64le.tar.gz) | e7962b522c6c7c14b9ee4c1d254d8bdd9846b2b33b0443fc9c4a41be6c40e5e6981798b720f0148f36263d5cc45d5a2bb1dd2f9ab2838e3d002e45b9bddeb7bf +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-linux-s390x.tar.gz) | 49ebc97f01829e65f7de15be00b882513c44782eaadd1b1825a227e3bd3c73cc6aca8345af05b303d8c43aa2cb944a069755b2709effb8cc22eae621d25d4ba5 +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.21.0-alpha.1/kubernetes-node-windows-amd64.tar.gz) | 6e0fd7724b09e6befbcb53b33574e97f2db089f2eee4bbf391abb7f043103a5e6e32e3014c0531b88f9a3ca88887bbc68625752c44326f98dd53adb3a6d1bed8 -## Changelog since v1.20.0-alpha.0 +## Changelog since v1.20.0 ## Urgent Upgrade Notes ### (No, really, you MUST read this before you upgrade) - - Azure blob disk feature(`kind`: `Shared`, `Dedicated`) has been deprecated, you should use `kind`: `Managed` in `kubernetes.io/azure-disk` storage class. ([#92905](https://github.com/kubernetes/kubernetes/pull/92905), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] - - CVE-2020-8559 (Medium): Privilege escalation from compromised node to cluster. See https://github.com/kubernetes/kubernetes/issues/92914 for more details. - The API Server will no longer proxy non-101 responses for upgrade requests. This could break proxied backends (such as an extension API server) that respond to upgrade requests with a non-101 response code. 
([#92941](https://github.com/kubernetes/kubernetes/pull/92941), [@tallclair](https://github.com/tallclair)) [SIG API Machinery] - + - Kube-proxy's IPVS proxy mode no longer sets the net.ipv4.conf.all.route_localnet sysctl parameter. Nodes upgrading will have net.ipv4.conf.all.route_localnet set to 1 but new nodes will inherit the system default (usually 0). If you relied on any behavior requiring net.ipv4.conf.all.route_localnet, you must set ensure it is enabled as kube-proxy will no longer set it automatically. This change helps to further mitigate CVE-2020-8558. ([#92938](https://github.com/kubernetes/kubernetes/pull/92938), [@lbernail](https://github.com/lbernail)) [SIG Network and Release] + ## Changes by Kind ### Deprecation -- Kube-apiserver: the componentstatus API is deprecated. This API provided status of etcd, kube-scheduler, and kube-controller-manager components, but only worked when those components were local to the API server, and when kube-scheduler and kube-controller-manager exposed unsecured health endpoints. Instead of this API, etcd health is included in the kube-apiserver health check and kube-scheduler/kube-controller-manager health checks can be made directly against those components' health endpoints. ([#93570](https://github.com/kubernetes/kubernetes/pull/93570), [@liggitt](https://github.com/liggitt)) [SIG API Machinery, Apps and Cluster Lifecycle] -- Kubeadm: deprecate the "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. ([#92881](https://github.com/kubernetes/kubernetes/pull/92881), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated "kubeadm alpha kubelet config enable-dynamic" command. To continue using the feature please defer to the guide for "Dynamic Kubelet Configuration" at k8s.io. This change also removes the parent command "kubeadm alpha kubelet" as there are no more sub-commands under it for the time being. ([#94668](https://github.com/kubernetes/kubernetes/pull/94668), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove the deprecated --kubelet-config flag for the command "kubeadm upgrade node" ([#94869](https://github.com/kubernetes/kubernetes/pull/94869), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubelet's deprecated endpoint `metrics/resource/v1alpha1` has been removed, please adopt to `metrics/resource`. ([#94272](https://github.com/kubernetes/kubernetes/pull/94272), [@RainbowMango](https://github.com/RainbowMango)) [SIG Instrumentation and Node] -- The v1alpha1 PodPreset API and admission plugin has been removed with no built-in replacement. Admission webhooks can be used to modify pods on creation. ([#94090](https://github.com/kubernetes/kubernetes/pull/94090), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps, CLI, Cloud Provider, Scalability and Testing] +- Deprecate the `topologyKeys` field in Service. This capability will be replaced with upcoming work around Topology Aware Subsetting and Service Internal Traffic Policy. ([#96736](https://github.com/kubernetes/kubernetes/pull/96736), [@andrewsykim](https://github.com/andrewsykim)) [SIG Apps] +- Kubeadm: deprecated command "alpha selfhosting pivot" is removed now. 
([#97627](https://github.com/kubernetes/kubernetes/pull/97627), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: graduate the command `kubeadm alpha kubeconfig user` to `kubeadm kubeconfig user`. The `kubeadm alpha kubeconfig user` command is deprecated now. ([#97583](https://github.com/kubernetes/kubernetes/pull/97583), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Kubeadm: the "kubeadm alpha certs" command is removed now, please use "kubeadm certs" instead. ([#97706](https://github.com/kubernetes/kubernetes/pull/97706), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] +- Remove the deprecated metrics "scheduling_algorithm_preemption_evaluation_seconds" and "binding_duration_seconds", suggest to use "scheduler_framework_extension_point_duration_seconds" instead. ([#96447](https://github.com/kubernetes/kubernetes/pull/96447), [@chendave](https://github.com/chendave)) [SIG Cluster Lifecycle, Instrumentation, Scheduling and Testing] +- The PodSecurityPolicy API is deprecated in 1.21, and will no longer be served starting in 1.25. ([#97171](https://github.com/kubernetes/kubernetes/pull/97171), [@deads2k](https://github.com/deads2k)) [SIG Auth and CLI] ### API Change -- A new `nofuzz` go build tag now disables gofuzz support. Release binaries enable this. ([#92491](https://github.com/kubernetes/kubernetes/pull/92491), [@BenTheElder](https://github.com/BenTheElder)) [SIG API Machinery] -- A new alpha-level field, `SupportsFsGroup`, has been introduced for CSIDrivers to allow them to specify whether they support volume ownership and permission modifications. The `CSIVolumeSupportFSGroup` feature gate must be enabled to allow this field to be used. ([#92001](https://github.com/kubernetes/kubernetes/pull/92001), [@huffmanca](https://github.com/huffmanca)) [SIG API Machinery, CLI and Storage] -- Added pod version skew strategy for seccomp profile to synchronize the deprecated annotations with the new API Server fields. Please see the corresponding section [in the KEP](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/20190717-seccomp-ga.md#version-skew-strategy) for more detailed explanations. ([#91408](https://github.com/kubernetes/kubernetes/pull/91408), [@saschagrunert](https://github.com/saschagrunert)) [SIG Apps, Auth, CLI and Node] -- Adds the ability to disable Accelerator/GPU metrics collected by Kubelet ([#91930](https://github.com/kubernetes/kubernetes/pull/91930), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node] -- Custom Endpoints are now mirrored to EndpointSlices by a new EndpointSliceMirroring controller. ([#91637](https://github.com/kubernetes/kubernetes/pull/91637), [@robscott](https://github.com/robscott)) [SIG API Machinery, Apps, Auth, Cloud Provider, Instrumentation, Network and Testing] -- External facing API podresources is now available under k8s.io/kubelet/pkg/apis/ ([#92632](https://github.com/kubernetes/kubernetes/pull/92632), [@RenaudWasTaken](https://github.com/RenaudWasTaken)) [SIG Node and Testing] -- Fix conversions for custom metrics. ([#94481](https://github.com/kubernetes/kubernetes/pull/94481), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Generic ephemeral volumes, a new alpha feature under the `GenericEphemeralVolume` feature gate, provide a more flexible alternative to `EmptyDir` volumes: as with `EmptyDir`, volumes are created and deleted for each pod automatically by Kubernetes. 
But because the normal provisioning process is used (`PersistentVolumeClaim`), storage can be provided by third-party storage vendors and all of the usual volume features work. Volumes don't need to be empt; for example, restoring from snapshot is supported. ([#92784](https://github.com/kubernetes/kubernetes/pull/92784), [@pohly](https://github.com/pohly)) [SIG API Machinery, Apps, Auth, CLI, Instrumentation, Node, Scheduling, Storage and Testing] -- Kube-controller-manager: volume plugins can be restricted from contacting local and loopback addresses by setting `--volume-host-allow-local-loopback=false`, or from contacting specific CIDR ranges by setting `--volume-host-cidr-denylist` (for example, `--volume-host-cidr-denylist=127.0.0.1/28,feed::/16`) ([#91785](https://github.com/kubernetes/kubernetes/pull/91785), [@mattcary](https://github.com/mattcary)) [SIG API Machinery, Apps, Auth, CLI, Network, Node, Storage and Testing] -- Kubernetes is now built with golang 1.15.0-rc.1. - - The deprecated, legacy behavior of treating the CommonName field on X.509 serving certificates as a host name when no Subject Alternative Names are present is now disabled by default. It can be temporarily re-enabled by adding the value x509ignoreCN=0 to the GODEBUG environment variable. ([#93264](https://github.com/kubernetes/kubernetes/pull/93264), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Auth, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Node, Release, Scalability, Storage and Testing] -- Migrate scheduler, controller-manager and cloud-controller-manager to use LeaseLock ([#94603](https://github.com/kubernetes/kubernetes/pull/94603), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery, Apps, Cloud Provider and Scheduling] -- Modify DNS-1123 error messages to indicate that RFC 1123 is not followed exactly ([#94182](https://github.com/kubernetes/kubernetes/pull/94182), [@mattfenwick](https://github.com/mattfenwick)) [SIG API Machinery, Apps, Auth, Network and Node] -- The ServiceAccountIssuerDiscovery feature gate is now Beta and enabled by default. ([#91921](https://github.com/kubernetes/kubernetes/pull/91921), [@mtaufen](https://github.com/mtaufen)) [SIG Auth] -- The kube-controller-manager managed signers can now have distinct signing certificates and keys. See the help about `--cluster-signing-[signer-name]-{cert,key}-file`. `--cluster-signing-{cert,key}-file` is still the default. ([#90822](https://github.com/kubernetes/kubernetes/pull/90822), [@deads2k](https://github.com/deads2k)) [SIG API Machinery, Apps and Auth] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.tls[*].secretName` values are required to pass validation rules for Secret API object names. ([#93929](https://github.com/kubernetes/kubernetes/pull/93929), [@liggitt](https://github.com/liggitt)) [SIG Network] -- WinOverlay feature graduated to beta ([#94807](https://github.com/kubernetes/kubernetes/pull/94807), [@ksubrmnn](https://github.com/ksubrmnn)) [SIG Windows] +- Change the APIVersion proto name of BoundObjectRef from aPIVersion to apiVersion. ([#97379](https://github.com/kubernetes/kubernetes/pull/97379), [@kebe7jun](https://github.com/kebe7jun)) [SIG Auth] +- Promote Immutable Secrets/ConfigMaps feature to Stable. + This allows to set `Immutable` field in Secrets or ConfigMap object to mark their contents as immutable. 
([#97615](https://github.com/kubernetes/kubernetes/pull/97615), [@wojtek-t](https://github.com/wojtek-t)) [SIG Apps, Architecture, Node and Testing] ### Feature -- ACTION REQUIRED : In CoreDNS v1.7.0, [metrics names have been changed](https://github.com/coredns/coredns/blob/master/notes/coredns-1.7.0.md#metric-changes) which will be backward incompatible with existing reporting formulas that use the old metrics' names. Adjust your formulas to the new names before upgrading. - - Kubeadm now includes CoreDNS version v1.7.0. Some of the major changes include: - - Fixed a bug that could cause CoreDNS to stop updating service records. - - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set. - - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin. - - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names. - - The federation plugin (allows for v1 Kubernetes federation) has been removed. - More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92651](https://github.com/kubernetes/kubernetes/pull/92651), [@rajansandeep](https://github.com/rajansandeep)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle and Instrumentation] -- Add metrics for azure service operations (route and loadbalancer). ([#94124](https://github.com/kubernetes/kubernetes/pull/94124), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider and Instrumentation] -- Add network rule support in Azure account creation ([#94239](https://github.com/kubernetes/kubernetes/pull/94239), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Add tags support for Azure File Driver ([#92825](https://github.com/kubernetes/kubernetes/pull/92825), [@ZeroMagic](https://github.com/ZeroMagic)) [SIG Cloud Provider and Storage] -- Added kube-apiserver metrics: apiserver_current_inflight_request_measures and, when API Priority and Fairness is enable, windowed_request_stats. ([#91177](https://github.com/kubernetes/kubernetes/pull/91177), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery, Instrumentation and Testing] -- Audit events for API requests to deprecated API versions now include a `"k8s.io/deprecated": "true"` audit annotation. If a target removal release is identified, the audit event includes a `"k8s.io/removal-release": "."` audit annotation as well. 
([#92842](https://github.com/kubernetes/kubernetes/pull/92842), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Instrumentation] -- Cloud node-controller use InstancesV2 ([#91319](https://github.com/kubernetes/kubernetes/pull/91319), [@gongguan](https://github.com/gongguan)) [SIG Apps, Cloud Provider, Scalability and Storage] -- Kubeadm: Add a preflight check that the control-plane node has at least 1700MB of RAM ([#93275](https://github.com/kubernetes/kubernetes/pull/93275), [@xlgao-zju](https://github.com/xlgao-zju)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--cluster-name" flag to the "kubeadm alpha kubeconfig user" to allow configuring the cluster name in the generated kubeconfig file ([#93992](https://github.com/kubernetes/kubernetes/pull/93992), [@prabhu43](https://github.com/prabhu43)) [SIG Cluster Lifecycle] -- Kubeadm: add the "--kubeconfig" flag to the "kubeadm init phase upload-certs" command to allow users to pass a custom location for a kubeconfig file. ([#94765](https://github.com/kubernetes/kubernetes/pull/94765), [@zhanw15](https://github.com/zhanw15)) [SIG Cluster Lifecycle] -- Kubeadm: deprecate the "--csr-only" and "--csr-dir" flags of the "kubeadm init phase certs" subcommands. Please use "kubeadm alpha certs generate-csr" instead. This new command allows you to generate new private keys and certificate signing requests for all the control-plane components, so that the certificates can be signed by an external CA. ([#92183](https://github.com/kubernetes/kubernetes/pull/92183), [@wallrj](https://github.com/wallrj)) [SIG Cluster Lifecycle] -- Kubeadm: make etcd pod request 100m CPU, 100Mi memory and 100Mi ephemeral_storage by default ([#94479](https://github.com/kubernetes/kubernetes/pull/94479), [@knight42](https://github.com/knight42)) [SIG Cluster Lifecycle] -- Kubemark now supports both real and hollow nodes in a single cluster. ([#93201](https://github.com/kubernetes/kubernetes/pull/93201), [@ellistarn](https://github.com/ellistarn)) [SIG Scalability] -- Kubernetes is now built using go1.15.2 - - build: Update to k/repo-infra@v0.1.1 (supports go1.15.2) - - build: Use go-runner:buster-v2.0.1 (built using go1.15.1) - - bazel: Replace --features with Starlark build settings flag - - hack/lib/util.sh: some bash cleanups - - - switched one spot to use kube::logging - - make kube::util::find-binary return an error when it doesn't find - anything so that hack scripts fail fast instead of with '' binary not - found errors. - - this required deleting some genfeddoc stuff. the binary no longer - exists in k/k repo since we removed federation/, and I don't see it - in https://github.com/kubernetes-sigs/kubefed/ either. I'm assuming - that it's gone for good now. - - - bazel: output go_binary rule directly from go_binary_conditional_pure - - From: @mikedanese: - Instead of aliasing. Aliases are annoying in a number of ways. This is - specifically bugging me now because they make the action graph harder to - analyze programmatically. By using aliases here, we would need to handle - potentially aliased go_binary targets and dereference to the effective - target. - - The comment references an issue with `pure = select(...)` which appears - to be resolved considering this now builds. - - - make kube::util::find-binary not dependent on bazel-out/ structure - - Implement an aspect that outputs go_build_mode metadata for go binaries, - and use that during binary selection. 
([#94449](https://github.com/kubernetes/kubernetes/pull/94449), [@justaugustus](https://github.com/justaugustus)) [SIG Architecture, CLI, Cluster Lifecycle, Node, Release and Testing] -- Only update Azure data disks when attach/detach ([#94265](https://github.com/kubernetes/kubernetes/pull/94265), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Promote SupportNodePidsLimit to GA to provide node to pod pid isolation - Promote SupportPodPidsLimit to GA to provide ability to limit pids per pod ([#94140](https://github.com/kubernetes/kubernetes/pull/94140), [@derekwaynecarr](https://github.com/derekwaynecarr)) [SIG Node and Testing] -- Rename pod_preemption_metrics to preemption_metrics. ([#93256](https://github.com/kubernetes/kubernetes/pull/93256), [@ahg-g](https://github.com/ahg-g)) [SIG Instrumentation and Scheduling] -- Server-side apply behavior has been regularized in the case where a field is removed from the applied configuration. Removed fields which have no other owners are deleted from the live object, or reset to their default value if they have one. Safe ownership transfers, such as the transfer of a `replicas` field from a user to an HPA without resetting to the default value are documented in [Transferring Ownership](/docs/reference/using-api/server-side-apply/#transferring-ownership) ([#92661](https://github.com/kubernetes/kubernetes/pull/92661), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Testing] -- Set CSIMigrationvSphere feature gates to beta. - Users should enable CSIMigration + CSIMigrationvSphere features and install the vSphere CSI Driver (https://github.com/kubernetes-sigs/vsphere-csi-driver) to move workload from the in-tree vSphere plugin "kubernetes.io/vsphere-volume" to vSphere CSI Driver. - - Requires: vSphere vCenter/ESXi Version: 7.0u1, HW Version: VM version 15 ([#92816](https://github.com/kubernetes/kubernetes/pull/92816), [@divyenpatel](https://github.com/divyenpatel)) [SIG Cloud Provider and Storage] -- Support [service.beta.kubernetes.io/azure-pip-ip-tags] annotations to allow customers to specify ip-tags to influence public-ip creation in Azure [Tag1=Value1, Tag2=Value2, etc.] ([#94114](https://github.com/kubernetes/kubernetes/pull/94114), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Support a smooth upgrade from client-side apply to server-side apply without conflicts, as well as support the corresponding downgrade. ([#90187](https://github.com/kubernetes/kubernetes/pull/90187), [@julianvmodesto](https://github.com/julianvmodesto)) [SIG API Machinery and Testing] -- Trace output in apiserver logs is more organized and comprehensive. Traces are nested, and for all non-long running request endpoints, the entire filter chain is instrumented (e.g. authentication check is included). ([#88936](https://github.com/kubernetes/kubernetes/pull/88936), [@jpbetz](https://github.com/jpbetz)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Scheduling] -- `kubectl alpha debug` now supports debugging nodes by creating a debugging container running in the node's host namespaces. ([#92310](https://github.com/kubernetes/kubernetes/pull/92310), [@verb](https://github.com/verb)) [SIG CLI] - -### Documentation - -- Kubelet: remove alpha warnings for CNI flags. 
([#94508](https://github.com/kubernetes/kubernetes/pull/94508), [@andrewsykim](https://github.com/andrewsykim)) [SIG Network and Node] - -### Failing Test - -- Kube-proxy iptables min-sync-period defaults to 1 sec. Previously, it was 0. ([#92836](https://github.com/kubernetes/kubernetes/pull/92836), [@aojea](https://github.com/aojea)) [SIG Network] +- Add flag --lease-max-object-size and metric etcd_lease_object_counts for kube-apiserver to config and observe max objects attached to a single etcd lease. ([#97480](https://github.com/kubernetes/kubernetes/pull/97480), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery, Instrumentation and Scalability] +- Add flag --lease-reuse-duration-seconds for kube-apiserver to config etcd lease reuse duration. ([#97009](https://github.com/kubernetes/kubernetes/pull/97009), [@lingsamuel](https://github.com/lingsamuel)) [SIG API Machinery and Scalability] +- Adds the ability to pass --strict-transport-security-directives to the kube-apiserver to set the HSTS header appropriately. Be sure you understand the consequences to browsers before setting this field. ([#96502](https://github.com/kubernetes/kubernetes/pull/96502), [@249043822](https://github.com/249043822)) [SIG Auth] +- Kubeadm now includes CoreDNS v1.8.0. ([#96429](https://github.com/kubernetes/kubernetes/pull/96429), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cluster Lifecycle] +- Kubeadm: add support for certificate chain validation. When using kubeadm in external CA mode, this allows an intermediate CA to be used to sign the certificates. The intermediate CA certificate must be appended to each signed certificate for this to work correctly. ([#97266](https://github.com/kubernetes/kubernetes/pull/97266), [@robbiemcmichael](https://github.com/robbiemcmichael)) [SIG Cluster Lifecycle] +- Kubeadm: amend the node kernel validation to treat CGROUP_PIDS, FAIR_GROUP_SCHED as required and CFS_BANDWIDTH, CGROUP_HUGETLB as optional ([#96378](https://github.com/kubernetes/kubernetes/pull/96378), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle and Node] +- The Kubernetes pause image manifest list now contains an image for Windows Server 20H2. ([#97322](https://github.com/kubernetes/kubernetes/pull/97322), [@claudiubelu](https://github.com/claudiubelu)) [SIG Windows] +- The apimachinery util/net function used to detect the bind address `ResolveBindAddress()` + takes into consideration global ip addresses on loopback interfaces when: + - the host has default routes + - there are no global IPs on those interfaces. + in order to support more complex network scenarios like BGP Unnumbered RFC 5549 ([#95790](https://github.com/kubernetes/kubernetes/pull/95790), [@aojea](https://github.com/aojea)) [SIG Network] ### Bug or Regression -- A panic in the apiserver caused by the `informer-sync` health checker is now fixed. ([#93600](https://github.com/kubernetes/kubernetes/pull/93600), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG API Machinery] -- Add kubectl wait --ignore-not-found flag ([#90969](https://github.com/kubernetes/kubernetes/pull/90969), [@zhouya0](https://github.com/zhouya0)) [SIG CLI] -- Adding fix to the statefulset controller to wait for pvc deletion before creating pods. 
([#93457](https://github.com/kubernetes/kubernetes/pull/93457), [@ymmt2005](https://github.com/ymmt2005)) [SIG Apps] -- Azure ARM client: don't segfault on empty response and http error ([#94078](https://github.com/kubernetes/kubernetes/pull/94078), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Azure: fix a bug that kube-controller-manager would panic if wrong Azure VMSS name is configured ([#94306](https://github.com/kubernetes/kubernetes/pull/94306), [@knight42](https://github.com/knight42)) [SIG Cloud Provider] -- Azure: per VMSS VMSS VMs cache to prevent throttling on clusters having many attached VMSS ([#93107](https://github.com/kubernetes/kubernetes/pull/93107), [@bpineau](https://github.com/bpineau)) [SIG Cloud Provider] -- Both apiserver_request_duration_seconds metrics and RequestReceivedTimestamp field of an audit event take - into account the time a request spends in the apiserver request filters. ([#94903](https://github.com/kubernetes/kubernetes/pull/94903), [@tkashem](https://github.com/tkashem)) [SIG API Machinery, Auth and Instrumentation] -- Build/lib/release: Explicitly use '--platform' in building server images - - When we switched to go-runner for building the apiserver, - controller-manager, and scheduler server components, we no longer - reference the individual architectures in the image names, specifically - in the 'FROM' directive of the server image Dockerfiles. - - As a result, server images for non-amd64 images copy in the go-runner - amd64 binary instead of the go-runner that matches that architecture. - - This commit explicitly sets the '--platform=linux/${arch}' to ensure - we're pulling the correct go-runner arch from the manifest list. - - Before: - `FROM ${base_image}` - - After: - `FROM --platform=linux/${arch} ${base_image}` ([#94552](https://github.com/kubernetes/kubernetes/pull/94552), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- CSIDriver object can be deployed during volume attachment. ([#93710](https://github.com/kubernetes/kubernetes/pull/93710), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Apps, Node, Storage and Testing] -- CVE-2020-8557 (Medium): Node-local denial of service via container /etc/hosts file. See https://github.com/kubernetes/kubernetes/issues/93032 for more details. ([#92916](https://github.com/kubernetes/kubernetes/pull/92916), [@joelsmith](https://github.com/joelsmith)) [SIG Node] -- Do not add nodes labeled with kubernetes.azure.com/managed=false to backend pool of load balancer. ([#93034](https://github.com/kubernetes/kubernetes/pull/93034), [@matthias50](https://github.com/matthias50)) [SIG Cloud Provider] -- Do not fail sorting empty elements. 
([#94666](https://github.com/kubernetes/kubernetes/pull/94666), [@soltysh](https://github.com/soltysh)) [SIG CLI] -- Do not retry volume expansion if CSI driver returns FailedPrecondition error ([#92986](https://github.com/kubernetes/kubernetes/pull/92986), [@gnufied](https://github.com/gnufied)) [SIG Node and Storage] -- Dockershim security: pod sandbox now always run with `no-new-privileges` and `runtime/default` seccomp profile - dockershim seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90948](https://github.com/kubernetes/kubernetes/pull/90948), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- Dual-stack: make nodeipam compatible with existing single-stack clusters when dual-stack feature gate become enabled by default ([#90439](https://github.com/kubernetes/kubernetes/pull/90439), [@SataQiu](https://github.com/SataQiu)) [SIG API Machinery] -- Endpoint controller requeues service after an endpoint deletion event occurs to confirm that deleted endpoints are undesired to mitigate the effects of an out of sync endpoint cache. ([#93030](https://github.com/kubernetes/kubernetes/pull/93030), [@swetharepakula](https://github.com/swetharepakula)) [SIG Apps and Network] -- EndpointSlice controllers now return immediately if they encounter an error creating, updating, or deleting resources. ([#93908](https://github.com/kubernetes/kubernetes/pull/93908), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now copies labels from Endpoints to EndpointSlices. ([#93442](https://github.com/kubernetes/kubernetes/pull/93442), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- EndpointSliceMirroring controller now mirrors Endpoints that do not have a Service associated with them. ([#94171](https://github.com/kubernetes/kubernetes/pull/94171), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Ensure backoff step is set to 1 for Azure armclient. 
([#94180](https://github.com/kubernetes/kubernetes/pull/94180), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Ensure getPrimaryInterfaceID not panic when network interfaces for Azure VMSS are null ([#94355](https://github.com/kubernetes/kubernetes/pull/94355), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Eviction requests for pods that have a non-zero DeletionTimestamp will always succeed ([#91342](https://github.com/kubernetes/kubernetes/pull/91342), [@michaelgugino](https://github.com/michaelgugino)) [SIG Apps] -- Extended DSR loadbalancer feature in winkernel kube-proxy to HNS versions 9.3-9.max, 10.2+ ([#93080](https://github.com/kubernetes/kubernetes/pull/93080), [@elweb9858](https://github.com/elweb9858)) [SIG Network] -- Fix HandleCrash order ([#93108](https://github.com/kubernetes/kubernetes/pull/93108), [@lixiaobing1](https://github.com/lixiaobing1)) [SIG API Machinery] -- Fix a concurrent map writes error in kubelet ([#93773](https://github.com/kubernetes/kubernetes/pull/93773), [@knight42](https://github.com/knight42)) [SIG Node] -- Fix a regression where kubeadm bails out with a fatal error when an optional version command line argument is supplied to the "kubeadm upgrade plan" command ([#94421](https://github.com/kubernetes/kubernetes/pull/94421), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle] -- Fix azure file migration panic ([#94853](https://github.com/kubernetes/kubernetes/pull/94853), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix bug where loadbalancer deletion gets stuck because of missing resource group #75198 ([#93962](https://github.com/kubernetes/kubernetes/pull/93962), [@phiphi282](https://github.com/phiphi282)) [SIG Cloud Provider] -- Fix calling AttachDisk on a previously attached EBS volume ([#93567](https://github.com/kubernetes/kubernetes/pull/93567), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider, Storage and Testing] -- Fix detection of image filesystem, disk metrics for devicemapper, detection of OOM Kills on 5.0+ linux kernels. ([#92919](https://github.com/kubernetes/kubernetes/pull/92919), [@dashpole](https://github.com/dashpole)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation and Node] -- Fix etcd_object_counts metric reported by kube-apiserver ([#94773](https://github.com/kubernetes/kubernetes/pull/94773), [@tkashem](https://github.com/tkashem)) [SIG API Machinery] -- Fix incorrectly reported verbs for kube-apiserver metrics for CRD objects ([#93523](https://github.com/kubernetes/kubernetes/pull/93523), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Instrumentation] -- Fix instance not found issues when an Azure Node is recreated in a short time ([#93316](https://github.com/kubernetes/kubernetes/pull/93316), [@feiskyer](https://github.com/feiskyer)) [SIG Cloud Provider] -- Fix kube-apiserver /readyz to contain "informer-sync" check ensuring that internal informers are synced. ([#93670](https://github.com/kubernetes/kubernetes/pull/93670), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Fix kubectl SchemaError on CRDs with schema using x-kubernetes-preserve-unknown-fields on array types. ([#94888](https://github.com/kubernetes/kubernetes/pull/94888), [@sttts](https://github.com/sttts)) [SIG API Machinery] -- Fix memory leak in EndpointSliceTracker for EndpointSliceMirroring controller. 
([#93441](https://github.com/kubernetes/kubernetes/pull/93441), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- Fix missing csi annotations on node during parallel csinode update. ([#94389](https://github.com/kubernetes/kubernetes/pull/94389), [@pacoxu](https://github.com/pacoxu)) [SIG Storage] -- Fix the `cloudprovider_azure_api_request_duration_seconds` metric buckets to correctly capture the latency metrics. Previously, the majority of the calls would fall in the "+Inf" bucket. ([#94873](https://github.com/kubernetes/kubernetes/pull/94873), [@marwanad](https://github.com/marwanad)) [SIG Cloud Provider and Instrumentation] -- Fix: azure disk resize error if source does not exist ([#93011](https://github.com/kubernetes/kubernetes/pull/93011), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: detach azure disk broken on Azure Stack ([#94885](https://github.com/kubernetes/kubernetes/pull/94885), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fix: determine the correct ip config based on ip family ([#93043](https://github.com/kubernetes/kubernetes/pull/93043), [@aramase](https://github.com/aramase)) [SIG Cloud Provider] -- Fix: initial delay in mounting azure disk & file ([#93052](https://github.com/kubernetes/kubernetes/pull/93052), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fix: use sensitiveOptions on Windows mount ([#94126](https://github.com/kubernetes/kubernetes/pull/94126), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] -- Fixed Ceph RBD volume expansion when no ceph.conf exists ([#92027](https://github.com/kubernetes/kubernetes/pull/92027), [@juliantaylor](https://github.com/juliantaylor)) [SIG Storage] -- Fixed a bug where improper storage and comparison of endpoints led to excessive API traffic from the endpoints controller ([#94112](https://github.com/kubernetes/kubernetes/pull/94112), [@damemi](https://github.com/damemi)) [SIG Apps, Network and Testing] -- Fixed a bug whereby the allocation of reusable CPUs and devices was not being honored when the TopologyManager was enabled ([#93189](https://github.com/kubernetes/kubernetes/pull/93189), [@klueska](https://github.com/klueska)) [SIG Node] -- Fixed a panic in kubectl debug when pod has multiple init containers or ephemeral containers ([#94580](https://github.com/kubernetes/kubernetes/pull/94580), [@kiyoshim55](https://github.com/kiyoshim55)) [SIG CLI] -- Fixed a regression that sometimes prevented `kubectl portforward` to work when TCP and UDP services were configured on the same port ([#94728](https://github.com/kubernetes/kubernetes/pull/94728), [@amorenoz](https://github.com/amorenoz)) [SIG CLI] -- Fixed bug in reflector that couldn't recover from "Too large resource version" errors with API servers 1.17.0-1.18.5 ([#94316](https://github.com/kubernetes/kubernetes/pull/94316), [@janeczku](https://github.com/janeczku)) [SIG API Machinery] -- Fixed bug where kubectl top pod output is not sorted when --sort-by and --containers flags are used together ([#93692](https://github.com/kubernetes/kubernetes/pull/93692), [@brianpursley](https://github.com/brianpursley)) [SIG CLI] -- Fixed kubelet creating extra sandbox for pods with RestartPolicyOnFailure after all containers succeeded ([#92614](https://github.com/kubernetes/kubernetes/pull/92614), [@tnqn](https://github.com/tnqn)) [SIG Node and Testing] -- Fixed memory leak in endpointSliceTracker 
([#92838](https://github.com/kubernetes/kubernetes/pull/92838), [@tnqn](https://github.com/tnqn)) [SIG Apps and Network] -- Fixed node data lost in kube-scheduler for clusters with imbalance on number of nodes across zones ([#93355](https://github.com/kubernetes/kubernetes/pull/93355), [@maelk](https://github.com/maelk)) [SIG Scheduling] -- Fixed the EndpointSliceController to correctly create endpoints for IPv6-only pods. - - Fixed the EndpointController to allow IPv6 headless services, if the IPv6DualStack - feature gate is enabled, by specifying `ipFamily: IPv6` on the service. (This already - worked with the EndpointSliceController.) ([#91399](https://github.com/kubernetes/kubernetes/pull/91399), [@danwinship](https://github.com/danwinship)) [SIG Apps and Network] -- Fixes a bug evicting pods after a taint with a limited tolerationSeconds toleration is removed from a node ([#93722](https://github.com/kubernetes/kubernetes/pull/93722), [@liggitt](https://github.com/liggitt)) [SIG Apps and Node] -- Fixes a bug where EndpointSlices would not be recreated after rapid Service recreation. ([#94730](https://github.com/kubernetes/kubernetes/pull/94730), [@robscott](https://github.com/robscott)) [SIG Apps, Network and Testing] -- Fixes a race condition in kubelet pod handling ([#94751](https://github.com/kubernetes/kubernetes/pull/94751), [@auxten](https://github.com/auxten)) [SIG Node] -- Fixes an issue proxying to ipv6 pods without specifying a port ([#94834](https://github.com/kubernetes/kubernetes/pull/94834), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Network] -- Fixes an issue that can result in namespaced custom resources being orphaned when their namespace is deleted, if the CRD defining the custom resource is removed concurrently with namespaces being deleted, then recreated. ([#93790](https://github.com/kubernetes/kubernetes/pull/93790), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Apps] -- Ignore root user check when windows pod starts ([#92355](https://github.com/kubernetes/kubernetes/pull/92355), [@wawa0210](https://github.com/wawa0210)) [SIG Node and Windows] -- Increased maximum IOPS of AWS EBS io1 volumes to 64,000 (current AWS maximum). ([#90014](https://github.com/kubernetes/kubernetes/pull/90014), [@jacobmarble](https://github.com/jacobmarble)) [SIG Cloud Provider and Storage] -- K8s.io/apimachinery: runtime.DefaultUnstructuredConverter.FromUnstructured now handles converting integer fields to typed float values ([#93250](https://github.com/kubernetes/kubernetes/pull/93250), [@liggitt](https://github.com/liggitt)) [SIG API Machinery] -- Kube-aggregator certificates are dynamically loaded on change from disk ([#92791](https://github.com/kubernetes/kubernetes/pull/92791), [@p0lyn0mial](https://github.com/p0lyn0mial)) [SIG API Machinery] -- Kube-apiserver: fixed a bug returning inconsistent results from list requests which set a field or label selector and set a paging limit ([#94002](https://github.com/kubernetes/kubernetes/pull/94002), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery] -- Kube-apiserver: jsonpath expressions with consecutive recursive descent operators are no longer evaluated for custom resource printer columns ([#93408](https://github.com/kubernetes/kubernetes/pull/93408), [@joelsmith](https://github.com/joelsmith)) [SIG API Machinery] -- Kube-proxy now trims extra spaces found in loadBalancerSourceRanges to match Service validation. 
([#94107](https://github.com/kubernetes/kubernetes/pull/94107), [@robscott](https://github.com/robscott)) [SIG Network]
-- Kube-up now includes CoreDNS version v1.7.0. Some of the major changes include:
-  - Fixed a bug that could cause CoreDNS to stop updating service records.
-  - Fixed a bug in the forward plugin where only the first upstream server is always selected no matter which policy is set.
-  - Remove already deprecated options `resyncperiod` and `upstream` in the Kubernetes plugin.
-  - Includes Prometheus metrics name changes (to bring them in line with standard Prometheus metrics naming convention). They will be backward incompatible with existing reporting formulas that use the old metrics' names.
-  - The federation plugin (allows for v1 Kubernetes federation) has been removed.
-  More details are available in https://coredns.io/2020/06/15/coredns-1.7.0-release/ ([#92718](https://github.com/kubernetes/kubernetes/pull/92718), [@rajansandeep](https://github.com/rajansandeep)) [SIG Cloud Provider]
-- Kubeadm now makes sure the etcd manifest is regenerated upon upgrade even when no etcd version change takes place ([#94395](https://github.com/kubernetes/kubernetes/pull/94395), [@rosti](https://github.com/rosti)) [SIG Cluster Lifecycle]
-- Kubeadm: avoid a panic when determining if the running version of CoreDNS is supported during upgrades ([#94299](https://github.com/kubernetes/kubernetes/pull/94299), [@zouyee](https://github.com/zouyee)) [SIG Cluster Lifecycle]
-- Kubeadm: ensure "kubeadm reset" does not unmount the root "/var/lib/kubelet" directory if it is mounted by the user ([#93702](https://github.com/kubernetes/kubernetes/pull/93702), [@thtanaka](https://github.com/thtanaka)) [SIG Cluster Lifecycle]
-- Kubeadm: ensure the etcd data directory is created with 0700 permissions during control-plane init and join ([#94102](https://github.com/kubernetes/kubernetes/pull/94102), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: fix the bug that kubeadm tries to call 'docker info' even if the CRI socket was for another CRI ([#94555](https://github.com/kubernetes/kubernetes/pull/94555), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle]
-- Kubeadm: make the kubeconfig files for the kube-controller-manager and kube-scheduler use the LocalAPIEndpoint instead of the ControlPlaneEndpoint. This makes kubeadm clusters more resilient to version skew problems during immutable upgrades: https://kubernetes.io/docs/setup/release/version-skew-policy/#kube-controller-manager-kube-scheduler-and-cloud-controller-manager ([#94398](https://github.com/kubernetes/kubernetes/pull/94398), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle]
-- Kubeadm: relax the validation of kubeconfig server URLs. Allow the user to define custom kubeconfig server URLs without erroring out during validation of existing kubeconfig files (e.g. when using external CA mode). 
([#94816](https://github.com/kubernetes/kubernetes/pull/94816), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubeadm: remove duplicate DNS names and IP addresses from generated certificates ([#92753](https://github.com/kubernetes/kubernetes/pull/92753), [@QianChenglong](https://github.com/QianChenglong)) [SIG Cluster Lifecycle] -- Kubelet: assume that swap is disabled when `/proc/swaps` does not exist ([#93931](https://github.com/kubernetes/kubernetes/pull/93931), [@SataQiu](https://github.com/SataQiu)) [SIG Node] -- Kubelet: fix race condition in pluginWatcher ([#93622](https://github.com/kubernetes/kubernetes/pull/93622), [@knight42](https://github.com/knight42)) [SIG Node] -- Kuberuntime security: pod sandbox now always runs with `runtime/default` seccomp profile - kuberuntime seccomp: custom profiles can now have smaller seccomp profiles when set at pod level ([#90949](https://github.com/kubernetes/kubernetes/pull/90949), [@pjbgf](https://github.com/pjbgf)) [SIG Node] -- NONE ([#71269](https://github.com/kubernetes/kubernetes/pull/71269), [@DeliangFan](https://github.com/DeliangFan)) [SIG Node] -- New Azure instance types do now have correct max data disk count information. ([#94340](https://github.com/kubernetes/kubernetes/pull/94340), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Cloud Provider and Storage] -- Pods with invalid Affinity/AntiAffinity LabelSelectors will now fail scheduling when these plugins are enabled ([#93660](https://github.com/kubernetes/kubernetes/pull/93660), [@damemi](https://github.com/damemi)) [SIG Scheduling] -- Require feature flag CustomCPUCFSQuotaPeriod if setting a non-default cpuCFSQuotaPeriod in kubelet config. ([#94687](https://github.com/kubernetes/kubernetes/pull/94687), [@karan](https://github.com/karan)) [SIG Node] -- Reverted devicemanager for Windows node added in 1.19rc1. ([#93263](https://github.com/kubernetes/kubernetes/pull/93263), [@liggitt](https://github.com/liggitt)) [SIG Node and Windows] -- Scheduler bugfix: Scheduler doesn't lose pod information when nodes are quickly recreated. This could happen when nodes are restarted or quickly recreated reusing a nodename. ([#93938](https://github.com/kubernetes/kubernetes/pull/93938), [@alculquicondor](https://github.com/alculquicondor)) [SIG Scalability, Scheduling and Testing] -- The EndpointSlice controller now waits for EndpointSlice and Node caches to be synced before starting. ([#94086](https://github.com/kubernetes/kubernetes/pull/94086), [@robscott](https://github.com/robscott)) [SIG Apps and Network] -- The `/debug/api_priority_and_fairness/dump_requests` path at an apiserver will no longer return a phantom line for each exempt priority level. ([#93406](https://github.com/kubernetes/kubernetes/pull/93406), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] -- The kubelet recognizes the --containerd-namespace flag to configure the namespace used by cadvisor. ([#87054](https://github.com/kubernetes/kubernetes/pull/87054), [@changyaowei](https://github.com/changyaowei)) [SIG Node] -- The terminationGracePeriodSeconds from pod spec is respected for the mirror pod. 
([#92442](https://github.com/kubernetes/kubernetes/pull/92442), [@tedyu](https://github.com/tedyu)) [SIG Node and Testing] -- Update Calico to v3.15.2 ([#94241](https://github.com/kubernetes/kubernetes/pull/94241), [@lmm](https://github.com/lmm)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.13 ([#94287](https://github.com/kubernetes/kubernetes/pull/94287), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Updated Cluster Autoscaler to 1.19.0; ([#93577](https://github.com/kubernetes/kubernetes/pull/93577), [@vivekbagade](https://github.com/vivekbagade)) [SIG Autoscaling and Cloud Provider] -- Use NLB Subnet CIDRs instead of VPC CIDRs in Health Check SG Rules ([#93515](https://github.com/kubernetes/kubernetes/pull/93515), [@t0rr3sp3dr0](https://github.com/t0rr3sp3dr0)) [SIG Cloud Provider] +- ## Changelog + + ### General + - Fix priority expander falling back to a random choice even though there is a higher priority option to choose + - Clone `kubernetes/kubernetes` in `update-vendor.sh` shallowly, instead of fetching all revisions + - Speed up binpacking by reducing the number of PreFilter calls (call once per pod instead of #pods*#nodes times) + - Speed up finding unneeded nodes by 5x+ in very large clusters by reducing the number of PreFilter calls + - Expose `--max-nodes-total` as a metric + - Errors in `IncreaseSize` changed from type `apiError` to `cloudProviderError` + - Make `build-in-docker` and `test-in-docker` work on Linux systems with SELinux enabled + - Fix an error where existing nodes were not considered as destinations while finding place for pods in scale-down simulations + - Remove redundant log lines and reduce severity around parsing kubeEnv + - Don't treat nodes created by virtual kubelet as nodes from non-autoscaled node groups + - Remove redundant logging around calculating node utilization + - Add configurable `--network` and `--rm` flags for docker in `Makefile` + - Subtract DaemonSet pods' requests from node allocatable in the denominator while computing node utilization + - Include taints by condition when determining if a node is unready/still starting + - Fix `update-vendor.sh` to work on OSX and zsh + - Add best-effort eviction for DaemonSet pods while scaling down non-empty nodes + - Add build support for ARM64 + + ### AliCloud + - Add missing daemonsets and replicasets to ALI example cluster role + + ### Apache CloudStack + - Add support for Apache CloudStack + + ### AWS + - Regenerate list of EC2 instances + - Fix pricing endpoint in AWS China Region + + ### Azure + - Add optional jitter on initial VMSS VM cache refresh, keep the refreshes spread over time + - Serve from cache for the whole period of ongoing throttling + - Fix unwanted VMSS VMs cache invalidations + - Enforce setting the number of retries if cloud provider backoff is enabled + - Don't update capacity if VMSS provisioning state is updating + - Support allocatable resources overrides via VMSS tags + - Add missing stable labels in template nodes + - Proactively set instance status to deleting on node deletions + + ### Cluster API + - Migrate interaction with the API from using internal types to using Unstructured + - Improve tests to work better with constrained resources + - Add support for node autodiscovery + - Add support for `--cloud-config` + - Update group identifier to use for Cluster API annotations + + ### Exoscale + - Add support for Exoscale + + ### GCE + - Decrease the number of GCE Read Requests made 
while deleting nodes + - Base pricing of custom instances on their instance family type + - Add pricing information for missing machine types + - Add pricing information for different GPU types + - Ignore the new `topology.gke.io/zone` label when comparing groups + - Add missing stable labels to template nodes + + ### HuaweiCloud + - Add auto scaling group support + - Implement node group by AS + - Implement getting desired instance number of node group + - Implement increasing node group size + - Implement TemplateNodeInfo + - Implement caching instances + + ### IONOS + - Add support for IONOS + + ### Kubemark + - Skip non-kubemark nodes while computing node infos for node groups. + + ### Magnum + - Add Magnum support in the Cluster Autoscaler helm chart + + ### Packet + - Allow empty nodepools + - Add support for multiple nodepools + - Add pricing support + + ## Image + Image: `k8s.gcr.io/autoscaling/cluster-autoscaler:v1.20.0` ([#97011](https://github.com/kubernetes/kubernetes/pull/97011), [@towca](https://github.com/towca)) [SIG Cloud Provider] +- AcceleratorStats will be available in the Summary API of kubelet when cri_stats_provider is used. ([#96873](https://github.com/kubernetes/kubernetes/pull/96873), [@ruiwen-zhao](https://github.com/ruiwen-zhao)) [SIG Node] +- Add limited lines to log when having tail option ([#93920](https://github.com/kubernetes/kubernetes/pull/93920), [@zhouya0](https://github.com/zhouya0)) [SIG Node] +- Avoid systemd-logind loading configuration warning ([#97950](https://github.com/kubernetes/kubernetes/pull/97950), [@wzshiming](https://github.com/wzshiming)) [SIG Node] +- Cloud-controller-manager: routes controller should not depend on --allocate-node-cidrs ([#97029](https://github.com/kubernetes/kubernetes/pull/97029), [@andrewsykim](https://github.com/andrewsykim)) [SIG Cloud Provider and Testing] +- Copy annotations with empty value when deployment rolls back ([#94858](https://github.com/kubernetes/kubernetes/pull/94858), [@waynepeking348](https://github.com/waynepeking348)) [SIG Apps] +- Detach volumes from vSphere nodes not tracked by attach-detach controller ([#96689](https://github.com/kubernetes/kubernetes/pull/96689), [@gnufied](https://github.com/gnufied)) [SIG Cloud Provider and Storage] +- Fix kubectl label error when local=true is set. ([#97440](https://github.com/kubernetes/kubernetes/pull/97440), [@pandaamanda](https://github.com/pandaamanda)) [SIG CLI] +- Fix Azure file share not deleted issue when the namespace is deleted ([#97417](https://github.com/kubernetes/kubernetes/pull/97417), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage] +- Fix CVE-2020-8555 for Gluster client connections. ([#97922](https://github.com/kubernetes/kubernetes/pull/97922), [@liggitt](https://github.com/liggitt)) [SIG Storage] +- Fix counting error in service/nodeport/loadbalancer quota check ([#97451](https://github.com/kubernetes/kubernetes/pull/97451), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Network and Testing] +- Fix kubectl-convert import known versions ([#97754](https://github.com/kubernetes/kubernetes/pull/97754), [@wzshiming](https://github.com/wzshiming)) [SIG CLI and Testing] +- Fix missing cadvisor machine metrics. 
([#97006](https://github.com/kubernetes/kubernetes/pull/97006), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node]
+- Fix nil VMSS name when setting service to auto mode ([#97366](https://github.com/kubernetes/kubernetes/pull/97366), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider]
+- Fix the panic when kubelet registers if a node object already exists with no Status.Capacity or Status.Allocatable ([#95269](https://github.com/kubernetes/kubernetes/pull/95269), [@SataQiu](https://github.com/SataQiu)) [SIG Node]
+- Fix the regression that slowed down pod termination. Before this fix, pods could take up to an additional minute to terminate. This reverses the change that ensured CNI resources are cleaned up when the pod is removed on the API server. ([#97980](https://github.com/kubernetes/kubernetes/pull/97980), [@SergeyKanzhelev](https://github.com/SergeyKanzhelev)) [SIG Node]
+- Fix to recover CSI volumes from certain dangling attachments ([#96617](https://github.com/kubernetes/kubernetes/pull/96617), [@yuga711](https://github.com/yuga711)) [SIG Apps and Storage]
+- Fix: azure file latency issue for metadata-heavy workloads ([#97082](https://github.com/kubernetes/kubernetes/pull/97082), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider and Storage]
+- Fixed Cinder volume IDs on OpenStack Train ([#96673](https://github.com/kubernetes/kubernetes/pull/96673), [@jsafrane](https://github.com/jsafrane)) [SIG Cloud Provider]
+- Fixed FibreChannel volume plugin corrupting filesystems on detach of multipath volumes. ([#97013](https://github.com/kubernetes/kubernetes/pull/97013), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- Fixed a bug in kubelet that would saturate CPU utilization after containerd was restarted. ([#97174](https://github.com/kubernetes/kubernetes/pull/97174), [@hanlins](https://github.com/hanlins)) [SIG Node]
+- Fixed bug in CPUManager with race on container map access ([#97427](https://github.com/kubernetes/kubernetes/pull/97427), [@klueska](https://github.com/klueska)) [SIG Node]
+- Fixed cleanup of block devices when /var/lib/kubelet is a symlink. ([#96889](https://github.com/kubernetes/kubernetes/pull/96889), [@jsafrane](https://github.com/jsafrane)) [SIG Storage]
+- GCE Internal LoadBalancer sync loop will now release the ILB IP address upon sync failure. An error in ILB forwarding rule creation will no longer leak IP addresses. ([#97740](https://github.com/kubernetes/kubernetes/pull/97740), [@prameshj](https://github.com/prameshj)) [SIG Cloud Provider and Network]
+- Ignore pod updates that introduce no new images in the alwaysPullImages admission controller ([#96668](https://github.com/kubernetes/kubernetes/pull/96668), [@pacoxu](https://github.com/pacoxu)) [SIG Apps, Auth and Node]
+- Kubeadm now installs version 3.4.13 of etcd when creating a cluster with v1.19 ([#97244](https://github.com/kubernetes/kubernetes/pull/97244), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
+- Kubeadm: avoid detection of the container runtime for commands that do not need it ([#97625](https://github.com/kubernetes/kubernetes/pull/97625), [@pacoxu](https://github.com/pacoxu)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug in the host memory detection code on 32bit Linux platforms ([#97403](https://github.com/kubernetes/kubernetes/pull/97403), [@abelbarrera15](https://github.com/abelbarrera15)) [SIG Cluster Lifecycle]
+- Kubeadm: fix a bug where "kubeadm upgrade" commands can fail if CoreDNS v1.8.0 is installed. 
([#97919](https://github.com/kubernetes/kubernetes/pull/97919), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Performance regression [#97685](https://github.com/kubernetes/kubernetes/issues/97685) has been fixed. ([#97860](https://github.com/kubernetes/kubernetes/pull/97860), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) [SIG API Machinery] +- Remove deprecated --cleanup-ipvs flag of kube-proxy, and make --cleanup flag always to flush IPVS ([#97336](https://github.com/kubernetes/kubernetes/pull/97336), [@maaoBit](https://github.com/maaoBit)) [SIG Network] +- The current version of the container image publicly exposed IP serving a /metrics endpoint to the Internet. The new version of the container image serves /metrics endpoint on a different port. ([#97621](https://github.com/kubernetes/kubernetes/pull/97621), [@vbannai](https://github.com/vbannai)) [SIG Cloud Provider] +- Use force unmount for NFS volumes if regular mount fails after 1 minute timeout ([#96844](https://github.com/kubernetes/kubernetes/pull/96844), [@gnufied](https://github.com/gnufied)) [SIG Storage] - Users will see increase in time for deletion of pods and also guarantee that removal of pod from api server would mean deletion of all the resources from container runtime. ([#92817](https://github.com/kubernetes/kubernetes/pull/92817), [@kmala](https://github.com/kmala)) [SIG Node] -- Very large patches may now be specified to `kubectl patch` with the `--patch-file` flag instead of including them directly on the command line. The `--patch` and `--patch-file` flags are mutually exclusive. ([#93548](https://github.com/kubernetes/kubernetes/pull/93548), [@smarterclayton](https://github.com/smarterclayton)) [SIG CLI] -- When creating a networking.k8s.io/v1 Ingress API object, `spec.rules[*].http` values are now validated consistently when the `host` field contains a wildcard. ([#93954](https://github.com/kubernetes/kubernetes/pull/93954), [@Miciah](https://github.com/Miciah)) [SIG CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Network, Storage and Testing] +- Using exec auth plugins with kubectl no longer results in warnings about constructing many client instances from the same exec auth config. ([#97857](https://github.com/kubernetes/kubernetes/pull/97857), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and Auth] +- Warning about using a deprecated volume plugin is logged only once. ([#96751](https://github.com/kubernetes/kubernetes/pull/96751), [@jsafrane](https://github.com/jsafrane)) [SIG Storage] ### Other (Cleanup or Flake) -- --cache-dir sets cache directory for both http and discovery, defaults to $HOME/.kube/cache ([#92910](https://github.com/kubernetes/kubernetes/pull/92910), [@soltysh](https://github.com/soltysh)) [SIG API Machinery and CLI] -- Adds a bootstrapping ClusterRole, ClusterRoleBinding and group for /metrics, /livez/*, /readyz/*, & /healthz/- endpoints. 
([#93311](https://github.com/kubernetes/kubernetes/pull/93311), [@logicalhan](https://github.com/logicalhan)) [SIG API Machinery, Auth, Cloud Provider and Instrumentation] -- Base-images: Update to debian-iptables:buster-v1.3.0 - - Uses iptables 1.8.5 - - base-images: Update to debian-base:buster-v1.2.0 - - cluster/images/etcd: Build etcd:3.4.13-1 image - - Uses debian-base:buster-v1.2.0 ([#94733](https://github.com/kubernetes/kubernetes/pull/94733), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.2 and debian-iptables@v12.1.1 ([#93667](https://github.com/kubernetes/kubernetes/pull/93667), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to debian-base@v2.1.3 and debian-iptables@v12.1.2 ([#93916](https://github.com/kubernetes/kubernetes/pull/93916), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, Release and Testing] -- Build: Update to go-runner:buster-v2.0.0 ([#94167](https://github.com/kubernetes/kubernetes/pull/94167), [@justaugustus](https://github.com/justaugustus)) [SIG Release] -- Fix kubelet to properly log when a container is started. Before, sometimes the log said that a container is dead and was restarted when it was started for the first time. This only happened when using pods with initContainers and regular containers. ([#91469](https://github.com/kubernetes/kubernetes/pull/91469), [@rata](https://github.com/rata)) [SIG Node] -- Fix: license issue in blob disk feature ([#92824](https://github.com/kubernetes/kubernetes/pull/92824), [@andyzhangx](https://github.com/andyzhangx)) [SIG Cloud Provider] -- Fixes the flooding warning messages about setting volume ownership for configmap/secret volumes ([#92878](https://github.com/kubernetes/kubernetes/pull/92878), [@jvanz](https://github.com/jvanz)) [SIG Instrumentation, Node and Storage] -- Fixes the message about no auth for metrics in scheduler. ([#94035](https://github.com/kubernetes/kubernetes/pull/94035), [@zhouya0](https://github.com/zhouya0)) [SIG Scheduling] -- Kube-up: defaults to limiting critical pods to the kube-system namespace to match behavior prior to 1.17 ([#93121](https://github.com/kubernetes/kubernetes/pull/93121), [@liggitt](https://github.com/liggitt)) [SIG Cloud Provider and Scheduling] -- Kubeadm: Separate argument key/value in log msg ([#94016](https://github.com/kubernetes/kubernetes/pull/94016), [@mrueg](https://github.com/mrueg)) [SIG Cluster Lifecycle] -- Kubeadm: remove support for the "ci/k8s-master" version label. This label has been removed in the Kubernetes CI release process and would no longer work in kubeadm. You can use the "ci/latest" version label instead. 
See kubernetes/test-infra#18517 ([#93626](https://github.com/kubernetes/kubernetes/pull/93626), [@vikkyomkar](https://github.com/vikkyomkar)) [SIG Cluster Lifecycle] -- Kubeadm: remove the CoreDNS check for known image digests when applying the addon ([#94506](https://github.com/kubernetes/kubernetes/pull/94506), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] -- Kubernetes is now built with go1.15.0 ([#93939](https://github.com/kubernetes/kubernetes/pull/93939), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Kubernetes is now built with go1.15.0-rc.2 ([#93827](https://github.com/kubernetes/kubernetes/pull/93827), [@justaugustus](https://github.com/justaugustus)) [SIG API Machinery, CLI, Cloud Provider, Cluster Lifecycle, Instrumentation, Node, Release and Testing] -- Lock ExternalPolicyForExternalIP to default, this feature gate will be removed in 1.22. ([#94581](https://github.com/kubernetes/kubernetes/pull/94581), [@knabben](https://github.com/knabben)) [SIG Network] -- Service.beta.kubernetes.io/azure-load-balancer-disable-tcp-reset is removed. All Standard load balancers will always enable tcp resets. ([#94297](https://github.com/kubernetes/kubernetes/pull/94297), [@MarcPow](https://github.com/MarcPow)) [SIG Cloud Provider] -- Stop propagating SelfLink (deprecated in 1.16) in kube-apiserver ([#94397](https://github.com/kubernetes/kubernetes/pull/94397), [@wojtek-t](https://github.com/wojtek-t)) [SIG API Machinery and Testing] -- Strip unnecessary security contexts on Windows ([#93475](https://github.com/kubernetes/kubernetes/pull/93475), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) [SIG Node, Testing and Windows] -- To ensure the code be strong, add unit test for GetAddressAndDialer ([#93180](https://github.com/kubernetes/kubernetes/pull/93180), [@FreeZhang61](https://github.com/FreeZhang61)) [SIG Node] -- Update CNI plugins to v0.8.7 ([#94367](https://github.com/kubernetes/kubernetes/pull/94367), [@justaugustus](https://github.com/justaugustus)) [SIG Cloud Provider, Network, Node, Release and Testing] -- Update Golang to v1.14.5 - - Update repo-infra to 0.0.7 (to support go1.14.5 and go1.13.13) - - Includes: - - bazelbuild/bazel-toolchains@3.3.2 - - bazelbuild/rules_go@v0.22.7 ([#93088](https://github.com/kubernetes/kubernetes/pull/93088), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update Golang to v1.14.6 - - Update repo-infra to 0.0.8 (to support go1.14.6 and go1.13.14) - - Includes: - - bazelbuild/bazel-toolchains@3.4.0 - - bazelbuild/rules_go@v0.22.8 ([#93198](https://github.com/kubernetes/kubernetes/pull/93198), [@justaugustus](https://github.com/justaugustus)) [SIG Release and Testing] -- Update cri-tools to [v1.19.0](https://github.com/kubernetes-sigs/cri-tools/releases/tag/v1.19.0) ([#94307](https://github.com/kubernetes/kubernetes/pull/94307), [@xmudrii](https://github.com/xmudrii)) [SIG Cloud Provider] -- Update default etcd server version to 3.4.9 ([#92349](https://github.com/kubernetes/kubernetes/pull/92349), [@jingyih](https://github.com/jingyih)) [SIG API Machinery, Cloud Provider, Cluster Lifecycle and Testing] -- Update etcd client side to v3.4.13 ([#94259](https://github.com/kubernetes/kubernetes/pull/94259), [@jingyih](https://github.com/jingyih)) [SIG API Machinery and Cloud Provider] -- `kubectl get ingress` now prefers the `networking.k8s.io/v1` over `extensions/v1beta1` (deprecated since v1.14). 
To explicitly request the deprecated version, use `kubectl get ingress.v1beta1.extensions`. ([#94309](https://github.com/kubernetes/kubernetes/pull/94309), [@liggitt](https://github.com/liggitt)) [SIG API Machinery and CLI] +- Bump github.com/Azure/go-autorest/autorest to v0.11.12 ([#97033](https://github.com/kubernetes/kubernetes/pull/97033), [@patrickshan](https://github.com/patrickshan)) [SIG API Machinery, CLI, Cloud Provider and Cluster Lifecycle] +- Delete deprecated mixed protocol annotation ([#97096](https://github.com/kubernetes/kubernetes/pull/97096), [@nilo19](https://github.com/nilo19)) [SIG Cloud Provider] +- Kube-proxy: Traffic from the cluster directed to ExternalIPs is always sent directly to the Service. ([#96296](https://github.com/kubernetes/kubernetes/pull/96296), [@aojea](https://github.com/aojea)) [SIG Network and Testing] +- Kubeadm: fix a whitespace issue in the output of the "kubeadm join" command shown as the output of "kubeadm init" and "kubeadm token create --print-join-command" ([#97413](https://github.com/kubernetes/kubernetes/pull/97413), [@SataQiu](https://github.com/SataQiu)) [SIG Cluster Lifecycle] +- Kubeadm: improve the error messaging when the user provides an invalid discovery token CA certificate hash. ([#97290](https://github.com/kubernetes/kubernetes/pull/97290), [@neolit123](https://github.com/neolit123)) [SIG Cluster Lifecycle] +- Migrate log messages in pkg/scheduler/{scheduler.go,factory.go} to structured logging ([#97509](https://github.com/kubernetes/kubernetes/pull/97509), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- Migrate proxy/iptables/proxier.go logs to structured logging ([#97678](https://github.com/kubernetes/kubernetes/pull/97678), [@JornShen](https://github.com/JornShen)) [SIG Network] +- Migrate some scheduler log messages to structured logging ([#97349](https://github.com/kubernetes/kubernetes/pull/97349), [@aldudko](https://github.com/aldudko)) [SIG Scheduling] +- NONE ([#97167](https://github.com/kubernetes/kubernetes/pull/97167), [@geegeea](https://github.com/geegeea)) [SIG Node] +- NetworkPolicy validation framework optimizations for rapidly verifying CNI's work correctly across several pods and namespaces ([#91592](https://github.com/kubernetes/kubernetes/pull/91592), [@jayunit100](https://github.com/jayunit100)) [SIG Network, Storage and Testing] +- Official support to build kubernetes with docker-machine / remote docker is removed. This change does not affect building kubernetes with docker locally. ([#97618](https://github.com/kubernetes/kubernetes/pull/97618), [@jherrera123](https://github.com/jherrera123)) [SIG Release and Testing] +- Scheduler plugin validation now provides all errors detected instead of the first one. ([#96745](https://github.com/kubernetes/kubernetes/pull/96745), [@lingsamuel](https://github.com/lingsamuel)) [SIG Node, Scheduling and Testing] +- Storage related e2e testsuite redesign & cleanup ([#96573](https://github.com/kubernetes/kubernetes/pull/96573), [@Jiawei0227](https://github.com/Jiawei0227)) [SIG Storage and Testing] +- The OIDC authenticator no longer waits 10 seconds before attempting to fetch the metadata required to verify tokens. ([#97693](https://github.com/kubernetes/kubernetes/pull/97693), [@enj](https://github.com/enj)) [SIG API Machinery and Auth] +- The `AttachVolumeLimit` feature gate that is GA since v1.17 is now removed. 
([#96539](https://github.com/kubernetes/kubernetes/pull/96539), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Storage] +- The `CSINodeInfo` feature gate that is GA since v1.17 is unconditionally enabled, and can no longer be specified via the `--feature-gates` argument. ([#96561](https://github.com/kubernetes/kubernetes/pull/96561), [@ialidzhikov](https://github.com/ialidzhikov)) [SIG Apps, Auth, Scheduling, Storage and Testing] +- The deprecated feature gates `RotateKubeletClientCertificate`, `AttachVolumeLimit`, `VolumePVCDataSource` and `EvenPodsSpread` are now unconditionally enabled and can no longer be specified in component invocations. ([#97306](https://github.com/kubernetes/kubernetes/pull/97306), [@gavinfish](https://github.com/gavinfish)) [SIG Node, Scheduling and Storage] +- `ServiceNodeExclusion`, `NodeDisruptionExclusion` and `LegacyNodeRoleBehavior`(locked to false) features have been promoted to GA. + To prevent control plane nodes being added to load balancers automatically, upgrade users need to add "node.kubernetes.io/exclude-from-external-load-balancers" label to control plane nodes. ([#97543](https://github.com/kubernetes/kubernetes/pull/97543), [@pacoxu](https://github.com/pacoxu)) [SIG API Machinery, Apps, Cloud Provider and Network] + +### Uncategorized + +- Adding Brazilian Portuguese translation for kubectl ([#61595](https://github.com/kubernetes/kubernetes/pull/61595), [@cpanato](https://github.com/cpanato)) [SIG CLI] ## Dependencies ### Added -- github.com/Azure/go-autorest: [v14.2.0+incompatible](https://github.com/Azure/go-autorest/tree/v14.2.0) -- github.com/fvbommel/sortorder: [v1.0.1](https://github.com/fvbommel/sortorder/tree/v1.0.1) -- github.com/yuin/goldmark: [v1.1.27](https://github.com/yuin/goldmark/tree/v1.1.27) -- sigs.k8s.io/structured-merge-diff/v4: v4.0.1 +_Nothing has changed._ ### Changed -- github.com/Azure/go-autorest/autorest/adal: [v0.8.2 → v0.9.0](https://github.com/Azure/go-autorest/autorest/adal/compare/v0.8.2...v0.9.0) -- github.com/Azure/go-autorest/autorest/date: [v0.2.0 → v0.3.0](https://github.com/Azure/go-autorest/autorest/date/compare/v0.2.0...v0.3.0) -- github.com/Azure/go-autorest/autorest/mocks: [v0.3.0 → v0.4.0](https://github.com/Azure/go-autorest/autorest/mocks/compare/v0.3.0...v0.4.0) -- github.com/Azure/go-autorest/autorest: [v0.9.6 → v0.11.1](https://github.com/Azure/go-autorest/autorest/compare/v0.9.6...v0.11.1) -- github.com/Azure/go-autorest/logger: [v0.1.0 → v0.2.0](https://github.com/Azure/go-autorest/logger/compare/v0.1.0...v0.2.0) -- github.com/Azure/go-autorest/tracing: [v0.5.0 → v0.6.0](https://github.com/Azure/go-autorest/tracing/compare/v0.5.0...v0.6.0) -- github.com/Microsoft/hcsshim: [v0.8.9 → 5eafd15](https://github.com/Microsoft/hcsshim/compare/v0.8.9...5eafd15) -- github.com/cilium/ebpf: [9f1617e → 1c8d4c9](https://github.com/cilium/ebpf/compare/9f1617e...1c8d4c9) -- github.com/containerd/cgroups: [bf292b2 → 0dbf7f0](https://github.com/containerd/cgroups/compare/bf292b2...0dbf7f0) -- github.com/coredns/corefile-migration: [v1.0.8 → v1.0.10](https://github.com/coredns/corefile-migration/compare/v1.0.8...v1.0.10) -- github.com/evanphx/json-patch: [e83c0a1 → v4.9.0+incompatible](https://github.com/evanphx/json-patch/compare/e83c0a1...v4.9.0) -- github.com/google/cadvisor: [8450c56 → v0.37.0](https://github.com/google/cadvisor/compare/8450c56...v0.37.0) -- github.com/json-iterator/go: [v1.1.9 → v1.1.10](https://github.com/json-iterator/go/compare/v1.1.9...v1.1.10) -- github.com/opencontainers/go-digest: 
[v1.0.0-rc1 → v1.0.0](https://github.com/opencontainers/go-digest/compare/v1.0.0-rc1...v1.0.0) -- github.com/opencontainers/runc: [1b94395 → 819fcc6](https://github.com/opencontainers/runc/compare/1b94395...819fcc6) -- github.com/prometheus/client_golang: [v1.6.0 → v1.7.1](https://github.com/prometheus/client_golang/compare/v1.6.0...v1.7.1) -- github.com/prometheus/common: [v0.9.1 → v0.10.0](https://github.com/prometheus/common/compare/v0.9.1...v0.10.0) -- github.com/prometheus/procfs: [v0.0.11 → v0.1.3](https://github.com/prometheus/procfs/compare/v0.0.11...v0.1.3) -- github.com/rubiojr/go-vhd: [0bfd3b3 → 02e2102](https://github.com/rubiojr/go-vhd/compare/0bfd3b3...02e2102) -- github.com/storageos/go-api: [343b3ef → v2.2.0+incompatible](https://github.com/storageos/go-api/compare/343b3ef...v2.2.0) -- github.com/urfave/cli: [v1.22.1 → v1.22.2](https://github.com/urfave/cli/compare/v1.22.1...v1.22.2) -- go.etcd.io/etcd: 54ba958 → dd1b699 -- golang.org/x/crypto: bac4c82 → 75b2880 -- golang.org/x/mod: v0.1.0 → v0.3.0 -- golang.org/x/net: d3edc99 → ab34263 -- golang.org/x/tools: c00d67e → c1934b7 -- k8s.io/kube-openapi: 656914f → 6aeccd4 -- k8s.io/system-validators: v1.1.2 → v1.2.0 -- k8s.io/utils: 6e3d28b → d5654de +- github.com/Azure/go-autorest/autorest: [v0.11.1 → v0.11.12](https://github.com/Azure/go-autorest/autorest/compare/v0.11.1...v0.11.12) +- github.com/coredns/corefile-migration: [v1.0.10 → v1.0.11](https://github.com/coredns/corefile-migration/compare/v1.0.10...v1.0.11) +- github.com/golang/mock: [v1.4.1 → v1.4.4](https://github.com/golang/mock/compare/v1.4.1...v1.4.4) +- github.com/google/cadvisor: [v0.38.5 → v0.38.6](https://github.com/google/cadvisor/compare/v0.38.5...v0.38.6) +- github.com/heketi/heketi: [c2e2a4a → v10.2.0+incompatible](https://github.com/heketi/heketi/compare/c2e2a4a...v10.2.0) +- github.com/miekg/dns: [v1.1.4 → v1.1.35](https://github.com/miekg/dns/compare/v1.1.4...v1.1.35) +- k8s.io/system-validators: v1.2.0 → v1.3.0 ### Removed -- github.com/godbus/dbus: [ade71ed](https://github.com/godbus/dbus/tree/ade71ed) -- github.com/xlab/handysort: [fb3537e](https://github.com/xlab/handysort/tree/fb3537e) -- sigs.k8s.io/structured-merge-diff/v3: v3.0.0 -- vbom.ml/util: db5cfe1 +- rsc.io/quote/v3: v3.1.0 +- rsc.io/sampler: v1.3.0 diff --git a/content/ko/docs/tasks/access-application-cluster/access-cluster.md b/content/ko/docs/tasks/access-application-cluster/access-cluster.md index 43a6a823436fb..9e7c9b4fc77b6 100644 --- a/content/ko/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/ko/docs/tasks/access-application-cluster/access-cluster.md @@ -26,9 +26,9 @@ kubectl이 인지하는 위치정보와 인증정보는 다음 커맨드로 확 kubectl config view ``` -많은 [예제들](/ko/docs/reference/kubectl/cheatsheet/)에서 -kubectl을 사용하는 것을 소개하고 있으며 완전한 문서는 -[kubectl 매뉴얼](/ko/docs/reference/kubectl/overview/)에서 찾아볼 수 있다. +[여기](/ko/docs/reference/kubectl/cheatsheet/)에서 +kubectl 사용 예시를 볼 수 있으며, 완전한 문서는 +[kubectl 매뉴얼](/ko/docs/reference/kubectl/overview/)에서 확인할 수 있다. ## REST API에 직접 접근 @@ -44,12 +44,12 @@ REST API에 직접 접근하려고 한다면 위치 파악과 인증을 하는 - 앞으로는 클라이언트 측의 지능형 load balancing과 failover가 될 것이다. - 직접적으로 http 클라이언트에 위치정보와 인증정보를 제공. - 대안적인 접근 방식. - - proxy 사용과 혼동되는 몇 가지 타입의 클라이언트 code들과 같이 동작한다. - - MITM로부터 보호를 위해 root 인증서를 당신의 브라우저로 import해야 한다. + - proxy 사용과 혼동되는 몇 가지 타입의 클라이언트 코드와 같이 동작한다. + - MITM로부터 보호를 위해 root 인증서를 당신의 브라우저로 임포트해야 한다. ### kubectl proxy 사용 -다음 커맨드는 kubectl을 reverse proxy처럼 동작하는 모드를 실행한다. 이는 +다음 커맨드는 kubectl을 리버스 프록시(reverse proxy)처럼 동작하는 모드를 실행한다. 이는 apiserver의 위치지정과 인증을 처리한다. 
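A rough, non-authoritative sketch of the reverse-proxy flow described above; the local port and the `/api/` path below are assumptions chosen only for illustration and are not taken from this page:

```shell
# Assumption: run kubectl proxy on an arbitrary local port (8080 here).
# The proxy locates the apiserver and attaches authentication headers for you.
kubectl proxy --port=8080 &

# Plain HTTP against localhost is then forwarded to the secured apiserver.
curl http://localhost:8080/api/
```

Because the proxy supplies the credentials, the `curl` call itself needs none.
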
다음과 같이 실행한다. @@ -205,7 +205,7 @@ apiserver의 인증서 제공을 검증하는데 사용되어야 한다. - 파드의 sidecar 컨테이너 내에서 `kubectl proxy`를 실행하거나, 컨테이너 내부에서 백그라운드 프로세스로 실행한다. - 이는 쿠버네티스 API를 파드의 localhost 인터페이스로 proxy하여 + 이는 쿠버네티스 API를 파드의 localhost 인터페이스로 프록시하여 해당 파드의 컨테이너 내에 다른 프로세스가 API에 접속할 수 있게 해준다. - Go 클라이언트 라이브러리를 이용하여 `rest.InClusterConfig()`와 `kubernetes.NewForConfig()` 함수들을 사용하도록 클라이언트를 만든다. 이는 apiserver의 위치지정과 인증을 처리한다. [예제](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go) @@ -215,47 +215,47 @@ apiserver의 인증서 제공을 검증하는데 사용되어야 한다. ## 클러스터에서 실행되는 서비스로 접근 이전 장은 쿠버네티스 API server 접속에 대한 내용을 다루었다. 이번 장은 -쿠버네티스 클러스터 상에서 실행되는 다른 서비스로의 연결을 다룰 것이다. 쿠버네티스에서 -[노드들](/ko/docs/concepts/architecture/nodes/), -[파드들](/ko/docs/concepts/workloads/pods/), -[서비스들](/ko/docs/concepts/services-networking/service/)은 -모두 자신의 IP들을 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 -클러스터 상의 노드 IP들, 파드 IP들, 서비스 IP들로 라우팅되지 않아서 접근을 +쿠버네티스 클러스터 상에서 실행되는 다른 서비스로의 연결을 다룰 것이다. + +쿠버네티스에서, [노드](/ko/docs/concepts/architecture/nodes/), +[파드](/ko/docs/concepts/workloads/pods/) 및 [서비스](/ko/docs/concepts/services-networking/service/)는 모두 +고유한 IP를 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 +클러스터 상의 노드 IP, 파드 IP, 서비스 IP로 라우팅되지 않아서 접근을 할 수 없을 것이다. ### 통신을 위한 방식들 -클러스터 외부에서 노드들, 파드들, 서비스들에 접속하는 데는 몇 가지 선택지들이 있다. +클러스터 외부에서 노드, 파드 및 서비스에 접속하기 위한 몇 가지 옵션이 있다. - 공인 IP를 통해 서비스에 접근. - 클러스터 외부에서 접근할 수 있도록 `NodePort` 또는 `LoadBalancer` 타입의 서비스를 사용한다. [서비스](/ko/docs/concepts/services-networking/service/)와 [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) 문서를 참조한다. - - 당신의 클러스터 환경에 따라 회사 네트워크에만 서비스를 노출하거나 - 인터넷으로 노출할 수 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. + - 클러스터 환경에 따라, 서비스는 회사 네트워크에만 노출되기도 하며, + 인터넷에 노출되는 경우도 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. 해당 서비스는 자체적으로 인증을 수행하는가? - - 파드들은 서비스 뒤에 위치시킨다. 레플리카들의 집합에서 특정 파드 하나에 debugging 같은 목적으로 접근하려면 - 해당 파드에 고유의 레이블을 붙이고 셀렉터에 해당 레이블을 선택한 신규 서비스를 생성한다. + - 파드는 서비스 뒤에 위치시킨다. 레플리카들의 집합에서 특정 파드 하나에 debugging 같은 목적으로 접근하려면 + 해당 파드에 고유의 레이블을 붙이고 셀렉터에 해당 레이블을 선택하는 신규 서비스를 생성한다. - 대부분의 경우에는 애플리케이션 개발자가 노드 IP를 통해 직접 노드에 접근할 필요는 없다. - Proxy Verb를 사용하여 서비스, 노드, 파드에 접근. - 원격 서비스에 접근하기에 앞서 apiserver의 인증과 인가를 받아야 한다. - 서비스가 인터넷에 노출하기에 보안이 충분하지 않거나 노드 IP 상의 port에 + 서비스가 인터넷에 노출하기에 보안이 충분하지 않거나 노드 IP 상의 포트에 접근을 하려고 하거나 debugging을 하려면 이를 사용한다. - - 어떤 web 애플리케이션에서는 proxy가 문제를 일으킬 수 있다. + - 어떤 web 애플리케이션에서는 프록시가 문제를 일으킬 수 있다. - HTTP/HTTPS에서만 동작한다. - [여기](#수작업으로-apiserver-proxy-url들을-구축)에서 설명하고 있다. - 클러스터 내 노드 또는 파드에서 접근. - - 파드를 Running시킨 다음 [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 사용하여 해당 파드의 셸로 접속한다. - 해당 셸에서 다른 노드들, 파드들, 서비스들에 연결한다. + - 파드를 실행한 다음, [kubectl exec](/docs/reference/generated/kubectl/kubectl-commands/#exec)를 사용하여 해당 파드의 셸로 접속한다. + 해당 셸에서 다른 노드, 파드, 서비스에 연결한다. - 어떤 클러스터는 클러스터 내의 노드에 ssh 접속을 허용하기도 한다. 이런 클러스터에서는 클러스터 서비스에 접근도 가능하다. 이는 비표준 방식으로 특정 클러스터에서는 동작하지만 다른 클러스터에서는 동작하지 않을 수 있다. 브라우저와 다른 도구들이 설치되지 않았거나 설치되었을 수 있다. 클러스터 DNS가 동작하지 않을 수도 있다. -### 빌트인 서비스들의 발견 +### 빌트인 서비스 검색 -일반적으로 kube-system에 의해 클러스터 상에서 start되는 몇 가지 서비스들이 존재한다. -`kubectl cluster-info` 커맨드로 이 서비스들의 리스트를 볼 수 있다. +일반적으로 kube-system에 의해 클러스터에 실행되는 몇 가지 서비스가 있다. +`kubectl cluster-info` 커맨드로 이 서비스의 리스트를 볼 수 있다. ```shell kubectl cluster-info @@ -280,20 +280,20 @@ heapster is running at https://104.197.5.247/api/v1/namespaces/kube-system/servi #### 수작업으로 apiserver proxy URL을 구축 -위에서 언급한 것처럼 서비스의 proxy URL을 검색하는데 `kubectl cluster-info` 커맨드를 사용할 수 있다. 서비스 endpoint, 접미사, 매개변수를 포함하는 proxy URL을 생성하려면 해당 서비스에 +위에서 언급한 것처럼 서비스의 proxy URL을 검색하는 데 `kubectl cluster-info` 커맨드를 사용할 수 있다. 
서비스 endpoint, 접미사, 매개변수를 포함하는 proxy URL을 생성하려면 해당 서비스에 `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`service_name[:port_name]`*`/proxy` 형식의 proxy URL을 덧붙인다. -당신이 port에 이름을 지정하지 않았다면 URL에 *port_name* 을 지정할 필요는 없다. +당신이 포트에 이름을 지정하지 않았다면 URL에 *port_name* 을 지정할 필요는 없다. 이름이 있는 포트와 이름이 없는 포트 모두에 대하여, *port_name* 이 들어갈 자리에 포트 번호를 기재할 수도 있다. -기본적으로 API server는 http를 사용하여 서비스를 proxy한다. https를 사용하려면 다음과 같이 서비스 네임의 접두사에 `https:`를 붙인다. +기본적으로 API server는 http를 사용하여 서비스를 프록시한다. https를 사용하려면 다음과 같이 서비스 네임의 접두사에 `https:`를 붙인다. `http://`*`kubernetes_master_address`*`/api/v1/namespaces/`*`namespace_name`*`/services/`*`https:service_name:[port_name]`*`/proxy` URL의 네임 부분에 지원되는 양식은 다음과 같다. -* `` - http를 사용하여 기본값 또는 이름이 없는 port로 proxy한다 -* `:` - http를 사용하여 지정된 port로 proxy한다 -* `https::` - https를 사용하여 기본값 또는 이름이 없는 port로 proxy한다(마지막 콜론:에 주의) -* `https::` - https를 사용하여 지정된 port로 proxy한다 +* `` - http를 사용하여 기본값 또는 이름이 없는 포트로 프록시한다. +* `:` - http를 사용하여 지정된 포트 이름 또는 포트 번호로 프록시한다. +* `https::` - https를 사용하여 기본값 또는 이름이 없는 포트로 프록시한다. (마지막 콜론:에 주의) +* `https::` - https를 사용하여 지정된 포트 이름 또는 포트 번호로 프록시한다. ##### 예제들 @@ -326,38 +326,38 @@ URL의 네임 부분에 지원되는 양식은 다음과 같다. ## 요청 redirect -redirect 기능은 deprecated되고 제거 되었다. 대신 (아래의) proxy를 사용하기를 바란다. +redirect 기능은 deprecated되고 제거 되었다. 대신 (아래의) 프록시를 사용하기를 바란다. -## 다양한 Proxy들 +## 다양한 프록시들 -쿠버네티스를 사용하면서 당신이 접할 수 있는 몇 가지 다른 proxy들이 존재한다. +쿠버네티스를 사용하면서 당신이 접할 수 있는 몇 가지 다른 프록시들이 존재한다. 1. [kubectl proxy](#rest-api에-직접-접근): - 사용자의 데스크탑이나 파드 내에서 실행한다 - - localhost 주소에서 쿠버네티스 apiserver로 proxy한다 - - proxy하는 클라이언트는 HTTP를 사용한다 - - apiserver의 proxy는 HTTPS를 사용한다 + - localhost 주소에서 쿠버네티스 apiserver로 프록시한다 + - 프록시하는 클라이언트는 HTTP를 사용한다 + - apiserver의 프록시는 HTTPS를 사용한다 - apiserver를 위치지정한다 - 인증 header들을 추가한다 -1. [apiserver proxy](#빌트인-서비스들의-발견): +1. [apiserver proxy](#빌트인-서비스-검색): - apiserver 내의 빌트인 bastion이다 - - 다른 방식으로는 연결할 수 없는 클러스터 외부의 사용자를 클러스터 IP들로 연결한다 + - 다른 방식으로는 연결할 수 없는 클러스터 외부의 사용자를 클러스터 IP로 연결한다 - apiserver process들 내에서 실행된다 - - proxy하는 클라이언트는 HTTPS를 사용한다(또는 apiserver가 http로 구성되었다면 http) - - 타겟으로의 proxy는 가용정보를 사용하는 proxy에 의해서 HTTP 또는 HTTPS를 사용할 수도 있다 + - 프록시하는 클라이언트는 HTTPS를 사용한다(또는 apiserver가 http로 구성되었다면 http) + - 타겟으로의 프록시는 가용정보를 사용하는 프록시에 의해서 HTTP 또는 HTTPS를 사용할 수도 있다 - 노드, 파드, 서비스에 접근하는 데 사용될 수 있다 - 서비스에 접근하는 데 사용되면 load balacing한다 1. [kube proxy](/ko/docs/concepts/services-networking/service/#ips-and-vips): - 각 노드 상에서 실행된다 - - UDP와 TCP를 proxy한다 + - UDP와 TCP를 프록시한다 - HTTP를 인지하지 않는다 - load balancing을 제공한다 - - 서비스에 접근하는 데만 사용된다 + - 서비스에 접근하는 데에만 사용된다 1. apiserver(s) 전면의 Proxy/Load-balancer: diff --git a/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md new file mode 100644 index 0000000000000..77f5f5d6359c6 --- /dev/null +++ b/content/ko/docs/tasks/access-application-cluster/list-all-running-container-images.md @@ -0,0 +1,109 @@ +--- +title: 클러스터 내 모든 컨테이너 이미지 목록 보기 +content_type: task +weight: 100 +--- + + + +이 문서는 kubectl을 이용하여 클러스터 내 모든 컨테이너 이미지 목록을 +조회하는 방법에 관해 설명한다. + +## {{% heading "prerequisites" %}} + +{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} + + + +이 작업에서는 kubectl을 사용하여 클러스터 내 모든 파드의 정보를 +조회하고, 결과값의 서식을 변경하여 각 파드에 대한 컨테이너 이미지 목록으로 +재구성할 것이다. + +## 모든 네임스페이스의 모든 컨테이너 이미지 가져오기 + +- `kubectl get pods --all-namespaces` 를 사용하여 모든 네임스페이스의 모든 파드 정보를 가져온다. +- 컨테이너 이미지 이름만 출력하기 위해 `-o jsonpath={..image}` 를 사용한다. + 이 명령어는 결과값으로 받은 json을 반복적으로 파싱하여, + `image` 필드만을 출력한다. 
+ - jsonpath를 사용하는 방법에 대해 더 많은 정보를 얻고 싶다면 + [Jsonpath 지원](/ko/docs/reference/kubectl/jsonpath/)을 확인한다. +- 다음의 표준 툴을 이용해서 결과값을 처리한다. `tr`, `sort`, `uniq` + - `tr` 을 사용하여 공백을 줄 바꾸기로 대체한다. + - `sort` 를 사용하여 결과값을 정렬한다. + - `uniq` 를 사용하여 이미지 개수를 합산한다. + +```shell +kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ +tr -s '[[:space:]]' '\n' |\ +sort |\ +uniq -c +``` + +이 커맨드는 결과값으로 나온 모든 아이템 중에 `image` 라고 명명된 필드를 +모두 출력한다. + +이와 다른 방법으로 파드 이미지 필드 값의 절대 경로를 사용할 수 있다. +이것은 필드명이 반복될 때에도 +정확한 값을 출력하도록 보장한다. +예) 결과값 중에 많은 필드들이 `name`으로 명명되었을 경우, + +```shell +kubectl get pods --all-namespaces -o jsonpath="{.items[*].spec.containers[*].image}" +``` + +이 jsonpath는 다음과 같이 해석할 수 있다. + +- `.items[*]`: 각 결과값에 대하여 +- `.spec`: spec 값을 가져온다. +- `.containers[*]`: 각 컨테이너에 대하여 +- `.image`: image 값을 가져온다. + +{{< note >}} +명령어로 하나의 파드를 가져올 때, 예를 들어 `kubectl get pod nginx` 라면, +jsonpath에서 `.items[*]` 부분은 생략해야 하는데, 이는 명령어가 아이템 목록이 아닌 +단 한 개의 아이템(여기선 파드)으로 결과값을 주기 때문이다. +{{< /note >}} + +## 각 파드의 컨테이너 이미지 보기 + +`range` 연산을 사용하여 명령어의 결과값에서 각각의 요소들을 +반복하여 출력할 수 있다. + +```shell +kubectl get pods --all-namespaces -o=jsonpath='{range .items[*]}{"\n"}{.metadata.name}{":\t"}{range .spec.containers[*]}{.image}{", "}{end}{end}' |\ +sort +``` + +## 파드 레이블로 필터링된 컨테이너 이미지 목록 보기 + +특정 레이블에 맞는 파드를 지정하기 위해서 -l 플래그를 사용한다. 아래의 +명령어 결과값은 `app=nginx` 레이블에 일치하는 파드만 출력한다. + +```shell +kubectl get pods --all-namespaces -o=jsonpath="{..image}" -l app=nginx +``` + +## 파드 네임스페이스로 필터링된 컨테이너 이미지 목록 보기 + +특정 네임스페이스의 파드를 지정하려면, 네임스페이스 플래그를 사용한다. +아래의 명령어 결과값은 `kube-system` 네임스페이스에 있는 파드만 출력한다. + +```shell +kubectl get pods --namespace kube-system -o jsonpath="{..image}" +``` + +## jsonpath 대신 Go 템플릿을 사용하여 컨테이너 이미지 목록 보기 + +jsonpath의 대안으로 Kubectl은 [Go 템플릿](https://golang.org/pkg/text/template/)을 지원한다. +다음과 같이 결과값의 서식을 지정할 수 있다. + +```shell +kubectl get pods --all-namespaces -o go-template --template="{{range .items}}{{range .spec.containers}}{{.image}} {{end}}{{end}}" +``` + +## {{% heading "whatsnext" %}} + +### 참조 + +* [Jsonpath](/ko/docs/reference/kubectl/jsonpath/) 참조 +* [Go 템플릿](https://golang.org/pkg/text/template/) 참조 diff --git a/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md b/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md index a6739cc2ea313..cc5f872cc582d 100644 --- a/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md +++ b/content/ko/docs/tasks/access-application-cluster/port-forward-access-application-cluster.md @@ -8,7 +8,7 @@ min-kubernetes-server-version: v1.10 이 페이지는 `kubectl port-forward` 를 사용해서 쿠버네티스 클러스터 내에서 -실행중인 Redis 서버에 연결하는 방법을 보여준다. 이 유형의 연결은 데이터베이스 +실행중인 MongoDB 서버에 연결하는 방법을 보여준다. 이 유형의 연결은 데이터베이스 디버깅에 유용할 수 있다. @@ -19,25 +19,25 @@ min-kubernetes-server-version: v1.10 * {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} -* [redis-cli](http://redis.io/topics/rediscli)를 설치한다. +* [MongoDB Shell](https://www.mongodb.com/try/download/shell)을 설치한다. -## Redis 디플로이먼트와 서비스 생성하기 +## MongoDB 디플로이먼트와 서비스 생성하기 -1. Redis를 실행하기 위해 디플로이먼트를 생성한다. +1. MongoDB를 실행하기 위해 디플로이먼트를 생성한다. ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-deployment.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml ``` 성공적인 명령어의 출력은 디플로이먼트가 생성됐다는 것을 확인해준다. ``` - deployment.apps/redis-master created + deployment.apps/mongo created ``` 파드 상태를 조회하여 파드가 준비되었는지 확인한다. 
@@ -49,8 +49,8 @@ min-kubernetes-server-version: v1.10 출력은 파드가 생성되었다는 것을 보여준다. ``` - NAME READY STATUS RESTARTS AGE - redis-master-765d459796-258hz 1/1 Running 0 50s + NAME READY STATUS RESTARTS AGE + mongo-75f59d57f4-4nd6q 1/1 Running 0 2m4s ``` 디플로이먼트 상태를 조회한다. @@ -62,64 +62,65 @@ min-kubernetes-server-version: v1.10 출력은 디플로이먼트가 생성되었다는 것을 보여준다. ``` - NAME READY UP-TO-DATE AVAILABLE AGE - redis-master 1/1 1 1 55s + NAME READY UP-TO-DATE AVAILABLE AGE + mongo 1/1 1 1 2m21s ``` + 디플로이먼트는 자동으로 레플리카셋을 관리한다. 아래의 명령어를 사용하여 레플리카셋 상태를 조회한다. ```shell - kubectl get rs + kubectl get replicaset ``` 출력은 레플리카셋이 생성되었다는 것을 보여준다. ``` - NAME DESIRED CURRENT READY AGE - redis-master-765d459796 1 1 1 1m + NAME DESIRED CURRENT READY AGE + mongo-75f59d57f4 1 1 1 3m12s ``` -2. Redis를 네트워크에 노출시키기 위해 서비스를 생성한다. +2. MongoDB를 네트워크에 노출시키기 위해 서비스를 생성한다. ```shell - kubectl apply -f https://k8s.io/examples/application/guestbook/redis-master-service.yaml + kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` 성공적인 커맨드의 출력은 서비스가 생성되었다는 것을 확인해준다. ``` - service/redis-master created + service/mongo created ``` 서비스가 생성되었는지 확인한다. ```shell - kubectl get svc | grep redis + kubectl get service mongo ``` 출력은 서비스가 생성되었다는 것을 보여준다. ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - redis-master ClusterIP 10.0.0.213 6379/TCP 27s + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + mongo ClusterIP 10.96.41.183 27017/TCP 11s ``` -3. Redis 서버가 파드 안에서 실행되고 있고, 6379번 포트에서 수신하고 있는지 확인한다. +3. MongoDB 서버가 파드 안에서 실행되고 있고, 27017번 포트에서 수신하고 있는지 확인한다. ```shell - # redis-master-765d459796-258hz 를 파드 이름으로 변경한다. - kubectl get pod redis-master-765d459796-258hz --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' + # mongo-75f59d57f4-4nd6q 를 당신의 파드 이름으로 대체한다. + kubectl get pod mongo-75f59d57f4-4nd6q --template='{{(index (index .spec.containers 0).ports 0).containerPort}}{{"\n"}}' ``` - 출력은 파드 내 Redis 포트 번호를 보여준다. + 출력은 파드 내 MongoDB 포트 번호를 보여준다. ``` - 6379 + 27017 ``` - (이 TCP 포트는 Redis가 인터넷에 할당된 것이다). + (이는 인터넷 상의 MongoDB에 할당된 TCP 포트이다.) ## 파드의 포트를 로컬 포트로 포워딩하기 @@ -127,39 +128,39 @@ min-kubernetes-server-version: v1.10 ```shell - # redis-master-765d459796-258hz 를 파드 이름으로 변경한다. - kubectl port-forward redis-master-765d459796-258hz 7000:6379 + # mongo-75f59d57f4-4nd6q 를 당신의 파드 이름으로 대체한다. + kubectl port-forward mongo-75f59d57f4-4nd6q 28015:27017 ``` 이것은 ```shell - kubectl port-forward pods/redis-master-765d459796-258hz 7000:6379 + kubectl port-forward pods/mongo-75f59d57f4-4nd6q 28015:27017 ``` 또는 ```shell - kubectl port-forward deployment/redis-master 7000:6379 + kubectl port-forward deployment/mongo 28015:27017 ``` 또는 ```shell - kubectl port-forward rs/redis-master 7000:6379 + kubectl port-forward replicaset/mongo-75f59d57f4 28015:27017 ``` 또는 다음과 같다. ```shell - kubectl port-forward service/redis-master 7000:redis + kubectl port-forward service/mongo 28015:27017 ``` 위의 명령어들은 모두 동일하게 동작한다. 이와 유사하게 출력된다. ``` - Forwarding from 127.0.0.1:7000 -> 6379 - Forwarding from [::1]:7000 -> 6379 + Forwarding from 127.0.0.1:28015 -> 27017 + Forwarding from [::1]:28015 -> 27017 ``` {{< note >}} @@ -168,22 +169,22 @@ min-kubernetes-server-version: v1.10 {{< /note >}} -2. Redis 커맨드라인 인터페이스를 실행한다. +2. MongoDB 커맨드라인 인터페이스를 실행한다. ```shell - redis-cli -p 7000 + mongosh --port 28015 ``` -3. Redis 커맨드라인 프롬프트에 `ping` 명령을 입력한다. +3. MongoDB 커맨드라인 프롬프트에 `ping` 명령을 입력한다. ```shell - ping + db.runCommand( { ping: 1 } ) ``` 성공적인 핑 요청을 반환한다. 
``` - PONG + { ok: 1 } ``` ### 선택적으로 _kubectl_ 이 로컬 포트를 선택하게 하기 {#let-kubectl-choose-local-port} @@ -193,15 +194,15 @@ min-kubernetes-server-version: v1.10 부담을 줄일 수 있다. ```shell -kubectl port-forward deployment/redis-master :6379 +kubectl port-forward deployment/mongo :27017 ``` -`kubectl` 도구는 사용 중이 아닌 로컬 포트 번호를 찾는다. (낮은 포트 번호는 -다른 애플리케이션에서 사용될 것이므로, 낮은 포트 번호를 피해서) 출력은 다음과 같을 것이다. +`kubectl` 도구는 사용 중이 아닌 로컬 포트 번호를 찾는다 (낮은 포트 번호는 +다른 애플리케이션에서 사용될 것이므로, 낮은 포트 번호를 피해서). 출력은 다음과 같을 것이다. ``` -Forwarding from 127.0.0.1:62162 -> 6379 -Forwarding from [::1]:62162 -> 6379 +Forwarding from 127.0.0.1:63753 -> 27017 +Forwarding from [::1]:63753 -> 27017 ``` @@ -209,7 +210,7 @@ Forwarding from [::1]:62162 -> 6379 ## 토의 -로컬 7000 포트에 대한 연결은 Redis 서버가 실행중인 파드의 6379 포트로 포워딩된다. +로컬 28015 포트에 대한 연결은 MongoDB 서버가 실행중인 파드의 27017 포트로 포워딩된다. 이 연결로 로컬 워크스테이션에서 파드 안에서 실행 중인 데이터베이스를 디버깅하는데 사용할 수 있다. diff --git a/content/ko/docs/tasks/administer-cluster/access-cluster-services.md b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md index 5dd7a627d055b..8019e46c255ee 100644 --- a/content/ko/docs/tasks/administer-cluster/access-cluster-services.md +++ b/content/ko/docs/tasks/administer-cluster/access-cluster-services.md @@ -19,22 +19,22 @@ content_type: task 쿠버네티스에서, [노드](/ko/docs/concepts/architecture/nodes/), [파드](/ko/docs/concepts/workloads/pods/) 및 [서비스](/ko/docs/concepts/services-networking/service/)는 모두 -고유한 IP를 가진다. 대부분의 경우, 클러스터의 노드 IP, 파드 IP 및 일부 서비스 IP는 라우팅할 수 -없으므로, 데스크톱 시스템과 같은 클러스터 외부 시스템에서 -도달할 수 없다. +고유한 IP를 가진다. 당신의 데스크탑 PC와 같은 클러스터 외부 장비에서는 +클러스터 상의 노드 IP, 파드 IP, 서비스 IP로 라우팅되지 않아서 +접근할 수 없을 것이다. ### 연결하는 방법 -클러스터 외부에서 노드, 파드 및 서비스에 연결하기 위한 몇 가지 옵션이 있다. +클러스터 외부에서 노드, 파드 및 서비스에 접속하기 위한 몇 가지 옵션이 있다. - 퍼블릭 IP를 통해 서비스에 접근한다. - - `NodePort` 또는 `LoadBalancer` 타입의 서비스를 사용하여 해당 서비스를 클러스터 외부에서 - 접근할 수 있게 한다. [서비스](/ko/docs/concepts/services-networking/service/)와 + - 클러스터 외부에서 접근할 수 있도록 `NodePort` 또는 `LoadBalancer` 타입의 + 서비스를 사용한다. [서비스](/ko/docs/concepts/services-networking/service/)와 [kubectl expose](/docs/reference/generated/kubectl/kubectl-commands/#expose) 문서를 참고한다. - - 클러스터 환경에 따라, 서비스는 단지 회사 네트워크에 노출되기도 하며, - 인터넷에 노출되는 경우도 있다. 노출되는 서비스가 안전한지 생각한다. - 자체 인증을 수행하는가? - - 서비스 뒤에 파드를 배치한다. 디버깅과 같은 목적으로 레플리카 집합에서 특정 파드에 접근하려면, + - 클러스터 환경에 따라, 서비스는 회사 네트워크에만 노출되기도 하며, + 인터넷에 노출되는 경우도 있다. 이 경우 노출되는 서비스의 보안 여부를 고려해야 한다. + 해당 서비스는 자체적으로 인증을 수행하는가? + - 파드는 서비스 뒤에 위치시킨다. 디버깅과 같은 목적으로 레플리카 집합에서 특정 파드에 접근하려면, 파드에 고유한 레이블을 배치하고 이 레이블을 선택하는 새 서비스를 생성한다. - 대부분의 경우, 애플리케이션 개발자가 nodeIP를 통해 노드에 직접 접근할 필요는 없다. @@ -54,8 +54,8 @@ content_type: task ### 빌트인 서비스 검색 -일반적으로, kube-system에 의해 클러스터에서 시작되는 몇 가지 서비스가 있다. `kubectl cluster-info` 명령을 -사용하여 이들의 목록을 얻는다. +일반적으로 kube-system에 의해 클러스터에 실행되는 몇 가지 서비스가 있다. +`kubectl cluster-info` 커맨드로 이 서비스의 리스트를 볼 수 있다. ```shell kubectl cluster-info diff --git a/content/ko/docs/tasks/administer-cluster/coredns.md b/content/ko/docs/tasks/administer-cluster/coredns.md index 6f0caad9e40bb..414f681901fb3 100644 --- a/content/ko/docs/tasks/administer-cluster/coredns.md +++ b/content/ko/docs/tasks/administer-cluster/coredns.md @@ -33,8 +33,8 @@ Kube-dns의 배포나 교체에 관한 매뉴얼은 [CoreDNS GitHub 프로젝트 ### Kubeadm을 사용해 기존 클러스터 업그레이드하기 쿠버네티스 버전 1.10 이상에서, `kube-dns` 를 사용하는 클러스터를 업그레이드하기 위하여 -`kubeadm` 을 사용할 때 CoreDNS로 이동할 수도 있다. 이 경우, `kubeadm` 은 -`kube-dns` 컨피그맵(ConfigMap)을 기반으로 패더레이션, 스텁 도메인(stub domain), 업스트림 네임 서버의 +`kubeadm` 을 사용할 때 CoreDNS로 전환할 수도 있다. 이 경우, `kubeadm` 은 +`kube-dns` 컨피그맵(ConfigMap)을 기반으로 스텁 도메인(stub domain), 업스트림 네임 서버의 설정을 유지하며 CoreDNS 설정("Corefile")을 생성한다. 
만약 kube-dns에서 CoreDNS로 이동하는 경우, 업그레이드 과정에서 기능 게이트의 `CoreDNS` 값을 `true` 로 설정해야 한다. @@ -44,8 +44,6 @@ kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true ``` 쿠버네티스 1.13 이상에서 기능 게이트의 `CoreDNS` 항목은 제거되었으며, CoreDNS가 기본적으로 사용된다. -업그레이드된 클러스터에서 kube-dns를 사용하려는 경우, [여기](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon)에 -설명된 지침 가이드를 참고하자. 1.11 미만 버전일 경우 업그레이드 과정에서 만들어진 파일이 Corefile을 **덮어쓴다**. **만약 컨피그맵을 사용자 정의한 경우, 기존의 컨피그맵을 저장해야 한다.** 새 컨피그맵이 @@ -54,26 +52,7 @@ kubeadm upgrade apply v1.11.0 --feature-gates=CoreDNS=true 만약 쿠버네티스 1.11 이상 버전에서 CoreDNS를 사용하는 경우, 업그레이드 과정에서, 기존의 Corefile이 유지된다. - -### Kubeadm을 사용해 CoreDNS가 아닌 kube-dns 설치하기 - -{{< note >}} -쿠버네티스 1.11 버전에서, CoreDNS는 GA(General Availability) 되었으며, -기본적으로 설치된다. -{{< /note >}} - -{{< warning >}} -쿠버네티스 1.18 버전에서, kubeadm을 통한 kube-dns는 사용 중단되었으며, 향후 버전에서 제거될 예정이다. -{{< /warning >}} - -1.13 보다 이전 버전에서 kube-dns를 설치하는경우, 기능 게이트의 `CoreDNS` -값을 `false` 로 변경해야 한다. - -``` -kubeadm init --feature-gates=CoreDNS=false -``` - -1.13 이후 버전에서는, [여기](/docs/reference/setup-tools/kubeadm/kubeadm-init-phase#cmd-phase-addon)에 설명된 지침 가이드를 참고하자. +쿠버네티스 버전 1.21에서, kubeadm 의 `kube-dns` 지원 기능이 삭제되었다. ## CoreDNS 업그레이드하기 diff --git a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md index ad760098446ce..9521bb1ec65fd 100644 --- a/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md +++ b/content/ko/docs/tasks/administer-cluster/dns-custom-nameservers.md @@ -31,7 +31,7 @@ DNS는 _애드온 관리자_ 인 [클러스터 애드온](http://releases.k8s.io CoreDNS 대신 `kube-dns` 를 계속 사용할 수도 있다. {{< note >}} -CoreDNS와 kube-dns 서비스 모두 `metadata.name` 필드에 `kube-dns` 로 이름이 지정된다. +CoreDNS 서비스는 `metadata.name` 필드에 `kube-dns` 로 이름이 지정된다. 이를 통해, 기존의 `kube-dns` 서비스 이름을 사용하여 클러스터 내부의 주소를 확인하는 워크로드에 대한 상호 운용성이 증가된다. `kube-dns` 로 서비스 이름을 사용하면, 해당 DNS 공급자가 어떤 공통 이름으로 실행되고 있는지에 대한 구현 세부 정보를 추상화한다. {{< /note >}} @@ -176,17 +176,14 @@ kube-dns는 스텁 도메인 및 네임서버(예: ns.foo.com)에 대한 FQDN을 CoreDNS는 kube-dns 이상의 기능을 지원한다. `StubDomains` 과 `upstreamNameservers` 를 지원하도록 생성된 kube-dns의 컨피그맵은 CoreDNS의 `forward` 플러그인으로 변환된다. -마찬가지로, kube-dns의 `Federations` 플러그인은 CoreDNS의 `federation` 플러그인으로 변환된다. ### 예시 -kube-dns에 대한 이 컨피그맵 예제는 federations, stubDomains 및 upstreamNameservers를 지정한다. +kube-dns에 대한 이 컨피그맵 예제는 stubDomains 및 upstreamNameservers를 지정한다. ```yaml apiVersion: v1 data: - federations: | - {"foo" : "foo.feddomain.com"} stubDomains: | {"abc.com" : ["1.2.3.4"], "my.cluster.local" : ["2.3.4.5"]} upstreamNameservers: | @@ -196,13 +193,6 @@ kind: ConfigMap CoreDNS에서는 동등한 설정으로 Corefile을 생성한다. -* federations 에 대응하는 설정: -``` -federation cluster.local { - foo foo.feddomain.com -} -``` - * stubDomains 에 대응하는 설정: ```yaml abc.com:53 { diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md index 487d89379ecf4..3474aee3c2b74 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-certs.md @@ -168,7 +168,7 @@ controllerManager: ### 인증서 서명 요청(CSR) 생성 -쿠버네티스 API로 CSR을 작성하려면 [CertificateSigningRequest 생성](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/#create-certificatesigningrequest)을 본다. +쿠버네티스 API로 CSR을 작성하려면 [CertificateSigningRequest 생성](/docs/reference/access-authn-authz/certificate-signing-requests/#create-certificatesigningrequest)을 본다. 
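As a quick illustration of the CertificateSigningRequest flow linked above, the sketch below generates a client CSR with openssl, submits it through the Kubernetes API, and approves it. The user name `myuser`, the subject, and the file names are hypothetical placeholders and not values taken from this page.

```shell
# Minimal sketch (hypothetical user "myuser"): create a key and a PKCS#10 CSR.
openssl genrsa -out myuser.key 2048
openssl req -new -key myuser.key -subj "/CN=myuser/O=example" -out myuser.csr

# Wrap the CSR in a certificates.k8s.io/v1 CertificateSigningRequest and submit it.
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser
spec:
  request: $(base64 < myuser.csr | tr -d '\n')
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF

# Approve the request and extract the signed certificate.
kubectl certificate approve myuser
kubectl get csr myuser -o jsonpath='{.status.certificate}' | base64 -d > myuser.crt
```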
## 외부 CA로 인증서 갱신 diff --git a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md index bf70b98e939bc..2227c49c9eae8 100644 --- a/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md +++ b/content/ko/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade.md @@ -37,7 +37,7 @@ weight: 20 ### 추가 정보 -- kubelet 마이너 버전을 업그레이드하기 전에 [노드 드레이닝(draining)](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/)이 +- kubelet 마이너 버전을 업그레이드하기 전에 [노드 드레이닝(draining)](/docs/tasks/administer-cluster/safely-drain-node/)이 필요하다. 컨트롤 플레인 노드의 경우 CoreNDS 파드 또는 기타 중요한 워크로드를 실행할 수 있다. - 컨테이너 사양 해시 값이 변경되므로, 업그레이드 후 모든 컨테이너가 다시 시작된다. @@ -328,7 +328,7 @@ etcd 업그레이드가 실패하고 자동 롤백이 작동하지 않으면, - 컨트롤 플레인 이미지가 사용 가능한지 또는 머신으로 가져올 수 있는지 확인한다. - 컴포넌트 구성에 버전 업그레이드가 필요한 경우 대체 구성을 생성하거나 사용자가 제공한 것으로 덮어 쓰기한다. - 컨트롤 플레인 컴포넌트 또는 롤백 중 하나라도 나타나지 않으면 업그레이드한다. -- 새로운 `kube-dns` 와 `kube-proxy` 매니페스트를 적용하고 필요한 모든 RBAC 규칙이 생성되도록 한다. +- 새로운 `CoreDNS` 와 `kube-proxy` 매니페스트를 적용하고 필요한 모든 RBAC 규칙이 생성되도록 한다. - API 서버의 새 인증서와 키 파일을 작성하고 180일 후에 만료될 경우 이전 파일을 백업한다. `kubeadm upgrade node` 는 추가 컨트롤 플레인 노드에서 다음을 수행한다. diff --git a/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md b/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md index bee3c940697be..25abb14314cc6 100644 --- a/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md +++ b/content/ko/docs/tasks/administer-cluster/network-policy-provider/calico-network-policy.md @@ -18,7 +18,7 @@ weight: 10 **사전요구사항**: [gcloud](https://cloud.google.com/sdk/docs/quickstarts). -1. 캘리코로 GKE 클러스터를 시작하려면, `--enable-network-policy` 플래그를 추가하면 된다. +1. 캘리코로 GKE 클러스터를 시작하려면, `--enable-network-policy` 플래그를 추가한다. **문법** ```shell diff --git a/content/ko/docs/tasks/configure-pod-container/static-pod.md b/content/ko/docs/tasks/configure-pod-container/static-pod.md index 4daf739c2f4b1..aea2fcffa1614 100644 --- a/content/ko/docs/tasks/configure-pod-container/static-pod.md +++ b/content/ko/docs/tasks/configure-pod-container/static-pod.md @@ -31,21 +31,14 @@ API 서버에서 제어될 수는 없다. 을 사용하는 것이 바람직하다. {{< /note >}} - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} 이 페이지는 파드를 실행하기 위해 {{< glossary_tooltip term_id="docker" >}}를 사용하며, 노드에서 Fedora 운영 체제를 구동하고 있다고 가정한다. 다른 배포판이나 쿠버네티스 설치 지침과는 다소 상이할 수 있다. - - - - ## 스태틱 파드 생성하기 {#static-pod-creation} @@ -54,7 +47,9 @@ API 서버에서 제어될 수는 없다. ### 파일시스템이 호스팅 하는 스태틱 파드 매니페스트 {#configuration-files} -매니페스트는 특정 디렉터리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)의 `staticPodPath: ` 필드를 사용하자. 이 디렉터리를 정기적으로 스캔하여, 디렉터리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. +매니페스트는 특정 디렉터리에 있는 JSON 이나 YAML 형식의 표준 파드 정의이다. +[kubelet 구성 파일](/docs/reference/config-api/kubelet-config.v1beta1/)의 `staticPodPath: ` 필드를 사용하자. +명시한 디렉터리를 정기적으로 스캔하여, 디렉터리 안의 YAML/JSON 파일이 생성되거나 삭제되었을 때 스태틱 파드를 생성하거나 삭제한다. Kubelet 이 특정 디렉터리를 스캔할 때 점(.)으로 시작하는 단어를 무시한다는 점을 유의하자. 예를 들어, 다음은 스태틱 파드로 간단한 웹 서버를 구동하는 방법을 보여준다. @@ -90,17 +85,18 @@ Kubelet 이 특정 디렉터리를 스캔할 때 점(.)으로 시작하는 단 3. 노드에서 kubelet 실행 시에 `--pod-manifest-path=/etc/kubelet.d/` 와 같이 인자를 제공하여 해당 디렉터리를 사용하도록 구성한다. Fedora 의 경우 이 줄을 포함하기 위하여 `/etc/kubernetes/kubelet` 파일을 다음과 같이 수정한다. 
- ``` - KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" - ``` - 혹은 [kubelet 구성 파일](/docs/tasks/administer-cluster/kubelet-config-file)에 `staticPodPath: ` 필드를 추가한다. + ``` + KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --pod-manifest-path=/etc/kubelet.d/" + ``` + 혹은 [kubelet 구성 파일](/docs/reference/config-api/kubelet-config.v1beta1/)에 + `staticPodPath: ` 필드를 추가한다. 4. kubelet을 재시작한다. Fedora의 경우 아래와 같이 수행한다. - ```shell - # kubelet 이 동작하고 있는 노드에서 이 명령을 수행한다. - systemctl restart kubelet - ``` + ```shell + # kubelet 이 동작하고 있는 노드에서 이 명령을 수행한다. + systemctl restart kubelet + ``` ### 웹이 호스팅 하는 스태틱 파드 매니페스트 {#pods-created-via-http} diff --git a/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md b/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md index 2c7a95a136e0e..f8696993ffe15 100644 --- a/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md +++ b/content/ko/docs/tasks/debug-application-cluster/debug-pod-replication-controller.md @@ -57,7 +57,7 @@ kubectl describe pods ${POD_NAME} 절대 스케줄 될 수 없다. 사용자는 `kubectl get nodes -o ` 명령으로 노드의 - 용량을 점검할 수 있다. 다음은 필요한 정보만을 추출하는 몇 가지 + 용량을 점검할 수 있다. 다음은 필요한 정보를 추출하는 몇 가지 명령의 예이다. ```shell diff --git a/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md index e2b14354642c4..4addcdcfafb6b 100644 --- a/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/ko/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -1,12 +1,16 @@ --- title: 크론잡(CronJob)으로 자동화된 작업 실행 -min-kubernetes-server-version: v1.8 +min-kubernetes-server-version: v1.21 content_type: task weight: 10 --- +쿠버네티스 버전 1.21에서 {{< glossary_tooltip text="크론잡" term_id="cronjob" >}}이 GA (General Availability)로 승격되었다. +이전 버전의 쿠버네티스를 사용하고 있다면, 해당 쿠버네티스 버전의 문서를 참고하여 정확한 정보를 확인할 수 있다. +이전 버전의 쿠버네티스는 `batch/v1` 크론잡 API를 지원하지 않는다. + 시간 기반의 스케줄에 따라 {{< glossary_tooltip text="크론잡" term_id="cronjob" >}}을 이용해서 {{< glossary_tooltip text="잡(Job)" term_id="job" >}}을 실행할 수 있다. 이러한 자동화된 잡은 리눅스 또는 유닉스 시스템에서 [크론](https://ko.wikipedia.org/wiki/Cron) 작업처럼 실행된다. @@ -168,13 +172,11 @@ kubectl delete cronjob hello 이러한 방식으로 기한을 맞추지 못한 잡은 실패한 작업으로 간주된다. 이 필드를 지정하지 않으면, 잡에 기한이 없다. -크론잡 컨트롤러는 크론 잡에 대해 얼마나 많은 스케줄이 누락되었는지를 계산한다. 누락된 스케줄이 100개를 초과 한다면, 크론 잡은 더이상 스케줄되지 않는다. `.spec.startingDeadlineSeconds` 이 설정되지 않았다면, 크론잡 컨트롤러는 `status.lastScheduleTime` 부터 지금까지 누락된 스케줄을 계산한다. - -예를 들어, 하나의 크론 잡이 1분마다 실행되도록 설정되어 있고, 크론잡의 `status.lastScheduleTime` 은 새벽 5:00시이지만, 지금은 오전 7:00시라고 가정하자. 즉 120개의 스케줄이 누락되었다는 것이고, 그래서 크론 잡은 더이상 스케줄되지 않는다. - -`.spec.startingDeadlineSeconds` 필드가 (null이 아닌) 값으로 설정되어 있다면, 크론잡 컨트롤러는 `.spec.startingDeadlineSeconds` 의 값으로부터 지금까지 얼마나 많은 잡이 누락되었는지를 계산한다. +`.spec.startingDeadlineSeconds` 필드가 (null이 아닌 값으로) 설정되어 있다면, +크론잡 컨트롤러는 잡 생성 완료 예상 시각과 현재 시각의 차이를 측정하고, +시각 차이가 설정한 값보다 커지면 잡 생성 동작을 스킵한다. -예를 들어, `200` 으로 설정되었다면, 지난 200초 동안 누락된 스케줄이 몇 번 발생했는지 계산한다. 이 경우, 지난 200초 동안 누락된 스케줄이 100개가 넘으면, 크론 잡이 더이상 스케줄되지 않는다. +예를 들어, `200` 으로 설정되었다면, 잡 생성 완료 예상 시각으로부터 200초까지는 잡이 생성될 수 있다. 
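For context, the sketch below shows where `startingDeadlineSeconds` sits in a `batch/v1` CronJob manifest. The job name, schedule, and container are illustrative placeholders; only the field placement follows the behavior described above.

```shell
# Hypothetical CronJob: runs every minute, and a run is skipped if the job
# cannot be created within 200 seconds of its scheduled time.
cat <<EOF | kubectl apply -f -
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello-deadline
spec:
  schedule: "*/1 * * * *"
  startingDeadlineSeconds: 200
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["sh", "-c", "date; echo Hello from CronJob"]
          restartPolicy: OnFailure
EOF
```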
### 동시성 정책 diff --git a/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md b/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md index fa765a70051f4..bd8bf3880852d 100644 --- a/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md +++ b/content/ko/docs/tasks/job/coarse-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: 작업 대기열을 사용한 거친 병렬 처리 min-kubernetes-server-version: v1.8 content_type: task -weight: 30 +weight: 20 --- @@ -19,7 +19,7 @@ weight: 30 1. **메시지 대기열 서비스를 시작한다.** 이 예에서는, RabbitMQ를 사용하지만, 다른 메시지 대기열을 이용해도 된다. 실제로 사용할 때는, 한 번 메시지 대기열 서비스를 구축하고서 이를 여러 잡을 위해 재사용하기도 한다. 1. **대기열을 만들고, 메시지로 채운다.** 각 메시지는 수행할 하나의 작업을 나타낸다. - 이 예제에서, 메시지는 긴 계산을 수행할 정수일 뿐이다. + 이 예제에서, 메시지는 긴 계산을 수행할 정수다. 1. **대기열에서 작업을 수행하는 잡을 시작한다.** 잡은 여러 파드를 시작한다. 각 파드는 메시지 대기열에서 하나의 작업을 가져와서, 처리한 다음, 대기열이 비워질 때까지 반복한다. @@ -141,13 +141,12 @@ root@temp-loe07:/# ``` 마지막 커맨드에서, `amqp-consume` 도구는 대기열로부터 하나의 메시지를 -받고(`-c 1`), 그 메시지를 임의의 명령 표준입력으로 전달한다. 이 경우에는, `cat` 프로그램이 표준입력으로부터 -받은 값을 바로 출력하고 있고, echo가 캐리지 리턴을 더해주어 +받고(`-c 1`), 그 메시지를 임의의 명령 표준입력으로 전달한다. 이 경우에는, `cat` 프로그램이 표준입력으로부터 받은 값을 출력하고, echo가 캐리지 리턴을 더해주어 출력 결과가 보여진다. ## 작업으로 대기열 채우기 -이제 몇 가지 "작업"으로 대기열을 채운다. 이 예제에서의 작업은 간단히 문자열을 +이제 몇 가지 "작업"으로 대기열을 채운다. 이 예제에서의 작업은 문자열을 출력하는 것이다. 실제로 사용할 때는, 메시지의 내용이 다음과 같을 수 있다. diff --git a/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md b/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md index 8788477c171b5..b85f687df7a4b 100644 --- a/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md +++ b/content/ko/docs/tasks/job/fine-parallel-processing-work-queue.md @@ -2,7 +2,7 @@ title: 작업 대기열을 사용한 정밀 병렬 처리 content_type: task min-kubernetes-server-version: v1.8 -weight: 40 +weight: 30 --- @@ -21,7 +21,7 @@ weight: 40 않기 때문에 Redis 및 사용자 지정의 작업 대기열 클라이언트 라이브러리를 사용한다. 실제로는 Redis와 같은 저장소를 한 번 설정하고 여러 작업과 다른 것들의 작업 대기열로 재사용한다. 1. **대기열을 만들고, 메시지로 채운다.** 각 메시지는 수행할 하나의 작업을 나타낸다. 이 - 예에서, 메시지는 긴 계산을 수행할 정수일 뿐이다. + 예에서, 메시지는 긴 계산을 수행할 정수다. 1. **대기열에서 작업을 수행하는 잡을 시작한다.** 잡은 여러 파드를 시작한다. 각 파드는 메시지 대기열에서 하나의 작업을 가져와서, 처리한 다음, 대기열이 비워질 때까지 반복한다. diff --git a/content/ko/docs/tasks/job/parallel-processing-expansion.md b/content/ko/docs/tasks/job/parallel-processing-expansion.md index ef02dac61b9b0..fbf105024d766 100644 --- a/content/ko/docs/tasks/job/parallel-processing-expansion.md +++ b/content/ko/docs/tasks/job/parallel-processing-expansion.md @@ -2,7 +2,7 @@ title: 확장을 사용한 병렬 처리 content_type: task min-kubernetes-server-version: v1.8 -weight: 20 +weight: 50 --- diff --git a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md index 659836833a568..ec29259de75a3 100644 --- a/content/ko/docs/tasks/manage-daemon/update-daemon-set.md +++ b/content/ko/docs/tasks/manage-daemon/update-daemon-set.md @@ -111,8 +111,8 @@ kubectl edit ds/fluentd-elasticsearch -n kube-system ##### 컨테이너 이미지만 업데이트 -데몬셋 템플릿에서 컨테이너 이미지를 업데이트해야 하는 -경우(예: `.spec.template.spec.containers[*].image`), `kubectl set image` 를 사용한다. +데몬셋 템플릿(예: `.spec.template.spec.containers[*].image`)에 의해 정의된 컨테이너 이미지만 업데이트하려면, +`kubectl set image` 를 사용한다. ```shell kubectl set image ds/fluentd-elasticsearch fluentd-elasticsearch=quay.io/fluentd_elasticsearch/fluentd:v2.6.0 -n kube-system @@ -168,7 +168,7 @@ kubectl get pods -l name=fluentd-elasticsearch -o wide -n kube-system 데몬셋 롤아웃이 진행되지 않는다. 이 문제를 해결하려면, 데몬셋 템플릿을 다시 업데이트한다. 이전의 비정상 롤아웃으로 인해 -새로운 롤아웃이 차단되지 않는다. +새로운 롤아웃이 차단되지는 않는다. 
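As a rough sketch of the recovery step described above — re-updating the template and letting the controller retry — the commands below reuse the `fluentd-elasticsearch` DaemonSet from the earlier examples on this page; the image tag is assumed to be the corrected one.

```shell
# Push a corrected container image into the DaemonSet template (name and tag
# taken from the earlier example; substitute the fixed tag as needed).
kubectl set image ds/fluentd-elasticsearch \
  fluentd-elasticsearch=quay.io/fluentd_elasticsearch/fluentd:v2.6.0 \
  -n kube-system

# Watch the new rollout; a previously failed rollout does not block this one.
kubectl rollout status ds/fluentd-elasticsearch -n kube-system

# Optionally inspect the revision history of the DaemonSet.
kubectl rollout history ds/fluentd-elasticsearch -n kube-system
```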
#### 클럭 차이(skew) diff --git a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md index 78c5dc2cbdb49..087399df01670 100644 --- a/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md +++ b/content/ko/docs/tasks/manage-gpus/scheduling-gpus.md @@ -13,7 +13,7 @@ description: 클러스터의 노드별로 리소스로 사용할 GPU를 구성 쿠버네티스는 AMD 및 NVIDIA GPU(그래픽 프로세싱 유닛)를 노드들에 걸쳐 관리하기 위한 **실험적인** 지원을 포함한다. -이 페이지는 다른 쿠버네티스 버전 간에 걸쳐 사용자가 GPU들을 소비할 수 있는 방법과 +이 페이지는 여러 쿠버네티스 버전에서 사용자가 GPU를 활용할 수 있는 방법과 현재의 제약 사항을 설명한다. @@ -37,7 +37,7 @@ description: 클러스터의 노드별로 리소스로 사용할 GPU를 구성 `nvidia.com/gpu` 를 스케줄 가능한 리소스로써 노출시킨다. 사용자는 이 GPU들을 `cpu` 나 `memory` 를 요청하는 방식과 동일하게 -`.com/gpu` 를 요청함으로써 컨테이너를 통해 소비할 수 있다. +`.com/gpu` 를 요청함으로써 컨테이너에서 활용할 수 있다. 그러나 GPU를 사용할 때는 리소스 요구 사항을 명시하는 방식에 약간의 제약이 있다. diff --git a/content/ko/docs/tasks/run-application/delete-stateful-set.md b/content/ko/docs/tasks/run-application/delete-stateful-set.md index 1ef9220d6559b..07b3396440dd9 100644 --- a/content/ko/docs/tasks/run-application/delete-stateful-set.md +++ b/content/ko/docs/tasks/run-application/delete-stateful-set.md @@ -37,8 +37,8 @@ kubectl delete statefulsets kubectl delete service ``` -kubectl을 통해 스테이트풀셋을 삭제하면 0으로 스케일이 낮아지고, 스테이트풀셋에 포함된 모든 파드가 삭제된다. -파드가 아닌 스테이트풀셋만 삭제하려면, `--cascade=false` 를 사용한다. +kubectl을 통해 스테이트풀셋을 삭제하면, 스테이트풀셋의 크기가 0으로 설정되고 이로 인해 스테이트풀셋에 포함된 모든 파드가 삭제된다. 파드가 아닌 스테이트풀셋만 삭제하려면, `--cascade=false` 옵션을 사용한다. +예시는 다음과 같다. ```shell kubectl delete -f --cascade=false diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index b4ca1826d6278..61f1dbc7583f2 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -381,7 +381,7 @@ object: 외부 메트릭 사용시, 먼저 모니터링 시스템에 대한 이해가 있어야 한다. 이 설치는 사용자 정의 메트릭과 유사하다. 외부 메트릭을 사용하면 모니터링 시스템의 사용 가능한 메트릭에 기반하여 클러스터를 오토스케일링 할 수 있다. -위의 예제처럼 `name`과 `selector`를 갖는 `metric` 블록을 제공하고, +위의 예제처럼 `name`과 `selector`를 갖는 `metric` 블록을 명시하고, `Object` 대신에 `External` 메트릭 타입을 사용한다. 만일 여러 개의 시계열이 `metricSelector`와 일치하면, HorizontalPodAutoscaler가 값의 합을 사용한다. 외부 메트릭들은 `Value`와 `AverageValue` 대상 타입을 모두 지원하고, diff --git a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md index f762357603543..b4cc1b3be5f5e 100644 --- a/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md +++ b/content/ko/docs/tasks/run-application/horizontal-pod-autoscale.md @@ -23,9 +23,7 @@ Pod Autoscaler는 크기를 조정할 수 없는 오브젝트(예: 데몬셋(Dae Horizontal Pod Autoscaler는 쿠버네티스 API 리소스 및 컨트롤러로 구현된다. 리소스는 컨트롤러의 동작을 결정한다. -컨트롤러는 관찰된 평균 CPU 사용률이 사용자가 지정한 대상과 일치하도록 레플리케이션 -컨트롤러 또는 디플로이먼트에서 레플리카 개수를 주기적으로 조정한다. - +컨트롤러는 평균 CPU 사용률, 평균 메모리 사용률 또는 다른 커스텀 메트릭과 같은 관찰 대상 메트릭이 사용자가 지정한 목표값과 일치하도록 레플리케이션 컨트롤러 또는 디플로이먼트에서 레플리카 개수를 주기적으로 조정한다. @@ -355,7 +353,7 @@ API에 접속하려면 클러스터 관리자는 다음을 확인해야 한다. ## 구성가능한 스케일링 동작 지원 -[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/20190307-configurable-scale-velocity-for-hpa.md) +[v1.18](https://github.com/kubernetes/enhancements/blob/master/keps/sig-autoscaling/853-configurable-hpa-scale-velocity/README.md) 부터 `v2beta2` API는 HPA `behavior` 필드를 통해 스케일링 동작을 구성할 수 있다. 
동작은 `behavior` 필드 아래의 `scaleUp` 또는 `scaleDown` diff --git a/content/ko/docs/tasks/tools/_index.md b/content/ko/docs/tasks/tools/_index.md index 8c2b3e1aed185..990a9fd99b8b4 100755 --- a/content/ko/docs/tasks/tools/_index.md +++ b/content/ko/docs/tasks/tools/_index.md @@ -17,9 +17,9 @@ no_list: true `kubectl` 은 다양한 리눅스 플랫폼, macOS, 그리고 윈도우에 설치할 수 있다. 각각에 대한 설치 가이드는 다음과 같다. -- [리눅스에 `kubectl` 설치하기](install-kubectl-linux) -- [macOS에 `kubectl` 설치하기](install-kubectl-macos) -- [윈도우에 `kubectl` 설치하기](install-kubectl-windows) +- [리눅스에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-linux/) +- [macOS에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-macos/) +- [윈도우에 `kubectl` 설치하기](/ko/docs/tasks/tools/install-kubectl-windows/) ## kind diff --git a/content/ko/docs/tutorials/_index.md b/content/ko/docs/tutorials/_index.md index 7c1216c5fd25f..8d3fd54010fff 100644 --- a/content/ko/docs/tutorials/_index.md +++ b/content/ko/docs/tutorials/_index.md @@ -27,6 +27,8 @@ content_type: concept ## 구성 +* [예제: Java 마이크로서비스 구성하기](/ko/docs/tutorials/configuration/configure-java-microservice/) + * [컨피그 맵을 사용해서 Redis 설정하기](/ko/docs/tutorials/configuration/configure-redis-using-configmap/) ## 상태 유지를 하지 않는(stateless) 애플리케이션 diff --git a/content/ko/docs/tutorials/clusters/apparmor.md b/content/ko/docs/tutorials/clusters/apparmor.md index ae3fce6cb87d5..43b07e293bcde 100644 --- a/content/ko/docs/tutorials/clusters/apparmor.md +++ b/content/ko/docs/tutorials/clusters/apparmor.md @@ -322,7 +322,7 @@ Events: 23s 23s 1 {kubelet e2e-test-stclair-node-pool-t1f5} Warning AppArmor Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded ``` -파드 상태는 Failed이며 오류메시지는 `Pod Cannot enforce AppArmor: profile +파드 상태는 Pending이며, 오류 메시지는 `Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded`이다. 이벤트도 동일한 메시지로 기록되었다. ## 관리 {#administration} diff --git a/content/ko/docs/tutorials/hello-minikube.md b/content/ko/docs/tutorials/hello-minikube.md index 39e57ff501d97..e5831046dfb46 100644 --- a/content/ko/docs/tutorials/hello-minikube.md +++ b/content/ko/docs/tutorials/hello-minikube.md @@ -48,7 +48,7 @@ Katacode는 무료로 브라우저에서 쿠버네티스 환경을 제공한다. {{< kat-button >}} {{< note >}} - minikube를 로컬에 설치했다면 `minikube start`를 실행한다. + minikube를 로컬에 설치했다면 `minikube start`를 실행한다. `minikube dashboard` 명령을 실행하기 전에, 새 터미널을 열고, 그 터미널에서 `minikube dashboard` 명령을 실행한 후, 원래의 터미널로 돌아온다. {{< /note >}} 2. 브라우저에서 쿠버네티스 대시보드를 열어보자. @@ -154,7 +154,7 @@ minikube dashboard --url `k8s.gcr.io/echoserver` 이미지 내의 애플리케이션 코드는 TCP 포트 8080에서만 수신한다. `kubectl expose`를 사용하여 다른 포트를 노출한 경우, 클라이언트는 다른 포트에 연결할 수 없다. -2. 방금 생성한 서비스 살펴보기 +2. 생성한 서비스 살펴보기 ```shell kubectl get services @@ -229,7 +229,7 @@ minikube 툴은 활성화하거나 비활성화할 수 있고 로컬 쿠버네 metrics-server was successfully enabled ``` -3. 방금 생성한 파드와 서비스를 확인한다. +3. 생성한 파드와 서비스를 확인한다. ```shell kubectl get pod,svc -n kube-system diff --git a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md index a17ae9f320d8f..8b0a258ae6246 100644 --- a/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/ko/docs/tutorials/stateful-application/basic-stateful-set.md @@ -434,7 +434,7 @@ web-4 0/1 ContainerCreating 0 0s web-4 1/1 Running 0 19s ``` -스테이트풀셋 컨트롤러는 레플리카개수를 스케일링한다. +스테이트풀셋 컨트롤러는 레플리카 개수를 스케일링한다. 
[스테이트풀셋 생성](#차례대로-파드-생성하기)으로 스테이트풀셋 컨트롤러는 각 파드을 순차적으로 각 순번에 따라 생성하고 후속 파드 시작 전에 이전 파드가 Running과 Ready 상태가 될 때까지 @@ -1067,9 +1067,10 @@ statefulset "web" deleted ### Parallel 파드 관리 `Parallel` 파드 관리는 스테이트풀셋 컨트롤러가 모든 파드를 -병렬로 시작하고 종료하는 것으로 다른 파드를 시작/종료하기 전에 +병렬로 시작하고 종료하는 것으로, 다른 파드를 시작/종료하기 전에 파드가 Running과 Ready 상태로 전환되거나 완전히 종료되기까지 기다리지 않음을 뜻한다. +이 옵션은 스케일링 동작에만 영향을 미치며, 업데이트 동작에는 영향을 미치지 않는다. {{< codenew file="application/web/web-parallel.yaml" >}} @@ -1114,7 +1115,7 @@ web-1 1/1 Running 0 10s 스테이트풀셋 컨트롤러는 `web-0`와 `web-1`를 둘 다 동시에 시작했다. 두 번째 터미널을 열어 놓고 다른 터미널창에서 스테이트풀셋을 -스케일링 하자. +스케일링하자. ```shell kubectl scale statefulset/web --replicas=4 diff --git a/content/ko/docs/tutorials/stateless-application/guestbook.md b/content/ko/docs/tutorials/stateless-application/guestbook.md index 04230b4f3efed..1a984319d854b 100644 --- a/content/ko/docs/tutorials/stateless-application/guestbook.md +++ b/content/ko/docs/tutorials/stateless-application/guestbook.md @@ -49,13 +49,15 @@ min-kubernetes-server-version: v1.14 1. 매니페스트 파일을 다운로드한 디렉터리에서 터미널 창을 시작한다. 1. `mongo-deployment.yaml` 파일을 통해 MongoDB 디플로이먼트에 적용한다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml ``` - + 1. 파드의 목록을 질의하여 MongoDB 파드가 실행 중인지 확인한다. @@ -84,14 +86,15 @@ kubectl apply -f ./content/en/examples/application/guestbook/mongo-deployment.ya 1. `mongo-service.yaml` 파일을 통해 MongoDB 서비스에 적용한다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` - 1. 서비스의 목록을 질의하여 MongoDB 서비스가 실행 중인지 확인한다. @@ -122,14 +125,15 @@ kubectl apply -f ./content/en/examples/application/guestbook/mongo-service.yaml 1. `frontend-deployment.yaml` 파일을 통해 프론트엔드의 디플로이먼트에 적용한다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml ``` - 1. 파드의 목록을 질의하여 세 개의 프론트엔드 복제본이 실행되고 있는지 확인한다. @@ -160,14 +164,15 @@ Google Compute Engine 또는 Google Kubernetes Engine과 같은 일부 클라우 1. `frontend-service.yaml` 파일을 통해 프론트엔드 서비스에 적용시킨다. + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml ``` - 1. 서비스의 목록을 질의하여 프론트엔드 서비스가 실행 중인지 확인한다. @@ -179,7 +184,7 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-service.ya ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.0.0.112 80/TCP 6s + frontend ClusterIP 10.0.0.112 80/TCP 6s kubernetes ClusterIP 10.0.0.1 443/TCP 4m mongo ClusterIP 10.0.0.151 6379/TCP 2m ``` @@ -214,8 +219,8 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-service.ya 결과는 아래와 같은 형태로 나타난다. ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.51.242.136 109.197.92.229 80:32372/TCP 1m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m ``` 1. IP 주소를 복사하고, 방명록을 보기 위해 브라우저에서 페이지를 로드한다. @@ -245,7 +250,7 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-service.ya frontend-3823415956-k22zn 1/1 Running 0 54m frontend-3823415956-w9gbt 1/1 Running 0 54m frontend-3823415956-x2pld 1/1 Running 0 5s - mongo-1068406935-3lswp 1/1 Running 0 56m + mongo-1068406935-3lswp 1/1 Running 0 56m ``` 1. 프론트엔드 파드의 수를 축소하기 위해 아래 명령어를 실행한다. 
@@ -266,7 +271,7 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-service.ya NAME READY STATUS RESTARTS AGE frontend-3823415956-k22zn 1/1 Running 0 1h frontend-3823415956-w9gbt 1/1 Running 0 1h - mongo-1068406935-3lswp 1/1 Running 0 1h + mongo-1068406935-3lswp 1/1 Running 0 1h ``` diff --git a/content/ko/examples/application/job/cronjob.yaml b/content/ko/examples/application/job/cronjob.yaml index 816d682f28759..da905a9048c57 100644 --- a/content/ko/examples/application/job/cronjob.yaml +++ b/content/ko/examples/application/job/cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: hello diff --git a/content/ko/examples/application/zookeeper/zookeeper.yaml b/content/ko/examples/application/zookeeper/zookeeper.yaml index a858a72613d5f..4d893b369bde4 100644 --- a/content/ko/examples/application/zookeeper/zookeeper.yaml +++ b/content/ko/examples/application/zookeeper/zookeeper.yaml @@ -27,7 +27,7 @@ spec: selector: app: zk --- -apiVersion: policy/v1beta1 +apiVersion: policy/v1 kind: PodDisruptionBudget metadata: name: zk-pdb diff --git a/content/ko/examples/service/networking/namespaced-params.yaml b/content/ko/examples/service/networking/namespaced-params.yaml new file mode 100644 index 0000000000000..dd567247874f4 --- /dev/null +++ b/content/ko/examples/service/networking/namespaced-params.yaml @@ -0,0 +1,12 @@ +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: external-lb +spec: + controller: example.com/ingress-controller + parameters: + apiGroup: k8s.example.com + kind: IngressParameters + name: external-lb + namespace: external-configuration + scope: Namespace diff --git a/content/pl/docs/reference/glossary/kube-apiserver.md b/content/pl/docs/reference/glossary/kube-apiserver.md index 14b58a9ba6d9f..12620387d008b 100755 --- a/content/pl/docs/reference/glossary/kube-apiserver.md +++ b/content/pl/docs/reference/glossary/kube-apiserver.md @@ -4,7 +4,7 @@ id: kube-apiserver date: 2018-04-12 full_link: /docs/concepts/overview/components/#kube-apiserver short_description: > - Składnik warstwy sterowania udostępniający API Kubernetes. + Składnik warstwy sterowania udostępniający API Kubernetesa. aka: - kube-apiserver @@ -12,13 +12,12 @@ tags: - architecture - fundamental --- - Składnik *master* udostępniający API Kubernetes. Służy jako *front-end* dla warstwy sterowania Kubernetes. - Serwer API jest składnikiem -{{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetes, który udostępnia API. +Serwer API jest składnikiem +{{< glossary_tooltip text="warstwy sterowania" term_id="control-plane" >}} Kubernetesa, który udostępnia API. Server API służy jako front-end warstwy sterowania Kubernetes. -Podstawowa implementacją serwera API Kubernetes jest [kube-apiserver](/docs/reference/generated/kube-apiserver/). +Podstawową implementacją serwera API Kubernetesa jest [kube-apiserver](/docs/reference/generated/kube-apiserver/). kube-apiserver został zaprojektowany w taki sposób, aby móc skalować się horyzontalnie — to oznacza, że zwiększa swoją wydajność poprzez dodawanie kolejnych instancji. Można uruchomić kilka instancji kube-apiserver i rozkładać między nimi ruch od klientów. 
diff --git a/content/pt/OWNERS b/content/pt-br/OWNERS similarity index 100% rename from content/pt/OWNERS rename to content/pt-br/OWNERS diff --git a/content/pt/_common-resources/index.md b/content/pt-br/_common-resources/index.md similarity index 100% rename from content/pt/_common-resources/index.md rename to content/pt-br/_common-resources/index.md diff --git a/content/pt/_index.html b/content/pt-br/_index.html similarity index 100% rename from content/pt/_index.html rename to content/pt-br/_index.html diff --git a/content/pt/blog/_index.md b/content/pt-br/blog/_index.md similarity index 100% rename from content/pt/blog/_index.md rename to content/pt-br/blog/_index.md diff --git a/content/pt/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md b/content/pt-br/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md similarity index 100% rename from content/pt/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md rename to content/pt-br/blog/_posts/2020-09-02-scaling-kubernetes-networking-endpointslices.md diff --git a/content/pt/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md b/content/pt-br/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md similarity index 100% rename from content/pt/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md rename to content/pt-br/blog/_posts/2020-12-02-dont-panic-kubernetes-and-docker.md diff --git a/content/pt/case-studies/_index.md b/content/pt-br/case-studies/_index.md similarity index 100% rename from content/pt/case-studies/_index.md rename to content/pt-br/case-studies/_index.md diff --git a/content/pt/case-studies/chinaunicom/chinaunicom_featured_logo.png b/content/pt-br/case-studies/chinaunicom/chinaunicom_featured_logo.png similarity index 100% rename from content/pt/case-studies/chinaunicom/chinaunicom_featured_logo.png rename to content/pt-br/case-studies/chinaunicom/chinaunicom_featured_logo.png diff --git a/content/pt/case-studies/chinaunicom/index.html b/content/pt-br/case-studies/chinaunicom/index.html similarity index 100% rename from content/pt/case-studies/chinaunicom/index.html rename to content/pt-br/case-studies/chinaunicom/index.html diff --git a/content/pt/community/_index.html b/content/pt-br/community/_index.html similarity index 100% rename from content/pt/community/_index.html rename to content/pt-br/community/_index.html diff --git a/content/pt/community/code-of-conduct.md b/content/pt-br/community/code-of-conduct.md similarity index 100% rename from content/pt/community/code-of-conduct.md rename to content/pt-br/community/code-of-conduct.md diff --git a/content/pt/community/static/README.md b/content/pt-br/community/static/README.md similarity index 100% rename from content/pt/community/static/README.md rename to content/pt-br/community/static/README.md diff --git a/content/pt/community/static/cncf-code-of-conduct.md b/content/pt-br/community/static/cncf-code-of-conduct.md similarity index 100% rename from content/pt/community/static/cncf-code-of-conduct.md rename to content/pt-br/community/static/cncf-code-of-conduct.md diff --git a/content/pt/docs/_index.md b/content/pt-br/docs/_index.md similarity index 100% rename from content/pt/docs/_index.md rename to content/pt-br/docs/_index.md diff --git a/content/pt/docs/_search.md b/content/pt-br/docs/_search.md similarity index 100% rename from content/pt/docs/_search.md rename to content/pt-br/docs/_search.md diff --git a/content/pt/docs/concepts/_index.md b/content/pt-br/docs/concepts/_index.md similarity 
index 100% rename from content/pt/docs/concepts/_index.md rename to content/pt-br/docs/concepts/_index.md diff --git a/content/pt/docs/concepts/architecture/_index.md b/content/pt-br/docs/concepts/architecture/_index.md similarity index 100% rename from content/pt/docs/concepts/architecture/_index.md rename to content/pt-br/docs/concepts/architecture/_index.md diff --git a/content/pt/docs/concepts/architecture/cloud-controller.md b/content/pt-br/docs/concepts/architecture/cloud-controller.md similarity index 100% rename from content/pt/docs/concepts/architecture/cloud-controller.md rename to content/pt-br/docs/concepts/architecture/cloud-controller.md diff --git a/content/pt/docs/concepts/architecture/control-plane-node-communication.md b/content/pt-br/docs/concepts/architecture/control-plane-node-communication.md similarity index 100% rename from content/pt/docs/concepts/architecture/control-plane-node-communication.md rename to content/pt-br/docs/concepts/architecture/control-plane-node-communication.md diff --git a/content/pt/docs/concepts/architecture/controller.md b/content/pt-br/docs/concepts/architecture/controller.md similarity index 100% rename from content/pt/docs/concepts/architecture/controller.md rename to content/pt-br/docs/concepts/architecture/controller.md diff --git a/content/pt/docs/concepts/cluster-administration/_index.md b/content/pt-br/docs/concepts/cluster-administration/_index.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/_index.md rename to content/pt-br/docs/concepts/cluster-administration/_index.md diff --git a/content/pt/docs/concepts/cluster-administration/addons.md b/content/pt-br/docs/concepts/cluster-administration/addons.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/addons.md rename to content/pt-br/docs/concepts/cluster-administration/addons.md diff --git a/content/pt/docs/concepts/cluster-administration/certificates.md b/content/pt-br/docs/concepts/cluster-administration/certificates.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/certificates.md rename to content/pt-br/docs/concepts/cluster-administration/certificates.md diff --git a/content/pt/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/pt-br/docs/concepts/cluster-administration/cluster-administration-overview.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/cluster-administration-overview.md rename to content/pt-br/docs/concepts/cluster-administration/cluster-administration-overview.md diff --git a/content/pt/docs/concepts/cluster-administration/kubelet-garbage-collection.md b/content/pt-br/docs/concepts/cluster-administration/kubelet-garbage-collection.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/kubelet-garbage-collection.md rename to content/pt-br/docs/concepts/cluster-administration/kubelet-garbage-collection.md diff --git a/content/pt/docs/concepts/cluster-administration/logging.md b/content/pt-br/docs/concepts/cluster-administration/logging.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/logging.md rename to content/pt-br/docs/concepts/cluster-administration/logging.md diff --git a/content/pt/docs/concepts/cluster-administration/networking.md b/content/pt-br/docs/concepts/cluster-administration/networking.md similarity index 100% rename from content/pt/docs/concepts/cluster-administration/networking.md rename to 
content/pt-br/docs/concepts/cluster-administration/networking.md diff --git a/content/pt/docs/concepts/configuration/_index.md b/content/pt-br/docs/concepts/configuration/_index.md similarity index 100% rename from content/pt/docs/concepts/configuration/_index.md rename to content/pt-br/docs/concepts/configuration/_index.md diff --git a/content/pt/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/pt-br/docs/concepts/configuration/organize-cluster-access-kubeconfig.md similarity index 100% rename from content/pt/docs/concepts/configuration/organize-cluster-access-kubeconfig.md rename to content/pt-br/docs/concepts/configuration/organize-cluster-access-kubeconfig.md diff --git a/content/pt/docs/concepts/containers/_index.md b/content/pt-br/docs/concepts/containers/_index.md similarity index 100% rename from content/pt/docs/concepts/containers/_index.md rename to content/pt-br/docs/concepts/containers/_index.md diff --git a/content/pt/docs/concepts/containers/container-environment.md b/content/pt-br/docs/concepts/containers/container-environment.md similarity index 100% rename from content/pt/docs/concepts/containers/container-environment.md rename to content/pt-br/docs/concepts/containers/container-environment.md diff --git a/content/pt/docs/concepts/containers/container-lifecycle-hooks.md b/content/pt-br/docs/concepts/containers/container-lifecycle-hooks.md similarity index 100% rename from content/pt/docs/concepts/containers/container-lifecycle-hooks.md rename to content/pt-br/docs/concepts/containers/container-lifecycle-hooks.md diff --git a/content/pt/docs/concepts/containers/images.md b/content/pt-br/docs/concepts/containers/images.md similarity index 100% rename from content/pt/docs/concepts/containers/images.md rename to content/pt-br/docs/concepts/containers/images.md diff --git a/content/pt/docs/concepts/containers/runtime-class.md b/content/pt-br/docs/concepts/containers/runtime-class.md similarity index 100% rename from content/pt/docs/concepts/containers/runtime-class.md rename to content/pt-br/docs/concepts/containers/runtime-class.md diff --git a/content/pt/docs/concepts/extend-kubernetes/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/_index.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/_index.md rename to content/pt-br/docs/concepts/extend-kubernetes/_index.md diff --git a/content/pt/docs/concepts/extend-kubernetes/api-extension/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/api-extension/_index.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/api-extension/_index.md rename to content/pt-br/docs/concepts/extend-kubernetes/api-extension/_index.md diff --git a/content/pt/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md b/content/pt-br/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md rename to content/pt-br/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation.md diff --git a/content/pt/docs/concepts/extend-kubernetes/compute-storage-net/_index.md b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/_index.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/compute-storage-net/_index.md rename to content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/_index.md diff --git 
a/content/pt/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md rename to content/pt-br/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md diff --git a/content/pt/docs/concepts/extend-kubernetes/operator.md b/content/pt-br/docs/concepts/extend-kubernetes/operator.md similarity index 100% rename from content/pt/docs/concepts/extend-kubernetes/operator.md rename to content/pt-br/docs/concepts/extend-kubernetes/operator.md diff --git a/content/pt/docs/concepts/overview/_index.md b/content/pt-br/docs/concepts/overview/_index.md similarity index 100% rename from content/pt/docs/concepts/overview/_index.md rename to content/pt-br/docs/concepts/overview/_index.md diff --git a/content/pt/docs/concepts/overview/components.md b/content/pt-br/docs/concepts/overview/components.md similarity index 100% rename from content/pt/docs/concepts/overview/components.md rename to content/pt-br/docs/concepts/overview/components.md diff --git a/content/pt/docs/concepts/overview/what-is-kubernetes.md b/content/pt-br/docs/concepts/overview/what-is-kubernetes.md similarity index 100% rename from content/pt/docs/concepts/overview/what-is-kubernetes.md rename to content/pt-br/docs/concepts/overview/what-is-kubernetes.md diff --git a/content/pt/docs/concepts/overview/working-with-objects/_index.md b/content/pt-br/docs/concepts/overview/working-with-objects/_index.md similarity index 100% rename from content/pt/docs/concepts/overview/working-with-objects/_index.md rename to content/pt-br/docs/concepts/overview/working-with-objects/_index.md diff --git a/content/pt/docs/concepts/overview/working-with-objects/names.md b/content/pt-br/docs/concepts/overview/working-with-objects/names.md similarity index 100% rename from content/pt/docs/concepts/overview/working-with-objects/names.md rename to content/pt-br/docs/concepts/overview/working-with-objects/names.md diff --git a/content/pt/docs/concepts/scheduling-eviction/_index.md b/content/pt-br/docs/concepts/scheduling-eviction/_index.md similarity index 100% rename from content/pt/docs/concepts/scheduling-eviction/_index.md rename to content/pt-br/docs/concepts/scheduling-eviction/_index.md diff --git a/content/pt/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/pt-br/docs/concepts/scheduling-eviction/kube-scheduler.md similarity index 100% rename from content/pt/docs/concepts/scheduling-eviction/kube-scheduler.md rename to content/pt-br/docs/concepts/scheduling-eviction/kube-scheduler.md diff --git a/content/pt/docs/concepts/scheduling-eviction/pod-overhead.md b/content/pt-br/docs/concepts/scheduling-eviction/pod-overhead.md similarity index 100% rename from content/pt/docs/concepts/scheduling-eviction/pod-overhead.md rename to content/pt-br/docs/concepts/scheduling-eviction/pod-overhead.md diff --git a/content/pt/docs/concepts/security/_index.md b/content/pt-br/docs/concepts/security/_index.md similarity index 100% rename from content/pt/docs/concepts/security/_index.md rename to content/pt-br/docs/concepts/security/_index.md diff --git a/content/pt/docs/concepts/security/overview.md b/content/pt-br/docs/concepts/security/overview.md similarity index 100% rename from content/pt/docs/concepts/security/overview.md rename to content/pt-br/docs/concepts/security/overview.md diff --git 
a/content/pt/docs/concepts/workloads/controllers/_index.md b/content/pt-br/docs/concepts/workloads/controllers/_index.md similarity index 100% rename from content/pt/docs/concepts/workloads/controllers/_index.md rename to content/pt-br/docs/concepts/workloads/controllers/_index.md diff --git a/content/pt/docs/concepts/workloads/controllers/cron-jobs.md b/content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md similarity index 100% rename from content/pt/docs/concepts/workloads/controllers/cron-jobs.md rename to content/pt-br/docs/concepts/workloads/controllers/cron-jobs.md diff --git a/content/pt/docs/contribute/_index.md b/content/pt-br/docs/contribute/_index.md similarity index 100% rename from content/pt/docs/contribute/_index.md rename to content/pt-br/docs/contribute/_index.md diff --git a/content/pt/docs/home/_index.md b/content/pt-br/docs/home/_index.md similarity index 100% rename from content/pt/docs/home/_index.md rename to content/pt-br/docs/home/_index.md diff --git a/content/pt/docs/home/supported-doc-versions.md b/content/pt-br/docs/home/supported-doc-versions.md similarity index 100% rename from content/pt/docs/home/supported-doc-versions.md rename to content/pt-br/docs/home/supported-doc-versions.md diff --git a/content/pt/docs/reference/_index.md b/content/pt-br/docs/reference/_index.md similarity index 100% rename from content/pt/docs/reference/_index.md rename to content/pt-br/docs/reference/_index.md diff --git a/content/pt/docs/reference/access-authn-authz/authentication.md b/content/pt-br/docs/reference/access-authn-authz/authentication.md similarity index 100% rename from content/pt/docs/reference/access-authn-authz/authentication.md rename to content/pt-br/docs/reference/access-authn-authz/authentication.md diff --git a/content/pt/docs/reference/access-authn-authz/bootstrap-tokens.md b/content/pt-br/docs/reference/access-authn-authz/bootstrap-tokens.md similarity index 100% rename from content/pt/docs/reference/access-authn-authz/bootstrap-tokens.md rename to content/pt-br/docs/reference/access-authn-authz/bootstrap-tokens.md diff --git a/content/pt/docs/reference/glossary/alternate-x509-schemes.md b/content/pt-br/docs/reference/glossary/alternate-x509-schemes.md similarity index 100% rename from content/pt/docs/reference/glossary/alternate-x509-schemes.md rename to content/pt-br/docs/reference/glossary/alternate-x509-schemes.md diff --git a/content/pt/docs/reference/glossary/cloud-controller-manager.md b/content/pt-br/docs/reference/glossary/cloud-controller-manager.md similarity index 100% rename from content/pt/docs/reference/glossary/cloud-controller-manager.md rename to content/pt-br/docs/reference/glossary/cloud-controller-manager.md diff --git a/content/pt/docs/reference/glossary/cluster.md b/content/pt-br/docs/reference/glossary/cluster.md similarity index 100% rename from content/pt/docs/reference/glossary/cluster.md rename to content/pt-br/docs/reference/glossary/cluster.md diff --git a/content/pt/docs/reference/glossary/cncf.md b/content/pt-br/docs/reference/glossary/cncf.md similarity index 100% rename from content/pt/docs/reference/glossary/cncf.md rename to content/pt-br/docs/reference/glossary/cncf.md diff --git a/content/pt/docs/reference/glossary/cni.md b/content/pt-br/docs/reference/glossary/cni.md similarity index 100% rename from content/pt/docs/reference/glossary/cni.md rename to content/pt-br/docs/reference/glossary/cni.md diff --git a/content/pt/docs/reference/glossary/container-runtime.md 
b/content/pt-br/docs/reference/glossary/container-runtime.md similarity index 100% rename from content/pt/docs/reference/glossary/container-runtime.md rename to content/pt-br/docs/reference/glossary/container-runtime.md diff --git a/content/pt/docs/reference/glossary/containerd.md b/content/pt-br/docs/reference/glossary/containerd.md similarity index 100% rename from content/pt/docs/reference/glossary/containerd.md rename to content/pt-br/docs/reference/glossary/containerd.md diff --git a/content/pt/docs/reference/glossary/control-plane.md b/content/pt-br/docs/reference/glossary/control-plane.md similarity index 100% rename from content/pt/docs/reference/glossary/control-plane.md rename to content/pt-br/docs/reference/glossary/control-plane.md diff --git a/content/pt/docs/reference/glossary/controller.md b/content/pt-br/docs/reference/glossary/controller.md similarity index 100% rename from content/pt/docs/reference/glossary/controller.md rename to content/pt-br/docs/reference/glossary/controller.md diff --git a/content/pt/docs/reference/glossary/cri-o.md b/content/pt-br/docs/reference/glossary/cri-o.md similarity index 100% rename from content/pt/docs/reference/glossary/cri-o.md rename to content/pt-br/docs/reference/glossary/cri-o.md diff --git a/content/pt/docs/reference/glossary/cri.md b/content/pt-br/docs/reference/glossary/cri.md similarity index 100% rename from content/pt/docs/reference/glossary/cri.md rename to content/pt-br/docs/reference/glossary/cri.md diff --git a/content/pt/docs/reference/glossary/customresourcedefinition.md b/content/pt-br/docs/reference/glossary/customresourcedefinition.md similarity index 100% rename from content/pt/docs/reference/glossary/customresourcedefinition.md rename to content/pt-br/docs/reference/glossary/customresourcedefinition.md diff --git a/content/pt/docs/reference/glossary/etcd.md b/content/pt-br/docs/reference/glossary/etcd.md similarity index 100% rename from content/pt/docs/reference/glossary/etcd.md rename to content/pt-br/docs/reference/glossary/etcd.md diff --git a/content/pt/docs/reference/glossary/kerberos.md b/content/pt-br/docs/reference/glossary/kerberos.md similarity index 100% rename from content/pt/docs/reference/glossary/kerberos.md rename to content/pt-br/docs/reference/glossary/kerberos.md diff --git a/content/pt/docs/reference/glossary/keystone.md b/content/pt-br/docs/reference/glossary/keystone.md similarity index 100% rename from content/pt/docs/reference/glossary/keystone.md rename to content/pt-br/docs/reference/glossary/keystone.md diff --git a/content/pt/docs/reference/glossary/kube-apiserver.md b/content/pt-br/docs/reference/glossary/kube-apiserver.md similarity index 100% rename from content/pt/docs/reference/glossary/kube-apiserver.md rename to content/pt-br/docs/reference/glossary/kube-apiserver.md diff --git a/content/pt/docs/reference/glossary/kube-controller-manager.md b/content/pt-br/docs/reference/glossary/kube-controller-manager.md similarity index 100% rename from content/pt/docs/reference/glossary/kube-controller-manager.md rename to content/pt-br/docs/reference/glossary/kube-controller-manager.md diff --git a/content/pt/docs/reference/glossary/kube-proxy.md b/content/pt-br/docs/reference/glossary/kube-proxy.md similarity index 100% rename from content/pt/docs/reference/glossary/kube-proxy.md rename to content/pt-br/docs/reference/glossary/kube-proxy.md diff --git a/content/pt/docs/reference/glossary/kube-scheduler.md b/content/pt-br/docs/reference/glossary/kube-scheduler.md similarity index 100% rename 
from content/pt/docs/reference/glossary/kube-scheduler.md rename to content/pt-br/docs/reference/glossary/kube-scheduler.md diff --git a/content/pt/docs/reference/glossary/kubelet.md b/content/pt-br/docs/reference/glossary/kubelet.md similarity index 100% rename from content/pt/docs/reference/glossary/kubelet.md rename to content/pt-br/docs/reference/glossary/kubelet.md diff --git a/content/pt/docs/reference/glossary/ldap.md b/content/pt-br/docs/reference/glossary/ldap.md similarity index 100% rename from content/pt/docs/reference/glossary/ldap.md rename to content/pt-br/docs/reference/glossary/ldap.md diff --git a/content/pt/docs/reference/glossary/node.md b/content/pt-br/docs/reference/glossary/node.md similarity index 100% rename from content/pt/docs/reference/glossary/node.md rename to content/pt-br/docs/reference/glossary/node.md diff --git a/content/pt/docs/reference/glossary/pod.md b/content/pt-br/docs/reference/glossary/pod.md similarity index 100% rename from content/pt/docs/reference/glossary/pod.md rename to content/pt-br/docs/reference/glossary/pod.md diff --git a/content/pt/docs/reference/glossary/saml.md b/content/pt-br/docs/reference/glossary/saml.md similarity index 100% rename from content/pt/docs/reference/glossary/saml.md rename to content/pt-br/docs/reference/glossary/saml.md diff --git a/content/pt/docs/reference/glossary/tls-common-name.md b/content/pt-br/docs/reference/glossary/tls-common-name.md similarity index 100% rename from content/pt/docs/reference/glossary/tls-common-name.md rename to content/pt-br/docs/reference/glossary/tls-common-name.md diff --git a/content/pt/docs/reference/glossary/uid.md b/content/pt-br/docs/reference/glossary/uid.md similarity index 100% rename from content/pt/docs/reference/glossary/uid.md rename to content/pt-br/docs/reference/glossary/uid.md diff --git a/content/pt/docs/reference/glossary/username.md b/content/pt-br/docs/reference/glossary/username.md similarity index 100% rename from content/pt/docs/reference/glossary/username.md rename to content/pt-br/docs/reference/glossary/username.md diff --git a/content/pt/docs/reference/kubectl/_index.md b/content/pt-br/docs/reference/kubectl/_index.md similarity index 100% rename from content/pt/docs/reference/kubectl/_index.md rename to content/pt-br/docs/reference/kubectl/_index.md diff --git a/content/pt/docs/reference/kubectl/cheatsheet.md b/content/pt-br/docs/reference/kubectl/cheatsheet.md similarity index 100% rename from content/pt/docs/reference/kubectl/cheatsheet.md rename to content/pt-br/docs/reference/kubectl/cheatsheet.md diff --git a/content/pt/docs/reference/tools.md b/content/pt-br/docs/reference/tools.md similarity index 100% rename from content/pt/docs/reference/tools.md rename to content/pt-br/docs/reference/tools.md diff --git a/content/pt/docs/setup/_index.md b/content/pt-br/docs/setup/_index.md similarity index 100% rename from content/pt/docs/setup/_index.md rename to content/pt-br/docs/setup/_index.md diff --git a/content/pt/docs/sitemap.md b/content/pt-br/docs/sitemap.md similarity index 100% rename from content/pt/docs/sitemap.md rename to content/pt-br/docs/sitemap.md diff --git a/content/pt/docs/tasks/_index.md b/content/pt-br/docs/tasks/_index.md similarity index 100% rename from content/pt/docs/tasks/_index.md rename to content/pt-br/docs/tasks/_index.md diff --git a/content/pt/docs/templates/feature-state-alpha.txt b/content/pt-br/docs/templates/feature-state-alpha.txt similarity index 100% rename from content/pt/docs/templates/feature-state-alpha.txt rename 
to content/pt-br/docs/templates/feature-state-alpha.txt diff --git a/content/pt/docs/templates/feature-state-beta.txt b/content/pt-br/docs/templates/feature-state-beta.txt similarity index 100% rename from content/pt/docs/templates/feature-state-beta.txt rename to content/pt-br/docs/templates/feature-state-beta.txt diff --git a/content/pt/docs/templates/feature-state-deprecated.txt b/content/pt-br/docs/templates/feature-state-deprecated.txt similarity index 100% rename from content/pt/docs/templates/feature-state-deprecated.txt rename to content/pt-br/docs/templates/feature-state-deprecated.txt diff --git a/content/pt/docs/templates/feature-state-stable.txt b/content/pt-br/docs/templates/feature-state-stable.txt similarity index 100% rename from content/pt/docs/templates/feature-state-stable.txt rename to content/pt-br/docs/templates/feature-state-stable.txt diff --git a/content/pt/docs/templates/index.md b/content/pt-br/docs/templates/index.md similarity index 100% rename from content/pt/docs/templates/index.md rename to content/pt-br/docs/templates/index.md diff --git a/content/pt/docs/tutorials/_index.md b/content/pt-br/docs/tutorials/_index.md similarity index 100% rename from content/pt/docs/tutorials/_index.md rename to content/pt-br/docs/tutorials/_index.md diff --git a/content/pt/docs/tutorials/hello-minikube.md b/content/pt-br/docs/tutorials/hello-minikube.md similarity index 100% rename from content/pt/docs/tutorials/hello-minikube.md rename to content/pt-br/docs/tutorials/hello-minikube.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/_index.html b/content/pt-br/docs/tutorials/kubernetes-basics/_index.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/_index.html rename to content/pt-br/docs/tutorials/kubernetes-basics/_index.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html rename 
to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/explore/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/explore/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/explore/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/explore/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-interactive.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/explore/explore-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-interactive.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-intro.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/explore/explore-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/explore/explore-intro.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/expose/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/expose/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/expose/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/expose/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-interactive.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/expose/expose-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-interactive.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-intro.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/expose/expose-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/expose/expose-intro.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/scale/_index.md b/content/pt-br/docs/tutorials/kubernetes-basics/scale/_index.md similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/scale/_index.md rename to content/pt-br/docs/tutorials/kubernetes-basics/scale/_index.md diff --git a/content/pt/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-interactive.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/scale/scale-interactive.html rename to content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-interactive.html diff --git a/content/pt/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html similarity index 100% rename from content/pt/docs/tutorials/kubernetes-basics/scale/scale-intro.html rename to content/pt-br/docs/tutorials/kubernetes-basics/scale/scale-intro.html diff --git 
a/content/pt/examples/admin/logging/fluentd-sidecar-config.yaml b/content/pt-br/examples/admin/logging/fluentd-sidecar-config.yaml similarity index 100% rename from content/pt/examples/admin/logging/fluentd-sidecar-config.yaml rename to content/pt-br/examples/admin/logging/fluentd-sidecar-config.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml diff --git a/content/pt/examples/admin/logging/two-files-counter-pod.yaml b/content/pt-br/examples/admin/logging/two-files-counter-pod.yaml similarity index 100% rename from content/pt/examples/admin/logging/two-files-counter-pod.yaml rename to content/pt-br/examples/admin/logging/two-files-counter-pod.yaml diff --git a/content/pt/examples/debug/counter-pod.yaml b/content/pt-br/examples/debug/counter-pod.yaml similarity index 100% rename from content/pt/examples/debug/counter-pod.yaml rename to content/pt-br/examples/debug/counter-pod.yaml diff --git a/content/pt/includes/index.md b/content/pt-br/includes/index.md similarity index 100% rename from content/pt/includes/index.md rename to content/pt-br/includes/index.md diff --git a/content/pt/partners/_index.html b/content/pt-br/partners/_index.html similarity index 100% rename from content/pt/partners/_index.html rename to content/pt-br/partners/_index.html diff --git a/content/ru/docs/contribute/intermediate.md b/content/ru/docs/contribute/intermediate.md index ba3b06511f9d0..303a0877e5f75 100644 --- a/content/ru/docs/contribute/intermediate.md +++ b/content/ru/docs/contribute/intermediate.md @@ -217,8 +217,8 @@ PR объединяется, когда у него есть комментар ```bash origin git@github.com:/website.git (fetch) origin git@github.com:/website.git (push) - upstream https://github.com/kubernetes/website (fetch) - upstream https://github.com/kubernetes/website (push) + upstream https://github.com/kubernetes/website.git (fetch) + upstream https://github.com/kubernetes/website.git (push) ``` ### Работа в локальном репозитории diff --git a/content/zh/blog/_posts/2020-12-02-dockershim-faq.md b/content/zh/blog/_posts/2020-12-02-dockershim-faq.md index 8552f9fd484a3..b910cfdfd21ca 100644 --- a/content/zh/blog/_posts/2020-12-02-dockershim-faq.md +++ b/content/zh/blog/_posts/2020-12-02-dockershim-faq.md @@ -21,7 +21,7 @@ what that means, check out the blog post --> 本文回顾了自 Kubernetes v1.20 版宣布弃用 Dockershim 以来所引发的一些常见问题。 关于 Kubernetes kubelets 从容器运行时的角度弃用 Docker 的细节以及这些细节背后的含义,请参考博文 -[别慌: Kubernetes 和 Docker](/blog/2020/12/02/dont-panic-kubernetes-and-docker/) +[别慌: Kubernetes 和 Docker](/blog/2020/12/02/dont-panic-kubernetes-and-docker/)。 如果你是 Kubernetes 的终端用户,这对你不会有太大影响。 -这事并不意味着 Dockder 已死、也不意味着你不能或不该继续把 Docker 用作开发工具。 +这事并不意味着 Docker 已死、也不意味着你不能或不该继续把 Docker 用作开发工具。 Docker 仍然是构建容器的利器,使用命令 `docker build` 构建的镜像在 Kubernetes 集群中仍然可以运行。 随时欢迎您提供反馈! 
-- SIG-Auth [定期开会](https://github.com/kubernetes/community/tree/master/sig-auth#meetings),可以通过 [Slack 和邮件列表](https://github.com/kubernetes/community/tree/master/sig-auth#contact)加入 -- SIG-Storage [定期开会](https://github.com/kubernetes/community/tree/master/sig-storage#meetings),可以通过 [Slack 和邮件列表](https://github.com/kubernetes/community/tree/master/sig-storage#contact)加入 +- SIG-Auth [定期开会](https://github.com/kubernetes/community/tree/master/sig-auth#meetings),可以通过 [Slack 和邮件列表](https://github.com/kubernetes/community/tree/master/sig-auth#contact)加入 +- SIG-Storage [定期开会](https://github.com/kubernetes/community/tree/master/sig-storage#meetings),可以通过 [Slack 和邮件列表](https://github.com/kubernetes/community/tree/master/sig-storage#contact)加入 + diff --git a/content/zh/docs/concepts/architecture/cloud-controller.md b/content/zh/docs/concepts/architecture/cloud-controller.md index 7d84826583746..33d660b243c27 100644 --- a/content/zh/docs/concepts/architecture/cloud-controller.md +++ b/content/zh/docs/concepts/architecture/cloud-controller.md @@ -35,7 +35,7 @@ mechanism that allows different cloud providers to integrate their platforms wit ## 设计 {#design} -![Kubernetes 组件](/images/docs/components-of-kubernetes.png) +![Kubernetes 组件](/images/docs/components-of-kubernetes.svg) 云控制器管理器以一组多副本的进程集合的形式运行在控制面中,通常表现为 Pod 中的容器。每个 `cloud-controller-manager` 在同一进程中实现多个 @@ -126,7 +126,7 @@ Route 控制器负责适当地配置云平台中的路由,以便 Kubernetes [云控制器管理器的管理](/zh/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager) 给出了运行和管理云控制器管理器的指南。 +要升级 HA 控制平面以使用云控制器管理器,请参见 [将复制的控制平面迁移以使用云控制器管理器](/zh/docs/tasks/administer-cluster/controller-manager-leader-migration/) + 想要了解如何实现自己的云控制器管理器,或者对现有项目进行扩展么? -API 优先级和公平性( APF )是一种替代方案,可提升上述最大并发限制。 +API 优先级和公平性(APF)是一种替代方案,可提升上述最大并发限制。 APF 以更细粒度的方式对请求进行分类和隔离。 它还引入了空间有限的排队机制,因此在非常短暂的突发情况下,API 服务器不会拒绝任何请求。 通过使用公平排队技术从队列中分发请求,这样, @@ -50,7 +50,7 @@ Fairness feature enabled. {{< /caution >}} --> {{< caution >}} -属于“长时间运行”类型的请求(主要是 watch )不受 API 优先级和公平性过滤器的约束。 +属于“长时间运行”类型的请求(主要是 watch)不受 API 优先级和公平性过滤器的约束。 如果未启用 APF 特性,即便设置 `--max-requests-inflight` 标志,该类请求也不受约束。 {{< /caution >}} @@ -233,7 +233,7 @@ APF 特性附带推荐配置,该配置对实验场景应该足够; i.e. Kubelets, which must be able to contact the API server in order for workloads to be able to schedule on them. --> -* `system` 优先级用于 `system:nodes` 组(即 Kubelets )的请求; +* `system` 优先级用于 `system:nodes` 组(即 Kubelets)的请求; kubelets 必须能连上 API 服务器,以便工作负载能够调度到其上。 * `apiserver_flowcontrol_rejected_requests_total` 是一个计数器向量, 记录被拒绝的请求数量(自服务器启动以来累计值), - 由标签 `flow_chema` (表示与请求匹配的 FlowSchema ),`priority_evel` + 由标签 `flow_chema`(表示与请求匹配的 FlowSchema),`priority_evel` (表示分配给请该求的优先级)和 `reason` 来区分。 `reason` 标签将具有以下值之一: @@ -655,7 +655,7 @@ poorly-behaved workloads that may be harming system health. 
--> * `apiserver_flowcontrol_dispatched_requests_total` 是一个计数器向量, 记录开始执行的请求数量(自服务器启动以来的累积值), - 由标签 `flow_schema` (表示与请求匹配的 FlowSchema )和 + 由标签 `flow_schema`(表示与请求匹配的 FlowSchema)和 `priority_level`(表示分配给该请求的优先级)来区分。 * `apiserver_flowcontrol_read_vs_write_request_count_samples` 是一个直方图向量, 记录当前请求数量的观察值, - 由标签 `phase` (取值为 `waiting` 和 `executing` )和 `request_kind` - (取值 `mutating` 和 `readOnly` )拆分。定期以高速率观察该值。 + 由标签 `phase`(取值为 `waiting` 和 `executing`)和 `request_kind` + (取值 `mutating` 和 `readOnly`)拆分。定期以高速率观察该值。 * `apiserver_flowcontrol_read_vs_write_request_count_watermarks` 是一个直方图向量, 记录请求数量的高/低水位线, - 由标签 `phase` (取值为 `waiting` 和 `executing` )和 `request_kind` - (取值为 `mutating` 和 `readOnly` )拆分;标签 `mark` 取值为 `high` 和 `low` 。 + 由标签 `phase`(取值为 `waiting` 和 `executing`)和 `request_kind` + (取值为 `mutating` 和 `readOnly`)拆分;标签 `mark` 取值为 `high` 和 `low` 。 `apiserver_flowcontrol_read_vs_write_request_count_samples` 向量观察到有值新增, 则该向量累积。这些水位线显示了样本值的范围。 @@ -735,7 +735,7 @@ poorly-behaved workloads that may be harming system health. rate. --> * `apiserver_flowcontrol_priority_level_request_count_samples` 是一个直方图向量, - 记录当前请求的观测值,由标签 `phase` (取值为`waiting` 和 `executing`)和 + 记录当前请求的观测值,由标签 `phase`(取值为`waiting` 和 `executing`)和 `priority_level` 进一步区分。 每个直方图都会定期进行观察,直到相关类别的最后活动为止。观察频率高。 @@ -751,7 +751,7 @@ poorly-behaved workloads that may be harming system health. water marks show the range of values that occurred between samples. --> * `apiserver_flowcontrol_priority_level_request_count_watermarks` 是一个直方图向量, - 记录请求数的高/低水位线,由标签 `phase` (取值为 `waiting` 和 `executing` )和 + 记录请求数的高/低水位线,由标签 `phase`(取值为 `waiting` 和 `executing`)和 `priority_level` 拆分; 标签 `mark` 取值为 `high` 和 `low` 。 `apiserver_flowcontrol_priority_level_request_count_samples` 向量观察到有值新增, @@ -805,9 +805,9 @@ poorly-behaved workloads that may be harming system health. --> * `apiserver_flowcontrol_request_wait_duration_seconds` 是一个直方图向量, 记录请求排队的时间, - 由标签 `flow_schema` (表示与请求匹配的 FlowSchema ), - `priority_level` (表示分配该请求的优先级) - 和 `execute` (表示请求是否开始执行)进一步区分。 + 由标签 `flow_schema`(表示与请求匹配的 FlowSchema ), + `priority_level`(表示分配该请求的优先级) + 和 `execute`(表示请求是否开始执行)进一步区分。 * `apiserver_flowcontrol_request_execution_seconds` 是一个直方图向量, 记录请求实际执行需要花费的时间, - 由标签 `flow_schema` (表示与请求匹配的 FlowSchema )和 - `priority_level` (表示分配给该请求的优先级)进一步区分。 + 由标签 `flow_schema`(表示与请求匹配的 FlowSchema )和 + `priority_level`(表示分配给该请求的优先级)进一步区分。 -系统组件指标可以更好地了解系统内部发生的情况。指标对于构建仪表板和告警特别有用。 +通过系统组件指标可以更好地了解系统组个内部发生的情况。系统组件指标对于构建仪表板和告警特别有用。 Kubernetes 组件以 [Prometheus 格式](https://prometheus.io/docs/instrumenting/exposition_formats/) 生成度量值。 @@ -37,10 +37,10 @@ In most cases metrics are available on `/metrics` endpoint of the HTTP server. 
F Examples of those components: --> -## Kubernetes 中的指标 +## Kubernetes 中组件的指标 -在大多数情况下,可以在 HTTP 服务器的 `/metrics` 端点上访问度量值。 -对于默认情况下不公开端点的组件,可以使用 `--bind-address` 标志启用。 +在大多数情况下,可以通过 HTTP 访问组件的 `/metrics` 端点来获取组件的度量值。 +对于那些默认情况下不暴露端点的组件,可以使用 `--bind-address` 标志启用。 这些组件的示例: diff --git a/content/zh/docs/concepts/configuration/overview.md b/content/zh/docs/concepts/configuration/overview.md index 7f6cf8826aa18..a6c9811d20bc4 100644 --- a/content/zh/docs/concepts/configuration/overview.md +++ b/content/zh/docs/concepts/configuration/overview.md @@ -151,7 +151,7 @@ DNS server watches the Kubernetes API for new `Services` and creates a set of DN - 当您不需要 `kube-proxy` 负载均衡时,使用 [无头服务](/zh/docs/concepts/services-networking/service/#headless-services) @@ -175,7 +175,7 @@ services) (which have a `ClusterIP` of `None`) for easy service discovery when y A Service can be made to span multiple Deployments by omitting release-specific labels from its selector. [Deployments](/docs/concepts/workloads/controllers/deployment/) make it easy to update a running service without downtime. --> 通过从选择器中省略特定发行版的标签,可以使服务跨越多个 Deployment。 -[Deployment](/zh/docs/concepts/workloads/controllers/deployment/) 可以在不停机的情况下轻松更新正在运行的服务。 +当你需要不停机的情况下更新正在运行的服务,可以使用[Deployment](/zh/docs/concepts/workloads/controllers/deployment/)。 - `imagePullPolicy: IfNotPresent`:仅当镜像在本地不存在时才被拉取。 - `imagePullPolicy: Always`:每次启动 Pod 的时候都会拉取镜像。 -- `imagePullPolicy` 省略时,镜像标签为 `:latest` 或不存在,使用 `Always` 值。 -- `imagePullPolicy` 省略时,指定镜像标签并且不是 `:latest`,使用 `IfNotPresent` 值。 +- `imagePullPolicy` 省略时,镜像标签为 `:latest` 或不存在,其值自动被设置为 `Always`。注意,如果镜像标签的值发生改变,`imagePullPolicy` 的值不会被更新为 `IfNotPresent`。 +- `imagePullPolicy` 省略时,指定镜像标签并且不是 `:latest`,其值自动被设置为 `IfNotPresent`。注意,如果镜像标签的值之后被移除或者修改为 `latest`,`imagePullPolicy` 的值不会被更新为 `Always`。 - `imagePullPolicy: Never`:假设镜像已经存在本地,不会尝试拉取镜像。 {{< note >}} -底层镜像驱动程序的缓存语义能够使即便 `imagePullPolicy: Always` 的配置也很高效。 +只要镜像仓库是可访问的,底层镜像驱动程序的缓存语义能够使即便 `imagePullPolicy: Always` 的配置也很高效。 例如,对于 Docker,如果镜像已经存在,则拉取尝试很快,因为镜像层都被缓存并且不需要下载。 {{< /note >}} diff --git a/content/zh/docs/concepts/policy/node-resource-managers.md b/content/zh/docs/concepts/policy/node-resource-managers.md new file mode 100644 index 0000000000000..0651a66f73f5d --- /dev/null +++ b/content/zh/docs/concepts/policy/node-resource-managers.md @@ -0,0 +1,45 @@ +--- +title: 节点资源管理器 +content_type: concept +weight: 50 +--- + + + + + +Kubernetes 提供了一组资源管理器,用于支持延迟敏感的、高吞吐量的工作负载。 +资源管理器的目标是协调和优化节点资源,以支持对 CPU、设备和内存(巨页)等资源有特殊需求的 Pod。 + + + + +主管理器,也叫拓扑管理器(Topology Manager),是一个 Kubelet 组件, +它通过[策略](/zh/docs/tasks/administer-cluster/topology-manager/), +协调全局的资源管理过程。 + +各个管理器的配置方式会在专项文档中详细阐述: + + +- [CPU 管理器策略](/zh/docs/tasks/administer-cluster/cpu-management-policies/) +- [设备管理器](/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#device-plugin-integration-with-the-topology-manager) +- [内存管理器策略](/zh/docs/tasks/administer-cluster/memory-manager/) diff --git a/content/zh/docs/concepts/services-networking/service-traffic-policy.md b/content/zh/docs/concepts/services-networking/service-traffic-policy.md new file mode 100644 index 0000000000000..f52f073d2eb3e --- /dev/null +++ b/content/zh/docs/concepts/services-networking/service-traffic-policy.md @@ -0,0 +1,128 @@ +--- +title: 服务内部流量策略 +content_type: concept +weight: 45 +--- + + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + + +_服务内部流量策略_ 开启了内部流量限制,只路由内部流量到和发起方处于相同节点的服务端点。 +这里的”内部“流量指当前集群中的 Pod 所发起的流量。 +这种机制有助于节省开销,提升效率。 + + + + +## 使用服务内部流量策略 {#using-service-internal-traffic-policy} + + 
+一旦你启用了 `ServiceInternalTrafficPolicy` 这个 +[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/), +你就可以通过将 {{< glossary_tooltip text="Services" term_id="service" >}} 的 +`.spec.internalTrafficPolicy` 项设置为 `Local`, +来为它指定一个内部专用的流量策略。 +此设置就相当于告诉 kube-proxy 对于集群内部流量只能使用本地的服务端口。 + + +{{< note >}} +如果某节点上的 Pod 均不提供指定 Service 的服务端点, +即使该 Service 在其他节点上有可用的服务端点, +Service 的行为看起来也像是它只有 0 个服务端点(只针对此节点上的 Pod)。 +{{< /note >}} + + +以下示例展示了把 Service 的 `.spec.internalTrafficPolicy` 项设为 `Local` 时, +Service 的样子: + + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + selector: + app: MyApp + ports: + - protocol: TCP + port: 80 + targetPort: 9376 + internalTrafficPolicy: Local +``` + + +## 工作原理 {#how-it-works} + + +kube-proxy 基于 `spec.internalTrafficPolicy` 的设置来过滤路由的目标服务端点。 +当它的值设为 `Local` 时,只选择节点本地的服务端点。 +当它的值设为 `Cluster` 或缺省时,则选择所有的服务端点。 +启用[特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) +`ServiceInternalTrafficPolicy` 后, +`spec.internalTrafficPolicy` 的值默认设为 `Cluster`。 + + +## 限制 {#constraints} + + +* 在一个Service上,当 `externalTrafficPolicy` 已设置为 `Local`时,服务内部流量策略无法使用。 + 换句话说,在一个集群的不同 Service 上可以同时使用这两个特性,但在一个 Service 上不行。 + +## {{% heading "whatsnext" %}} + + +* 请阅读[启用拓扑感知提示](/zh/docs/tasks/administer-cluster/enabling-topology-aware-hints) +* 请阅读[Service 的外部流量策略](/zh/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip) +* 请阅读[用 Service 连接应用](/zh/docs/concepts/services-networking/connect-applications-service/) diff --git a/content/zh/docs/concepts/storage/ephemeral-volumes.md b/content/zh/docs/concepts/storage/ephemeral-volumes.md index 47eab07777581..c5195ac56daa2 100644 --- a/content/zh/docs/concepts/storage/ephemeral-volumes.md +++ b/content/zh/docs/concepts/storage/ephemeral-volumes.md @@ -141,6 +141,7 @@ CSI ephemeral volumes are only supported by a subset of CSI drivers. The Kubernetes CSI [Drivers list](https://kubernetes-csi.github.io/docs/drivers.html) shows which drivers support ephemeral volumes. --> + 该特性需要启用参数 `CSIInlineVolume` [特性门控(feature gate)](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 该参数从 Kubernetes 1.16 开始默认启用。 @@ -158,7 +159,7 @@ Conceptually, CSI ephemeral volumes are similar to `configMap`, scheduled onto a node. Kubernetes has no concept of rescheduling Pods anymore at this stage. Volume creation has to be unlikely to fail, otherwise Pod startup gets stuck. In particular, [storage capacity -aware Pod scheduling](/docs/concepts/storage-capacity/) is *not* +aware Pod scheduling](/docs/concepts/storage/storage-capacity/) is *not* supported for these volumes. They are currently also not covered by the storage resource usage limits of a Pod, because that is something that kubelet can only enforce for storage that it manages itself. 
@@ -218,19 +219,22 @@ As a cluster administrator, you can use a [PodSecurityPolicy](/docs/concepts/pol --> ### 通用临时卷 {#generic-ephemeral-volumes} -{{< feature-state for_k8s_version="v1.19" state="alpha" >}} +{{< feature-state for_k8s_version="v1.21" state="beta" >}} 这个特性需要启用 `GenericEphemeralVolume` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/)。 -因为这是一个alpha特性,默认禁用。 +因为这是一个 beta 特性,默认情况下启用。 -通用临时卷类似于 `emptyDir` 卷,但更加灵活: +通用临时卷与 `emptyDir` 卷类似,因为它们为暂存数据提供了一个 per-pod 的目录,该目录通常在置备后为空。 +但他们可能还会有其他特征: + - 存储可以是本地的,也可以是网络连接的。 - 卷可以有固定的大小,pod不能超量使用。 - 卷可能有一些初始数据,这取决于驱动程序和参数。 @@ -408,23 +414,28 @@ two choices: 集群管理员必须意识到这一点。 如果这不符合他们的安全模型,他们有两种选择: -- 通过特性门控显式禁用该特性,可以避免将来的 Kubernetes 版本默认启用时带来混乱。 +- 通过特性门控显式禁用该特性。 - 当`卷`列表不包含 `ephemeral` 卷类型时,使用 - [Pod 安全策略](/zh/docs/concepts/policy/pod-security-policy/)。 + [Pod 安全策略](/zh/docs/concepts/policy/pod-security-policy/) + (在 Kubernetes 1.21 中已弃用)。 +- 使用[准入 Webhook](/zh/docs/reference/access-authn-authz/extensible-admission-controllers/) + 拒绝像 Pod 这样具有通用临时卷。 -在一个命名空间中,用于 PVCs 的常规命名空间配额仍然适用, +在一个命名空间中,用于 PVCs 的常规命名空间配额[用于 PVCs 的常规命名空间配额](/zh/docs/concepts/policy/resource-quotas/#storage-resource-quota)仍然适用, 因此即使允许用户使用这种新机制,他们也不能使用它来规避其他策略。 ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/concepts/storage/persistent-volumes.md b/content/zh/docs/concepts/storage/persistent-volumes.md index 1160dd0e4a578..739540c00223e 100644 --- a/content/zh/docs/concepts/storage/persistent-volumes.md +++ b/content/zh/docs/concepts/storage/persistent-volumes.md @@ -312,7 +312,7 @@ For volume plugins that support the `Delete` reclaim policy, deletion removes bo 对于支持 `Delete` 回收策略的卷插件,删除动作会将 PersistentVolume 对象从 Kubernetes 中移除,同时也会从外部基础设施(如 AWS EBS、GCE PD、Azure Disk 或 -Cinder 卷)中移除所关联的存储资产。 +Cinder 卷)中移除所关联的存储资产。 动态供应的卷会继承[其 StorageClass 中设置的回收策略](#reclaim-policy),该策略默认 为 `Delete`。 管理员需要根据用户的期望来配置 StorageClass;否则 PV 卷被创建之后必须要被 @@ -726,7 +726,7 @@ Currently, storage size is the only resource that can be set or requested. Futu 一般而言,每个 PV 卷都有确定的存储容量。 容量属性是使用 PV 对象的 `capacity` 属性来设置的。 参考 Kubernetes -[资源模型(Resource Model)](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) +[资源模型(Resource Model)](https://git.k8s.io/community/contributors/design-proposals/scheduling/resources.md) 设计提案,了解 `capacity` 字段可以接受的单位。 目前,存储大小是可以设置和请求的唯一资源。 @@ -1491,8 +1491,8 @@ and need persistent storage, it is recommended that you use the following patter config requiring PVCs). 
--> - 在你的工具链中,监测经过一段时间后仍未被绑定的 PVC 对象,要让用户知道这些对象, - 因为这可能意味着集群没有动态存储支持(因而用户必须先创建一个匹配的 PV),或者 - 集群没有配置存储系统(因而用户无法配置需要 PVC 的工作负载配置)。 + 因为这可能意味着集群不支持动态存储(因而用户必须先创建一个匹配的 PV),或者 + 集群没有配置存储系统(因而用户无法配置需要 PVC 的工作负载配置)。 ## {{% heading "whatsnext" %}} @@ -1519,4 +1519,3 @@ and need persistent storage, it is recommended that you use the following patter * [PersistentVolumeSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumespec-v1-core) * [PersistentVolumeClaim](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaim-v1-core) * [PersistentVolumeClaimSpec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#persistentvolumeclaimspec-v1-core) - diff --git a/content/zh/docs/concepts/storage/volume-health-monitoring.md b/content/zh/docs/concepts/storage/volume-health-monitoring.md new file mode 100644 index 0000000000000..4684d4353fbd0 --- /dev/null +++ b/content/zh/docs/concepts/storage/volume-health-monitoring.md @@ -0,0 +1,71 @@ +--- +title: 卷健康监测 +content_type: concept +--- + + + + +{{< feature-state for_k8s_version="v1.21" state="alpha" >}} + + +{{< glossary_tooltip text="CSI" term_id="csi" >}} 卷健康监测支持 CSI 驱动从底层的存储系统着手,探测异常的卷状态,并以事件的形式上报到 {{< glossary_tooltip text="PVCs" term_id="persistent-volume-claim" >}} 或 {{< glossary_tooltip text="Pods" term_id="pod" >}}. + + + + +## 卷健康监测 {#volume-health-monitoring} + + +Kubernetes _卷健康监测_ 是 Kubernetes 容器存储接口(CSI)实现的一部分。 +卷健康监测特性由两个组件实现:外部健康监测控制器和 {{< glossary_tooltip term_id="kubelet" text="kubelet" >}}。 + +如果 CSI 驱动器通过控制器的方式支持卷健康监测特性,那么只要在 CSI 卷上监测到异常卷状态,就会在 +{{< glossary_tooltip text="PersistentVolumeClaim" term_id="persistent-volume-claim" >}} (PVC) +中上报一个事件。 + + +外部健康监测 {{< glossary_tooltip text="控制器" term_id="controller" >}} 也会监测节点失效事件。 +如果要启动节点失效监测功能,你可以设置标志 `enable-node-watcher` 为 `true`。 +当外部健康监测器检测到一个节点失效事件,控制器会报送一个事件,该事件会在 PVC 上继续上报, +以表明使用此 PVC 的 Pod 正位于一个失效的节点上。 + +如果 CSI 驱动程序支持节点测的卷健康检测,那当在 CSI 卷上检测到异常卷时,会在使用该 PVC 的每个Pod 上触发一个事件。 + + +{{< note >}} +你需要启用 +`CSIVolumeHealth` [特性门控](/zh/docs/reference/command-line-tools-reference/feature-gates/) +,才能从节点测使用此特性。 +{{< /note >}} + +## {{% heading "whatsnext" %}} + + +参阅 [CSI 驱动程序文档](https://kubernetes-csi.github.io/docs/drivers.html), +可以找出有那些 CSI 驱动程序已实现了此特性。 \ No newline at end of file diff --git a/content/zh/docs/reference/command-line-tools-reference/feature-gates.md b/content/zh/docs/reference/command-line-tools-reference/feature-gates.md index f2818820fbc24..597bc98040af3 100644 --- a/content/zh/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/zh/docs/reference/command-line-tools-reference/feature-gates.md @@ -104,11 +104,10 @@ different Kubernetes components. | `AnyVolumeDataSource` | `false` | Alpha | 1.18 | | | `AppArmor` | `true` | Beta | 1.4 | | | `BalanceAttachedNodeVolumes` | `false` | Alpha | 1.11 | | -| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | | +| `BoundServiceAccountTokenVolume` | `false` | Alpha | 1.13 | 1.20 | +| `BoundServiceAccountTokenVolume` | `true` | Beta | 1.21 | | | `CPUManager` | `false` | Alpha | 1.8 | 1.9 | | `CPUManager` | `true` | Beta | 1.10 | | -| `CRIContainerLogRotation` | `false` | Alpha | 1.10 | 1.10 | -| `CRIContainerLogRotation` | `true` | Beta| 1.11 | | | `CSIInlineVolume` | `false` | Alpha | 1.15 | 1.15 | | `CSIInlineVolume` | `true` | Beta | 1.16 | - | | `CSIMigration` | `false` | Alpha | 1.14 | 1.16 | @@ -119,7 +118,8 @@ different Kubernetes components. 
| `CSIMigrationAzureDisk` | `false` | Alpha | 1.15 | 1.18 | | `CSIMigrationAzureDisk` | `false` | Beta | 1.19 | | | `CSIMigrationAzureDiskComplete` | `false` | Alpha | 1.17 | | -| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | | +| `CSIMigrationAzureFile` | `false` | Alpha | 1.15 | 1.19 | +| `CSIMigrationAzureFile` | `false` | Beta | 1.21 | | | `CSIMigrationAzureFileComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationGCE` | `false` | Alpha | 1.14 | 1.16 | | `CSIMigrationGCE` | `false` | Beta | 1.17 | | @@ -129,13 +129,16 @@ different Kubernetes components. | `CSIMigrationOpenStackComplete` | `false` | Alpha | 1.17 | | | `CSIMigrationvSphere` | `false` | Beta | 1.19 | | | `CSIMigrationvSphereComplete` | `false` | Beta | 1.19 | | -| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | | -| `CSIStorageCapacity` | `false` | Alpha | 1.19 | | +| `CSIServiceAccountToken` | `false` | Alpha | 1.20 | 1.20 | +| `CSIServiceAccountToken` | `true` | Beta | 1.21 | | +| `CSIStorageCapacity` | `false` | Alpha | 1.19 | 1.20 | +| `CSIStorageCapacity` | `true` | Beta | 1.21 | | | `CSIVolumeFSGroupPolicy` | `false` | Alpha | 1.19 | 1.19 | | `CSIVolumeFSGroupPolicy` | `true` | Beta | 1.20 | | | `ConfigurableFSGroupPolicy` | `false` | Alpha | 1.18 | 1.19 | | `ConfigurableFSGroupPolicy` | `true` | Beta | 1.20 | | -| `CronJobControllerV2` | `false` | Alpha | 1.20 | | +| `CronJobControllerV2` | `false` | Alpha | 1.20 | 1.20 | +| `CronJobControllerV2` | `true` | Beta | 1.21 | | | `CustomCPUCFSQuotaPeriod` | `false` | Alpha | 1.12 | | | `DefaultPodTopologySpread` | `false` | Alpha | 1.19 | 1.19 | | `DefaultPodTopologySpread` | `true` | Beta | 1.20 | | @@ -143,14 +146,11 @@ different Kubernetes components. | `DevicePlugins` | `true` | Beta | 1.10 | | | `DisableAcceleratorUsageMetrics` | `false` | Alpha | 1.19 | 1.19 | | `DisableAcceleratorUsageMetrics` | `true` | Beta | 1.20 | | -| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | | +| `DownwardAPIHugePages` | `false` | Alpha | 1.20 | 1.20 | +| `DownwardAPIHugePages` | `false` | Beta | 1.21 | | | `DynamicKubeletConfig` | `false` | Alpha | 1.4 | 1.10 | | `DynamicKubeletConfig` | `true` | Beta | 1.11 | | | `EfficientWatchResumption` | `false` | Alpha | 1.20 | | -| `EndpointSlice` | `false` | Alpha | 1.16 | 1.16 | -| `EndpointSlice` | `false` | Beta | 1.17 | | -| `EndpointSlice` | `true` | Beta | 1.18 | | -| `EndpointSliceNodeName` | `false` | Alpha | 1.20 | | | `EndpointSliceProxying` | `false` | Alpha | 1.18 | 1.18 | | `EndpointSliceProxying` | `true` | Beta | 1.19 | | | `EndpointSliceTerminatingCondition` | `false` | Alpha | 1.20 | | @@ -162,15 +162,17 @@ different Kubernetes components. 
| `ExpandPersistentVolumes` | `false` | Alpha | 1.8 | 1.10 | | `ExpandPersistentVolumes` | `true` | Beta | 1.11 | | | `ExperimentalHostUserNamespaceDefaulting` | `false` | Beta | 1.5 | | -| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | | -| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | | +| `GenericEphemeralVolume` | `false` | Alpha | 1.19 | 1.20 | +| `GenericEphemeralVolume` | `true` | Beta | 1.21 | | +| `GracefulNodeShutdown` | `false` | Alpha | 1.20 | 1.20 | +| `GracefulNodeShutdown` | `true` | Beta | 1.21 | | | `HPAContainerMetrics` | `false` | Alpha | 1.20 | | | `HPAScaleToZero` | `false` | Alpha | 1.16 | | | `HugePageStorageMediumSize` | `false` | Alpha | 1.18 | 1.18 | | `HugePageStorageMediumSize` | `true` | Beta | 1.19 | | -| `IPv6DualStack` | `false` | Alpha | 1.15 | | -| `ImmutableEphemeralVolumes` | `false` | Alpha | 1.18 | 1.18 | -| `ImmutableEphemeralVolumes` | `true` | Beta | 1.19 | | +| `IngressClassNamespacedParams` | `false` | Alpha | 1.21 | | +| `IPv6DualStack` | `false` | Alpha | 1.15 | 1.20 | +| `IPv6DualStack` | `true` | Beta | 1.21 | | | `KubeletCredentialProviders` | `false` | Alpha | 1.20 | | | `KubeletPodResources` | `true` | Alpha | 1.13 | 1.14 | | `KubeletPodResources` | `true` | Beta | 1.15 | | @@ -179,22 +181,25 @@ different Kubernetes components. | `LocalStorageCapacityIsolation` | `false` | Alpha | 1.7 | 1.9 | | `LocalStorageCapacityIsolation` | `true` | Beta | 1.10 | | | `LocalStorageCapacityIsolationFSQuotaMonitoring` | `false` | Alpha | 1.15 | | +| `LogarithmicScaleDown` | `false` | Alpha | 1.21 | | +| `KubeletPodResourcesGetAllocatable` | `false` | Alpha | 1.21 | | | `MixedProtocolLBService` | `false` | Alpha | 1.20 | | +| `NamespaceDefaultLabelName` | `true` | Beta | 1.21 | | +| `NetworkPolicyEndPort` | `false` | Alpha | 1.21 | | | `NodeDisruptionExclusion` | `false` | Alpha | 1.16 | 1.18 | | `NodeDisruptionExclusion` | `true` | Beta | 1.19 | | | `NonPreemptingPriority` | `false` | Alpha | 1.15 | 1.18 | | `NonPreemptingPriority` | `true` | Beta | 1.19 | | -| `PodDisruptionBudget` | `false` | Alpha | 1.3 | 1.4 | -| `PodDisruptionBudget` | `true` | Beta | 1.5 | | +| `PodDeletionCost` | `false` | Alpha | 1.21 | | +| `PodAffinityNamespaceSelector` | `false` | Alpha | 1.21 | | | `PodOverhead` | `false` | Alpha | 1.16 | 1.17 | | `PodOverhead` | `true` | Beta | 1.18 | | +| `ProbeTerminationGracePeriod` | `false` | Alpha | 1.21 | | | `ProcMountType` | `false` | Alpha | 1.12 | | | `QOSReserved` | `false` | Alpha | 1.11 | | | `RemainingItemCount` | `false` | Alpha | 1.15 | | | `RemoveSelfLink` | `false` | Alpha | 1.16 | 1.19 | | `RemoveSelfLink` | `true` | Beta | 1.20 | | -| `RootCAConfigMap` | `false` | Alpha | 1.13 | 1.19 | -| `RootCAConfigMap` | `true` | Beta | 1.20 | | | `RotateKubeletServerCertificate` | `false` | Alpha | 1.7 | 1.11 | | `RotateKubeletServerCertificate` | `true` | Beta | 1.12 | | | `RunAsGroup` | `true` | Beta | 1.14 | | @@ -202,9 +207,9 @@ different Kubernetes components. 
| `SCTPSupport` | `true` | Beta | 1.19 | | | `ServerSideApply` | `false` | Alpha | 1.14 | 1.15 | | `ServerSideApply` | `true` | Beta | 1.16 | | -| `ServiceAccountIssuerDiscovery` | `false` | Alpha | 1.18 | 1.19 | -| `ServiceAccountIssuerDiscovery` | `true` | Beta | 1.20 | | +| `ServiceInternalTrafficPolicy` | `false` | Alpha | 1.21 | | | `ServiceLBNodePortControl` | `false` | Alpha | 1.20 | | +| `ServiceLoadBalancerClass` | `false` | Alpha | 1.21 | | | `ServiceNodeExclusion` | `false` | Alpha | 1.8 | 1.18 | | `ServiceNodeExclusion` | `true` | Beta | 1.19 | | | `ServiceTopology` | `false` | Alpha | 1.17 | | @@ -214,16 +219,9 @@ different Kubernetes components. | `StorageVersionAPI` | `false` | Alpha | 1.20 | | | `StorageVersionHash` | `false` | Alpha | 1.14 | 1.14 | | `StorageVersionHash` | `true` | Beta | 1.15 | | -| `SupportNodePidsLimit` | `false` | Alpha | 1.14 | 1.14 | -| `SupportNodePidsLimit` | `true` | Beta | 1.15 | | -| `SupportPodPidsLimit` | `false` | Alpha | 1.10 | 1.13 | -| `SupportPodPidsLimit` | `true` | Beta | 1.14 | | -| `TokenRequest` | `false` | Alpha | 1.10 | 1.11 | -| `TokenRequest` | `true` | Beta | 1.12 | | -| `TokenRequestProjection` | `false` | Alpha | 1.11 | 1.11 | -| `TokenRequestProjection` | `true` | Beta | 1.12 | | -| `Sysctls` | `true` | Beta | 1.11 | | +| `SuspendJob` | `false` | Alpha | 1.21 | | | `TTLAfterFinished` | `false` | Alpha | 1.12 | | +| `TopologyAwareHints` | `false` | Alpha | 1.21 | | | `TopologyManager` | `false` | Alpha | 1.16 | 1.17 | | `TopologyManager` | `true` | Beta | 1.18 | | | `ValidateProxyRedirects` | `false` | Alpha | 1.12 | 1.13 | @@ -232,8 +230,8 @@ different Kubernetes components. | `WinDSR` | `false` | Alpha | 1.14 | | | `WinOverlay` | `false` | Alpha | 1.14 | 1.19 | | `WinOverlay` | `true` | Beta | 1.20 | | -| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | | - +| `WindowsEndpointSliceProxying` | `false` | Alpha | 1.19 | 1.20 | +| `WindowsEndpointSliceProxying` | `true` | beta | 1.21 | | {{< /table >}} - `CPUManager`:启用容器级别的 CPU 亲和性支持,有关更多详细信息,请参见 [CPU 管理策略](/zh/docs/tasks/administer-cluster/cpu-management-policies/)。 -- `CRIContainerLogRotation`:为 cri 容器运行时启用容器日志轮换。 +- `CRIContainerLogRotation`:为 CRI 容器运行时启用容器日志轮换。日志文件的默认最大大小为10MB,缺省情况下,一个容器允许的最大日志文件数为5。这些值可以在kubelet配置中配置。 + 更多细节请参见[日志架构]( /zh/docs/concepts/cluster-administration/logging/#logging-at-the-node-level)。 - `CSIBlockVolume`:启用外部 CSI 卷驱动程序用于支持块存储。有关更多详细信息,请参见 [`csi` 原始块卷支持](/zh/docs/concepts/storage/volumes/#csi-raw-block-volume-support)。 - `CSIDriverRegistry`:在 csi.storage.k8s.io 中启用与 CSIDriver API 对象有关的所有逻辑。 @@ -747,6 +768,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `CSIVolumeFSGroupPolicy`: Allows CSIDrivers to use the `fsGroupPolicy` field. This field controls whether volumes created by a CSIDriver support volume ownership and permission modifications when these volumes are mounted. +- `CSIVolumeHealth`: Enable support for CSI volume health monitoring on node. - `ConfigurableFSGroupPolicy`: Allows user to configure volume permission change policy for fsGroups when mounting a volume in a Pod. See [Configure volume permission and ownership change policy for Pods](/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods) @@ -758,6 +780,7 @@ Each feature gate is designed for enabling/disabling a specific feature: --> - `CSIVolumeFSGroupPolicy`: 允许 CSIDrivers 使用 `fsGroupPolicy` 字段. 
该字段能控制由 CSIDriver 创建的卷在挂载这些卷时是否支持卷所有权和权限修改。 +- `CSIVolumeHealth`: 启用对节点上的 CSI volume 运行状况监控的支持 - `ConfigurableFSGroupPolicy`:在 Pod 中挂载卷时,允许用户为 fsGroup 配置卷访问权限和属主变更策略。请参见 [为 Pod 配置卷访问权限和属主变更策略](/zh/docs/tasks/configure-pod-container/security-context/#configure-volume-permission-and-ownership-change-policy-for-pods)。 @@ -802,7 +825,7 @@ Each feature gate is designed for enabling/disabling a specific feature: - `DevicePlugins`: Enable the [device-plugins](/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) based resource provisioning on nodes. - `DisableAcceleratorUsageMetrics`: - [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/). + [Disable accelerator metrics collected by the kubelet](/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). - `DownwardAPIHugePages`: Enables usage of hugepages in [downward API](/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information). - `DryRun`: Enable server-side [dry run](/docs/reference/using-api/api-concepts/#dry-run) requests @@ -815,7 +838,7 @@ Each feature gate is designed for enabling/disabling a specific feature: [设备插件](/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/)的 资源制备。 - `DisableAcceleratorUsageMetrics`: - [禁用 kubelet 收集加速器指标](/zh/docs/concepts/cluster-administration/system-metrics/). + [禁用 kubelet 收集加速器指标](/zh/docs/concepts/cluster-administration/system-metrics/#disable-accelerator-metrics). - `DownwardAPIHugePages`:允许在 [下行(Downward)API](/zh/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information) 中使用巨页信息。 @@ -890,6 +913,7 @@ Each feature gate is designed for enabling/disabling a specific feature: 该缺陷导致 Kubernetes 会忽略 exec 探针的超时值设置。 参阅[就绪态探针](/zh/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). +- `ExpandCSIVolumes`: 启用扩展 CSI 卷。 - `ExpandInUsePersistentVolumes`:启用扩充使用中的 PVC 的尺寸。请查阅 [调整使用中的 PersistentVolumeClaim 的大小](/zh/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim)。 - `ExpandPersistentVolumes`:允许扩充持久卷。请查阅 @@ -906,7 +931,7 @@ Each feature gate is designed for enabling/disabling a specific feature: [确保其被调度](/zh/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/)。 从 v1.13 开始已弃用此特性,转而使用 Pod 优先级和抢占功能。 -- `ExperimentalHostUserNamespaceDefaultingGate`:启用主机默认的用户名字空间。 +- `ExperimentalHostUserNamespaceDefaulting`:启用主机默认的用户名字空间。 这适用于使用其他主机名字空间、主机安装的容器,或具有特权或使用特定的非名字空间功能 (例如 MKNODE、SYS_MODULE 等)的容器。 如果在 Docker 守护程序中启用了用户名字空间重新映射,则启用此选项。 @@ -957,12 +982,17 @@ Each feature gate is designed for enabling/disabling a specific feature: - `HyperVContainer`: Enable [Hyper-V isolation](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container) for Windows containers. -- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) - support for IPv6. - `ImmutableEphemeralVolumes`: Allows for marking individual Secrets and ConfigMaps as immutable for better safety and performance. -- `KubeletConfigFile` (*deprecated*): Enable loading kubelet configuration from a file - specified using a config file. +- `IPv6DualStack`: Enable [dual stack](/docs/concepts/services-networking/dual-stack/) + support for IPv6. +- `IndexedJob`: Allows the [Job](/docs/concepts/workloads/controllers/job/) + controller to manage Pod completions per completion index. 
+- `IngressClassNamespacedParams`: Allow namespace-scoped parameters reference in + `IngressClass` resouce. This feature adds two fields - `Scope` and `Namespace` + to `IngressClass.spec.parameters`. +- `KubeletConfigFile` (*deprecated*): Enable loading kubelet configuration + from a file specified using a config file. See [setting kubelet parameters via a config file](/docs/tasks/administer-cluster/kubelet-config-file/) for more details. - `KubeletCredentialProviders`: Enable kubelet exec credential providers for image pull credentials. @@ -971,10 +1001,12 @@ Each feature gate is designed for enabling/disabling a specific feature: --> - `HyperVContainer`:为 Windows 容器启用 [Hyper-V 隔离](https://docs.microsoft.com/en-us/virtualization/windowscontainers/manage-containers/hyperv-container)。 -- `IPv6DualStack`:启用[双协议栈](/zh/docs/concepts/services-networking/dual-stack/) - 以支持 IPv6。 - `ImmutableEphemeralVolumes`:允许将各个 Secret 和 ConfigMap 标记为不可变更的, 以提高安全性和性能。 +- `IPv6DualStack`:启用[双协议栈](/zh/docs/concepts/services-networking/dual-stack/) + 以支持 IPv6。 +- `IndexedJob`:允许 [Job](/zh/docs/concepts/workloads/controllers/job/) 控制器按每个完成的索引去管理 Pod 完成。 +- `IngressClassNamespacedParams`:允许引用命名空间范围的参数引用 `IngressClass`资源。该特性增加了两个字段 —— `Scope` 和 `Namespace` 到 `IngressClass.spec.parameters`。 - `KubeletConfigFile`(*已弃用*):启用从使用配置文件指定的文件中加载 kubelet 配置。 有关更多详细信息,请参见 [通过配置文件设置 kubelet 参数](/zh/docs/tasks/administer-cluster/kubelet-config-file/)。 @@ -983,15 +1015,20 @@ Each feature gate is designed for enabling/disabling a specific feature: - `KubeletPluginsWatcher`:启用基于探针的插件监视应用程序,使 kubelet 能够发现 类似 [CSI 卷驱动程序](/zh/docs/concepts/storage/volumes/#csi)这类插件。 - `KubeletPodResources`:启用 kubelet 的 Pod 资源 GRPC 端点。更多详细信息,请参见 [支持设备监控](https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/compute-device-assignment.md)。 +- `KubeletPodResourcesGetAllocatable`:启用kubelet的pod资源`GetAllocatableResources`功能。该API增强了[资源分配报告](https://kubernetes.io/zh/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/#monitoring-device-plugin-resources) + 包含有关可分配资源的信息,使客户端能够正确跟踪节点上的可用计算资源。 - `LegacyNodeRoleBehavior`:禁用此门控时,服务负载均衡器中和节点干扰中的原先行为 会忽略 `node-role.kubernetes.io/master` 标签,使用 `NodeDisruptionExclusion` 和 `ServiceNodeExclusion` 对应特性所提供的标签。 @@ -1007,9 +1044,11 @@ Each feature gate is designed for enabling/disabling a specific feature: supports project quotas and they are enabled, use project quotas to monitor [emptyDir volume](/docs/concepts/storage/volumes/#emptydir) storage consumption rather than filesystem walk for better performance and accuracy. +- `LogarithmicScaleDown`: Enable semi-random selection of pods to evict on controller scaledown + based on logarithmic bucketing of pod timestamps. - `MixedProtocolLBService`: Enable using different protocols in the same `LoadBalancer` type Service instance. -- `MountContainers`: Enable using utility containers on host as +- `MountContainers` (*deprecated*): Enable using utility containers on host as the volume mounter. 
--> - `LocalStorageCapacityIsolation`:允许使用 @@ -1022,35 +1061,48 @@ Each feature gate is designed for enabling/disabling a specific feature: 的后备文件系统支持项目配额,并且启用了这些配额,将使用项目配额来监视 [emptyDir 卷](/zh/docs/concepts/storage/volumes/#emptydir)的存储消耗 而不是遍历文件系统,以此获得更好的性能和准确性。 +- `LogarithmicScaleDown`:启用Pod的半随机(semi-random)选择,控制器将根据 Pod 时间戳的对数桶按比例缩小去驱逐 Pod。 - `MixedProtocolLBService`:允许在同一 `LoadBalancer` 类型的 Service 实例中使用不同 的协议。 -- `MountContainers`:允许使用主机上的工具容器作为卷挂载程序。 +- `MountContainers`( *已弃用* )`:允许使用主机上的工具容器作为卷挂载程序。 - `MountPropagation`:启用将一个容器安装的共享卷共享到其他容器或 Pod。 更多详细信息,请参见[挂载传播](/zh/docs/concepts/storage/volumes/#mount-propagation)。 +- `NamespaceDefaultLabelName`:配置 API 服务器以在所有名字空间上设置一个不可变的 {{< glossary_tooltip text="label" term_id="label" >}} + `kubernetes.io/metadata.name`,也包括名字空间。 - `NodeDisruptionExclusion`:启用节点标签 `node.kubernetes.io/exclude-disruption`, 以防止在可用区发生故障期间驱逐节点。 - `NodeLease`:启用新的 Lease(租期)API 以报告节点心跳,可用作节点运行状况信号。 -- `NonPreemptingPriority`:为 PriorityClass 和 Pod 启用 NonPreempting 选项。 +- `NonPreemptingPriority`:为 PriorityClass 和 Pod 启用 `preemptionPolicy` 选项。 - `PVCProtection`:启用防止仍被某 Pod 使用的 PVC 被删除的特性。 +- `PodDeletionCost`:启用[Pod 删除成本](/zh/docs/content/en/docs/concepts/workloads/controllers/replicaset/#pod-deletion-cost)功能 + 该功能使用户可以影响 ReplicaSet 的降序顺序。 - `PersistentLocalVolumes`:允许在 Pod 中使用 `local(本地)`卷类型。 如果请求 `local` 卷,则必须指定 Pod 亲和性属性。 -- `PodDisruptionBudget`:启用 - [PodDisruptionBudget](/zh/docs/tasks/run-application/configure-pdb/) 特性。 +- `PodDisruptionBudget`:启用 [PodDisruptionBudget](/zh/docs/tasks/run-application/configure-pdb/) 特性。 +- `PodAffinityNamespaceSelector`:启用[Pod 亲和性名称空间选择器](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#namespace-selector) + 和[CrossNamespacePodAffinity](/zh/docs/concepts/policy/resource-quotas/#cross-namespace-pod-affinity-quota)资源配额功能。 - `PodOverhead`:启用 [PodOverhead](/zh/docs/concepts/scheduling-eviction/pod-overhead/) 特性以考虑 Pod 开销。 - `SCTPSupport`:在 Pod、Service、Endpoints、NetworkPolicy 定义中 @@ -1158,7 +1217,9 @@ Each feature gate is designed for enabling/disabling a specific feature: (颁发者和 JWKS URL)。详情参见 [为 Pod 配置服务账户](/zh/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery) 。 - `ServiceAppProtocol`:为 Service 和 Endpoints 启用 `appProtocol` 字段。 +- `ServiceInternalTrafficPolicy`:为服务启用 `InternalTrafficPolicy` 字段。 - `ServiceLBNodePortControl`:为服务启用 `spec.allocateLoadBalancerNodePorts` 字段。 + `ServiceLoadBalancerClass`: 为服务启用 `LoadBalancerClass` 字段。 有关更多信息,请参见 [负载均衡器类的定义 implementation](/zh/docs/concepts/services-networking/service/#specifying-class-of-load-balancer-implementation-load-balancer-class) for more details. - `ServiceLoadBalancerFinalizer`:为服务负载均衡启用终结器(finalizers)保护。 - `ServiceNodeExclusion`:启用从云提供商创建的负载均衡中排除节点。 @@ -1217,6 +1278,9 @@ Each feature gate is designed for enabling/disabling a specific feature: options can be specified to ensure that the specified number of process IDs will be reserved for the system as a whole and for Kubernetes system daemons respectively. +- `SuspendJob`: Enable support to suspend and resume Jobs. See + [the Jobs docs](/docs/concepts/workloads/controllers/job/) for + more details. - `Sysctls`: Enable support for namespaced kernel parameters (sysctls) that can be set for each pod. See [sysctls](/docs/tasks/administer-cluster/sysctl-cluster/) for more details. 
@@ -1228,6 +1292,8 @@ Each feature gate is designed for enabling/disabling a specific feature: `--system-reserved` 和 `--kube-reserved` 中的参数 `pid=<数值>` 可以分别用来 设定为整个系统所预留的进程 ID 个数和为 Kubernetes 系统守护进程预留的进程 ID 个数。 +- `SuspendJob`: 启用支持以暂停和恢复作业。 更多详细信息,请参见 + [Jobs 文档](zh//docs/concepts/workloads/controllers/job/)。 - `Sysctls`:允许为每个 Pod 设置的名字空间内核参数(sysctls)。 更多详细信息,请参见 [sysctls](/zh/docs/tasks/administer-cluster/sysctl-cluster/)。 +- 'VolumeCapacityPriority`: 基于可用 PV 容量的拓扑,启用对不同节点的优先级支持。 - `VolumePVCDataSource`:启用对将现有 PVC 指定数据源的支持。 - `VolumeScheduling`:启用卷拓扑感知调度,并使 PersistentVolumeClaim(PVC) 绑定能够了解调度决策;当与 PersistentLocalVolumes 特性门控一起使用时, diff --git a/content/zh/docs/reference/config-api/kube-proxy-config.v1alpha1.md b/content/zh/docs/reference/config-api/kube-proxy-config.v1alpha1.md new file mode 100644 index 0000000000000..86315856b2f87 --- /dev/null +++ b/content/zh/docs/reference/config-api/kube-proxy-config.v1alpha1.md @@ -0,0 +1,601 @@ +--- +title: kube-proxy Configuration (v1alpha1) +content_type: tool-reference +package: kubeproxy.config.k8s.io/v1alpha1 +auto_generated: true +--- + + +## Resource Types + + +- [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration) + + + + +## `KubeProxyConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration} + + + + + +KubeProxyConfiguration contains everything necessary to configure the +Kubernetes proxy server. + +
`kubeadm kubeconfig` flags:

| Flag | Description |
| ---- | ----------- |
| `-h, --help` | help for kubeconfig |
| `--rootfs string` | [EXPERIMENTAL] The path to the 'real' host root filesystem. |

`kubeadm kubeconfig user` flags:

| Flag | Description |
| ---- | ----------- |
| `--client-name string` | The name of the user. It will be used as the CN if client certificates are created. |
| `--config string` | Path to a kubeadm configuration file. |
| `-h, --help` | help for user |
| `--org strings` (was `stringSlice`) | The organizations of the client certificate. It will be used as the O if client certificates are created. |
| `--token string` | The token that should be used as the authentication mechanism for this kubeconfig, instead of client certificates. |
| `--rootfs string` | [EXPERIMENTAL] The path to the 'real' host root filesystem. |
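As a quick illustration of the flags above, a minimal sketch of generating a kubeconfig for a hypothetical user; the user name, organization, and config path are placeholders, not values from this changeset, and the command is assumed to print the generated kubeconfig to stdout:

```bash
# Sketch: generate a kubeconfig for a hypothetical user "jane" in the "developers" org.
# Assumes the generated kubeconfig is written to stdout; paths and names are placeholders.
kubeadm kubeconfig user \
  --client-name=jane \
  --org=developers \
  --config=/path/to/kubeadm-config.yaml > jane.conf
```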

`kubeadm reset` flags:

| Flag | Description |
| ---- | ----------- |
| `--cert-dir string` | The path to the directory where the certificates are stored. If specified, clean this directory. Default: "/etc/kubernetes/pki" |
| `--cri-socket string` | Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket. |
| `-f, --force` | Reset the node without prompting for confirmation. |
| `-h, --help` | help for reset |
| `--ignore-preflight-errors strings` (was `stringSlice`) | A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks. |
| `--kubeconfig string` | The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file. Default: "/etc/kubernetes/admin.conf" |
| `--skip-phases strings` (was `stringSlice`) | List of phases to be skipped |
| `--rootfs string` | [EXPERIMENTAL] The path to the 'real' host root filesystem. |

-h, --help
help for phase

help for phase

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--cert-dir string     Default: "/etc/kubernetes/pki"
The path to the directory where the certificates are stored. If specified, clean this directory.

The path to the directory where the certificates are stored. If specified, clean this directory.

--cri-socket string
Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

Path to the CRI socket to connect. If empty kubeadm will try to auto-detect this value; use this option only if you have more than one CRI installed or if you have non-standard CRI socket.

-h, --help
help for cleanup-node

help for cleanup-node

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-f, --force
Reset the node without prompting for confirmation.

Reset the node without prompting for confirmation.

-h, --help
help for preflight

help for preflight

--ignore-preflight-errors stringSlice--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for remove-etcd-member

help for remove-etcd-member

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for update-cluster-status

help for update-cluster-status

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--dry-run
Whether to enable dry-run mode or not

Whether to enable dry-run mode or not

-h, --help
help for token

help for token

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--certificate-key string
When used together with '--print-join-command', print the full 'kubeadm join' flag needed to join the cluster as a control-plane. To create a new certificate key you must use 'kubeadm init phase upload-certs --upload-certs'.

When used together with '--print-join-command', print the full 'kubeadm join' flag needed to join the cluster as a control-plane. To create a new certificate key you must use 'kubeadm init phase upload-certs --upload-certs'.

--config string
Path to a kubeadm configuration file.

Path to a kubeadm configuration file.

--description string
A human friendly description of how this token is used.

A human friendly description of how this token is used.

--groups stringSlice     Default: [system:bootstrappers:kubeadm:default-node-token]--groups strings     Default: "system:bootstrappers:kubeadm:default-node-token"
Extra groups that this token will authenticate as when used for authentication. Must match "\\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\\z"

Extra groups that this token will authenticate as when used for authentication. Must match "\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\z"

-h, --help
help for create

help for create

--print-join-command
Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token.

Instead of printing only the token, print the full 'kubeadm join' flag needed to join the cluster using the token.

--ttl duration     Default: 24h0m0s
The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire

The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire

--usages stringSlice     Default: [signing,authentication]--usages strings     Default: "signing,authentication"
Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication]

Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [signing,authentication]

--dry-run
Whether to enable dry-run mode or not

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for delete

help for delete

--dry-run
Whether to enable dry-run mode or not

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for generate

help for generate

--dry-run
Whether to enable dry-run mode or not

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--allow-missing-template-keys     Default: true
If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.

If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.

-o, --experimental-output string     Default: "text"
Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file.

Output format. One of: text|json|yaml|go-template|go-template-file|template|templatefile|jsonpath|jsonpath-as-json|jsonpath-file.

-h, --help
help for list

help for list

--show-managed-fields

If true, keep the managedFields when printing objects in JSON or YAML format.

--dry-run
Whether to enable dry-run mode or not

Whether to enable dry-run mode or not

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for upgrade

help for upgrade

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--allow-experimental-upgrades
Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.

Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.

--allow-release-candidate-upgrades
Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.

Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.

--certificate-renewal     Default: true
Perform the renewal of certificates used by component changed during upgrades.

Perform the renewal of certificates used by component changed during upgrades.

--config string
Path to a kubeadm configuration file.

Path to a kubeadm configuration file.

--dry-run
Do not change any state, just output what actions would be performed.

Do not change any state, just output what actions would be performed.

--etcd-upgrade     Default: true
Perform the upgrade of etcd.

Perform the upgrade of etcd.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

--feature-gates string
A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false)

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-f, --force
Force upgrading although some requirements might not be met. This also implies non-interactive mode.

Force upgrading although some requirements might not be met. This also implies non-interactive mode.

-h, --help
help for apply

help for apply

--ignore-preflight-errors stringSlice--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--print-config
Specifies whether the configuration file that will be used in the upgrade should be printed or not.

Specifies whether the configuration file that will be used in the upgrade should be printed or not.

-y, --yes
Perform the upgrade and do not prompt for confirmation (non-interactive mode).

Perform the upgrade and do not prompt for confirmation (non-interactive mode).

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--api-server-manifest string     Default: "/etc/kubernetes/manifests/kube-apiserver.yaml"
path to API server manifest

path to API server manifest

--config string
Path to a kubeadm configuration file.

Path to a kubeadm configuration file.

-c, --context-lines int     Default: 3
How many lines of context in the diff

How many lines of context in the diff

--controller-manager-manifest string     Default: "/etc/kubernetes/manifests/kube-controller-manager.yaml"
path to controller manifest

path to controller manifest

-h, --help
help for diff

help for diff

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--scheduler-manifest string     Default: "/etc/kubernetes/manifests/kube-scheduler.yaml"
path to scheduler manifest

path to scheduler manifest

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--certificate-renewal     Default: true
Perform the renewal of certificates used by component changed during upgrades.

Perform the renewal of certificates used by component changed during upgrades.

--dry-run
Do not change any state, just output the actions that would be performed.

Do not change any state, just output the actions that would be performed.

--etcd-upgrade     Default: true
Perform the upgrade of etcd.

Perform the upgrade of etcd.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for node

help for node

--ignore-preflight-errors stringSlice--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--skip-phases stringSlice--skip-phases strings
List of phases to be skipped

List of phases to be skipped

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for phase

help for phase

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--certificate-renewal     Default: true
Perform the renewal of certificates used by component changed during upgrades.

Perform the renewal of certificates used by component changed during upgrades.

--dry-run
Do not change any state, just output the actions that would be performed.

Do not change any state, just output the actions that would be performed.

--etcd-upgrade     Default: true
Perform the upgrade of etcd.

Perform the upgrade of etcd.

--experimental-patches string
Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

Path to a directory that contains files named "target[suffix][+patchtype].extension". For example, "kube-apiserver0+merge.yaml" or just "etcd.json". "patchtype" can be one of "strategic", "merge" or "json" and they match the patch formats supported by kubectl. The default "patchtype" is "strategic". "extension" must be either "json" or "yaml". "suffix" is an optional string that can be used to determine which patches are applied first alpha-numerically.

-h, --help
help for control-plane

help for control-plane

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--dry-run
Do not change any state, just output the actions that would be performed.

Do not change any state, just output the actions that would be performed.

-h, --help
help for kubelet-config

help for kubelet-config

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for preflight

help for preflight

--ignore-preflight-errors stringSlice--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

--allow-experimental-upgrades
Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.

Show unstable versions of Kubernetes as an upgrade alternative and allow upgrading to an alpha/beta/release candidate versions of Kubernetes.

--allow-release-candidate-upgrades
Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.

Show release candidate versions of Kubernetes as an upgrade alternative and allow upgrading to a release candidate versions of Kubernetes.

--config string
Path to a kubeadm configuration file.

Path to a kubeadm configuration file.

--feature-gates string
A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (ALPHA - default=false)
PublicKeysECDSA=true|false (ALPHA - default=false)

A set of key=value pairs that describe feature gates for various features. Options are:
IPv6DualStack=true|false (BETA - default=true)
PublicKeysECDSA=true|false (ALPHA - default=false)

-h, --help
help for plan

help for plan

--ignore-preflight-errors stringSlice--ignore-preflight-errors strings
A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

A list of checks whose errors will be shown as warnings. Example: 'IsPrivilegedUser,Swap'. Value 'all' ignores errors from all checks.

--kubeconfig string     Default: "/etc/kubernetes/admin.conf"
The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

The kubeconfig file to use when talking to the cluster. If the flag is not set, a set of standard locations can be searched for an existing kubeconfig file.

--print-config
Specifies whether the configuration file that will be used in the upgrade should be printed or not.

Specifies whether the configuration file that will be used in the upgrade should be printed or not.

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

-h, --help
help for version

help for version

-o, --output string
Output format; available options are 'yaml', 'json' and 'short'

Output format; available options are 'yaml', 'json' and 'short'

--rootfs string
[EXPERIMENTAL] The path to the 'real' host root filesystem.

[EXPERIMENTAL] The path to the 'real' host root filesystem.

| Field | Type | Description |
| --- | --- | --- |
| `apiVersion` | `string` | `kubeproxy.config.k8s.io/v1alpha1` |
| `kind` | `string` | `KubeProxyConfiguration` |
| `featureGates` [Required] | `map[string]bool` | featureGates is a map of feature names to bools that enable or disable alpha/experimental features. |
| `bindAddress` [Required] | `string` | bindAddress is the IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces). |
| `healthzBindAddress` [Required] | `string` | healthzBindAddress is the IP address and port for the health check server to serve on, defaulting to 0.0.0.0:10256. |
| `metricsBindAddress` [Required] | `string` | metricsBindAddress is the IP address and port for the metrics server to serve on, defaulting to 127.0.0.1:10249 (set to 0.0.0.0 for all interfaces). |
| `bindAddressHardFail` [Required] | `bool` | If true, kube-proxy treats failure to bind to a port as fatal and exits. |
| `enableProfiling` [Required] | `bool` | enableProfiling enables profiling via the web interface on the /debug/pprof handler. Profiling handlers will be handled by the metrics server. |
| `clusterCIDR` [Required] | `string` | clusterCIDR is the CIDR range of the pods in the cluster. It is used to bridge traffic coming from outside of the cluster. If not provided, no off-cluster bridging will be performed. |
| `hostnameOverride` [Required] | `string` | hostnameOverride, if non-empty, is used as the identity instead of the actual hostname. |
| `clientConnection` [Required] | `ClientConnectionConfiguration` | clientConnection specifies the kubeconfig file and client connection settings for the proxy server to use when communicating with the apiserver. |
| `iptables` [Required] | `KubeProxyIPTablesConfiguration` | iptables contains iptables-related configuration options. |
| `ipvs` [Required] | `KubeProxyIPVSConfiguration` | ipvs contains ipvs-related configuration options. |
| `oomScoreAdj` [Required] | `int32` | oomScoreAdj is the oom-score-adj value for the kube-proxy process. Values must be within the range [-1000, 1000]. |
| `mode` [Required] | `ProxyMode` | mode specifies which proxy mode to use. |
| `portRange` [Required] | `string` | portRange is the range of host ports (beginPort-endPort, inclusive) that may be consumed in order to proxy service traffic. If unspecified (0-0), ports are chosen randomly. |
| `udpIdleTimeout` [Required] | `meta/v1.Duration` | udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxyMode=userspace. |
| `conntrack` [Required] | `KubeProxyConntrackConfiguration` | conntrack contains conntrack-related configuration options. |
| `configSyncPeriod` [Required] | `meta/v1.Duration` | configSyncPeriod is how often configuration from the apiserver is refreshed. Must be greater than 0. |
| `nodePortAddresses` [Required] | `[]string` | nodePortAddresses is the --nodeport-addresses value for the kube-proxy process. Values must be valid IP blocks; they select the interfaces where NodePorts work. For example, to expose a service on localhost and on particular interfaces, list the corresponding IP blocks. Setting it to "127.0.0.0/8" selects only the loopback interface; a non-zero IP block is filtered down to just the IPs that apply to the node. An empty string slice selects all network interfaces. |
| `winkernel` [Required] | `KubeProxyWinkernelConfiguration` | winkernel contains winkernel-related configuration options. |
| `showHiddenMetricsForVersion` [Required] | `string` | showHiddenMetricsForVersion is the version for which you want to show hidden metrics. |
| `detectLocalMode` [Required] | `LocalMode` | detectLocalMode determines the mode to use for detecting local traffic; defaults to LocalModeClusterCIDR. |
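Taken together, the fields above are what a kube-proxy configuration file supplies. A minimal sketch using a few of them; all values are illustrative rather than defaults:

```yaml
# Sketch of a KubeProxyConfiguration exercising some of the fields documented above.
# All values are examples, not recommendations.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clusterCIDR: 10.244.0.0/16
mode: "iptables"
clientConnection:
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf   # example path
featureGates:
  EndpointSliceProxying: true   # illustrative gate name
```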
## `KubeProxyConntrackConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConntrackConfiguration}

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

KubeProxyConntrackConfiguration contains conntrack settings for the Kubernetes proxy server.

| Field | Type | Description |
| --- | --- | --- |
| `maxPerCore` [Required] | `int32` | maxPerCore is the maximum number of NAT connections to track per CPU core (0 to leave the limit as-is and ignore min). |
| `min` [Required] | `int32` | min is the minimum number of connection-tracking records to allocate, regardless of maxPerCore (set maxPerCore=0 to leave the limit as-is). |
| `tcpEstablishedTimeout` [Required] | `meta/v1.Duration` | tcpEstablishedTimeout is how long an idle TCP connection will be kept open (e.g. '2s'). Must be greater than 0 to set. |
| `tcpCloseWaitTimeout` [Required] | `meta/v1.Duration` | tcpCloseWaitTimeout is how long an idle conntrack entry in CLOSE_WAIT state will remain in the conntrack table (e.g. '60s'). Must be greater than 0 to set. |
## `KubeProxyIPTablesConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPTablesConfiguration}

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

KubeProxyIPTablesConfiguration contains iptables-related configuration details for the Kubernetes proxy server.

| Field | Type | Description |
| --- | --- | --- |
| `masqueradeBit` [Required] | `int32` | masqueradeBit is the bit of the iptables fwmark space to use for SNAT if using the pure iptables proxy mode. Values must be within the range [0, 31]. |
| `masqueradeAll` [Required] | `bool` | masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. |
| `syncPeriod` [Required] | `meta/v1.Duration` | syncPeriod is the period at which iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. |
| `minSyncPeriod` [Required] | `meta/v1.Duration` | minSyncPeriod is the minimum period at which iptables rules are refreshed (e.g. '5s', '1m', '2h22m'). |
## `KubeProxyIPVSConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyIPVSConfiguration}

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

KubeProxyIPVSConfiguration contains ipvs-related configuration details for the Kubernetes proxy server.

| Field | Type | Description |
| --- | --- | --- |
| `syncPeriod` [Required] | `meta/v1.Duration` | syncPeriod is the period at which ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). Must be greater than 0. |
| `minSyncPeriod` [Required] | `meta/v1.Duration` | minSyncPeriod is the minimum period at which ipvs rules are refreshed (e.g. '5s', '1m', '2h22m'). |
| `scheduler` [Required] | `string` | The ipvs scheduler. |
| `excludeCIDRs` [Required] | `[]string` | excludeCIDRs is a list of CIDRs which the ipvs proxier should not touch when cleaning up ipvs services. |
| `strictARP` [Required] | `bool` | strictARP configures arp_ignore and arp_announce to avoid answering ARP queries from the kube-ipvs0 interface. |
| `tcpTimeout` [Required] | `meta/v1.Duration` | tcpTimeout is the timeout value used for idle IPVS TCP sessions. The default value is 0, which preserves the current timeout value on the system. |
| `tcpFinTimeout` [Required] | `meta/v1.Duration` | tcpFinTimeout is the timeout value used for IPVS TCP sessions after receiving a FIN. The default value is 0, which preserves the current timeout value on the system. |
| `udpTimeout` [Required] | `meta/v1.Duration` | udpTimeout is the timeout value used for IPVS UDP packets. The default value is 0, which preserves the current timeout value on the system. |
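These IPVS options nest under the top-level `ipvs` field of `KubeProxyConfiguration`. A small sketch with illustrative values only:

```yaml
# Sketch: ipvs options nested inside a KubeProxyConfiguration (example values).
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  scheduler: "rr"        # e.g. round-robin
  strictARP: true
  excludeCIDRs:
    - 10.0.0.0/24        # example CIDR the proxier should not clean up
  syncPeriod: 30s
  minSyncPeriod: 5s
```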
## `KubeProxyWinkernelConfiguration` {#kubeproxy-config-k8s-io-v1alpha1-KubeProxyWinkernelConfiguration}

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

KubeProxyWinkernelConfiguration contains Windows/HNS settings for the Kubernetes proxy server.

| Field | Type | Description |
| --- | --- | --- |
| `networkName` [Required] | `string` | networkName is the name of the network kube-proxy will use to create endpoints and policies. |
| `sourceVip` [Required] | `string` | sourceVip is the IP address of the source VIP endpoint used for NAT when load balancing. |
| `enableDSR` [Required] | `bool` | enableDSR tells kube-proxy whether HNS policies should be created with DSR. |
## `LocalMode` {#kubeproxy-config-k8s-io-v1alpha1-LocalMode}

(Alias of `string`)

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

LocalMode represents the modes available for detecting traffic that is local to the node.

## `ProxyMode` {#kubeproxy-config-k8s-io-v1alpha1-ProxyMode}

(Alias of `string`)

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

ProxyMode represents the modes used by the Kubernetes proxy server.

Three proxy modes are currently available on Linux: 'userspace' (older, slated for end of life), 'iptables' (newer, faster), and 'ipvs' (newest, with better performance and scalability). Two proxy modes are available on Windows: 'userspace' (older, stable) and 'kernelspace' (newer, faster).

On Linux, if the proxy mode is blank, the best-available proxy is used (currently iptables, but this may change in the future). If the iptables proxy is selected, regardless of how, but the system's kernel or iptables version is insufficient, kube-proxy always falls back to the userspace proxy. IPVS mode is enabled when the proxy mode is set to 'ipvs'; its fallback path is first iptables and then userspace.

On Windows, if the proxy mode is blank, the best-available proxy is used (currently userspace, but this may change in the future). If the winkernel proxy is selected but the Windows kernel cannot support this proxy mode, kube-proxy always falls back to the userspace proxy.

## `ClientConnectionConfiguration` {#ClientConnectionConfiguration}

**Appears in:** [KubeProxyConfiguration](#kubeproxy-config-k8s-io-v1alpha1-KubeProxyConfiguration)

ClientConnectionConfiguration contains details for constructing a client.

| Field | Type | Description |
| --- | --- | --- |
| `kubeconfig` [Required] | `string` | kubeconfig is the path to a KubeConfig file. |
| `acceptContentTypes` [Required] | `string` | acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field controls all connections to the server used by a particular client. |
| `contentType` [Required] | `string` | contentType is the content type used when sending data to the server from this client. |
| `qps` [Required] | `float32` | qps controls the number of queries per second allowed for this connection. |
| `burst` [Required] | `int32` | burst allows extra queries to accumulate when a client is exceeding its rate. |
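The fields above surface in the `clientConnection` block of a kube-proxy configuration file. A small sketch with illustrative tuning values:

```yaml
# Sketch: tuning kube-proxy's apiserver client connection (example values).
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf   # example path
  acceptContentTypes: ""     # empty keeps the 'application/json' default
  contentType: application/vnd.kubernetes.protobuf
  qps: 5
  burst: 10
```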
diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md b/content/zh/docs/reference/config-api/kubelet-config.v1beta1.md
similarity index 67%
rename from content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md
rename to content/zh/docs/reference/config-api/kubelet-config.v1beta1.md
index a5e28a1043018..bee05b68db403 100644
--- a/content/en/docs/reference/command-line-tools-reference/kubelet-config.v1beta1.md
+++ b/content/zh/docs/reference/config-api/kubelet-config.v1beta1.md
@@ -1,26 +1,21 @@
 ---
-title: kubelet.config.k8s.io/v1beta1
+title: Kubelet Configuration (v1beta1)
 content_type: tool-reference
+package: kubelet.config.k8s.io/v1beta1
+auto_generated: true
 ---
 
 ## Resource Types
 
 - [KubeletConfiguration](#kubelet-config-k8s-io-v1beta1-KubeletConfiguration)
 - [SerializedNodeConfigSource](#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource)
 
-### `KubeletConfiguration` {#kubelet-config-k8s-io-v1beta1-KubeletConfiguration}
+## `KubeletConfiguration` {#kubelet-config-k8s-io-v1beta1-KubeletConfiguration}
 
@@ -28,20 +23,21 @@ content_type: tool-reference
 
 KubeletConfiguration contains the configuration for the Kubelet
- - + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -1161,7 +1250,7 @@ Default: "10s" -### `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} +## `SerializedNodeConfigSource` {#kubelet-config-k8s-io-v1beta1-SerializedNodeConfigSource} @@ -1171,20 +1260,21 @@ SerializedNodeConfigSource allows us to serialize v1.NodeConfigSource. This type is used internally by the Kubelet for tracking checkpointed dynamic configs. It exists in the kubeletconfig API group because it is classified as a versioned input to the Kubelet. -
FieldDescription
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
KubeletConfiguration
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
KubeletConfiguration
enableServer*
+
enableServer [Required]
bool
enableServer enables Kubelet's secured server. + + enableServer enables Kubelet's secured server. Note: Kubelet's insecure port is controlled by the readOnlyPort option. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -49,10 +45,11 @@ Default: true
staticPodPath
+
staticPodPath
string
staticPodPath is the path to the directory containing local (static) pods to + + staticPodPath is the path to the directory containing local (static) pods to run, or the path to a single static pod file. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that the set of static pods specified at the new path may be different than the @@ -61,10 +58,11 @@ Default: ""
syncFrequency
+
syncFrequency
meta/v1.Duration
syncFrequency is the max period between synchronizing running + + syncFrequency is the max period between synchronizing running containers and config. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening this duration may have a negative performance impact, especially @@ -74,10 +72,11 @@ Default: "1m"
fileCheckFrequency
+
fileCheckFrequency
meta/v1.Duration
fileCheckFrequency is the duration between checking config files for + + fileCheckFrequency is the duration between checking config files for new data Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening the duration will cause the Kubelet to reload local Static Pod @@ -86,10 +85,11 @@ Default: "20s"
httpCheckFrequency
+
httpCheckFrequency
meta/v1.Duration
httpCheckFrequency is the duration between checking http for new data + + httpCheckFrequency is the duration between checking http for new data Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening the duration will cause the Kubelet to poll staticPodURL more frequently, which may have a negative performance impact. @@ -97,10 +97,11 @@ Default: "20s"
staticPodURL
+
staticPodURL
string
staticPodURL is the URL for accessing static pods to run + + staticPodURL is the URL for accessing static pods to run Dynamic Kubelet Config (beta): If dynamically updating this field, consider that the set of static pods specified at the new URL may be different than the ones the Kubelet initially started with, and this may disrupt your node. @@ -108,20 +109,22 @@ Default: ""
staticPodURLHeader
+
staticPodURLHeader
map[string][]string
staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL + + staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt the ability to read the latest set of static pods from StaticPodURL. Default: nil
address
+
address
string
address is the IP address for the Kubelet to serve on (set to 0.0.0.0 + + address is the IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces). Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -129,20 +132,22 @@ Default: "0.0.0.0"
port
+
port
int32
port is the port for the Kubelet to serve on. + + port is the port for the Kubelet to serve on. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: 10250
readOnlyPort
+
readOnlyPort
int32
readOnlyPort is the read-only port for the Kubelet to serve on with + + readOnlyPort is the read-only port for the Kubelet to serve on with no authentication/authorization. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -150,10 +155,11 @@ Default: 0 (disabled)
tlsCertFile
+
tlsCertFile
string
tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, + + tlsCertFile is the file containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If tlsCertFile and tlsPrivateKeyFile are not provided, a self-signed certificate and key are generated for the public address and saved to the directory @@ -164,20 +170,22 @@ Default: ""
tlsPrivateKeyFile
+
tlsPrivateKeyFile
string
tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile + + tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Default: ""
tlsCipherSuites
+
tlsCipherSuites
[]string
TLSCipherSuites is the list of allowed cipher suites for the server. + + TLSCipherSuites is the list of allowed cipher suites for the server. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -185,10 +193,11 @@ Default: nil
tlsMinVersion
+
tlsMinVersion
string
TLSMinVersion is the minimum TLS version supported. + + TLSMinVersion is the minimum TLS version supported. Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants). Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -196,10 +205,11 @@ Default: ""
rotateCertificates
+
rotateCertificates
bool
rotateCertificates enables client certificate rotation. The Kubelet will request a + + rotateCertificates enables client certificate rotation. The Kubelet will request a new certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -209,10 +219,11 @@ Default: false
serverTLSBootstrap
+
serverTLSBootstrap
bool
serverTLSBootstrap enables server certificate bootstrap. Instead of self + + serverTLSBootstrap enables server certificate bootstrap. Instead of self signing a serving certificate, the Kubelet will request a certificate from the certificates.k8s.io API. This requires an approver to approve the certificate signing requests. The RotateKubeletServerCertificate feature @@ -225,10 +236,11 @@ Default: false
authentication
+
authentication
KubeletAuthentication
authentication specifies how requests to the Kubelet's server are authenticated + + authentication specifies how requests to the Kubelet's server are authenticated Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Defaults: @@ -240,10 +252,11 @@ Defaults:
authorization
+
authorization
KubeletAuthorization
authorization specifies how requests to the Kubelet's server are authorized + + authorization specifies how requests to the Kubelet's server are authorized Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. Defaults: @@ -254,10 +267,11 @@ Defaults:
registryPullQPS
+
registryPullQPS
int32
registryPullQPS is the limit of registry pulls per second. + + registryPullQPS is the limit of registry pulls per second. Set to 0 for no limit. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced @@ -266,10 +280,11 @@ Default: 5
registryBurst
+
registryBurst
int32
registryBurst is the maximum size of bursty pulls, temporarily allows + + registryBurst is the maximum size of bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registryPullQPS. Only used if registryPullQPS > 0. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -279,10 +294,11 @@ Default: 10
eventRecordQPS
+
eventRecordQPS
int32
eventRecordQPS is the maximum event creations per second. If 0, there + + eventRecordQPS is the maximum event creations per second. If 0, there is no limit enforced. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact scalability by changing the amount of traffic produced by @@ -291,10 +307,11 @@ Default: 5
eventBurst
+
eventBurst
int32
eventBurst is the maximum size of a burst of event creations, temporarily + + eventBurst is the maximum size of a burst of event creations, temporarily allows event creations to burst to this number, while still not exceeding eventRecordQPS. Only used if eventRecordQPS > 0. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -304,10 +321,11 @@ Default: 10
enableDebuggingHandlers
+
enableDebuggingHandlers
bool
enableDebuggingHandlers enables server endpoints for log access + + enableDebuggingHandlers enables server endpoints for log access and local running of containers and commands, including the exec, attach, logs, and portforward features. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -316,40 +334,44 @@ Default: true
enableContentionProfiling
+
enableContentionProfiling
bool
enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. + + enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that enabling it may carry a performance impact. Default: false
healthzPort
+
healthzPort
int32
healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) + + healthzPort is the port of the localhost healthz endpoint (set to 0 to disable) Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that monitor Kubelet health. Default: 10248
healthzBindAddress
+
healthzBindAddress
string
healthzBindAddress is the IP address for the healthz server to serve on + + healthzBindAddress is the IP address for the healthz server to serve on Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that monitor Kubelet health. Default: "127.0.0.1"
oomScoreAdj
+
oomScoreAdj
int32
oomScoreAdj is The oom-score-adj value for kubelet process. Values + + oomScoreAdj is The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000]. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact the stability of nodes under memory pressure. @@ -357,10 +379,11 @@ Default: -999
clusterDomain
+
clusterDomain
string
clusterDomain is the DNS domain for this cluster. If set, kubelet will + + clusterDomain is the DNS domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains. Dynamic Kubelet Config (beta): Dynamically updating this field is not recommended, @@ -369,10 +392,11 @@ Default: ""
clusterDNS
+
clusterDNS
[]string
clusterDNS is a list of IP addresses for the cluster DNS server. If set, + + clusterDNS is a list of IP addresses for the cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution instead of the host's DNS servers. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -382,10 +406,11 @@ Default: nil
streamingConnectionIdleTimeout
+
streamingConnectionIdleTimeout
meta/v1.Duration
streamingConnectionIdleTimeout is the maximum time a streaming connection + + streamingConnectionIdleTimeout is the maximum time a streaming connection can be idle before the connection is automatically closed. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact components that rely on infrequent updates over streaming @@ -394,10 +419,11 @@ Default: "4h"
nodeStatusUpdateFrequency
+
nodeStatusUpdateFrequency
meta/v1.Duration
nodeStatusUpdateFrequency is the frequency that kubelet computes node + + nodeStatusUpdateFrequency is the frequency that kubelet computes node status. If node lease feature is not enabled, it is also the frequency that kubelet posts node status to master. Note: When node lease feature is not enabled, be cautious when changing the @@ -411,10 +437,11 @@ Default: "10s"
nodeStatusReportFrequency
+
nodeStatusReportFrequency
meta/v1.Duration
nodeStatusReportFrequency is the frequency that kubelet posts node + + nodeStatusReportFrequency is the frequency that kubelet posts node status to master if node status does not change. Kubelet will ignore this frequency and post node status immediately if any change is detected. It is only used when node lease feature is enabled. nodeStatusReportFrequency's @@ -425,10 +452,11 @@ Default: "1m"
nodeLeaseDurationSeconds
+
nodeLeaseDurationSeconds
int32
nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease, + + nodeLeaseDurationSeconds is the duration the Kubelet will set on its corresponding Lease, when the NodeLease feature is enabled. This feature provides an indicator of node health by having the Kubelet create and periodically renew a lease, named after the node, in the kube-node-lease namespace. If the lease expires, the node can be considered unhealthy. @@ -442,10 +470,11 @@ Default: 40
imageMinimumGCAge
+
imageMinimumGCAge
meta/v1.Duration
imageMinimumGCAge is the minimum age for an unused image before it is + + imageMinimumGCAge is the minimum age for an unused image before it is garbage collected. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may trigger or delay garbage collection, and may change the image overhead @@ -454,10 +483,11 @@ Default: "2m"
imageGCHighThresholdPercent
+
imageGCHighThresholdPercent
int32
imageGCHighThresholdPercent is the percent of disk usage after which + + imageGCHighThresholdPercent is the percent of disk usage after which image garbage collection is always run. The percent is calculated as this field value out of 100. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -467,10 +497,11 @@ Default: 85
imageGCLowThresholdPercent
+
imageGCLowThresholdPercent
int32
imageGCLowThresholdPercent is the percent of disk usage before which + + imageGCLowThresholdPercent is the percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. The percent is calculated as this field value out of 100. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -480,30 +511,33 @@ Default: 80
volumeStatsAggPeriod
+
volumeStatsAggPeriod
meta/v1.Duration
How frequently to calculate and cache volume disk usage for all pods + + How frequently to calculate and cache volume disk usage for all pods Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening the period may carry a performance impact. Default: "1m"
kubeletCgroups
+
kubeletCgroups
string
kubeletCgroups is the absolute name of cgroups to isolate the kubelet in + + kubeletCgroups is the absolute name of cgroups to isolate the kubelet in Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: ""
systemCgroups
+
systemCgroups
string
systemCgroups is absolute name of cgroups in which to place + + systemCgroups is absolute name of cgroups in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. Dynamic Kubelet Config (beta): This field should not be updated without a full node @@ -512,10 +546,11 @@ Default: ""
cgroupRoot
+
cgroupRoot
string
cgroupRoot is the root cgroup to use for pods. This is handled by the + + cgroupRoot is the root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. @@ -523,10 +558,11 @@ Default: ""
cgroupsPerQOS
+
cgroupsPerQOS
bool
Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes + + Enable QoS based Cgroup hierarchy: top level cgroups for QoS Classes And all Burstable and BestEffort pods are brought up under their specific top level QoS cgroup. Dynamic Kubelet Config (beta): This field should not be updated without a full node @@ -535,20 +571,22 @@ Default: true
cgroupDriver
+
cgroupDriver
string
driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) + + driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd) Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. Default: "cgroupfs"
cpuManagerPolicy
+
cpuManagerPolicy
string
CPUManagerPolicy is the name of the policy to use. + + CPUManagerPolicy is the name of the policy to use. Requires the CPUManager feature gate to be enabled. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. @@ -556,10 +594,11 @@ Default: "none"
cpuManagerReconcilePeriod
+
cpuManagerReconcilePeriod
meta/v1.Duration
CPU Manager reconciliation period. + + CPU Manager reconciliation period. Requires the CPUManager feature gate to be enabled. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that shortening the period may carry a performance impact. @@ -567,10 +606,11 @@ Default: "10s"
topologyManagerPolicy
+
topologyManagerPolicy
string
TopologyManagerPolicy is the name of the policy to use. + + TopologyManagerPolicy is the name of the policy to use. Policies other than "none" require the TopologyManager feature gate to be enabled. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. @@ -578,20 +618,22 @@ Default: "none"
topologyManagerScope
+
topologyManagerScope
string
TopologyManagerScope represents the scope of topology hint generation + + TopologyManagerScope represents the scope of topology hint generation that topology manager requests and hint providers generate. "pod" scope requires the TopologyManager feature gate to be enabled. Default: "container"
qosReserved
+
qosReserved
map[string]string
qosReserved is a set of resource name to percentage pairs that specify + + qosReserved is a set of resource name to percentage pairs that specify the minimum percentage of a resource reserved for exclusive use by the guaranteed QoS tier. Currently supported resources: "memory" @@ -602,10 +644,11 @@ Default: nil
runtimeRequestTimeout
+
runtimeRequestTimeout
meta/v1.Duration
runtimeRequestTimeout is the timeout for all runtime requests except long running + + runtimeRequestTimeout is the timeout for all runtime requests except long running requests - pull, logs, exec and attach. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may disrupt components that interact with the Kubelet server. @@ -613,10 +656,11 @@ Default: "2m"
hairpinMode
+
hairpinMode
string
hairpinMode specifies how the Kubelet should configure the container + + hairpinMode specifies how the Kubelet should configure the container bridge for hairpin packets. Setting this flag allows endpoints in a Service to loadbalance back to themselves if they should try to access their own Service. Values: @@ -631,10 +675,11 @@ Default: "promiscuous-bridge"
maxPods
+
maxPods
int32
maxPods is the number of pods that can run on this Kubelet. + + maxPods is the number of pods that can run on this Kubelet. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changes may cause Pods to fail admission on Kubelet restart, and may change the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting @@ -644,10 +689,11 @@ Default: 110
podCIDR
+
podCIDR
string
The CIDR to use for pod IP addresses, only used in standalone mode. + + The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master. Dynamic Kubelet Config (beta): This field should always be set to the empty default. It should only set for standalone Kubelets, which cannot use Dynamic Kubelet Config. @@ -655,20 +701,22 @@ Default: ""
podPidsLimit
+
podPidsLimit
int64
PodPidsLimit is the maximum number of pids in any pod. + + PodPidsLimit is the maximum number of pids in any pod. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that lowering it may prevent container processes from forking after the change. Default: -1
resolvConf
+
resolvConf
string
ResolverConfig is the resolver configuration file used as the basis + + ResolverConfig is the resolver configuration file used as the basis for the container DNS resolution configuration. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changes will only take effect on Pods created after the update. Draining @@ -677,19 +725,21 @@ Default: "/etc/resolv.conf"
runOnce
+
runOnce
bool
RunOnce causes the Kubelet to check the API server once for pods, + + RunOnce causes the Kubelet to check the API server once for pods, run those in addition to the pods specified by static pod files, and exit. Default: false
cpuCFSQuota
+
cpuCFSQuota
bool
cpuCFSQuota enables CPU CFS quota enforcement for containers that + + cpuCFSQuota enables CPU CFS quota enforcement for containers that specify CPU limits. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that disabling it may reduce node stability. @@ -697,10 +747,11 @@ Default: true
cpuCFSQuotaPeriod
+
cpuCFSQuotaPeriod
meta/v1.Duration
CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us. + + CPUCFSQuotaPeriod is the CPU CFS quota period value, cpu.cfs_period_us. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that limits set for containers will result in different cpu.cfs_quota settings. This will trigger container restarts on the node being reconfigured. @@ -708,10 +759,11 @@ Default: "100ms"
nodeStatusMaxImages
+
nodeStatusMaxImages
int32
nodeStatusMaxImages caps the number of images reported in Node.Status.Images. + + nodeStatusMaxImages caps the number of images reported in Node.Status.Images. Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that different values can be reported on node status. @@ -719,20 +771,22 @@ Default: 50
maxOpenFiles
+
maxOpenFiles
int64
maxOpenFiles is Number of files that can be opened by Kubelet process. + + maxOpenFiles is Number of files that can be opened by Kubelet process. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact the ability of the Kubelet to interact with the node's filesystem. Default: 1000000
contentType
+
contentType
string
contentType is contentType of requests sent to apiserver. + + contentType is contentType of requests sent to apiserver. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact the ability for the Kubelet to communicate with the API server. If the Kubelet loses contact with the API server due to a change to this field, @@ -741,10 +795,11 @@ Default: "application/vnd.kubernetes.protobuf"
kubeAPIQPS
+
kubeAPIQPS
int32
kubeAPIQPS is the QPS to use while talking with kubernetes apiserver + + kubeAPIQPS is the QPS to use while talking with kubernetes apiserver Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact scalability by changing the amount of traffic the Kubelet sends to the API server. @@ -752,10 +807,11 @@ Default: 5
kubeAPIBurst
+
kubeAPIBurst
int32
kubeAPIBurst is the burst to allow while talking with kubernetes apiserver + + kubeAPIBurst is the burst to allow while talking with kubernetes apiserver Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact scalability by changing the amount of traffic the Kubelet sends to the API server. @@ -763,10 +819,11 @@ Default: 10
serializeImagePulls
+
serializeImagePulls
bool
serializeImagePulls when enabled, tells the Kubelet to pull images one + + serializeImagePulls when enabled, tells the Kubelet to pull images one at a time. We recommend ∗not∗ changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. @@ -776,10 +833,11 @@ Default: true
evictionHard
+
evictionHard
map[string]string
Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. + + Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}. To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may trigger or delay Pod evictions. @@ -791,10 +849,11 @@ Default:
evictionSoft
+
evictionSoft
map[string]string
Map of signal names to quantities that defines soft eviction thresholds. + + Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may trigger or delay Pod evictions, and may change the allocatable reported @@ -803,10 +862,11 @@ Default: nil
evictionSoftGracePeriod
+
evictionSoftGracePeriod
map[string]string
Map of signal names to quantities that defines grace periods for each soft eviction signal. + + Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may trigger or delay Pod evictions. @@ -814,20 +874,22 @@ Default: nil
evictionPressureTransitionPeriod
+
evictionPressureTransitionPeriod
meta/v1.Duration
Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. + + Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that lowering it may decrease the stability of the node when the node is overcommitted. Default: "5m"
evictionMaxPodGracePeriod
+
evictionMaxPodGracePeriod
int32
Maximum allowed grace period (in seconds) to use when terminating pods in + + Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. This value effectively caps the Pod's TerminationGracePeriodSeconds value during soft evictions. Note: Due to issue #64530, the behavior has a bug where this value currently just @@ -840,10 +902,11 @@ Default: 0
evictionMinimumReclaim
+
evictionMinimumReclaim
map[string]string
Map of signal names to quantities that defines minimum reclaims, which describe the minimum + + Map of signal names to quantities that defines minimum reclaims, which describe the minimum amount of a given resource the kubelet will reclaim when performing a pod eviction while that resource is under pressure. For example: {"imagefs.available": "2Gi"} Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -852,10 +915,11 @@ Default: nil
podsPerCore
+
podsPerCore
int32
podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. + + podsPerCore is the maximum number of pods per core. Cannot exceed MaxPods. If 0, this field is ignored. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changes may cause Pods to fail admission on Kubelet restart, and may change @@ -866,10 +930,11 @@ Default: 0
enableControllerAttachDetach
+
enableControllerAttachDetach
bool
enableControllerAttachDetach enables the Attach/Detach controller to + + enableControllerAttachDetach enables the Attach/Detach controller to manage attachment/detachment of volumes scheduled to this node, and disables kubelet from executing any attach/detach operations Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -882,10 +947,11 @@ Default: true
protectKernelDefaults
+
protectKernelDefaults
bool
protectKernelDefaults, if true, causes the Kubelet to error if kernel + + protectKernelDefaults, if true, causes the Kubelet to error if kernel flags are not as it expects. Otherwise the Kubelet will attempt to modify kernel flags to match its expectation. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -895,10 +961,11 @@ Default: false
makeIPTablesUtilChains
+
makeIPTablesUtilChains
bool
If true, Kubelet ensures a set of iptables rules are present on host. + + If true, Kubelet ensures a set of iptables rules are present on host. These rules will serve as utility rules for various components, e.g. KubeProxy. The rules will be created based on IPTablesMasqueradeBit and IPTablesDropBit. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that @@ -907,10 +974,11 @@ Default: true
iptablesMasqueradeBit
+
iptablesMasqueradeBit
int32
iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT + + iptablesMasqueradeBit is the bit of the iptables fwmark space to mark for SNAT Values must be within the range [0, 31]. Must be different from other mark bits. Warning: Please match the value of the corresponding parameter in kube-proxy. TODO: clean up IPTablesMasqueradeBit in kube-proxy @@ -921,10 +989,11 @@ Default: 14
iptablesDropBit
+
iptablesDropBit
int32
iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. + + iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets. Values must be within the range [0, 31]. Must be different from other mark bits. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it needs to be coordinated with other components, like kube-proxy, and the update @@ -933,10 +1002,11 @@ Default: 15
featureGates
+
featureGates
map[string]bool
featureGates is a map of feature names to bools that enable or disable alpha/experimental + + featureGates is a map of feature names to bools that enable or disable alpha/experimental features. This field modifies piecemeal the built-in default values from "k8s.io/kubernetes/pkg/features/kube_features.go". Dynamic Kubelet Config (beta): If dynamically updating this field, consider the @@ -948,20 +1018,22 @@ Default: nil
failSwapOn
+
failSwapOn
bool
failSwapOn tells the Kubelet to fail to start if swap is enabled on the node. + + failSwapOn tells the Kubelet to fail to start if swap is enabled on the node. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that setting it to true will cause the Kubelet to crash-loop if swap is enabled. Default: true
containerLogMaxSize
+
containerLogMaxSize
string
A quantity defines the maximum size of the container log file before it is rotated. + + A quantity defines the maximum size of the container log file before it is rotated. For example: "5Mi" or "256Ki". Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may trigger log rotation. @@ -969,29 +1041,32 @@ Default: "10Mi"
containerLogMaxFiles
+
containerLogMaxFiles
int32
Maximum number of container log files that can be present for a container. + + Maximum number of container log files that can be present for a container. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that lowering it may cause log files to be deleted. Default: 5
configMapAndSecretChangeDetectionStrategy
+
configMapAndSecretChangeDetectionStrategy
ResourceChangeDetectionStrategy
ConfigMapAndSecretChangeDetectionStrategy is a mode in which + + ConfigMapAndSecretChangeDetectionStrategy is a mode in which config map and secret managers are running. Default: "Watch"
systemReserved
+
systemReserved
map[string]string
systemReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) + + systemReserved is a set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. @@ -1003,10 +1078,11 @@ Default: nil
kubeReserved
+
kubeReserved
map[string]string
A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs + + A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently cpu, memory and local storage for root file system are supported. See http://kubernetes.io/docs/user-guide/compute-resources for more detail. @@ -1018,19 +1094,21 @@ Default: nil
reservedSystemCPUs*
+
reservedSystemCPUs [Required]
string
This ReservedSystemCPUs option specifies the cpu list reserved for the host level system threads and kubernetes related threads. + + This ReservedSystemCPUs option specifies the cpu list reserved for the host level system threads and kubernetes related threads. This provide a "static" CPU list rather than the "dynamic" list by system-reserved and kube-reserved. This option overwrites CPUs provided by system-reserved and kube-reserved.
showHiddenMetricsForVersion
+
showHiddenMetricsForVersion
string
The previous version for which you want to show hidden metrics. + + The previous version for which you want to show hidden metrics. Only the previous minor version is meaningful, other values will not be allowed. The format is ., e.g.: '1.16'. The purpose of this format is make sure you have the opportunity to notice if the next release hides additional metrics, @@ -1039,10 +1117,11 @@ Default: ""
systemReservedCgroup
+
systemReservedCgroup
string
This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. + + This flag helps kubelet identify absolute name of top level cgroup used to enforce `SystemReserved` compute resource reservation for OS system daemons. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. @@ -1050,10 +1129,11 @@ Default: ""
kubeReservedCgroup
+
kubeReservedCgroup
string
This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. + + This flag helps kubelet identify absolute name of top level cgroup used to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. Dynamic Kubelet Config (beta): This field should not be updated without a full node reboot. It is safest to keep this value the same as the local config. @@ -1061,10 +1141,11 @@ Default: ""
enforceNodeAllocatable
+
enforceNodeAllocatable
[]string
This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. + + This flag specifies the various Node Allocatable enforcements that Kubelet needs to perform. This flag accepts a list of options. Acceptable options are `none`, `pods`, `system-reserved` & `kube-reserved`. If `none` is specified, no other options may be specified. Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md) doc for more information. @@ -1078,20 +1159,22 @@ Default: ["pods"]
allowedUnsafeSysctls
+
allowedUnsafeSysctls
[]string
A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in ∗). + + A comma separated whitelist of unsafe sysctls or sysctl patterns (ending in ∗). Unsafe sysctl groups are kernel.shm∗, kernel.msg∗, kernel.sem, fs.mqueue.∗, and net.∗. These sysctls are namespaced but not allowed by default. For example: "kernel.msg∗,net.ipv4.route.min_pmtu" Default: []
volumePluginDir
+
volumePluginDir
string
volumePluginDir is the full path of the directory in which to search + + volumePluginDir is the full path of the directory in which to search for additional third party volume plugins. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that changing the volumePluginDir may disrupt workloads relying on third party volume plugins. @@ -1099,10 +1182,11 @@ Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
providerID
+
providerID
string
providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider) + + providerID, if set, sets the unique id of the instance that an external provider (i.e. cloudprovider) can use to identify a specific node. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact the ability of the Kubelet to interact with cloud providers. @@ -1110,10 +1194,11 @@ Default: ""
kernelMemcgNotification
+
kernelMemcgNotification
bool
kernelMemcgNotification, if set, the kubelet will integrate with the kernel memcg notification + + kernelMemcgNotification, if set, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling. Dynamic Kubelet Config (beta): If dynamically updating this field, consider that it may impact the way Kubelet interacts with the kernel. @@ -1121,36 +1206,40 @@ Default: false
logging*
+
logging [Required]
LoggingConfiguration
Logging specifies the options of logging. + + Logging specifies the options of logging. Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. Defaults: Format: text
enableSystemLogHandler
+
enableSystemLogHandler
bool
enableSystemLogHandler enables system logs via web interface host:port/logs/ + + enableSystemLogHandler enables system logs via web interface host:port/logs/ Default: true
shutdownGracePeriod
+
shutdownGracePeriod
meta/v1.Duration
ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown. + + ShutdownGracePeriod specifies the total duration that the node should delay the shutdown and total grace period for pod termination during a node shutdown. Default: "30s"
shutdownGracePeriodCriticalPods
+
shutdownGracePeriodCriticalPods
meta/v1.Duration
ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod. + + ShutdownGracePeriodCriticalPods specifies the duration used to terminate critical pods during a node shutdown. This should be less than ShutdownGracePeriod. For example, if ShutdownGracePeriod=30s, and ShutdownGracePeriodCriticalPods=10s, during a node shutdown the first 20 seconds would be reserved for gracefully terminating normal pods, and the last 10 seconds would be reserved for terminating critical pods. Default: "10s"
+
- - + + - - + @@ -1193,7 +1283,7 @@ It exists in the kubeletconfig API group because it is classified as a versioned -### `HairpinMode` {#kubelet-config-k8s-io-v1beta1-HairpinMode} +## `HairpinMode` {#kubelet-config-k8s-io-v1beta1-HairpinMode} (Alias of `string`) @@ -1206,7 +1296,7 @@ hairpin packets. -### `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} +## `KubeletAnonymousAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAnonymousAuthentication} @@ -1218,16 +1308,17 @@ hairpin packets. -
FieldDescription
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
SerializedNodeConfigSource
apiVersion
string
kubelet.config.k8s.io/v1beta1
kind
string
SerializedNodeConfigSource
source
-core/v1.NodeConfigSource +
source
+core/v1.NodeConfigSource
Source is the source that we are serializing + Source is the source that we are serializing
+
- - @@ -1238,7 +1329,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst -### `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} +## `KubeletAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletAuthentication} @@ -1250,30 +1341,33 @@ Anonymous requests have a username of system:anonymous, and a group name of syst -
FieldDescription
enabled
+
enabled
bool
enabled allows anonymous requests to the kubelet server. + + enabled allows anonymous requests to the kubelet server. Requests that are not rejected by another authentication method are treated as anonymous requests. Anonymous requests have a username of system:anonymous, and a group name of system:unauthenticated.
+
- - + - - + - - + @@ -1282,7 +1376,7 @@ Anonymous requests have a username of system:anonymous, and a group name of syst -### `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} +## `KubeletAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorization} @@ -1294,25 +1388,27 @@ Anonymous requests have a username of system:anonymous, and a group name of syst -
FieldDescription
x509
+
x509
KubeletX509Authentication
x509 contains settings related to x509 client certificate authentication + x509 contains settings related to x509 client certificate authentication
webhook
+
webhook
KubeletWebhookAuthentication
webhook contains settings related to webhook bearer token authentication + webhook contains settings related to webhook bearer token authentication
anonymous
+
anonymous
KubeletAnonymousAuthentication
anonymous contains settings related to anonymous authentication + anonymous contains settings related to anonymous authentication
+
- - - - + @@ -1321,7 +1417,7 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -### `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} +## `KubeletAuthorizationMode` {#kubelet-config-k8s-io-v1beta1-KubeletAuthorizationMode} (Alias of `string`) @@ -1337,7 +1433,7 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -### `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} +## `KubeletWebhookAuthentication` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthentication} @@ -1349,23 +1445,25 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -
FieldDescription
mode
+
mode
KubeletAuthorizationMode
mode is the authorization mode to apply to requests to the kubelet server. + + mode is the authorization mode to apply to requests to the kubelet server. Valid values are AlwaysAllow and Webhook. Webhook mode uses the SubjectAccessReview API to determine authorization.
webhook
+
webhook
KubeletWebhookAuthorization
webhook contains settings related to Webhook authorization. + webhook contains settings related to Webhook authorization.
+
- - + - - + @@ -1374,7 +1472,7 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -### `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} +## `KubeletWebhookAuthorization` {#kubelet-config-k8s-io-v1beta1-KubeletWebhookAuthorization} @@ -1386,23 +1484,25 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -
FieldDescription
enabled
+
enabled
bool
enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API + enabled allows bearer token authentication backed by the tokenreviews.authentication.k8s.io API
cacheTTL
+
cacheTTL
meta/v1.Duration
cacheTTL enables caching of authentication results + cacheTTL enables caching of authentication results
+
- - + - - + @@ -1411,7 +1511,7 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -### `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} +## `KubeletX509Authentication` {#kubelet-config-k8s-io-v1beta1-KubeletX509Authentication} @@ -1423,16 +1523,17 @@ Webhook mode uses the SubjectAccessReview API to determine authorization. -
FieldDescription
cacheAuthorizedTTL
+
cacheAuthorizedTTL
meta/v1.Duration
cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer. + cacheAuthorizedTTL is the duration to cache 'authorized' responses from the webhook authorizer.
cacheUnauthorizedTTL
+
cacheUnauthorizedTTL
meta/v1.Duration
cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer. + cacheUnauthorizedTTL is the duration to cache 'unauthorized' responses from the webhook authorizer.
+
- - @@ -1443,7 +1544,7 @@ and groups corresponding to the Organization in the client certificate. -### `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} +## `ResourceChangeDetectionStrategy` {#kubelet-config-k8s-io-v1beta1-ResourceChangeDetectionStrategy} (Alias of `string`) @@ -1462,7 +1563,7 @@ managers (secret, configmap) are discovering object changes. -### `LoggingConfiguration` {#LoggingConfiguration} +## `LoggingConfiguration` {#LoggingConfiguration} @@ -1475,24 +1576,26 @@ managers (secret, configmap) are discovering object changes. LoggingConfiguration contains logging options Refer [Logs Options](https://github.com/kubernetes/component-base/blob/master/logs/options.go) for more information. -
FieldDescription
clientCAFile
+
clientCAFile
string
clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate + + clientCAFile is the path to a PEM-encoded certificate bundle. If set, any request presenting a client certificate signed by one of the authorities in the bundle is authenticated with a username corresponding to the CommonName, and groups corresponding to the Organization in the client certificate.
+
- - - - diff --git a/content/zh/docs/reference/kubectl/docker-cli-to-kubectl.md b/content/zh/docs/reference/kubectl/docker-cli-to-kubectl.md index b53e6fcd77a19..74473b4b1cea0 100644 --- a/content/zh/docs/reference/kubectl/docker-cli-to-kubectl.md +++ b/content/zh/docs/reference/kubectl/docker-cli-to-kubectl.md @@ -371,7 +371,7 @@ nginx-app 1 1 1 1 2m ``` ```shell -kubectl get po -l run=nginx-app +kubectl get po -l app=nginx-app ``` ``` NAME READY STATUS RESTARTS AGE @@ -385,7 +385,7 @@ deployment "nginx-app" deleted ``` ```shell -kubectl get po -l run=nginx-app +kubectl get po -l app=nginx-app # Return nothing ``` diff --git a/content/zh/docs/reference/setup-tools/_index.md b/content/zh/docs/reference/setup-tools/_index.md index 9cd9d94f0deba..08b4c38e46949 100644 --- a/content/zh/docs/reference/setup-tools/_index.md +++ b/content/zh/docs/reference/setup-tools/_index.md @@ -2,3 +2,8 @@ title: 安装工具 weight: 50 --- + + diff --git a/content/zh/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/zh/docs/reference/setup-tools/kubeadm/implementation-details.md index ee17bb699ad9b..3e3fe80b895a1 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -46,7 +46,7 @@ with the aim of sharing knowledge on Kubernetes cluster best practices. - lock-down the kubelet API - locking down access to the API for system components like the kube-proxy and CoreDNS - locking down what a Bootstrap Token can access - - **Easy to use**: The user should not have to run anything more than a couple of commands: + - **User-friendly**: The user should not have to run anything more than a couple of commands: - `kubeadm init` - `export KUBECONFIG=/etc/kubernetes/admin.conf` - `kubectl apply -f ` @@ -63,7 +63,7 @@ with the aim of sharing knowledge on Kubernetes cluster best practices. - 锁定 kubelet API - 锁定对系统组件(例如 kube-proxy 和 CoreDNS)的 API 的访问 - 锁定启动引导令牌(Bootstrap Token)可以访问的内容 -- **易用的**:用户只需要运行几个命令即可: +- **用户友好**:用户只需要运行几个命令即可: - `kubeadm init` - `export KUBECONFIG=/etc/kubernetes/admin.conf` - `kubectl apply -f <所选网络.yaml>` @@ -558,7 +558,7 @@ API 服务器的静态 Pod 清单会受到用户提供的以下参数的影响: - `--requestheader-client-ca-file` to`front-proxy-ca.crt` - `--proxy-client-cert-file` to `front-proxy-client.crt` - `--proxy-client-key-file` to `front-proxy-client.key` - - Other flags for securing the front proxy ([API Aggregation](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/aggregated-api-servers.md)) communications: + - Other flags for securing the front proxy ([API Aggregation](/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)) communications: - `--requestheader-username-headers=X-Remote-User` - `--requestheader-group-headers=X-Remote-Group` - `--requestheader-extra-headers-prefix=X-Remote-Extra-` @@ -580,7 +580,7 @@ API 服务器的静态 Pod 清单会受到用户提供的以下参数的影响: - `--proxy-client-key-file` 设为 `front-proxy-client.key` - 其他用于保护前端代理( - [API 聚合层](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/aggregated-api-servers.md)) + [API 聚合层](/zh/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/)) 通信的标志: - `--requestheader-username-headers=X-Remote-User` @@ -697,7 +697,7 @@ into `/var/lib/kubelet/config/init/kubelet` file. 
初始化配置用于在这个特定节点上启动 kubelet,从而为 kubelet 插件文件提供了 一种替代方法。如以下步骤中所述,这种配置将由 kubelet 基本配置所替代。 @@ -710,17 +710,24 @@ See [set Kubelet parameters via a config file](/docs/tasks/administer-cluster/ku 1. 要使动态 kubelet 配置生效,应在 `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` 中指定 `--dynamic-config-dir=/var/lib/kubelet/config/dynamic` 标志。 -2. 通过使用配置文件 `--config some-file.yaml` 将 `KubeletConfiguration` 对象传递给 +1. 通过使用配置文件 `--config some-file.yaml` 将 `KubeletConfiguration` 对象传递给 `kubeadm init` 或 `kubeadm join` 来更改 kubelet 配置。 可以使用 `---` 分隔符将 `KubeletConfiguration` 对象与其他对象(例如 `InitConfiguration`) 分开。更多的详细信息,请查看 `kubeadm config print-default` 命令。 + +有关 `KubeletConfiguration` 结构的详细信息,可参阅 +[`KubeletConfiguration` 参考文档](/docs/reference/config-api/kubelet-config.v1beta1/)。 + @@ -748,7 +755,7 @@ kubeadm 依靠 kubelet 拉取控制平面镜像并将其作为静态 Pod 正确 --> ### (可选)编写基本 kubelet 配置 {#write-base-kubelet-configuration} -{{< feature-state for_k8s_version="v1.9" state="alpha" >}} +{{< feature-state for_k8s_version="v1.11" state="beta" >}} -- 在 Kubernetes 1.18 版本中,通过 kubeadm 部署 kube-dns 这一操作已经弃用, - 将在未来的版本中删除。 - CoreDNS 服务的名称为 `kube-dns`。这样做是为了防止当用户将集群 DNS 从 kube-dns - 切换到 CoreDNS 或者反过来时,出现服务中断。`--config` 方法在 + 切换到 CoreDNS 时出现服务中断。`--config` 方法在 [这里](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon) 有描述。 -- 在 `kube-system` 名字空间中创建 CoreDNS/kube-dns 的 ServiceAccount -- `kube-dns` 的 ServiceAccount 绑定了 `system:kube-dns` ClusterRole 中的特权 +- 在 `kube-system` 名字空间中创建 CoreDNS 的 ServiceAccount +- `coredns` 的 ServiceAccount 绑定了 `system:coredns` ClusterRole 中的特权 + + +在 Kubernetes 1.21 版本中,kubeadm 对 `kube-dns` 的支持被移除。 +你可以在 kubeadm 使用 CoreDNS,即使相关的 Service 名字仍然是 `kube-dns`。 -知道集群信息后,将写入文件 `bootstrap-kubelet.conf`,从而允许 kubelet 执行 -TLS 引导(相反,在 v1.7 之前 TLS 引导都是由 kubeadm 管理)。 +知道集群信息后,kubeadm 将写入文件 `bootstrap-kubelet.conf`,从而允许 kubelet 执行 +TLS 引导。 -TLS 引导机制使用共享令牌对 Kubernetes 主控节点进行临时身份验证,以便 +TLS 引导机制使用共享令牌对 Kubernetes API 服务器进行临时身份验证,以便 为本地创建的密钥对提交证书签名请求(CSR)。 @@ -1222,9 +1232,9 @@ kubelet 加入集群,同时删除 `bootstrap-kubelet.conf`。 -### (可选)编写 init kubelet 配置 {#write-init-kubelet-configuration} +### (可选)写入初始的 kubelet 配置 {#write-init-kubelet-configuration} -{{< feature-state for_k8s_version="v1.9" state="alpha" >}} +{{< feature-state for_k8s_version="v1.11" state="beta" >}} -1. 使用引导令牌凭证从 `kube-system` 名字空间中 ConfigMap `kubelet-base-config-v1.9` +1. 使用引导令牌凭证从 `kube-system` 名字空间中 ConfigMap `kubelet-base-config-v1.x` 中读取 kubelet 基本配置, - 并将其作为 kubelet init 配置文件 `/var/lib/kubelet/config/init/kubelet` 写入磁盘。 + 并将其作为 kubelet 初始配置文件 `/var/lib/kubelet/config/init/kubelet` 写入磁盘。 2. 一旦 kubelet 开始使用节点自己的凭据(`/etc/kubernetes/kubelet.conf`), 就更新当前节点配置,指定该节点或 kubelet 配置来自上述 ConfigMap。 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index 82d4f88024ff4..3276fd9acc273 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -18,47 +18,10 @@ weight: 90 请试用这些功能并给我们提供反馈! 
{{< /caution >}} -## kubeadm alpha kubeconfig user {#cmd-phase-kubeconfig} - - -使用子命令 `user` 为其他用户创建 kubeconfig 文件。 - -{{< tabs name="tab-kubeconfig" >}} -{{< tab name="kubeconfig" include="generated/kubeadm_alpha_kubeconfig.md" />}} -{{< tab name="user" include="generated/kubeadm_alpha_kubeconfig_user.md" />}} -{{< /tabs >}} - -## kubeadm alpha kubelet config {#cmd-phase-kubelet} - -使用以下命令启用 DynamicKubeletConfiguration 功能。 - -{{< tabs name="tab-kubelet" >}} -{{< tab name="kubelet" include="generated/kubeadm_alpha_kubelet.md" />}} -{{< tab name="enable-dynamic" include="generated/kubeadm_alpha_kubelet_config_enable-dynamic.md" />}} -{{< /tabs >}} - -## kubeadm alpha selfhosting pivot {#cmd-selfhosting} - - -子命令 `pivot` 可用于将 Pod 托管的静态控制平面转换为自托管的控制平面。 -有关 `pivot` 更多信息,请参见 -[文档](/zh/docs/setup/production-environment/tools/kubeadm/self-hosting/)。 - - - -{{< tabs name="selfhosting" >}} -{{< tab name="selfhosting" include="generated/kubeadm_alpha_selfhosting.md" />}} -{{< tab name="pivot" include="generated/kubeadm_alpha_selfhosting_pivot.md" />}} -{{< /tabs >}} +目前在 `kubeadm alpha` 之下没有试验性质的命令。 ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md index 69cbc59b4c304..86664ff8d64b5 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase.md @@ -4,11 +4,9 @@ weight: 90 content_type: concept --- -`kubeadm init phase` 能确保调用引导过程的原子步骤。因此,如果希望自定义应用,则可以让 kubeadm 做一些工作,然后填补空白。 - +`kubeadm init phase` 能确保调用引导过程的原子步骤。 +因此,如果希望自定义应用,则可以让 kubeadm 做一些工作,然后填补空白。 -`kubeadm init phase` 与 [kubeadm init 工作流](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-workflow)一致,后台都使用相同的代码。 +`kubeadm init phase` 与 [kubeadm init 工作流](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-workflow) +一致,后台都使用相同的代码。 -可以使用此命令将 kubeadm 配置文件上传到集群。或者使用 [kubeadm config](/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/)。 +可以使用此命令将 kubeadm 配置文件上传到集群。或者使用 +[kubeadm config](/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/)。 {{< tabs name="upload-config" >}} {{< tab name="upload-config" include="generated/kubeadm_init_phase_upload-config.md" />}} @@ -177,7 +177,8 @@ By default the certs and encryption key expire after two hours. -使用以下阶段来给具有 `node-role.kubernetes.io/master=""` 键值对的节点打标签(label)和记录污点(taint)。 +使用以下阶段来给具有 `node-role.kubernetes.io/master=""` 键值对的节点 +打标签(label)和记录污点(taint)。 {{< tabs name="tab-mark-control-plane" >}} {{< tab name="mark-control-plane" include="generated/kubeadm_init_phase_mark-control-plane.md" />}} @@ -232,50 +233,12 @@ install them selectively. 
{{< tab name="kube-proxy" include="generated/kubeadm_init_phase_addon_kube-proxy.md" />}} {{< /tabs >}} - -要使用 kube-dns 代替 CoreDNS,必须传递一个配置文件: - - - -```bash -# 仅用于安装 DNS 插件 -kubeadm init phase addon coredns --config=someconfig.yaml -# 用于创建完整的控制平面节点 -kubeadm init --config=someconfig.yaml -# 用于列出或者拉取镜像 -kubeadm config images list/pull --config=someconfig.yaml -# 升级 -kubeadm upgrade apply --config=someconfig.yaml -``` - - -该文件必须在 [`ClusterConfiguration`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#ClusterConfiguration) 中包含一个 [`DNS`](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2#DNS) 字段,以及包含一个插件的类型 - `kube-dns`(默认值为 `CoreDNS`)。 - -```yaml -apiVersion: kubeadm.k8s.io/v1beta2 -kind: ClusterConfiguration -dns: - type: "kube-dns" -``` - -有关 `v1beta2` 配置中每个字段的更多详细信息,可以访问 [API](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)。 +有关 `v1beta2` 配置中每个字段的更多详细信息,可以访问 +[API](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2)。 ## {{% heading "whatsnext" %}} @@ -285,7 +248,11 @@ For more details on each field in the `v1beta2` configuration you can navigate t * [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` * [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) to try experimental functionality --> -* [kubeadm init](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/) 引导 Kubernetes 控制平面节点 -* [kubeadm join](/zh/docs/reference/setup-tools/kubeadm/kubeadm-join/) 将节点连接到集群 -* [kubeadm reset](/zh/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 恢复通过 `kubeadm init` 或 `kubeadm join` 操作对主机所做的任何更改 -* [kubeadm alpha](/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) 尝试实验性功能 +* [kubeadm init](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/) + 引导 Kubernetes 控制平面节点 +* [kubeadm join](/zh/docs/reference/setup-tools/kubeadm/kubeadm-join/) + 将节点加入到集群 +* [kubeadm reset](/zh/docs/reference/setup-tools/kubeadm/kubeadm-reset/) + 恢复通过 `kubeadm init` 或 `kubeadm join` 操作对主机所做的任何更改 +* [kubeadm alpha](/zh/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) + 尝试实验性功能 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md index f7e4bb9bf140d..2c70fe93e9445 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-init.md @@ -129,22 +129,18 @@ following steps: 8. 
通过 API 服务器安装一个 DNS 服务器 (CoreDNS) 和 kube-proxy 附加组件。 在 Kubernetes 版本 1.11 和更高版本中,CoreDNS 是默认的 DNS 服务器。 - 要安装 kube-dns 而不是 CoreDNS,必须在 kubeadm `ClusterConfiguration` 中配置 DNS 插件。 - 有关配置的更多信息,请参见下面的"带配置文件使用 kubeadm init" 一节。 请注意,尽管已部署 DNS 服务器,但直到安装 CNI 时才调度它。 {{< warning >}} - 从 v1.18 开始,在 kubeadm 中使用 kube-dns 已废弃,并将在以后的版本中将其删除。 + 从 v1.18 开始,在 kubeadm 中使用 kube-dns 的支持已被废弃,并已在 v1.21 版本中删除。 {{< /warning >}} 可以使用 [kubeadm config print](/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/) 命令打印出默认配置。 @@ -255,7 +251,7 @@ page and pick a version from [the list](https://godoc.org/k8s.io/kubernetes/cmd/ 有关配置的字段和用法的更多信息, 你可以访问 API 参考页面并从 -[列表](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#pkg-subdirectories) +[列表](https://pkg.go.dev/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm#section-directories) 中选择一个版本。 kubeadm 配置中有关 kube-proxy 的说明请查看: -- [kube-proxy](https://godoc.org/k8s.io/kubernetes/pkg/proxy/apis/config#KubeProxyConfiguration) + +- [kube-proxy 参考](/zh/docs/reference/config-api/kube-proxy-config.v1alpha1/) 使用 kubeadm 启用 IPVS 模式的说明请查看: + - [IPVS](https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md) -1. 一旦知道集群信息,kubelet 就可以开始 TLS 引导过程。 +2. 一旦知道集群信息,kubelet 就可以开始 TLS 引导过程。 TLS 引导程序使用共享令牌与 Kubernetes API 服务器进行临时的身份验证,以提交证书签名请求 (CSR); 默认情况下,控制平面自动对该 CSR 请求进行签名。 @@ -56,7 +56,7 @@ This action consists of the following steps: 1. Finally, kubeadm configures the local kubelet to connect to the API server with the definitive identity assigned to the node. --> -1. 最后,kubeadm 配置本地 kubelet 使用分配给节点的确定标识连接到 API 服务器。 +3. 最后,kubeadm 配置本地 kubelet 使用分配给节点的确定标识连接到 API 服务器。 Kubeadm 的发现有几个选项,每个选项都有安全性上的优缺点。 -适合你的环境的正确方法取决于节点是如何准备的以及你对网络的安全性期望和节点的生命周期特点。 +适合你的环境的正确方法取决于节点是如何准备的以及你对网络的安全性期望 +和节点的生命周期特点。 **`kubeadm join` 命令示例** - + 对于工作节点: ```shell kubeadm join --discovery-token abcdef.1234567890abcdef --discovery-token-ca-cert-hash sha256:1234..cdef 1.2.3.4:6443 ``` - + 对于控制面节点: ```shell @@ -200,26 +205,28 @@ if the `kubeadm init` command was called with `--upload-certs`. master even if other worker nodes or the network are compromised. - Convenient to execute manually since all of the information required fits - into a single `kubeadm join` command that is easy to copy and paste. + into a single `kubeadm join` command. 
--> **优势:** - - 允许引导节点安全地发现主节点的信任根,即使其他工作节点或网络受到损害。 - - 方便手动执行,因为所需的所有信息都适合于易于复制和粘贴的单个 `kubeadm join` 命令。 +- 允许引导节点安全地发现主节点的信任根,即使其他工作节点或网络受到损害。 + +- 方便手动执行,因为所需的所有信息都可放到一个 `kubeadm join` 命令中。 **劣势:** - - CA 哈希通常在主节点被提供之前是不知道的,这使得构建使用 kubeadm 的自动化配置工具更加困难。 - 通过预先生成CA,你可以解除这个限制。 + +- CA 哈希通常在主节点被提供之前是不知道的,这使得构建使用 kubeadm 的自动化配置工具更加困难。 + 通过预先生成CA,你可以解除这个限制。 _这是 Kubernetes 1.7 和早期版本_中的默认设置;使用时要注意一些重要的补充说明。 此模式仅依赖于对称令牌来签名(HMAC-SHA256)发现信息,这些发现信息为主节点建立信任根。 -在 Kubernetes 1.8 及以上版本中仍然可以使用 `--discovery-token-unsafe-skip-ca-verification` 参数,但是如果可能的话,你应该考虑使用一种其他模式。 +在 Kubernetes 1.8 及以上版本中仍然可以使用 `--discovery-token-unsafe-skip-ca-verification` +参数,但是如果可能的话,你应该考虑使用一种其他模式。 **`kubeadm join` 命令示例** @@ -249,33 +257,34 @@ kubeadm join --token abcdef.1234567890abcdef --discovery-token-unsafe-skip-ca-ve **优势** - - 仍然可以防止许多网络级攻击。 +- 仍然可以防止许多网络级攻击。 - - 可以提前生成令牌并与主节点和工作节点共享,这样主节点和工作节点就可以并行引导而无需协调。 - 这允许它在许多配置场景中使用。 +- 可以提前生成令牌并与主节点和工作节点共享,这样主节点和工作节点就可以并行引导而无需协调。 + 这允许它在许多配置场景中使用。 **劣势** - - 如果攻击者能够通过某些漏洞窃取引导令牌,那么他们可以使用该令牌(连同网络级访问)为其它处于引导过程中的节点提供假冒的主节点。 - 在你的环境中,这可能是一个适当的折衷方法,也可能不是。 +- 如果攻击者能够通过某些漏洞窃取引导令牌,那么他们可以使用该令牌(连同网络级访问) + 为其它处于引导过程中的节点提供假冒的主节点。 + 在你的环境中,这可能是一个适当的折衷方法,也可能不是。 这种方案提供了一种带外方式在主节点和引导节点之间建立信任根。 如果使用 kubeadm 构建自动配置,请考虑使用此模式。 -发现文件的格式为常规的 Kubernetes [kubeconfig](/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 文件。 +发现文件的格式为常规的 Kubernetes +[kubeconfig](/zh/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 文件。 如果发现文件不包含凭据,则将使用 TLS 发现令牌。 @@ -300,35 +310,36 @@ In case the discovery file does not contain credentials, the TLS discovery token **Example `kubeadm join` commands:** --> **`kubeadm join` 命令示例:** - - `kubeadm join --discovery-file path/to/file.conf` (本地文件) - - `kubeadm join --discovery-file https://url/file.conf` (远程 HTTPS URL) +- `kubeadm join --discovery-file path/to/file.conf` (本地文件) + +- `kubeadm join --discovery-file https://url/file.conf` (远程 HTTPS URL) **优势:** - - 允许引导节点安全地发现主节点的信任根,即使网络或其他工作节点受到损害。 +- 允许引导节点安全地发现主节点的信任根,即使网络或其他工作节点受到损害。 **劣势:** - - 要求你有某种方法将发现信息从主节点传送到引导节点。 - 例如,这可以通过云提供商或驱动工具实现。 - 该文件中的信息不是加密的,而是需要 HTTPS 或等效文件来保证其完整性。 +- 要求你有某种方法将发现信息从主节点传送到引导节点。 + 例如,这可以通过云提供商或驱动工具实现。 + 该文件中的信息不是加密的,而是需要 HTTPS 或等效文件来保证其完整性。 -默认情况下,Kubernetes 启用了 CSR 自动批准器,如果在身份验证时使用 Bootstrap Token,它会批准对 kubelet 的任何客户端证书的请求。 +默认情况下,Kubernetes 启用了 CSR 自动批准器,如果在身份验证时使用启动引导令牌, +它会批准对 kubelet 的任何客户端证书的请求。 如果不希望集群自动批准kubelet客户端证书,可以通过执行以下命令关闭它: ```shell @@ -362,13 +374,15 @@ kubectl delete clusterrolebinding kubeadm:node-autoapprove-bootstrap -关闭后,`kubeadm join` 操作将会被阻断,直到管理员已经手动批准了在途中的 CSR 才会继续: +关闭后,`kubeadm join` 操作将会被阻塞,直到管理员已经手动批准了在途中的 CSR 才会继续: ```shell kubectl get csr ``` - + 输出类似于: ``` @@ -380,7 +394,9 @@ node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ 18s system:bootstra kubectl certificate approve node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ ``` - + 输出类似于: ``` @@ -391,7 +407,9 @@ certificatesigningrequest "node-csr-c69HXe7aYcqkS1bKmH4faEnHAWxn6i2bHZ2mD04jZyQ" kubectl get csr ``` - + 输出类似于: ``` @@ -416,7 +434,8 @@ default. While there is no private data in this ConfigMap, some users might wish it off regardless. Doing so will disable the ability to use the `--discovery-token` flag of the `kubeadm join` flow. Here are the steps to do so: --> -为了实现使用令牌作为唯一验证信息的加入工作流,默认情况下会公开带有验证主节点标识所需数据的 ConfigMap。 +为了实现使用令牌作为唯一验证信息的加入工作流,默认情况下会公开带有验证主节点标识 +所需数据的 ConfigMap。 虽然此 ConfigMap 中没有私有数据,但一些用户可能希望无论如何都关闭它。 这样做需要禁用 `kubeadm join` 工作流的 `--discovery-token` 参数。 以下是实现步骤: @@ -430,7 +449,9 @@ it off regardless. 
Doing so will disable the ability to use the `--discovery-tok kubectl -n kube-public get cm cluster-info -o yaml | grep "kubeconfig:" -A11 | grep "apiVersion" -A10 | sed "s/ //" | tee cluster-info.yaml ``` - + 输出类似于: ``` @@ -457,9 +478,9 @@ users: [] * 关闭 `cluster-info` ConfigMap 的公开访问: -```shell -kubectl -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo -``` + ```shell + kubectl -n kube-public delete rolebinding kubeadm:bootstrap-signer-clusterinfo + ``` -* [kubeadm init](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/) 初始化 Kubernetes 主节点 -* [kubeadm token](/zh/docs/reference/setup-tools/kubeadm/kubeadm-token/) 管理 `kubeadm join` 的令牌 -* [kubeadm reset](/zh/docs/reference/setup-tools/kubeadm/kubeadm-reset/) 将 `kubeadm init` 或 `kubeadm join` 对主机的更改恢复到之前状态 +* [kubeadm init](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/) + 初始化 Kubernetes 主节点 +* [kubeadm token](/zh/docs/reference/setup-tools/kubeadm/kubeadm-token/) + 管理 `kubeadm join` 的令牌 +* [kubeadm reset](/zh/docs/reference/setup-tools/kubeadm/kubeadm-reset/) + 将 `kubeadm init` 或 `kubeadm join` 对主机的更改恢复到之前状态 diff --git a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md index 4388edcfe4f60..0faea8865cce3 100644 --- a/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md +++ b/content/zh/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md @@ -3,7 +3,7 @@ title: kubeadm upgrade content_type: concept weight: 40 --- - +--> `kubeadm upgrade` 是一个对用户友好的命令,它将复杂的升级逻辑包装在一个命令后面,支持升级的规划和实际执行。 - -## kubeadm 升级指南 +## kubeadm upgrade 指南 -[本文档](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)概述了使用 kubeadm 执行升级的步骤。 -有关 kubeadm 旧版本,请参阅 Kubernetes 网站的旧版文档。 +[本文档](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/)概述 +使用 kubeadm 执行升级的步骤。 +与 kubeadm 旧版本相关的文档,请参阅 Kubernetes 网站的旧版文档。 -你可以使用 `kubeadm upgrade diff` 来查看将应用于静态 pod 清单的更改。 - - -要在 Kubernetes v1.13.0 及更高版本中使用 kube-dns 进行升级,请遵循[本指南](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init-phase/#cmd-phase-addon)。 +你可以使用 `kubeadm upgrade diff` 来查看将应用于静态 Pod 清单的更改。 -在 Kubernetes v1.15.0 和更高版本中,`kubeadm upgrade apply` 和 `kubeadm upgrade node` 也将自动续订该节点上的 kubeadm 托管证书,包括存储在 kubeconfig 文件中的证书。 -要选择退出,可以传递参数 `--certificate-renewal=false`。有关证书续订的更多详细信息请参见[证书管理文档](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs)。 +在 Kubernetes v1.15.0 和更高版本中,`kubeadm upgrade apply` 和 `kubeadm upgrade node` +也将自动续订该节点上的 kubeadm 托管证书,包括存储在 kubeconfig 文件中的证书。 +要选择退出,可以传递参数 `--certificate-renewal=false`。 +有关证书续订的更多详细信息请参见[证书管理文档](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs)。 {{< note >}} @@ -78,11 +75,12 @@ reports of unexpected results. 
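As a rough sketch of how the commands described above fit together (the version number is only a placeholder; substitute the release you are upgrading to):

```shell
# Check which versions are available and validate the current cluster state.
kubeadm upgrade plan

# Preview the static Pod manifest changes before applying them.
kubeadm upgrade diff v1.21.0

# Apply the upgrade on the first control-plane node.
# By default this also renews the kubeadm-managed certificates on that node;
# pass --certificate-renewal=false to opt out of renewal.
kubeadm upgrade apply v1.21.0
```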
## kubeadm upgrade node {#cmd-upgrade-node} {{< include "generated/kubeadm_upgrade_node.md" >}} - - ## {{% heading "whatsnext" %}} -* 如果你使用 kubeadm v1.7.x 或更低版本初始化集群,则可以参考[kubeadm 配置](/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/)配置集群用于 `kubeadm upgrade`。 +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` +--> +* 如果你使用 kubeadm v1.7.x 或更低版本初始化集群,则可以参考 + [kubeadm 配置](/zh/docs/reference/setup-tools/kubeadm/kubeadm-config/) + 配置集群用于 `kubeadm upgrade`。 diff --git a/content/zh/docs/reference/using-api/deprecation-policy.md b/content/zh/docs/reference/using-api/deprecation-policy.md index 84e2137528d2f..27c2a5b636600 100644 --- a/content/zh/docs/reference/using-api/deprecation-policy.md +++ b/content/zh/docs/reference/using-api/deprecation-policy.md @@ -45,7 +45,7 @@ into 3 main tracks, each of which has different policies for deprecation: ## 弃用 API 的一部分 {#deprecating-parts-of-the-api} 由于 Kubernetes 是一个 API 驱动的系统,API 会随着时间推移而演化,以反映 -人们对问题共建的认识的变化。Kubernetes API 实际上是一个 API 集合,其中每个 +人们对问题空间的认识的变化。Kubernetes API 实际上是一个 API 集合,其中每个 成员称作“API 组(API Group)”,并且每个 API 组都是独立管理版本的。 [API 版本](/zh/docs/reference/using-api/#api-versioning)会有 三类,每类有不同的废弃策略: @@ -448,7 +448,7 @@ Starting in Kubernetes v1.19, making an API request to a deprecated REST API end 1. Returns a `Warning` header (as defined in [RFC7234, Section 5.5](https://tools.ietf.org/html/rfc7234#section-5.5)) in the API response. 2. Adds a `"k8s.io/deprecated":"true"` annotation to the [audit event](/docs/tasks/debug-application-cluster/audit/) recorded for the request. -3. Sets an `apiserver_requested_deprecated_apis` gauge metric to `1` in the `kube-apiserver` +3. Sets an `apiserver_requested_deprecated_apis` gauge metric to `1` in the `kube-apiserver` process. The metric has labels for `group`, `version`, `resource`, `subresource` that can be joined to the `apiserver_request_total` metric, and a `removed_release` label that indicates the Kubernetes release in which the API will no longer be served. The following Prometheus query @@ -510,7 +510,7 @@ supported in API v1 must exist and function until API v1 is removed. 
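As a quick, illustrative way to check whether the deprecated-API gauge described above is reporting anything in a cluster (the exact series and labels you see depend on your release and on which APIs clients are calling):

```shell
# Fetch the kube-apiserver metrics and keep only the deprecated-API gauge series.
kubectl get --raw /metrics | grep '^apiserver_requested_deprecated_apis'
```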
### 组件配置结构 {#component-config-structures} @@ -715,6 +715,82 @@ Both warnings and documentation must indicate whether a feature gate is non-oper 弃用时,必须在发布说明和对应的 CLI 帮助信息中通过文档宣布。 警告信息和文档都要标明是否某特性门控不再起作用。** + + +### 弃用度量值 {#Deprecating a metric} + +Kubernetes 控制平面的每个组件都公开度量值(通常是 `/metrics` 端点),它们通常由集群管理员使用。 +并不是所有的度量值都是同样重要的:一些度量值通常用作 SLIs 或被使用来确定 SLOs,这些往往比较重要。 +其他度量值在本质上带有实验性,或者主要用于 Kubernetes 开发过程。 + +因此,度量值分为两个稳定性类别(`ALPHA` 和 `STABLE`); +此分类会影响在 Kubernetes 发布版本中移除某度量值。 +所对应的分类取决于对该度量值重要性的预期。 +弃用和移除度量值的规则如下: + + +**规则 #9a: 对于相应的稳定性类别,度量值起作用的周期必须不小于:** + + * **STABLE: 4 个发布版本或者 12 个月 (取其较长者)** + * **ALPHA: 0 个发布版本** + +**规则 #9b: 在度量值被宣布启用之后,它起作用的周期必须不小于:** + + * **STABLE: 3 个发布版本或者 9 个月 (取其较长者)** + * **ALPHA: 0 个发布版本** + + +已弃用的度量值将在其描述文本前加上一个已弃用通知字符串 '(Deprecated from x.y)', +并将在度量值被记录期间发出警告日志。就像稳定的、未被弃用的度量指标一样, +被弃用的度量值将自动注册到 metrics 端点,因此被弃用的度量值也是可见的。 + +在随后的版本中(当度量值 `deprecatedVersion` 等于_当前 Kubernetes 版本 - 3_), +被弃用的度量值将变成 _隐藏(Hidden)_ metric 度量值。 +与被弃用的度量值不同,隐藏的度量值将不再被自动注册到 metrics 端点(因此被隐藏)。 +但是,它们可以通过可执行文件的命令行标志显式启用(`--show-hidden-metrics-for-version=`)。 + +如果集群管理员不能对早期的弃用警告作出反应,这一设计就为他们提供了抓紧迁移弃用度量值的途径。 +隐藏的度量值应该在再过一个发行版本后被删除。 + -本节介绍了设置和运行 Kubernetes 环境的不同选项。 +本节列出了设置和运行 Kubernetes 的不同方法。 -不同的 Kubernetes 解决方案满足不同的要求:易于维护、安全性、可控制性、可用资源以及操作和管理 Kubernetes 集群所需的专业知识。 +安装 Kubernetes 时,请根据以下条件选择安装类型:易于维护、安全性、可控制性、可用资源以及操作和管理 Kubernetes 集群所需的专业知识。 可以在本地机器、云、本地数据中心上部署 Kubernetes 集群,或选择一个托管的 Kubernetes 集群。还可以跨各种云提供商或裸机环境创建自定义解决方案。 - -更简单地说,可以在学习和生产环境中创建一个 Kubernetes 集群。 - - - -如果正打算学习 Kubernetes,请使用基于 Docker 的解决方案:Docker 是 Kubernetes 社区支持或生态系统中用来在本地计算机上设置 Kubernetes 集群的一种工具。 - - -{{< table caption="本地机器解决方案表,其中列出了社区和生态系统支持的用于部署 Kubernetes 的工具。" >}} - -|社区 |生态系统 | -| ------------ | -------- | -| [Minikube](/zh/docs/setup/learning-environment/minikube/) | [Docker Desktop](https://www.docker.com/products/docker-desktop)| -| [kind (Kubernetes IN Docker)](/zh/docs/setup/learning-environment/kind/) | [Minishift](https://docs.okd.io/latest/minishift/)| -| | [MicroK8s](https://microk8s.io/)| - +如果正打算学习 Kubernetes,请使用 Kubernetes 社区支持或生态系统中的工具在本地计算机上设置 Kubernetes 集群。 -本页描述如何在多个区(Zone)中运行集群。 +本页描述如何跨多个区(Zone)中运行集群。 -## 介绍 - -Kubernetes 1.2 添加了跨多个失效区(Failure Zone)运行同一集群的能力 -(GCE 把它们称作“区(Zones)”,AWS 把它们称作“可用区(Availability Zones)”, -这里我们用“区(Zones)”指代它们)。 -此能力是更广泛的集群联邦(Cluster Federation)特性的一个轻量级版本。 -集群联邦之前有一个昵称 -["Ubernetes"](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/design-proposals/multicluster/federation.md))。 -完全的集群联邦可以将运行在多个区域(Region)或云供应商(或本地数据中心)的多个 -Kubernetes 集群组合起来。 -不过,很多用户仅仅是希望在同一云厂商平台的多个区域运行一个可用性更好的集群, -而这恰恰是 1.2 引入的多区支持所带来的特性 -(此特性之前有一个昵称 “Ubernetes Lite”)。 - - -多区支持有意实现的有局限性:可以在跨多个区域运行同一 Kubernetes 集群,但只能 -在同一区域(Region)和云厂商平台。目前仅自动支持 GCE 和 AWS,尽管为其他云平台 -或裸金属平台添加支持页相对容易,只需要确保节点和卷上添加合适的标签即可。 - - -## 功能 - -节点启动时,`kubelet` 自动向其上添加区信息标签。 - - -在单区(Single-Zone)集群中, Kubernetes 会自动将副本控制器或服务中的 Pod -分布到不同节点,以降低节点失效的影响。 -在多区集群中,这一分布负载的行为被扩展到跨区分布,以降低区失效的影响, -跨区分布的能力是通过 `SelectorSpreadPriority` 实现的。此放置策略亦仅仅是 -尽力而为,所以如果你的集群所跨区是异质的(例如,节点个数不同、节点类型 -不同或者 Pod 资源需求不同),放置策略都可能无法完美地跨区完成 Pod 的 -均衡分布。如果需要,你可以使用同质区(节点个数和类型相同)以降低不均衡 -分布的可能性。 - - -持久卷被创建时,`PersistentVolumeLabel` 准入控制器会自动为其添加区标签。 -调度器使用 `VolumeZonePredicate` 断言确保申领某给定卷的 Pod 只会被放到 -该卷所在的区。这是因为卷不可以跨区挂载。 - - -## 局限性 - -多区支持有一些很重要的局限性: - -* 我们假定不同的区之间在网络上彼此距离很近,所以我们不执行可感知区的路由。 - 尤其是,即使某些负责提供该服务的 Pod 与客户端位于同一区,通过服务末端 - 进入的流量可能会跨区,因而会导致一些额外的延迟和开销。 - - -* 卷与区之间的亲和性仅适用于 PV 持久卷。例如,如果你直接在 Pod 规约中指定某 EBS - 卷,这种亲和性支持就无法工作。 - -* 集群无法跨多个云平台或者地理区域运行。这类功能需要完整的联邦特性支持。 +## Background - -* 尽管你的节点位于多个区中,`kube-up` 
脚本目前默认只能构造一个主控节点。 - 尽管服务是高可用的,能够忍受失去某个区的问题,控制面位于某一个区中。 - 希望运行高可用控制面的用户应该遵照 - [高可用性](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/) - 中的指令构建。 - - -### 卷局限性 +Kubernetes is designed so that a single Kubernetes cluster can run +across multiple failure zones, typically where these zones fit within +a logical grouping called a _region_. Major cloud providers define a region +as a set of failure zones (also called _availability zones_) that provide +a consistent set of features: within a region, each zone offers the same +APIs and services. -以下局限性通过 -[拓扑感知的卷绑定](/zh/docs/concepts/storage/storage-classes/#volume-binding-mode)解决: - -* 使用动态卷供应时,StatefulSet 卷的跨区分布目前与 Pod - 亲和性和反亲和性策略不兼容。 - - -* 如果 StatefulSet 的名字中包含连字符("-"),卷的跨区分布可能无法实现存储的 - 跨区同一分布。 - -* 当在一个 Deployment 或 Pod 规约中指定多个 PVC 申领时,则需要为某特定区域 - 配置 StorageClass,或者在某一特定区域中需要静态供应 PV 卷。 - 另一种解决方案是使用 StatefulSet,确保给定副本的所有卷都从同一区中供应。 +## 背景 - -## 演练 +Kubernetes 从设计上允许同一个 Kubernetes 集群跨多个失效区来运行, +通常这些去位于某个称作 _区域(region)_ 逻辑分组中。 +主要的云提供商都将区域定义为一组失效区的集合(也称作 _可用区(Availability Zones)_), +能够提供一组一致的功能特性:每个区域内,各个可用区提供相同的 API 和服务。 -我们现在准备对在 GCE 和 AWS 上配置和使用多区集群进行演练。为了完成此演练, -你需要设置 `MULTIZONE=true` 来启动一个完整的集群,之后指定 -`KUBE_USE_EXISTING_MASTER=true` 并再次运行 `kube-up` 添加其他区中的节点。 +典型的云体系结构都会尝试降低某个区中的失效影响到其他区中服务的概率。 -### 建立集群 +## 控制面行为 {#control-plane-behavior} -和往常一样创建集群,不过需要设置 MULTIZONE,以便告诉集群需要管理多个区。 -这里我们在 `us-central1-a` 创建节点。 - -GCE: - -```shell -curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a NUM_NODES=3 bash -``` - -AWS: - -```shell -curl -sS https://get.k8s.io | MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a NUM_NODES=3 bash -``` - - -这一步骤和往常一样启动一个集群,不过尽管 `MULTIZONE=true` -标志已经启用了多区功能特性支持,集群仍然运行在一个区内。 +所有的[控制面组件](/zh/docs/concepts/overview/components/#control-plane-components) +都支持以一组可相互替换的资源池的形式来运行,每个组件都有多个副本。 -### 节点已被打标签 - -查看节点,你会看到节点上已经有了区信息标签。 -目前这些节点都在 `us-central1-a` (GCE) 或 `us-west-2a` (AWS)。 -对于区域(Region),标签为 `topology.kubernetes.io/region`, -对于区(Zone),标签为 `topology.kubernetes.io/zone`: +当你部署集群控制面时,应将控制面组件的副本跨多个失效区来部署。 +如果可用性是一个很重要的指标,应该选择至少三个失效区,并将每个 +控制面组件(API 服务器、调度器、etcd、控制器管理器)复制多个副本, +跨至少三个失效区来部署。如果你在运行云控制器管理器,则也应该将 +该组件跨所选的三个失效区来部署。 -```shell -kubectl get nodes --show-labels -``` - - -输出类似于: - -``` -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-master Ready,SchedulingDisabled 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master -kubernetes-minion-87j9 Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 -kubernetes-minion-9vlv Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-a12q Ready 6m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q -``` +{{< note >}} -### 添加第二个区中的节点 - -让我们向现有集群中添加另外一组节点,复用现有的主控节点,但运行在不同的区 -(`us-central1-b` 或 `us-west-2b`)。 -我们再次运行 `kube-up`,不过设置 `KUBE_USE_EXISTING_MASTER=true`。 -`kube-up` 不会创建新的主控节点,而会复用之前创建的主控节点。 - -GCE: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-b NUM_NODES=3 
kubernetes/cluster/kube-up.sh -``` +Kubernetes 并不会为 API 服务器端点提供跨失效区的弹性。 +你可以为集群 API 服务器使用多种技术来提升其可用性,包括使用 +DNS 轮转、SRV 记录或者带健康检查的第三方负载均衡解决方案等等。 +{{< /note >}} -在 AWS 上,我们还需要为额外的子网指定网络 CIDR,以及主控节点的内部 IP 地址: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2b NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.1.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh -``` +## Node behavior - -再次查看节点,你会看到新启动了三个节点并且其标签表明运行在 `us-central1-b` 区: +## 节点行为 {#node-behavior} -```shell -kubectl get nodes --show-labels -``` +Kubernetes 自动为负载资源(如{{< glossary_tooltip text="Deployment" term_id="deployment" >}} +或 {{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}})) +跨集群中不同节点来部署其 Pods。 +这种分布逻辑有助于降低失效带来的影响。 -输出类似于: - -``` -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-master Ready,SchedulingDisabled 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-1,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-master -kubernetes-minion-281d Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d -kubernetes-minion-87j9 Ready 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-87j9 -kubernetes-minion-9vlv Ready 16m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-a12q Ready 17m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-a12q -kubernetes-minion-pp2f Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-pp2f -kubernetes-minion-wf8i Ready 2m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-wf8i -``` +节点启动时,每个节点上的 kubelet 会向 Kubernetes API 中代表该 kubelet 的 Node 对象 +添加 {{< glossary_tooltip text="标签" term_id="label" >}}。 +这些标签可能包含[区信息](/zh/docs/reference/labels-annotations-taints/#topologykubernetesiozone)。 -### 卷亲和性 - -通过动态卷供应创建一个卷(只有 PV 持久卷支持区亲和性): - -```bash -kubectl apply -f - <}} +能够更好地分布 Pods,以实现更好的可用性,降低因为某种失效给整个工作负载 +带来的风险。 -{{< note >}} -Kubernetes 1.3 及以上版本会将动态 PV 申领散布到所配置的各个区。 -在 1.2 版本中,动态持久卷总是在集群主控节点所在的区 -(这里的 `us-central1-a` 或 `us-west-2a`), -对应的 Issue ([#23330](https://github.com/kubernetes/kubernetes/issues/23330)) -在 1.3 及以上版本中已经解决。 -{{< /note >}} +例如,你可以设置一种约束,确保某个 StatefulSet 中的三个副本都运行在 +不同的可用区中,只要其他条件允许。你可以通过声明的方式来定义这种约束, +而不需要显式指定每个工作负载使用哪些可用区。 -现在我们来验证 Kubernetes 自动为 PV 打上了所在区或区域的标签: +### Distributing nodes across zones -```shell -kubectl get pv --show-labels -``` +Kubernetes' core does not create nodes for you; you need to do that yourself, +or use a tool such as the [Cluster API](https://cluster-api.sigs.k8s.io/) to +manage nodes on your behalf. 
- -输出类似于: +### 跨多个区分布节点 {#distributing-nodes-across-zones} -``` -NAME CAPACITY ACCESSMODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE LABELS -pv-gce-mj4gm 5Gi RWO Retain Bound default/claim1 manual 46s topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a -``` +Kubernetes 的核心逻辑并不会帮你创建节点,你需要自行完成此操作,或者使用 +类似 [Cluster API](https://cluster-api.sigs.k8s.io/) 这类工具来替你管理节点。 -现在我们将创建一个使用 PVC 申领的 Pod。 -由于 GCE PD 或 AWS EBS 卷都不能跨区挂载,这意味着 Pod 只能创建在卷所在的区: - -```yaml -kubectl apply -f - < -注意 Pod 自动创建在卷所在的区,因为云平台提供商一般不允许跨区挂接存储卷。 - -```shell -kubectl describe pod mypod | grep Node -``` +## Manual zone assignment for Pods -``` -Node: kubernetes-minion-9vlv/10.240.0.5 -``` - - -检查节点标签: - -```shell -kubectl get node kubernetes-minion-9vlv --show-labels -``` - -``` -NAME STATUS AGE VERSION LABELS -kubernetes-minion-9vlv Ready 22m v1.6.0+fff5156 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -``` +## 为 Pods 手动指定区 -### Pod 跨区分布 - -同一副本控制器或服务的多个 Pod 会自动完成跨区分布。 -首先,我们现在第三个区启动一些节点: - -GCE: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-f NUM_NODES=3 kubernetes/cluster/kube-up.sh -``` - -AWS: - -```shell -KUBE_USE_EXISTING_MASTER=true MULTIZONE=true KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2c NUM_NODES=3 KUBE_SUBNET_CIDR=172.20.2.0/24 MASTER_INTERNAL_IP=172.20.0.9 kubernetes/cluster/kube-up.sh -``` +你可以应用[节点选择算符约束](/zh/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) +到你所创建的 Pods 上,或者为 Deployment、StatefulSet 或 Job 这类工作负载资源 +中的 Pod 模板设置此类约束。 -验证你现在有来自三个区的节点: +## Storage access for zones -```shell -kubectl get nodes --show-labels -``` - - -创建 `guestbook-go` 示例,其中包含副本个数为 3 的 RC,运行一个简单的 Web 应用: - -```shell -find kubernetes/examples/guestbook-go/ -name '*.json' | xargs -I {} kubectl apply -f {} -``` - - -Pod 应该跨三个区分布: - -```shell -kubectl describe pod -l app=guestbook | grep Node -``` - -``` -Node: kubernetes-minion-9vlv/10.240.0.5 -Node: kubernetes-minion-281d/10.240.0.8 -Node: kubernetes-minion-olsh/10.240.0.11 -``` - -```shell -kubectl get node kubernetes-minion-9vlv kubernetes-minion-281d kubernetes-minion-olsh --show-labels -``` +## 跨区的存储访问 -``` -NAME STATUS ROLES AGE VERSION LABELS -kubernetes-minion-9vlv Ready 34m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-a,kubernetes.io/hostname=kubernetes-minion-9vlv -kubernetes-minion-281d Ready 20m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-b,kubernetes.io/hostname=kubernetes-minion-281d -kubernetes-minion-olsh Ready 3m v1.13.0 beta.kubernetes.io/instance-type=n1-standard-2,topology.kubernetes.io/region=us-central1,topology.kubernetes.io/zone=us-central1-f,kubernetes.io/hostname=kubernetes-minion-olsh -``` +当创建持久卷时,`PersistentVolumeLabel` +[准入控制器](/zh/docs/reference/access-authn-authz/admission-controllers/) +会自动向那些链接到特定区的 PersistentVolume 添加区标签。 +{{< glossary_tooltip text="调度器" term_id="kube-scheduler" >}}通过其 +`NoVolumeZoneConflict` 断言确保申领给定 PersistentVolume 的 Pods 只会 +被调度到该卷所在的可用区。 -负载均衡器也会跨集群中的所有区;`guestbook-go` 示例中包含了一个负载均衡 -服务的例子: - -```shell -kubectl describe service guestbook | grep LoadBalancer.Ingress -``` +你可以为 PersistentVolumeClaim 指定{{< glossary_tooltip text="StorageClass" term_id="storage-class" >}} +以设置该类中的存储可以使用的失效域(区)。 
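+下面是一个仅作示意的 StorageClass 配置,通过 `allowedTopologies` 将动态制备的卷
+限制在特定可用区;其中的名称、制备器和区名均为示例值(此处以 GCE PD 为例),
+请根据你的环境调整:
+
+```yaml
+# 仅作示意的示例值
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: standard-zoned              # 假设的名称
+provisioner: kubernetes.io/gce-pd   # 示例制备器,请替换为你所使用的制备器
+parameters:
+  type: pd-standard
+volumeBindingMode: WaitForFirstConsumer
+allowedTopologies:
+- matchLabelExpressions:
+  - key: topology.kubernetes.io/zone
+    values:
+    - us-central1-a                 # 示例区名
+    - us-central1-b
+```
+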
+要了解如何配置能够感知失效域或区的 StorageClass,请参阅 +[可用的拓扑逻辑](/zh/docs/concepts/storage/storage-classes/#allowed-topologies)。 -输出类似于: +## Networking -``` -LoadBalancer Ingress: 130.211.126.21 -``` - - -设置上面的 IP 地址: +## 网络 {#networking} -```shell -export IP=130.211.126.21 -``` +Kubernetes 自身不提供与可用区相关的联网配置。 +你可以使用[网络插件](/zh/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) +来配置集群的联网,该网络解决方案可能拥有一些与可用区相关的元素。 +例如,如果你的云提供商支持 `type=LoadBalancer` 的 Service,则负载均衡器 +可能仅会将请求流量发送到运行在负责处理给定连接的负载均衡器组件所在的区。 +请查阅云提供商的文档了解详细信息。 -使用 curl 访问该 IP: - - -```shell -curl -s http://${IP}:3000/env | grep HOSTNAME -``` +对于自定义的或本地集群部署,也可以考虑这些因素 +{{< glossary_tooltip text="Service" term_id="service" >}} +{{< glossary_tooltip text="Ingress" term_id="ingress" >}} 的行为, +包括处理不同失效区的方法,在很大程度上取决于你的集群是如何搭建的。 -输出类似于: +## Fault recovery -``` - "HOSTNAME": "guestbook-44sep", -``` +When you set up your cluster, you might also need to consider whether and how +your setup can restore service if all the failure zones in a region go +off-line at the same time. For example, do you rely on there being at least +one node able to run Pods in a zone? +Make sure that any cluster-critical repair work does not rely +on there being at least one healthy node in your cluster. For example: if all nodes +are unhealthy, you might need to run a repair Job with a special +{{< glossary_tooltip text="toleration" term_id="toleration" >}} so that the repair +can complete enough to bring at least one node into service. - -如果多次尝试该命令: +## 失效恢复 {#fault-recovery} -```shell -(for i in `seq 20`; do curl -s http://${IP}:3000/env | grep HOSTNAME; done) | sort | uniq -``` +在搭建集群时,你可能需要考虑当某区域中的所有失效区都同时掉线时,是否以及如何 +恢复服务。例如,你是否要求在某个区中至少有一个节点能够运行 Pod? +请确保任何对集群很关键的修复工作都不要指望集群中至少有一个健康节点。 +例如:当所有节点都不健康时,你可能需要运行某个修复性的 Job, +该 Job 要设置特定的{{< glossary_tooltip text="容忍度" term_id="toleration" >}} +以便修复操作能够至少将一个节点恢复为可用状态。 - -输出类似于: +Kubernetes 对这类问题没有现成的解决方案;不过这也是要考虑的因素之一。 -```shell - "HOSTNAME": "guestbook-44sep", - "HOSTNAME": "guestbook-hum5n", - "HOSTNAME": "guestbook-ppm40", -``` +## {{% heading "whatsnext" %}} -负载均衡器正确地选择不同的 Pod,即使它们跨了多个区。 - - -### 停止集群 - -当完成以上工作之后,清理任务现场: - -GCE: - -```shell -KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-f kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=gce KUBE_USE_EXISTING_MASTER=true KUBE_GCE_ZONE=us-central1-b kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=gce KUBE_GCE_ZONE=us-central1-a kubernetes/cluster/kube-down.sh -``` - -AWS: - -```shell -KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2c kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true KUBE_AWS_ZONE=us-west-2b kubernetes/cluster/kube-down.sh -KUBERNETES_PROVIDER=aws KUBE_AWS_ZONE=us-west-2a kubernetes/cluster/kube-down.sh -``` - +要了解调度器如何在集群中放置 Pods 并遵从所配置的约束,可参阅 +[调度与驱逐](/zh/docs/concepts/scheduling-eviction/)。 diff --git a/content/zh/docs/setup/learning-environment/_index.md b/content/zh/docs/setup/learning-environment/_index.md index 9923716978f70..f68e52a4393c1 100644 --- a/content/zh/docs/setup/learning-environment/_index.md +++ b/content/zh/docs/setup/learning-environment/_index.md @@ -2,3 +2,50 @@ title: 学习环境 weight: 20 --- + + + +## kind + + +你可以使用 [`kind`](https://kind.sigs.k8s.io/docs/) 来在本地计算机上运行 Kubernetes。 +此工具要求你已经安装并配置了 [Docker](https://docs.docker.com/get-docker/)。 + +kind [快速入门](https://kind.sigs.k8s.io/docs/user/quick-start/)页面 +为你展示了如何开始使用 kind 的相关信息。 + +## minikube + + +与 `kind` 类似,[`minikube`](https://minikube.sigs.k8s.io/) 是一个允许你在 +本地运行 
Kubernetes 的工具。`minikube` 在你的个人计算机上运行一个单节点的 +Kubernetes 集群(包括 Windows、macOS 和 Linux PC 机),这样你可以尝试 +Kubernetes 或者执行每天的开发工作。 + +如果你所关注的是如何安装该工具,可以查阅官方的 +[Get Started!](https://minikube.sigs.k8s.io/docs/start/) +文档。 + diff --git a/content/zh/docs/setup/production-environment/container-runtimes.md b/content/zh/docs/setup/production-environment/container-runtimes.md index 99739e8b1004b..c0a8a66b5e72e 100644 --- a/content/zh/docs/setup/production-environment/container-runtimes.md +++ b/content/zh/docs/setup/production-environment/container-runtimes.md @@ -20,7 +20,8 @@ You need to install a into each node in the cluster so that Pods can run there. This page outlines what is involved and describes related tasks for setting up nodes. --> -你需要在集群内每个节点上安装一个{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}} +你需要在集群内每个节点上安装一个 +{{< glossary_tooltip text="容器运行时" term_id="container-runtime" >}} 以使 Pod 可以运行在上面。本文概述了所涉及的内容并描述了与节点设置相关的任务。 @@ -59,7 +60,8 @@ systemd means that there will be two different cgroup managers. --> 控制组用来约束分配给进程的资源。 -当某个 Linux 系统发行版使用 [systemd](https://www.freedesktop.org/wiki/Software/systemd/) 作为其初始化系统时,初始化进程会生成并使用一个 root 控制组 (`cgroup`), 并充当 cgroup 管理器。 +当某个 Linux 系统发行版使用 [systemd](https://www.freedesktop.org/wiki/Software/systemd/) +作为其初始化系统时,初始化进程会生成并使用一个 root 控制组 (`cgroup`), 并充当 cgroup 管理器。 Systemd 与 cgroup 集成紧密,并将为每个 systemd 单元分配一个 cgroup。 你也可以配置容器运行时和 kubelet 使用 `cgroupfs`。 连同 systemd 一起使用 `cgroupfs` 意味着将有两个不同的 cgroup 管理器。 @@ -72,9 +74,12 @@ In the field, people have reported cases where nodes that are configured to use for the kubelet and Docker, but `systemd` for the rest of the processes, become unstable under resource pressure. --> -单个 cgroup 管理器将简化分配资源的视图,并且默认情况下将对可用资源和使用中的资源具有更一致的视图。 +单个 cgroup 管理器将简化分配资源的视图,并且默认情况下将对可用资源和使用 +中的资源具有更一致的视图。 当有两个管理器共存于一个系统中时,最终将对这些资源产生两种视图。 -在此领域人们已经报告过一些案例,某些节点配置让 kubelet 和 docker 使用 `cgroupfs`,而节点上运行的其余进程则使用 systemd; 这类节点在资源压力下会变得不稳定。 +在此领域人们已经报告过一些案例,某些节点配置让 kubelet 和 docker 使用 +`cgroupfs`,而节点上运行的其余进程则使用 systemd; 这类节点在资源压力下 +会变得不稳定。 -注意:非常 *不* 建议更改已加入集群的节点的 cgroup 驱动。 -如果 kubelet 已经使用某 cgroup 驱动的语义创建了 pod,更改运行时以使用别的 cgroup 驱动,当为现有 Pods 重新创建 PodSandbox 时会产生错误。重启 kubelet 也可能无法解决此类问题。 -如果你有切实可行的自动化方案,使用其他已更新配置的节点来替换该节点,或者使用自动化方案来重新安装。 +注意:更改已加入集群的节点的 cgroup 驱动是一项敏感的操作。 +如果 kubelet 已经使用某 cgroup 驱动的语义创建了 pod,更改运行时以使用 +别的 cgroup 驱动,当为现有 Pods 重新创建 PodSandbox 时会产生错误。 +重启 kubelet 也可能无法解决此类问题。 +如果你有切实可行的自动化方案,使用其他已更新配置的节点来替换该节点, +或者使用自动化方案来重新安装。 + + +### 将 kubeadm 托管的集群迁移到 `systemd` 驱动 + + +如果你想迁移到现有 kubeadm 托管集群中的 `systemd` cgroup 驱动程序, +遵循此[迁移指南](/zh/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver)。 本节包含使用 containerd 作为 CRI 运行时的必要步骤。 @@ -171,7 +171,10 @@ Install containerd: -1. 从官方Docker仓库安装 `containerd.io` 软件包。可以在 [安装 Docker 引擎](https://docs.docker.com/engine/install/#server) 中找到有关为各自的 Linux 发行版设置 Docker 存储库和安装 `containerd.io` 软件包的说明。 +1. 从官方Docker仓库安装 `containerd.io` 软件包。可以在 + [安装 Docker 引擎](https://docs.docker.com/engine/install/#server) + 中找到有关为各自的 Linux 发行版设置 Docker 存储库和安装 `containerd.io` + 软件包的说明。 -启动 Powershell 会话,将 `$Version` 设置为所需的版本(例如:`$ Version=1.4.3`),然后运行以下命令: +启动 Powershell 会话,将 `$Version` 设置为所需的版本(例如:`$ Version=1.4.3`), +然后运行以下命令: - 3. 
启动 containerd: ```powershell @@ -248,15 +252,7 @@ Start a Powershell session, set `$Version` to the desired version (ex: `$Version - 结合 `runc` 使用 `systemd` cgroup 驱动,在 `/etc/containerd/config.toml` 中设置 ``` @@ -280,7 +276,7 @@ When using kubeadm, manually configure the [cgroup driver for kubelet](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node). --> 当使用 kubeadm 时,请手动配置 -[kubelet 的 cgroup 驱动](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node). +[kubelet 的 cgroup 驱动](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node). ### CRI-O @@ -288,47 +284,29 @@ When using kubeadm, manually configure the This section contains the necessary steps to install CRI-O as a container runtime. Use the following commands to install CRI-O on your system: +--> +本节包含安装 CRI-O 作为容器运行时的必要步骤。 + +使用以下命令在系统中安装 CRI-O: {{< note >}} + +CRI-O 的主要以及次要版本必须与 Kubernetes 的主要和次要版本相匹配。 +更多信息请查阅 +[CRI-O 兼容性列表](https://github.com/cri-o/cri-o#compatibility-matrix-cri-o--kubernetes)。 {{< /note >}} + -本节包含安装 CRI-O 作为容器运行时的必要步骤。 - -使用以下命令在系统中安装 CRI-O: - -提示:CRI-O 的主要以及次要版本必须与 Kubernetes 的主要和次要版本相匹配。 -更多信息请查阅 [CRI-O 兼容性列表](https://github.com/cri-o/cri-o#compatibility-matrix-cri-o--kubernetes)。 - -安装以及配置的先决条件: +安装并配置前置环境: ```shell -# 创建 .conf 文件,以便在系统启动时加载内核模块 +# 创建 .conf 文件以在启动时加载模块 cat < Then run - --> +--> 在下列操作系统上安装 CRI-O, 使用下表中合适的值设置环境变量 `OS`: | 操作系统 | `$OS` | @@ -383,6 +361,7 @@ Then run
然后执行 + ```shell cat < 将 `$VERSION` 设置为与你的 Kubernetes 相匹配的 CRI-O 版本。 例如,如果要安装 CRI-O 1.20,请设置 `VERSION=1.20`。 @@ -532,9 +507,11 @@ sudo dnf install cri-o ```shell sudo dnf module list cri-o ``` + CRI-O 不支持在 Fedora 上固定到特定的版本。 然后执行 + ```shell sudo dnf module enable cri-o:$VERSION sudo dnf install cri-o --now @@ -545,24 +522,32 @@ sudo dnf install cri-o --now +启动 CRI-O: ```shell sudo systemctl daemon-reload sudo systemctl enable crio --no ``` + +参阅[CRI-O 安装指南](https://github.com/cri-o/cri-o/blob/master/install.md) +了解进一步的详细信息。 + + -默认情况下,CRI-O 使用 systemd cgroup 驱动程序。切换到` -`cgroupfs` -cgroup 驱动程序,或者编辑 `/ etc / crio / crio.conf` 或放置一个插件 +--> +#### cgroup 驱动 + +默认情况下,CRI-O 使用 systemd cgroup 驱动程序。要切换到 `cgroupfs` +驱动程序,或者编辑 `/ etc / crio / crio.conf` 或放置一个插件 在 `/etc/crio/crio.conf.d/02-cgroup-manager.conf` 中的配置,例如: ```toml @@ -570,27 +555,31 @@ cgroup 驱动程序,或者编辑 `/ etc / crio / crio.conf` 或放置一个插 conmon_cgroup = "pod" cgroup_manager = "cgroupfs" ``` + -另请注意更改后的 `conmon_cgroup` ,必须将其设置为 -`pod`将 CRI-O 与 `cgroupfs` 一起使用时。通常有必要保持 -kubelet 的 cgroup 驱动程序配置(通常透过 kubeadm 完成)和CRI-O 同步中。 +另请注意更改后的 `conmon_cgroup`,将 CRI-O 与 `cgroupfs` 一起使用时, +必须将其设置为 `pod`。通常有必要保持 kubelet 的 cgroup 驱动程序配置 +(通常透过 kubeadm 完成)和 CRI-O 一致。 ### Docker + -1. 在每个节点上,根据[安装 Docker 引擎](https://docs.docker.com/engine/install/#server) 为你的 Linux 发行版安装 Docker。 - 你可以在此文件中找到最新的经过验证的 Docker 版本[依赖关系](https://git.k8s.io/kubernetes/build/dependencies.yaml)。 +1. 在每个节点上,根据[安装 Docker 引擎](https://docs.docker.com/engine/install/#server) + 为你的 Linux 发行版安装 Docker。 + 你可以在此文件中找到最新的经过验证的 Docker 版本 + [依赖关系](https://git.k8s.io/kubernetes/build/dependencies.yaml)。 -2. 配置 Docker 守护程序,尤其是使用 systemd 来管理容器的cgroup。 +2. 配置 Docker 守护程序,尤其是使用 systemd 来管理容器的 cgroup。 ```shell sudo mkdir /etc/docker @@ -609,14 +598,16 @@ kubelet 的 cgroup 驱动程序配置(通常透过 kubeadm 完成)和CRI-O {{< note >}} - - 对于运行 Linux 内核版本 4.0 或更高版本,或使用 3.10.0-51 及更高版本的 RHEL 或 CentOS 的系统,`overlay2`是首选的存储驱动程序。 + --> + 对于运行 Linux 内核版本 4.0 或更高版本,或使用 3.10.0-51 及更高版本的 RHEL + 或 CentOS 的系统,`overlay2`是首选的存储驱动程序。 {{< /note >}} + 3. 重新启动 Docker 并在启动时启用: + ```shell sudo systemctl enable docker sudo systemctl daemon-reload @@ -629,9 +620,9 @@ For more information refer to - [Configure the Docker daemon](https://docs.docker.com/config/daemon/) - [Control Docker with systemd](https://docs.docker.com/config/daemon/systemd/) --> -{{< /note >}} +有关更多信息,请参阅 +- [配置 Docker 守护程序](https://docs.docker.com/config/daemon/) +- [使用 systemd 控制 Docker](https://docs.docker.com/config/daemon/systemd/) +{{< /note >}} -有关更多信息,请参阅 - - [配置 Docker 守护程序](https://docs.docker.com/config/daemon/) - - [使用 systemd 控制 Docker](https://docs.docker.com/config/daemon/systemd/) diff --git a/content/zh/docs/setup/production-environment/tools/kops.md b/content/zh/docs/setup/production-environment/tools/kops.md index 871158dc89349..94b448361af24 100644 --- a/content/zh/docs/setup/production-environment/tools/kops.md +++ b/content/zh/docs/setup/production-environment/tools/kops.md @@ -19,9 +19,9 @@ It uses a tool called [`kops`](https://github.com/kubernetes/kops). 
本篇使用了一个名为 [`kops`](https://github.com/kubernetes/kops) 的工具。 -kops 是一个自用的供应系统: +kops 是一个自动化的制备系统: * 全自动安装流程 * 使用 DNS 识别集群 -* 自我修复:一切都在自动扩展组中运行 +* 自我修复:一切都在自动扩缩组中运行 * 支持多种操作系统(如 Debian、Ubuntu 16.04、CentOS、RHEL、Amazon Linux 和 CoreOS) - 参考 [images.md](https://github.com/kubernetes/kops/blob/master/docs/operations/images.md) * 支持高可用 - 参考 [high_availability.md](https://github.com/kubernetes/kops/blob/master/docs/high_availability.md) * 可以直接提供或者生成 terraform 清单 - 参考 [terraform.md](https://github.com/kubernetes/kops/blob/master/docs/terraform.md) +## {{% heading "prerequisites" %}} + -如果你有不同的观点,你可能更喜欢使用 [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) -作为构建工具来构建自己的集群。kops 建立在 kubeadm 工作的基础上。 +* 你必须安装 [kubectl](/zh/docs/tasks/tools/)。 +* 你必须安装[安装](https://github.com/kubernetes/kops#installing) `kops` + 到 64 位的(AMD64 和 Intel 64)设备架构上。 +* 你必须拥有一个 [AWS 账户](https://docs.aws.amazon.com/polly/latest/dg/setting-up.html), + 生成 [IAM 秘钥](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys) + 并[配置](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html#cli-quick-configuration) + 该秘钥。IAM 用户需要[足够的权限许可](https://github.com/kubernetes/kops/blob/master/docs/getting_started/aws.md#setup-iam-user)。 - + ## 创建集群 ### (1/5) 安装 kops -#### 前提条件 +#### 安装 -你必须安装 [kubectl](/zh/docs/tasks/tools/install-kubectl/) 才能使 kops 工作。 +从[下载页面](https://github.com/kubernetes/kops/releases)下载 kops +(从源代码构建也很方便): + +{{< tabs name="kops_installation" >}} +{{% tab name="macOS" %}} +使用下面的命令下载最新发布版本: -Download kops from the [releases page](https://github.com/kubernetes/kops/releases) (it is also easy to build from source): +```shell +curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64 +``` + + -#### 安装 +要下载特定版本,使用特定的 kops 版本替换下面命令中的部分: + +```shell +$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) +``` + + +例如,要下载 kops v1.20.0,输入: -从[下载页面](https://github.com/kubernetes/kops/releases)下载 kops(从源代码构建也很容易): +```shell +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-darwin-amd64 +``` -在 macOS 上: +令 kops 二进制文件可执行: ```shell -curl -OL https://github.com/kubernetes/kops/releases/download/1.10.0/kops-darwin-amd64 chmod +x kops-darwin-amd64 -mv kops-darwin-amd64 /usr/local/bin/kops -# 你也可以使用 Homebrew 安装 kops +``` + + +将 kops 二进制文件移到你的 PATH 下: + +```shell +sudo mv kops-darwin-amd64 /usr/local/bin/kops +``` + +你也可以使用 [Homebrew](https://brew.sh/) 安装 kops: + +```shell brew update && brew install kops ``` +{{% /tab %}} +{{% tab name="Linux" %}} + + +使用命令下载最新发布版本: + +```shell +curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64 +``` + + +要下载 kops 的特定版本,用特定的 kops 版本替换下面命令中的部分: + +```shell +$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4) +``` -在 Linux 上: +例如,要下载 kops v1.20 版本,输入: + +```shell +curl -LO https://github.com/kubernetes/kops/releases/download/v1.20.0/kops-linux-amd64 +``` + + +令 kops 二进制文件可执行: ```shell -wget https://github.com/kubernetes/kops/releases/download/1.10.0/kops-linux-amd64 chmod +x kops-linux-amd64 -mv kops-linux-amd64 /usr/local/bin/kops ``` + +将 kops 二进制文件移到 PATH 下: + + +```shell +sudo mv kops-linux-amd64 /usr/local/bin/kops +``` + +你也可以使用 
[Homebrew](https://docs.brew.sh/Homebrew-on-Linux) +来安装 kops: + +```shell +brew update && brew install kops +``` + +{{% /tab %}} +{{< /tabs >}} + ### (2/5) 为你的集群创建一个 route53 域名 -kops 在集群内部都使用 DNS 进行发现操作,因此你可以从客户端访问 kubernetes API 服务器。 +kops 在集群内部和外部都使用 DNS 进行发现操作,这样你可以从客户端访问 +kubernetes API 服务器。 -你应该使用子域名来划分集群。作为示例,我们将使用域名 `useast1.dev.example.com`。 -然后,API 服务器端点域名将为 `api.useast1.dev.example.com`。 +你可以,或许应该使用子域名来划分集群。作为示例,我们将使用域名 `useast1.dev.example.com`。 +这样,API 服务器端点域名将为 `api.useast1.dev.example.com`。 -这一步很容易搞砸(这是问题的第一大原因!) +检查你的 route53 域已经被正确设置(这是导致问题的最常见原因!)。 如果你安装了 dig 工具,则可以通过运行以下步骤再次检查集群是否配置正确: ```shell @@ -187,8 +277,10 @@ administer the same clusters - this is much easier than passing around kubecfg f to the S3 bucket will have administrative access to all your clusters, so you don't want to share it beyond the operations team. --> -多个集群可以使用同一 S3 存储桶,并且你可以在管理同一集群的同事之间共享一个 S3 存储桶 - 这比传递 kubecfg 文件容易得多。 -但是有权访问 S3 存储桶的任何人都将拥有对所有集群的管理访问权限,因此你不想在运营团队之外共享它。 +多个集群可以使用同一 S3 存储桶,并且你可以在管理同一集群的同事之间共享一个 +S3 存储桶 - 这比传递 kubecfg 文件容易得多。 +但是有权访问 S3 存储桶的任何人都将拥有对所有集群的管理访问权限, +因此你不想在运营团队之外共享它。 -在我们的示例中,我们选择 `dev.example.com` 作为托管区域,因此让我们选择 `clusters.dev.example.com` 作为 S3 存储桶名称。 +在我们的示例中,我们选择 `dev.example.com` 作为托管区域,因此我们选择 +`clusters.dev.example.com` 作为 S3 存储桶名称。 * 导出 `AWS_PROFILE` 文件(如果你需要选择一个配置文件用来使 AWS CLI 正常工作) * 使用 `aws s3 mb s3://clusters.dev.example.com` 创建 S3 存储桶 -* 你可以进行 `export KOPS_STATE_STORE=s3://clusters.dev.example.com` 操作,然后 kops 将默认使用此位置。 +* 你可以进行 `export KOPS_STATE_STORE=s3://clusters.dev.example.com` 操作, + 然后 kops 将默认使用此位置。 我们建议将其放入你的 bash profile 文件或类似文件中。 ### (4/5) 建立你的集群配置 -运行 "kops create cluster" 以创建你的集群配置: +运行 `kops create cluster` 以创建你的集群配置: `kops create cluster --zones=us-east-1c useast1.dev.example.com` @@ -229,7 +323,8 @@ kops will create the configuration for your cluster. Note that it _only_ create not actually create the cloud resources - you'll do that in the next step with a `kops update cluster`. This give you an opportunity to review the configuration or change it. 
--> -kops 将为你的集群创建配置。请注意,它_仅_创建配置,实际上并没有创建云资源 - 你将在下一步中使用 `kops update cluster` 进行配置。 +kops 将为你的集群创建配置。请注意,它_仅_创建配置,实际上并没有创建云资源 - +你将在下一步中使用 `kops update cluster` 进行配置。 这使你有机会查看配置或进行更改。 -## 反馈 - - -* Slack 频道: [#kops-users](https://kubernetes.slack.com/messages/kops-users/) -* [GitHub Issues](https://github.com/kubernetes/kops/issues) - ## {{% heading "whatsnext" %}} * 了解有关 Kubernetes 的[概念](/zh/docs/concepts/) 和 - [`kubectl`](/zh/docs/reference/kubectl/overview/) 的更多信息。 + [`kubectl`](/zh/docs/reference/kubectl/overview/) 有关的更多信息。 * 了解 `kops` [高级用法](https://github.com/kubernetes/kops)。 -* 请参阅 `kops` [文档](https://github.com/kubernetes/kops) 获取教程、最佳做法和高级配置选项。 +* 请参阅 `kops` [文档](https://github.com/kubernetes/kops) 获取教程、 + 最佳做法和高级配置选项。 diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md index eabdd1bc1ffd8..9f7ac075840c5 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm.md @@ -6,13 +6,13 @@ content_type: task weight: 30 --- - +--> @@ -346,6 +346,20 @@ Alternatively, if you are the `root` user, you can run: export KUBECONFIG=/etc/kubernetes/admin.conf ``` +{{< warning >}} + +kubeadm 对 `admin.conf` 中的证书进行签名时,将其配置为 +`Subject: O = system:masters, CN = kubernetes-admin`。 +`system:masters` 是一个例外的、超级用户组,可以绕过鉴权层(例如 RBAC)。 +不要将 `admin.conf` 文件与任何人共享,应该使用 `kubeadm kubeconfig user` +命令为其他用户生成 kubeconfig 文件,完成对他们的定制授权。 +{{< /warning >}} + {{< note >}} -目前 Calico 是 kubeadm 项目中执行 e2e 测试的唯一 CNI 插件。 -如果你发现与 CNI 插件相关的问题,应在其各自的问题跟踪器中记录而不是在 kubeadm 或 kubernetes 问题跟踪器中记录。 +kubeadm 应该是与 CNI 无关的,对 CNI 驱动进行验证目前不在我们的端到端测试范畴之内。 +如果你发现与 CNI 插件相关的问题,应在其各自的问题跟踪器中记录而不是在 kubeadm +或 kubernetes 问题跟踪器中记录。 {{< /note >}} @@ -42,12 +38,12 @@ in the kubeadm [issue tracker](https://github.com/kubernetes/kubeadm/issues/new) See also [The upgrade documentation](/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-15). 
--> -在下一步之前,您应该仔细考虑哪种方法更好的满足您的应用程序和环境的需求。 +在下一步之前,你应该仔细考虑哪种方法更好的满足你的应用程序和环境的需求。 [这是对比文档](/zh/docs/setup/production-environment/tools/kubeadm/ha-topology/) 讲述了每种方法的优缺点。 -如果您在安装 HA 集群时遇到问题,请在 kubeadm [问题跟踪](https://github.com/kubernetes/kubeadm/issues/new)里向我们提供反馈。 +如果你在安装 HA 集群时遇到问题,请在 kubeadm [问题跟踪](https://github.com/kubernetes/kubeadm/issues/new)里向我们提供反馈。 -您也可以阅读 [升级文件](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) +你也可以阅读 [升级文件](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) -对于这两种方法,您都需要以下基础设施: +对于这两种方法,你都需要以下基础设施: -- 配置三台机器 [kubeadm 的最低要求](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) 给主节点 -- 配置三台机器 [kubeadm 的最低要求](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) 给工作节点 -- 在集群中,所有计算机之间的完全网络连接(公网或私网) -- 所有机器上的 sudo 权限 -- 每台设备对系统中所有节点的 SSH 访问 -- 在所有机器上安装 `kubeadm` 和 `kubelet`,`kubectl` 是可选的。 +- 配置满足 [kubeadm 的最低要求](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) + 的三台机器作为控制面节点 +- 配置满足 [kubeadm 的最低要求](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#before-you-begin) + 的三台机器作为工作节点 +- 在集群中,确保所有计算机之间存在全网络连接(公网或私网) +- 在所有机器上具有 sudo 权限 +- 从某台设备通过 SSH 访问系统中所有节点的能力 +- 所有机器上已经安装 `kubeadm` 和 `kubelet`,`kubectl` 是可选的。 -仅对于外部 etcd 集群来说,您还需要: +仅对于外部 etcd 集群来说,你还需要: - 给 etcd 成员使用的另外三台机器 - - - + ## 这两种方法的第一步 - ### 为 kube-apiserver 创建负载均衡器 +{{< note >}} -{{< note >}} -使用负载均衡器需要许多配置。您的集群搭建可能需要不同的配置。下面的例子只是其中的一方面配置。 +使用负载均衡器需要许多配置。你的集群搭建可能需要不同的配置。 +下面的例子只是其中的一方面配置。 {{< /note >}} 1. 创建一个名为 kube-apiserver 的负载均衡器解析 DNS。 - - 在云环境中,应该将控制平面节点放置在 TCP 后面转发负载平衡。 该负载均衡器将流量分配给目标列表中所有运行状况良好的控制平面节点。健康检查 apiserver 是在 kube-apiserver 监听端口(默认值 `:6443`)上的一个 TCP 检查。 + - 在云环境中,应该将控制平面节点放置在 TCP 后面转发负载平衡。 + 该负载均衡器将流量分配给目标列表中所有运行状况良好的控制平面节点。 + API 服务器的健康检查是在 kube-apiserver 的监听端口(默认值 `:6443`) + 上进行的一个 TCP 检查。 - 不建议在云环境中直接使用 IP 地址。 - - 负载均衡器必须能够在 apiserver 端口上与所有控制平面节点通信。它还必须允许其监听端口的传入流量。 + - 负载均衡器必须能够在 API 服务器端口上与所有控制平面节点通信。 + 它还必须允许其监听端口的入站流量。 - 确保负载均衡器的地址始终匹配 kubeadm 的 `ControlPlaneEndpoint` 地址。 - - 阅读[软件负载平衡选项指南](https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing)以获取更多详细信息。 + - 阅读[软件负载平衡选项指南](https://github.com/kubernetes/kubeadm/blob/master/docs/ha-considerations.md#options-for-software-load-balancing) + 以获取更多详细信息。 + ## 使用堆控制平面和 etcd 节点 - ### 控制平面节点的第一步 1. 
初始化控制平面: - ```sh + ```shell sudo kubeadm init --control-plane-endpoint "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" --upload-certs ``` - - 您可以使用 `--kubernetes-version` 标志来设置要使用的 Kubernetes 版本。建议将 kubeadm、kebelet、kubectl 和 Kubernetes 的版本匹配。 + - 你可以使用 `--kubernetes-version` 标志来设置要使用的 Kubernetes 版本。 + 建议将 kubeadm、kebelet、kubectl 和 Kubernetes 的版本匹配。 - 这个 `--control-plane-endpoint` 标志应该被设置成负载均衡器的地址或 DNS 和端口。 - - 这个 `--upload-certs` 标志用来将在所有控制平面实例之间的共享证书上传到集群。如果正好相反,你更喜欢手动地通过控制平面节点或者使用自动化 - 工具复制证书,请删除此标志并参考如下部分[证书分配手册](#manual-certs)。 - - -{{< note >}} -标志 `kubeadm init`、`--config` 和 `--certificate-key` 不能混合使用,因此如果您要使用[kubeadm 配置](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2),您必须在相应的配置文件(位于 `InitConfiguration` 和 `JoinConfiguration: controlPlane`)添加 `certificateKey` 字段。 -{{< /note >}} + - 这个 `--upload-certs` 标志用来将在所有控制平面实例之间的共享证书上传到集群。 + 如果正好相反,你更喜欢手动地通过控制平面节点或者使用自动化 + 工具复制证书,请删除此标志并参考如下部分[证书分配手册](#manual-certs)。 + + {{< note >}} + + 标志 `kubeadm init`、`--config` 和 `--certificate-key` 不能混合使用, + 因此如果你要使用 + [kubeadm 配置](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2), + 你必须在相应的配置文件 + (位于 `InitConfiguration` 和 `JoinConfiguration: controlPlane`)添加 `certificateKey` 字段。 + {{< /note >}} + + {{< note >}} + + 一些 CNI 网络插件如 Calico 需要 CIDR 例如 `192.168.0.0/16` 和一些像 Weave 没有。参考 + [CNI 网络文档](/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network)。 + 通过传递 `--pod-network-cidr` 标志添加 pod CIDR,或者你可以使用 kubeadm + 配置文件,在 `ClusterConfiguration` 的 `networking` 对象下设置 `podSubnet` 字段。 + {{< /note >}} + + + - 输出类似于: - -{{< note >}} -一些 CNI 网络插件如 Calico 需要 CIDR 例如 `192.168.0.0/16` 和一些像 Weave 没有。参考 -[CNI 网络文档](/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network)。 -通过传递 `--pod-network-cidr` 标志添加 pod CIDR,或者您可以使用 kubeadm 配置文件,在 `ClusterConfiguration` 的 `networking` 对象下设置 `podSubnet` 字段。 -{{< /note >}} - - - -- 命令完成后,您应该会看到类似以下内容: - - ```sh - ... - 现在,您可以通过在根目录上运行以下命令来加入任意数量的控制平面节点: - kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07 - - 请注意,证书密钥可以访问集群内敏感数据,请保密! - 为了安全起见,将在两个小时内删除上传的证书; 如有必要,您可以使用 kubeadm 初始化上传证书阶段,之后重新加载证书。 - - 然后,您可以通过在根目录上运行以下命令来加入任意数量的工作节点: - kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 - ``` - - - 将此输出复制到文本文件。 稍后您将需要它来将控制平面节点和辅助节点加入集群。 - - 当 `--upload-certs` 与 `kubeadm init` 一起使用时,主控制平面的证书被加密并上传到 `kubeadm-certs` 密钥中。 + --> + - 将此输出复制到文本文件。 稍后你将需要它来将控制平面节点和工作节点加入集群。 + - 当 `--upload-certs` 与 `kubeadm init` 一起使用时,主控制平面的证书 + 被加密并上传到 `kubeadm-certs` Secret 中。 - 要重新上传证书并生成新的解密密钥,请在已加入集群节点的控制平面上使用以下命令: - ```sh + ```shell sudo kubeadm init phase upload-certs --upload-certs ``` + + - 你还可以在 `init` 期间指定自定义的 `--certificate-key`,以后可以由 `join` 使用。 + 要生成这样的密钥,可以使用以下命令: - - 您还可以在 `init` 期间指定自定义的 `--certificate-key`,以后可以由 `join` 使用。 - 要生成这样的密钥,可以使用以下命令: - - ```sh - kubeadm alpha certs certificate-key + ```shell + kubeadm certs certificate-key ``` - -{{< note >}} -`kubeadm-certs` 密钥和解密密钥会在两个小时后失效。 -{{< /note >}} + {{< note >}} + + `kubeadm-certs` 密钥和解密密钥会在两个小时后失效。 + {{< /note >}} - -{{< caution >}} -正如命令输出中所述,证书密钥可访问群集敏感数据,并将其保密! -{{< /caution >}} + {{< caution >}} + + 正如命令输出中所述,证书密钥可访问群集敏感数据。请妥善保管! + {{< /caution >}} -1. 应用您选择的 CNI 插件: +2. 
应用你所选择的 CNI 插件: [请遵循以下指示](/zh/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/#pod-network) - 安装 CNI 提供程序。如果适用,请确保配置与 kubeadm 配置文件中指定的 Pod CIDR 相对应。 + 安装 CNI 提供程序。如果适用,请确保配置与 kubeadm 配置文件中指定的 Pod + CIDR 相对应。 在此示例中,我们使用 Weave Net: - ```sh + ```shell kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" ``` -1. 输入以下内容,并查看 pods 的控制平面组件启动: + +3. 输入以下内容,并查看控制平面组件的 Pods 启动: - ```sh + ```shell kubectl get pod -n kube-system -w ``` @@ -340,14 +326,14 @@ As stated in the command output, the certificate key gives access to cluster sen --> ### 其余控制平面节点的步骤 +{{< note >}} -{{< note >}} -从 kubeadm 1.15 版本开始,您可以并行加入多个控制平面节点。 -在此版本之前,您必须在第一个节点初始化后才能依序的增加新的控制平面节点。 +从 kubeadm 1.15 版本开始,你可以并行加入多个控制平面节点。 +在此版本之前,你必须在第一个节点初始化后才能依序的增加新的控制平面节点。 {{< /note >}} -对于每个其他控制平面节点,您应该: +对于每个其他控制平面节点,你应该: -1. 执行先前由第一个节点上的 `kubeadm init` 输出提供给您的 join 命令。 +1. 执行先前由第一个节点上的 `kubeadm init` 输出提供给你的 join 命令。 它看起来应该像这样: ```sh @@ -375,7 +361,8 @@ For each additional control plane node you should: ``` - 这个 `--control-plane` 命令通知 `kubeadm join` 创建一个新的控制平面。 - - `--certificate-key ...` 将导致从集群中的 `kubeadm-certs` 秘钥下载控制平面证书并使用给定的密钥进行解密。 + - `--certificate-key ...` 将导致从集群中的 `kubeadm-certs` Secret 下载 + 控制平面证书并使用给定的密钥进行解密。 - ## 外部 etcd 节点 使用外部 etcd 节点设置集群类似于用于堆叠 etcd 的过程, -不同之处在于您应该首先设置 etcd,并在 kubeadm 配置文件中传递 etcd 信息。 +不同之处在于你应该首先设置 etcd,并在 kubeadm 配置文件中传递 etcd 信息。 - ### 设置 ectd 集群 -1. 按照 [这些指示](/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) 去设置 etcd 集群。 +1. 按照 [这些指示](/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) + 去设置 etcd 集群。 -1. 设置 SSH 在 [这](#manual-certs)描述。 +1. 根据[这里](#manual-certs)的描述配置 SSH。 1. 将以下文件从集群中的任何 etcd 节点复制到第一个控制平面节点: - ```sh + ```shell export CONTROL_PLANE="ubuntu@10.0.0.7" scp /etc/kubernetes/pki/etcd/ca.crt "${CONTROL_PLANE}": scp /etc/kubernetes/pki/apiserver-etcd-client.crt "${CONTROL_PLANE}": @@ -432,93 +417,86 @@ in the kubeadm config file. 1. Create a file called `kubeadm-config.yaml` with the following contents: - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - kubernetesVersion: stable - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - external: - endpoints: - - https://ETCD_0_IP:2379 - - https://ETCD_1_IP:2379 - - https://ETCD_2_IP:2379 - caFile: /etc/kubernetes/pki/etcd/ca.crt - certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt - keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key - + ```yaml + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + kubernetesVersion: stable + controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" + etcd: + external: + endpoints: + - https://ETCD_0_IP:2379 + - https://ETCD_1_IP:2379 + - https://ETCD_2_IP:2379 + caFile: /etc/kubernetes/pki/etcd/ca.crt + certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt + keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key + ``` --> ### 设置第一个控制平面节点 1. 
用以下内容创建一个名为 `kubeadm-config.yaml` 的文件: - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - kubernetesVersion: stable - controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT" - etcd: - external: - endpoints: - - https://ETCD_0_IP:2379 - - https://ETCD_1_IP:2379 - - https://ETCD_2_IP:2379 - caFile: /etc/kubernetes/pki/etcd/ca.crt - certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt - keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key - - -{{< note >}} -这里堆 etcd 和外部 etcd 之前的区别在于设置外部 etcd 需要一个 `etcd` 的 `external` 对象下带有 etcd 端点的配置文件。 -如果是堆 etcd 技术,是自动管理的。 -{{< /note >}} - - + 这里的内部(stacked) etcd 和外部 etcd 之前的区别在于设置外部 etcd + 需要一个 `etcd` 的 `external` 对象下带有 etcd 端点的配置文件。 + 如果是内部 etcd,是自动管理的。 + {{< /note >}} + + + - 在你的集群中,将配置模板中的以下变量替换为适当值: - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `ETCD_0_IP` - - `ETCD_1_IP` - - `ETCD_2_IP` + - `LOAD_BALANCER_DNS` + - `LOAD_BALANCER_PORT` + - `ETCD_0_IP` + - `ETCD_1_IP` + - `ETCD_2_IP` + +以下的步骤与设置内置 etcd 的集群是相似的: + +1. 在节点上运行 `sudo kubeadm init --config kubeadm-config.yaml --upload-certs` 命令。 -- 在您的集群中,将配置模板中的以下变量替换为适当值: - - - `LOAD_BALANCER_DNS` - - `LOAD_BALANCER_PORT` - - `ETCD_0_IP` - - `ETCD_1_IP` - - `ETCD_2_IP` +1. 记下输出的 join 命令,这些命令将在以后使用。 -以下的步骤与设置堆集群是相似的: +1. 应用你选择的 CNI 插件。以下示例适用于 Weave Net: -1. 在节点上运行 `sudo kubeadm init --config kubeadm-config.yaml --upload-certs` 命令。 - -1. 编写输出联接命令,这些命令将返回到文本文件以供以后使用。 - -1. 应用您选择的 CNI 插件。 给定以下示例适用于 Weave Net: - - ```sh - kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" - ``` + ```shell + kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" + ``` - ### 其他控制平面节点的步骤 -步骤与设置堆 etcd 相同: +步骤与设置内置 etcd 相同: - 确保第一个控制平面节点已完全初始化。 -- 使用保存到文本文件的连接命令将每个控制平面节点连接在一起。建议一次加入一个控制平面节点。 +- 使用保存到文本文件的 join 命令将每个控制平面节点连接在一起。 + 建议一次加入一个控制平面节点。 - 不要忘记默认情况下,`--certificate-key` 中的解密秘钥会在两个小时后过期。 - + ## 列举控制平面之后的常见任务 - ### 安装工作节点 -您可以使用之前存储的命令将工作节点加入集群中 -作为 `kubeadm init` 命令的输出: +你可以使用之前存储的 `kubeadm init` 命令的输出将工作节点加入集群中: ```sh sudo kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 @@ -573,11 +547,10 @@ There are many ways to do this. In the following example we are using `ssh` and SSH is required if you want to control all nodes from a single machine. --> - ## 手动证书分发 {#manual-certs} -如果您选择不将 `kubeadm init` 与 `--upload-certs` 命令一起使用, -则意味着您将必须手动将证书从主控制平面节点复制到 +如果你选择不将 `kubeadm init` 与 `--upload-certs` 命令一起使用, +则意味着你将必须手动将证书从主控制平面节点复制到 将要加入的控制平面节点上。 有许多方法可以实现这种操作。在下面的例子中我们使用 `ssh` 和 `scp`: @@ -585,147 +558,106 @@ SSH is required if you want to control all nodes from a single machine. 如果要在单独的一台计算机控制所有节点,则需要 SSH。 +1. 在你的主设备上启用 ssh-agent,要求该设备能访问系统中的所有其他节点: -1. 在您的主设备上启动 ssh-agent,要求该设备能访问系统中的所有其他节点: - - ``` - eval $(ssh-agent) - ``` - -1. 将 SSH 身份添加到会话中: - - ``` - ssh-add ~/.ssh/path_to_private_key - ``` - -1. 检查节点间的 SSH 以确保连接是正常运行的 + ```shell + eval $(ssh-agent) + ``` - - SSH 到任何节点时,请确保添加 `-A` 标志: - - ``` - ssh -A 10.0.0.7 - ``` - - - 当在任何节点上使用 sudo 时,请确保环境完善,以便使用 SSH - 转发任务: + +2. 将 SSH 身份添加到会话中: - ``` - sudo -E -s - ``` + ```shell + ssh-add ~/.ssh/path_to_private_key + ``` +3. 检查节点间的 SSH 以确保连接是正常运行的 + + + - SSH 到任何节点时,请确保添加 `-A` 标志: + + ```shell + ssh -A 10.0.0.7 + ``` + + + - 当在任何节点上使用 sudo 时,请确保保持环境变量设置,以便 SSH + 转发能够正常工作: + + ```shell + sudo -E -s + ``` + - -1. 在所有节点上配置 SSH 之后,您应该在运行过 `kubeadm init` 命令的第一个控制平面节点上运行以下脚本。 +4. 
在所有节点上配置 SSH 之后,你应该在运行过 `kubeadm init` 命令的第一个 + 控制平面节点上运行以下脚本。 该脚本会将证书从第一个控制平面节点复制到另一个控制平面节点: - 在以下示例中,用其他控制平面节点的 IP 地址替换 `CONTROL_PLANE_IPS`。 - - ```sh - USER=ubuntu # 可自己设置 - CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" - for host in ${CONTROL_PLANE_IPS}; do - scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.key "${USER}"@$host: - scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: - scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: - scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt - scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key - done - ``` - - -{{< caution >}} -只需要复制上面列表中的证书。kubeadm 将负责生成其余证书以及加入控制平面实例所需的 SAN。 -如果您错误地复制了所有证书,由于缺少所需的 SAN,创建其他节点可能会失败。 -{{< /caution >}} + + 在以下示例中,用其他控制平面节点的 IP 地址替换 `CONTROL_PLANE_IPS`。 + + ```sh + USER=ubuntu # 可定制 + CONTROL_PLANE_IPS="10.0.0.7 10.0.0.8" + for host in ${CONTROL_PLANE_IPS}; do + scp /etc/kubernetes/pki/ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.key "${USER}"@$host: + scp /etc/kubernetes/pki/sa.pub "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host: + scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host: + scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt + scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key + done + ``` + + {{< caution >}} + + 只需要复制上面列表中的证书。kubeadm 将负责生成其余证书以及加入控制平面实例所需的 SAN。 + 如果你错误地复制了所有证书,由于缺少所需的 SAN,创建其他节点可能会失败。 + {{< /caution >}} - -1. 然后,在每个连接控制平面节点上,您必须先运行以下脚本,然后再运行 `kubeadm join`。 +5. 然后,在每个即将加入集群的控制平面节点上,你必须先运行以下脚本,然后 + 再运行 `kubeadm join`。 该脚本会将先前复制的证书从主目录移动到 `/etc/kubernetes/pki`: - ```sh - USER=ubuntu # 可自己设置 - mkdir -p /etc/kubernetes/pki/etcd - mv /home/${USER}/ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/ca.key /etc/kubernetes/pki/ - mv /home/${USER}/sa.pub /etc/kubernetes/pki/ - mv /home/${USER}/sa.key /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ - mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ - mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt - mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key - ``` + ```shell + USER=ubuntu # 可定制 + mkdir -p /etc/kubernetes/pki/etcd + mv /home/${USER}/ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/ca.key /etc/kubernetes/pki/ + mv /home/${USER}/sa.pub /etc/kubernetes/pki/ + mv /home/${USER}/sa.key /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.crt /etc/kubernetes/pki/ + mv /home/${USER}/front-proxy-ca.key /etc/kubernetes/pki/ + mv /home/${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt + mv /home/${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key + ``` + diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index c584a0562319d..e2640977be064 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -495,66 +495,36 @@ kubeadm to tell it what to do. 
kubelet 现在每隔几秒就会重启,因为它陷入了一个等待 kubeadm 指令的死循环。 -## 在控制平面节点上配置 kubelet 使用的 cgroup 驱动程序 {#configure-cgroup-driver-used-by-kubelet-on-contol-plane-node} - -使用 Docker 时,kubeadm 会自动为其检测 cgroup 驱动并在运行时对 -`/var/lib/kubelet/kubeadm-flags.env` 文件进行配置。 +## 配置 cgroup 驱动程序 {#configure-cgroup-driver} -如果你在使用不同的 CRI,你必须为 `kubeadm init` 传递 `cgroupDriver` -值,像这样: - -```yaml -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -cgroupDriver: -``` +容器运行时和 kubelet 都具有名字为 +["cgroup driver"](/zh/docs/setup/production-environment/container-runtimes/) +的属性,该属性对于在 Linux 机器上管理 CGroups 而言非常重要。 +{{< warning >}} -进一步的相关细节,可参阅 -[使用配置文件来执行 kubeadm init](/zh/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) 以及 [KubeletConfiguration](/docs/reference/config-api/kubelet-config.v1beta1/)。 - -请注意,你只需要在你的 cgroup 驱动程序不是 `cgroupfs` 时这么做, -因为它已经是 kubelet 中的默认值。 +你需要确保容器运行时和 kubelet 所使用的是相同的 cgroup 驱动,否则 kubelet +进程会失败。 -{{< note >}} - -由于 kubelet 已经弃用了 `--cgroup-driver` 标志,如果你在配置文件 -`/var/lib/kubelet/kubeadm-flags.env` 或者 `/etc/default/kubelet` -(对于 RPM 而言是 `/etc/sysconfig/kubelet`)包含此设置,请将其删除 -并使用 KubeletConfiguration 作为替代(默认存储于 -`/var/lib/kubelet/config.yaml` 文件中)。 -{{< /note >}} - - -自动检测其他容器运行时(例如 CRI-O 和 containerd)的 cgroup 驱动的相关 -工作扔在进行中。 +相关细节可参见[配置 cgroup 驱动](/zh/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/)。 +{{< /warning >}} -## 故障排查 +## 故障排查 {#troubleshooting} 如果你在使用 kubeadm 时遇到困难,请参阅我们的 [故障排查文档](/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm/)。 diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md b/content/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md index 4dd6304c6de8b..08e17cc1b3242 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/kubelet-integration.md @@ -6,13 +6,11 @@ content_type: concept weight: 80 --- @@ -33,10 +31,11 @@ manager instead, but you need to configure it manually. Some kubelet configuration details need to be the same across all kubelets involved in the cluster, while other configuration aspects need to be set on a per-kubelet basis to accommodate the different characteristics of a given machine (such as OS, storage, and networking). You can manage the configuration -of your kubelets manually, but kubeadm now provides a `KubeletConfiguration` API type for [managing your -kubelet configurations centrally](#configure-kubelets-using-kubeadm). +of your kubelets manually, but kubeadm now provides a `KubeletConfiguration` API type for +[managing your kubelet configurations centrally](#configure-kubelets-using-kubeadm). 
--> -kubeadm CLI 工具的生命周期与 [kubelet](/zh/docs/reference/command-line-tools-reference/kubelet)解耦,它是一个守护程序,在 Kubernetes 集群中的每个节点上运行。 +kubeadm CLI 工具的生命周期与 [kubelet](/zh/docs/reference/command-line-tools-reference/kubelet) +解耦;kubelet 是一个守护程序,在 Kubernetes 集群中的每个节点上运行。 当 Kubernetes 初始化或升级时,kubeadm CLI 工具由用户执行,而 kubelet 始终在后台运行。 由于kubelet是守护程序,因此需要通过某种初始化系统或服务管理器进行维护。 @@ -48,8 +47,6 @@ kubeadm CLI 工具的生命周期与 [kubelet](/zh/docs/reference/command-line-t 你可以手动地管理 kubelet 的配置,但是 kubeadm 现在提供一种 `KubeletConfiguration` API 类型 用于[集中管理 kubelet 的配置](#configure-kubelets-using-kubeadm)。 - - ### 将集群级配置传播到每个 kubelet 中 @@ -106,8 +106,8 @@ kubeadm init --service-cidr 10.96.0.0/12 你还需要通过 kubelet 使用 `--cluster-dns` 标志设置 DNS 地址。 在集群中的每个管理器和节点上的 kubelet 的设置需要相同。 kubelet 提供了一个版本化的结构化 API 对象,该对象可以配置 kubelet 中的大多数参数,并将此配置推送到集群中正在运行的每个 kubelet 上。 -此对象被称为 **kubelet 的配置组件**。 -该配置组件允许用户指定标志,例如用骆峰值代表集群的 DNS IP 地址,如下所示: +此对象被称为 [`KubeletConfiguration`](/zh/docs/reference/config-api/kubelet-config.v1beta1/)。 +`KubeletConfiguration` 允许用户指定标志,例如用骆峰值代表集群的 DNS IP 地址,如下所示: ```yaml apiVersion: kubelet.config.k8s.io/v1beta1 @@ -116,7 +116,7 @@ clusterDNS: - 10.96.0.10 ``` -有关组件配置的更多详细信息,亲参阅 [本节](#configure-kubelets-using-kubeadm)。 +有关 `KubeletConfiguration` 的更多详细信息,亲参阅[本节](#configure-kubelets-using-kubeadm)。 ## 使用 kubeadm 配置 kubelet @@ -186,7 +186,8 @@ for more information on the individual fields. 通过调用 `kubeadm config print init-defaults --component-configs KubeletConfiguration`, 你可以看到此结构中的所有默认值。 -也可以阅读 [kubelet 配置组件的 API 参考](https://godoc.org/k8s.io/kubernetes/pkg/kubelet/apis/config#KubeletConfiguration)来获取有关各个字段的更多信息。 +也可以阅读 [KubeletConfiguration 参考](/docs/reference/config-api/kubelet-config.v1beta1/) +来获取有关各个字段的更多信息。 -## Kubernetes 二进制文件和软件包内容 +## Kubernetes 可执行文件和软件包内容 Kubernetes 版本对应的 DEB 和 RPM 软件包是: @@ -383,4 +388,3 @@ Kubernetes 版本对应的 DEB 和 RPM 软件包是: | `kubectl` | 安装 `/usr/bin/kubectl` 可执行文件。 | | `cri-tools` | 从 [cri-tools git 仓库](https://github.com/kubernetes-sigs/cri-tools)中安装 `/usr/bin/crictl` 可执行文件。 | - diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md index b29cf2215c810..38602913d3a4b 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm.md @@ -5,13 +5,11 @@ weight: 70 --- @@ -35,7 +33,7 @@ becoming unavailable. This task walks through the process of creating a high availability etcd cluster of three members that can be used as an external etcd when using kubeadm to set up a kubernetes cluster. --> -默认情况下,kubeadm 运行单成员的 etcd 集群,该集群由控制面节点上的 kubelet 以静态 Pod 的方式进行管理。由于 etcd 集群只包含一个成员且不能在任一成员不可用时保持运行,所以这不是一种高可用设置。本任务,将告诉您如何在使用 kubeadm 创建一个 kubernetes 集群时创建一个外部 etcd:有三个成员的高可用 etcd 集群。 +默认情况下,kubeadm 运行单成员的 etcd 集群,该集群由控制面节点上的 kubelet 以静态 Pod 的方式进行管理。由于 etcd 集群只包含一个成员且不能在任一成员不可用时保持运行,所以这不是一种高可用设置。本任务,将告诉你如何在使用 kubeadm 创建一个 kubernetes 集群时创建一个外部 etcd:有三个成员的高可用 etcd 集群。 @@ -85,334 +83,287 @@ kubeadm 包含生成下述证书所需的所有必要的密码学工具;在这 1. 将 kubelet 配置为 etcd 的服务管理器。 - 由于 etcd 是首先创建的,因此您必须通过创建具有更高优先级的新文件来覆盖 kubeadm 提供的 kubelet 单元文件。 - - ```sh - cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf - [Service] - ExecStart= - # Replace "systemd" with the cgroup driver of your container runtime. The default value in the kubelet is "cgroupfs". 
- ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd - Restart=always - EOF - - systemctl daemon-reload - systemctl restart kubelet - ``` - - - -1. 为 kubeadm 创建配置文件。 - - 使用以下脚本为每个将要运行 etcd 成员的主机生成一个 kubeadm 配置文件。 - - - ```sh - # 使用 IP 或可解析的主机名替换 HOST0、HOST1 和 HOST2 - export HOST0=10.0.0.6 - export HOST1=10.0.0.7 - export HOST2=10.0.0.8 - - # 创建临时目录来存储将被分发到其它主机上的文件 - mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/ - - ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2}) - NAMES=("infra0" "infra1" "infra2") - - for i in "${!ETCDHOSTS[@]}"; do - HOST=${ETCDHOSTS[$i]} - NAME=${NAMES[$i]} - cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml - apiVersion: "kubeadm.k8s.io/v1beta2" - kind: ClusterConfiguration - etcd: - local: - serverCertSANs: - - "${HOST}" - peerCertSANs: - - "${HOST}" - extraArgs: - initial-cluster: infra0=https://${ETCDHOSTS[0]}:2380,infra1=https://${ETCDHOSTS[1]}:2380,infra2=https://${ETCDHOSTS[2]}:2380 - initial-cluster-state: new - name: ${NAME} - listen-peer-urls: https://${HOST}:2380 - listen-client-urls: https://${HOST}:2379 - advertise-client-urls: https://${HOST}:2379 - initial-advertise-peer-urls: https://${HOST}:2380 - EOF - done - ``` - - -1. 生成证书颁发机构 - - 如果您已经拥有 CA,那么唯一的操作是复制 CA 的 `crt` 和 `key` 文件到 `etc/kubernetes/pki/etcd/ca.crt` 和 `/etc/kubernetes/pki/etcd/ca.key`。复制完这些文件后继续下一步,“为每个成员创建证书”。 - - - 如果您还没有 CA,则在 `$HOST0`(您为 kubeadm 生成配置文件的位置)上运行此命令。 - - ``` - kubeadm init phase certs etcd-ca - ``` - - - 创建了如下两个文件 - - - `/etc/kubernetes/pki/etcd/ca.crt` - - `/etc/kubernetes/pki/etcd/ca.key` - - -1. 为每个成员创建证书 - - - ```sh - kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST2}/ - # 清理不可重复使用的证书 - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml - cp -R /etc/kubernetes/pki /tmp/${HOST1}/ - find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete - - kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml - # 不需要移动 certs 因为它们是给 HOST0 使用的 - - # 清理不应从此主机复制的证书 - find /tmp/${HOST2} -name ca.key -type f -delete - find /tmp/${HOST1} -name ca.key -type f -delete - ``` - - -1. 复制证书和 kubeadm 配置 - - 证书已生成,现在必须将它们移动到对应的主机。 - - ```sh - USER=ubuntu - HOST=${HOST1} - scp -r /tmp/${HOST}/* ${USER}@${HOST}: - ssh ${USER}@${HOST} - USER@HOST $ sudo -Es - root@HOST $ chown -R root:root pki - root@HOST $ mv pki /etc/kubernetes/ - ``` - - -1. 
确保已经所有预期的文件都存在 - - `$HOST0` 所需文件的完整列表如下: - - ``` - /tmp/${HOST0} - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── ca.key - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - - 在 `$HOST1`: - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - - 在 `$HOST2` - - ``` - $HOME - └── kubeadmcfg.yaml - --- - /etc/kubernetes/pki - ├── apiserver-etcd-client.crt - ├── apiserver-etcd-client.key - └── etcd - ├── ca.crt - ├── healthcheck-client.crt - ├── healthcheck-client.key - ├── peer.crt - ├── peer.key - ├── server.crt - └── server.key - ``` - - -1. 创建静态 Pod 清单 - - 既然证书和配置已经就绪,是时候去创建清单了。在每台主机上运行 `kubeadm` 命令来生成 etcd 使用的静态清单。 - - ```sh - root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml - root@HOST1 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - root@HOST2 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml - ``` - - -1. 可选:检查群集运行状况 - - ```sh - docker run --rm -it \ - --net host \ - -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \ - --cert /etc/kubernetes/pki/etcd/peer.crt \ - --key /etc/kubernetes/pki/etcd/peer.key \ - --cacert /etc/kubernetes/pki/etcd/ca.crt \ - --endpoints https://${HOST0}:2379 endpoint health --cluster - ... - https://[HOST0 IP]:2379 is healthy: successfully committed proposal: took = 16.283339ms - https://[HOST1 IP]:2379 is healthy: successfully committed proposal: took = 19.44402ms - https://[HOST2 IP]:2379 is healthy: successfully committed proposal: took = 35.926451ms - ``` - - - 将 `${ETCD_TAG}` 设置为你的 etcd 镜像的版本标签,例如 `3.4.3-0`。要查看 kubeadm 使用的 etcd 镜像和标签,请执行 `kubeadm config images list --kubernetes-version ${K8S_VERSION}`,其中 `${K8S_VERSION}` 是 `v1.17.0` 作为例子。 - - - 将 `${HOST0}` 设置为要测试的主机的 IP 地址 + {{< note >}} + 你必须在要运行 etcd 的所有主机上执行此操作。 + {{< /note >}} + 由于 etcd 是首先创建的,因此你必须通过创建具有更高优先级的新文件来覆盖 + kubeadm 提供的 kubelet 单元文件。 + + ```sh + cat << EOF > /etc/systemd/system/kubelet.service.d/20-etcd-service-manager.conf + [Service] + ExecStart= + # 将下面的 "systemd" 替换为你的容器运行时所使用的 cgroup 驱动。 + # kubelet 的默认值为 "cgroupfs"。 + ExecStart=/usr/bin/kubelet --address=127.0.0.1 --pod-manifest-path=/etc/kubernetes/manifests --cgroup-driver=systemd + Restart=always + EOF + + systemctl daemon-reload + systemctl restart kubelet + ``` + + + 检查 kubelet 的状态以确保其处于运行状态: + + + ```shell + systemctl status kubelet + ``` + + +2. 
为 kubeadm 创建配置文件。 + + 使用以下脚本为每个将要运行 etcd 成员的主机生成一个 kubeadm 配置文件。 + + ```sh + # 使用 IP 或可解析的主机名替换 HOST0、HOST1 和 HOST2 + export HOST0=10.0.0.6 + export HOST1=10.0.0.7 + export HOST2=10.0.0.8 + + # 创建临时目录来存储将被分发到其它主机上的文件 + mkdir -p /tmp/${HOST0}/ /tmp/${HOST1}/ /tmp/${HOST2}/ + + ETCDHOSTS=(${HOST0} ${HOST1} ${HOST2}) + NAMES=("infra0" "infra1" "infra2") + + for i in "${!ETCDHOSTS[@]}"; do + HOST=${ETCDHOSTS[$i]} + NAME=${NAMES[$i]} + cat << EOF > /tmp/${HOST}/kubeadmcfg.yaml + apiVersion: "kubeadm.k8s.io/v1beta2" + kind: ClusterConfiguration + etcd: + local: + serverCertSANs: + - "${HOST}" + peerCertSANs: + - "${HOST}" + extraArgs: + initial-cluster: infra0=https://${ETCDHOSTS[0]}:2380,infra1=https://${ETCDHOSTS[1]}:2380,infra2=https://${ETCDHOSTS[2]}:2380 + initial-cluster-state: new + name: ${NAME} + listen-peer-urls: https://${HOST}:2380 + listen-client-urls: https://${HOST}:2379 + advertise-client-urls: https://${HOST}:2379 + initial-advertise-peer-urls: https://${HOST}:2380 + EOF + done + ``` + + +3. 生成证书颁发机构 + + 如果你已经拥有 CA,那么唯一的操作是复制 CA 的 `crt` 和 `key` 文件到 + `etc/kubernetes/pki/etcd/ca.crt` 和 `/etc/kubernetes/pki/etcd/ca.key`。 + 复制完这些文件后继续下一步,“为每个成员创建证书”。 + + + 如果你还没有 CA,则在 `$HOST0`(你为 kubeadm 生成配置文件的位置)上运行此命令。 + + ``` + kubeadm init phase certs etcd-ca + ``` + + + 这一操作创建如下两个文件 + + - `/etc/kubernetes/pki/etcd/ca.crt` + - `/etc/kubernetes/pki/etcd/ca.key` + + +4. 为每个成员创建证书 + + ```shell + kubeadm init phase certs etcd-server --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST2}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST2}/kubeadmcfg.yaml + cp -R /etc/kubernetes/pki /tmp/${HOST2}/ + # 清理不可重复使用的证书 + find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete + + kubeadm init phase certs etcd-server --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST1}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST1}/kubeadmcfg.yaml + cp -R /etc/kubernetes/pki /tmp/${HOST1}/ + find /etc/kubernetes/pki -not -name ca.crt -not -name ca.key -type f -delete + + kubeadm init phase certs etcd-server --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs etcd-peer --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs etcd-healthcheck-client --config=/tmp/${HOST0}/kubeadmcfg.yaml + kubeadm init phase certs apiserver-etcd-client --config=/tmp/${HOST0}/kubeadmcfg.yaml + # 不需要移动 certs 因为它们是给 HOST0 使用的 + + # 清理不应从此主机复制的证书 + find /tmp/${HOST2} -name ca.key -type f -delete + find /tmp/${HOST1} -name ca.key -type f -delete + ``` + + +5. 复制证书和 kubeadm 配置 + + 证书已生成,现在必须将它们移动到对应的主机。 + + ```shell + USER=ubuntu + HOST=${HOST1} + scp -r /tmp/${HOST}/* ${USER}@${HOST}: + ssh ${USER}@${HOST} + USER@HOST $ sudo -Es + root@HOST $ chown -R root:root pki + root@HOST $ mv pki /etc/kubernetes/ + ``` + + +6. 
确保已经所有预期的文件都存在 + + `$HOST0` 所需文件的完整列表如下: + + ```none + /tmp/${HOST0} + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── ca.key + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` + + + 在 `$HOST1` 上: + + ``` + $HOME + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` + + + 在 `$HOST2` 上: + + ``` + $HOME + └── kubeadmcfg.yaml + --- + /etc/kubernetes/pki + ├── apiserver-etcd-client.crt + ├── apiserver-etcd-client.key + └── etcd + ├── ca.crt + ├── healthcheck-client.crt + ├── healthcheck-client.key + ├── peer.crt + ├── peer.key + ├── server.crt + └── server.key + ``` + + +7. 创建静态 Pod 清单 + + 既然证书和配置已经就绪,是时候去创建清单了。 + 在每台主机上运行 `kubeadm` 命令来生成 etcd 使用的静态清单。 + + ```shell + root@HOST0 $ kubeadm init phase etcd local --config=/tmp/${HOST0}/kubeadmcfg.yaml + root@HOST1 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml + root@HOST2 $ kubeadm init phase etcd local --config=/home/ubuntu/kubeadmcfg.yaml + ``` + + +8. 可选:检查群集运行状况 + + ```shell + docker run --rm -it \ + --net host \ + -v /etc/kubernetes:/etc/kubernetes k8s.gcr.io/etcd:${ETCD_TAG} etcdctl \ + --cert /etc/kubernetes/pki/etcd/peer.crt \ + --key /etc/kubernetes/pki/etcd/peer.key \ + --cacert /etc/kubernetes/pki/etcd/ca.crt \ + --endpoints https://${HOST0}:2379 endpoint health --cluster + ... + https://[HOST0 IP]:2379 is healthy: successfully committed proposal: took = 16.283339ms + https://[HOST1 IP]:2379 is healthy: successfully committed proposal: took = 19.44402ms + https://[HOST2 IP]:2379 is healthy: successfully committed proposal: took = 35.926451ms + ``` + + - 将 `${ETCD_TAG}` 设置为你的 etcd 镜像的版本标签,例如 `3.4.3-0`。 + 要查看 kubeadm 使用的 etcd 镜像和标签,请执行 + `kubeadm config images list --kubernetes-version ${K8S_VERSION}`, + 例如,其中的 `${K8S_VERSION}` 可以是 `v1.17.0`。 + - 将 `${HOST0}` 设置为要测试的主机的 IP 地址。 ## {{% heading "whatsnext" %}} @@ -422,7 +373,6 @@ highly available control plane using the [external etcd method with kubeadm](/docs/setup/independent/high-availability/). --> 一旦拥有了一个正常工作的 3 成员的 etcd 集群,你就可以基于 -[使用 kubeadm 的外部 etcd 方法](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/), +[使用 kubeadm 外部 etcd 的方法](/zh/docs/setup/production-environment/tools/kubeadm/high-availability/), 继续部署一个高可用的控制平面。 - diff --git a/content/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md b/content/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md index 1eda24253f6c2..38a04c2d94118 100644 --- a/content/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md +++ b/content/zh/docs/setup/production-environment/tools/kubeadm/troubleshooting-kubeadm.md @@ -101,11 +101,11 @@ This may be caused by a number of problems. The most common are: There are two common ways to fix the cgroup driver problem: - 1. Install Docker again following instructions + 1. Install Docker again following instructions [here](/docs/setup/production-environment/container-runtimes/#docker). - 1. 
Change the kubelet config to match the Docker cgroup driver manually, you can refer to - [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) + 1. Change the kubelet config to match the Docker cgroup driver manually, you can refer to + [Configure cgroup driver used by kubelet on control-plane node](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) - control plane Docker containers are crashlooping or hanging. You can check this by running `docker ps` and investigating each container by running `docker logs`. --> @@ -122,7 +122,8 @@ This may be caused by a number of problems. The most common are: 有两种常见方法可解决 cgroup 驱动程序问题: - 1. 按照 [此处](/zh/docs/setup/production-environment/container-runtimes/#docker) 的说明再次安装 Docker。 + 1. 按照[此处](/zh/docs/setup/production-environment/container-runtimes/#docker) 的说明 + 重新安装 Docker。 1. 更改 kubelet 配置以手动匹配 Docker cgroup 驱动程序,你可以参考 [在主节点上配置 kubelet 要使用的 cgroup 驱动程序](/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) @@ -134,8 +135,11 @@ This may be caused by a number of problems. The most common are: The following could happen if Docker halts and does not remove any Kubernetes-managed containers: -```bash +```shell sudo kubeadm reset +``` + +```console [preflight] Running pre-flight checks [reset] Stopping the kubelet service [reset] Unmounting mounted directories in "/var/lib/kubelet" @@ -145,14 +149,14 @@ sudo kubeadm reset A possible solution is to restart the Docker service and then re-run `kubeadm reset`: -```bash +```shell sudo systemctl restart docker.service sudo kubeadm reset ``` Inspecting the logs for docker may also be useful: -```sh +```shell journalctl -ul docker ``` --> @@ -160,8 +164,11 @@ journalctl -ul docker 如果 Docker 停止并且不删除 Kubernetes 所管理的所有容器,可能发生以下情况: -```bash +```shell sudo kubeadm reset +``` + +```none [preflight] Running pre-flight checks [reset] Stopping the kubelet service [reset] Unmounting mounted directories in "/var/lib/kubelet" @@ -171,7 +178,7 @@ sudo kubeadm reset 一个可行的解决方案是重新启动 Docker 服务,然后重新运行 `kubeadm reset`: -```bash +```shell sudo systemctl restart docker.service sudo kubeadm reset ``` @@ -189,10 +196,10 @@ Right after `kubeadm init` there should not be any pods in these states. - If there are pods in one of these states _right after_ `kubeadm init`, please open an issue in the kubeadm repo. `coredns` (or `kube-dns`) should be in the `Pending` state - until you have deployed the network solution. + until you have deployed the network add-on. - If you see Pods in the `RunContainerError`, `CrashLoopBackOff` or `Error` state - after deploying the network solution and nothing happens to `coredns` (or `kube-dns`), - it's very likely that the Pod Network solution that you installed is somehow broken. + after deploying the network add-on and nothing happens to `coredns` (or `kube-dns`), + it's very likely that the Pod Network add-on that you installed is somehow broken. You might have to grant it more RBAC privileges or use a newer version. Please file an issue in the Pod Network providers' issue tracker and get the issue triaged there. - If you install a version of Docker older than 1.12.1, remove the `MountFlags=slave` option @@ -206,11 +213,11 @@ Right after `kubeadm init` there should not be any pods in these states. 
- 在 `kubeadm init` 命令执行完后,如果有 pods 处于这些状态之一,请在 kubeadm 仓库提起一个 issue。`coredns` (或者 `kube-dns`) 应该处于 `Pending` 状态, - 直到你部署了网络解决方案为止。 + 直到你部署了网络插件为止。 -- 如果在部署完网络解决方案之后,有 Pods 处于 `RunContainerError`、`CrashLoopBackOff` +- 如果在部署完网络插件之后,有 Pods 处于 `RunContainerError`、`CrashLoopBackOff` 或 `Error` 状态之一,并且`coredns` (或者 `kube-dns`)仍处于 `Pending` 状态, - 那很可能是你安装的网络解决方案由于某种原因无法工作。你或许需要授予它更多的 + 那很可能是你安装的网络插件由于某种原因无法工作。你或许需要授予它更多的 RBAC 特权或使用较新的版本。请在 Pod Network 提供商的问题跟踪器中提交问题, 然后在此处分类问题。 @@ -221,17 +228,18 @@ Right after `kubeadm init` there should not be any pods in these states. 当 Kubernetes 不能找到 `var/run/secrets/kubernetes.io/serviceaccount` 文件时会发生错误。 -## `coredns` (或 `kube-dns`)停滞在 `Pending` 状态 +## `coredns` 停滞在 `Pending` 状态 这一行为是 **预期之中** 的,因为系统就是这么设计的。 -kubeadm 的网络供应商是中立的,因此管理员应该选择 [安装 pod 的网络解决方案](/zh/docs/concepts/cluster-administration/addons/)。 +kubeadm 的网络供应商是中立的,因此管理员应该选择 +[安装 pod 的网络插件](/zh/docs/concepts/cluster-administration/addons/)。 你必须完成 Pod 的网络配置,然后才能完全部署 CoreDNS。 在网络被配置好之前,DNS 组件会一直处于 `Pending` 状态。 @@ -239,7 +247,7 @@ kubeadm 的网络供应商是中立的,因此管理员应该选择 [安装 pod ## `HostPort` services do not work The `HostPort` and `HostIP` functionality is available depending on your Pod Network -provider. Please contact the author of the Pod Network solution to find out whether +provider. Please contact the author of the Pod Network add-on to find out whether `HostPort` and `HostIP` functionality are available. Calico, Canal, and Flannel CNI providers are verified to support HostPort. @@ -251,7 +259,7 @@ services](/docs/concepts/services-networking/service/#nodeport) or use `HostNetw --> ## `HostPort` 服务无法工作 -此 `HostPort` 和 `HostIP` 功能是否可用取决于你的 Pod 网络配置。请联系 Pod 解决方案的作者, +此 `HostPort` 和 `HostIP` 功能是否可用取决于你的 Pod 网络配置。请联系 Pod 网络插件的作者, 以确认 `HostPort` 和 `HostIP` 功能是否可用。 已验证 Calico、Canal 和 Flannel CNI 驱动程序支持 HostPort。 @@ -663,3 +671,139 @@ kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- kubectl taint nodes NODE_NAME node-role.kubernetes.io/master:NoSchedule- ``` + +## 节点上的 `/usr` 被以只读方式挂载 {#usr-mounted-read-only} + +在类似 Fedora CoreOS 或者 Flatcar Container Linux 这类 Linux 发行版本中, +目录 `/usr` 是以只读文件系统的形式挂载的。 +在支持 [FlexVolume](https://github.com/kubernetes/community/blob/ab55d85/contributors/devel/sig-storage/flexvolume.md)时, +类似 kubelet 和 kube-controller-manager 这类 Kubernetes 组件使用默认路径 +`/usr/libexec/kubernetes/kubelet-plugins/volume/exec/`, +而 FlexVolume 的目录 _必须是可写入的_,该功能特性才能正常工作。 + + +为了解决这个问题,你可以使用 kubeadm 的[配置文件](https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2) +来配置 FlexVolume 的目录。 + +在(使用 `kubeadm init` 创建的)主控制节点上,使用 `-config` +参数传入如下文件: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: InitConfiguration +nodeRegistration: + kubeletExtraArgs: + volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/" +--- +apiVersion: kubeadm.k8s.io/v1beta2 +kind: ClusterConfiguration +controllerManager: + extraArgs: + flex-volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/" +``` + + +在加入到集群中的节点上,使用下面的文件: + +```yaml +apiVersion: kubeadm.k8s.io/v1beta2 +kind: JoinConfiguration +nodeRegistration: + kubeletExtraArgs: + volume-plugin-dir: "/opt/libexec/kubernetes/kubelet-plugins/volume/exec/" +``` + + +或者,你要可以更改 `/etc/fstab` 使得 `/usr` 目录能够以可写入的方式挂载,不过 +请注意这样做本质上是在更改 Linux 发行版的某种设计原则。 + + +## `kubeadm upgrade plan` 输出错误信息 `context deadline exceeded` + +在使用 `kubeadm` 来升级某运行外部 etcd 的 Kubernetes 集群时可能显示这一错误信息。 +这并不是一个非常严重的一个缺陷,之所以出现此错误信息,原因是老的 kubeadm +版本会对外部 etcd 集群执行版本检查。你可以继续执行 `kubeadm upgrade apply ...`。 + +这一问题已经在 1.19 版本中得到修复。 + + +## `kubeadm 
reset` 会卸载 `/var/lib/kubelet` + +如果已经挂载了 `/var/lib/kubelet` 目录,执行 `kubeadm reset` 操作的时候 +会将其卸载。 + +要解决这一问题,可以在执行了 `kubeadm reset` 操作之后重新挂载 +`/var/lib/kubelet` 目录。 + +这是一个在 1.15 中引入的故障,已经在 1.20 版本中修复。 + + +## 无法在 kubeadm 集群中安全地使用 metrics-server + +在 kubeadm 集群中可以通过为 [metrics-server](https://github.com/kubernetes-sigs/metrics-server) +设置 `--kubelet-insecure-tls` 来以不安全的形式使用该服务。 +建议不要在生产环境集群中这样使用。 + + +如果你需要在 metrics-server 和 kubelt 之间使用 TLS,会有一个问题, +kubeadm 为 kubelt 部署的是自签名的服务证书。这可能会导致 metrics-server +端报告下面的错误信息: + +``` +x509: certificate signed by unknown authority +x509: certificate is valid for IP-foo not IP-bar +``` + + +参见[为 kubelet 启用签名的服务证书](/zh/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs) +以进一步了解如何在 kubeadm 集群中配置 kubelet 使用正确签名了的服务证书。 + +另请参阅[How to run the metrics-server securely](https://github.com/kubernetes-sigs/metrics-server/blob/master/FAQ.md#how-to-run-metrics-server-securely)。 + diff --git a/content/zh/docs/setup/production-environment/tools/kubespray.md b/content/zh/docs/setup/production-environment/tools/kubespray.md index eabd14c7f5c6a..0120acd20cf67 100644 --- a/content/zh/docs/setup/production-environment/tools/kubespray.md +++ b/content/zh/docs/setup/production-environment/tools/kubespray.md @@ -14,12 +14,17 @@ weight: 30 -此快速入门有助于使用 [Kubespray](https://github.com/kubernetes-sigs/kubespray) 安装在 GCE、Azure、OpenStack、AWS、vSphere、Packet(裸机)、Oracle Cloud Infrastructure(实验性)或 Baremetal 上托管的 Kubernetes 集群。 +此快速入门有助于使用 [Kubespray](https://github.com/kubernetes-sigs/kubespray) +安装在 GCE、Azure、OpenStack、AWS、vSphere、Packet(裸机)、Oracle Cloud +Infrastructure(实验性)或 Baremetal 上托管的 Kubernetes 集群。 -Kubespray 是一个由 [Ansible](https://docs.ansible.com/) playbooks、[清单(inventory)](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md)、供应工具和通用 OS/Kubernetes 集群配置管理任务的领域知识组成的。 Kubespray 提供: +Kubespray 是一个由 [Ansible](https://docs.ansible.com/) playbooks、 +[清单(inventory)](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md)、 +制备工具和通用 OS/Kubernetes 集群配置管理任务的领域知识组成的。 +Kubespray 提供: -要选择最适合你的用例的工具,请阅读[此比较](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md)以 - [kubeadm](/zh/docs/reference/setup-tools/kubeadm/) 和 [kops](/zh/docs/setup/production-environment/tools/kops/) 。 +要选择最适合你的用例的工具,请阅读 +[kubeadm](/zh/docs/reference/setup-tools/kubeadm/) 和 +[kops](/zh/docs/setup/production-environment/tools/kops/) 之间的 +[这份比较](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/comparisons.md)。 + 。 - ## 创建集群 ### (1/5)满足下层设施要求 @@ -81,11 +87,14 @@ Provision servers with the following [requirements](https://github.com/kubernete --> * 在将运行 Ansible 命令的计算机上安装 Ansible v2.9 和 python-netaddr * **运行 Ansible Playbook 需要 Jinja 2.11(或更高版本)** -* 目标服务器必须有权访问 Internet 才能拉取 Docker 镜像。否则,需要其他配置([请参见离线环境](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/offline-environment.md)) +* 目标服务器必须有权访问 Internet 才能拉取 Docker 镜像。否则, + 需要其他配置([请参见离线环境](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/offline-environment.md)) * 目标服务器配置为允许 IPv4 转发 * **你的 SSH 密钥必须复制**到清单中的所有服务器部分 -* 防火墙不受管理,你将需要按照以前的方式实施自己的规则。为了避免在部署过程中出现任何问题,你应该禁用防火墙 -* 如果从非 root 用户帐户运行 kubespray,则应在目标服务器中配置正确的特权升级方法。然后应指定“ansible_become” 标志或命令参数 “--become” 或 “-b” +* 防火墙不受管理,你将需要按照以前的方式实施自己的规则。 + 为了避免在部署过程中出现任何问题,你应该禁用防火墙 +* 如果从非 root 用户帐户运行 kubespray,则应在目标服务器中配置正确的特权升级方法。 + 然后应指定“ansible_become” 标志或命令参数 “--become” 或 “-b” ### (2/5)编写清单文件 -设置服务器后,请创建一个 [Ansible 
的清单文件](https://docs.ansible.com/ansible/intro_inventory.html)。你可以手动执行此操作,也可以通过动态清单脚本执行此操作。有关更多信息,请参阅“[建立你自己的清单](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)”。 +设置服务器后,请创建一个 +[Ansible 的清单文件](https://docs.ansible.com/ansible/latest/network/getting_started/first_inventory.html)。 +你可以手动执行此操作,也可以通过动态清单脚本执行此操作。有关更多信息,请参阅 +“[建立你自己的清单](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#building-your-own-inventory)”。 ### (3/5)规划集群部署 @@ -146,11 +157,12 @@ Kubespray 能够自定义部署的许多方面: * 证书生成方式 - -可以修改[变量文件](https://docs.ansible.com/ansible/playbooks_variables.html)以进行 Kubespray 定制。 -如果你刚刚开始使用 Kubespray,请考虑使用 Kubespray 默认设置来部署你的集群并探索 Kubernetes 。 +可以修改[变量文件](https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html) +以进行 Kubespray 定制。 +如果你刚刚开始使用 Kubespray,请考虑使用 Kubespray 默认设置来部署你的集群 +并探索 Kubernetes 。 - ### (4/5)部署集群 接下来,部署你的集群: -使用 [ansible-playbook](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#starting-custom-deployment) 进行j集群部署。 +使用 [ansible-playbook](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/getting-started.md#starting-custom-deployment) +进行集群部署。 ```shell ansible-playbook -i your/inventory/inventory.ini cluster.yml -b -v \ @@ -172,7 +184,9 @@ ansible-playbook -i your/inventory/inventory.ini cluster.yml -b -v \ -大型部署(超过 100 个节点)可能需要[特定的调整](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/large-deployments.md),以获得最佳效果。 +大型部署(超过 100 个节点)可能需要 +[特定的调整](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/large-deployments.md), +以获得最佳效果。 ### (5/5)验证部署 -Kubespray 提供了一种使用 [Netchecker](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/netcheck.md) +Kubespray 提供了一种使用 +[Netchecker](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/netcheck.md) 验证 Pod 间连接和 DNS 解析的方法。 Netchecker 确保 netchecker-agents pod 可以解析。 DNS 请求并在默认名称空间内对每个请求执行 ping 操作。 @@ -241,16 +256,17 @@ When running the reset playbook, be sure not to accidentally target your product --> ## 反馈 -* Slack 频道:[#kubespray](https://kubernetes.slack.com/messages/kubespray/)(你可以在[此处](https://slack.k8s.io/)获得邀请) +* Slack 频道:[#kubespray](https://kubernetes.slack.com/messages/kubespray/) + (你可以在[此处](https://slack.k8s.io/)获得邀请) * [GitHub 问题](https://github.com/kubernetes-sigs/kubespray/issues) ## {{% heading "whatsnext" %}} - -查看有关 Kubespray 的[路线图](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/roadmap.md)的计划工作。 +查看有关 Kubespray 的 +[路线图](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/roadmap.md) +的计划工作。 diff --git a/content/zh/docs/setup/release/version-skew-policy.md b/content/zh/docs/setup/release/version-skew-policy.md index 5ef5a448849ae..b2fbebfbd142e 100644 --- a/content/zh/docs/setup/release/version-skew-policy.md +++ b/content/zh/docs/setup/release/version-skew-policy.md @@ -27,12 +27,14 @@ For more information, see [Kubernetes Release Versioning](https://github.com/kub --> Kubernetes 版本号格式为 **x.y.z**,其中 **x** 为大版本号,**y** 为小版本号,**z** 为补丁版本号。 版本号格式遵循 [Semantic Versioning](https://semver.org/) 规则。 -更多信息,请参阅 [Kubernetes 发布版本](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)。 +更多信息,请参阅 +[Kubernetes 发布版本](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md#kubernetes-release-versioning)。 -Kubernetes 项目会维护最近的三个小版本分支({{< skew latestVersion >}}, {{< skew prevMinorVersion >}}, {{< skew 
oldestMinorVersion >}})。 +Kubernetes 项目会维护最近的三个小版本分支({{< skew latestVersion >}}, +{{< skew prevMinorVersion >}}, {{< skew oldestMinorVersion >}})。 Kubernetes 1.19 及更高的版本将获得大约1年的补丁支持。 Kubernetes 1.18 及更早的版本获得大约9个月的补丁支持。 @@ -45,9 +47,13 @@ The [Release Managers](https://git.k8s.io/sig-release/release-managers.md) group For more information, see the Kubernetes [patch releases](https://git.k8s.io/sig-release/releases/patch-releases.md) page. --> 一些 bug 修复,包括安全修复,取决于其严重性和可行性,有可能会反向合并到这三个发布分支。 -补丁版本会[定期](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence)或根据需要从这些分支中发布。 -最终是否发布是由[发布管理者](https://github.com/kubernetes/sig-release/blob/master/release-managers.md)来决定的。 -如需了解更多信息,请查看 Kubernetes [补丁发布](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)。 +补丁版本会[定期](https://git.k8s.io/sig-release/releases/patch-releases.md#cadence) +或根据需要从这些分支中发布。 +最终是否发布是由 +[发布管理者](https://github.com/kubernetes/sig-release/blob/master/release-managers.md) +来决定的。 +如需了解更多信息,请查看 Kubernetes +[补丁发布](https://github.com/kubernetes/sig-release/blob/master/releases/patch-releases.md)。 * 最新的 `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** -* 则受支持的 `kube-apiserver` 版本号包括 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** +* 其他受支持的 `kube-apiserver` 版本号包括 **{{< skew latestVersion >}}** 和 + **{{< skew prevMinorVersion >}}** ### kubelet @@ -90,7 +97,8 @@ Example: 例如: * `kube-apiserver` 版本号如果是 **{{< skew latestVersion >}}** -* 受支持的的 `kubelet` 版本将包括 **{{< skew latestVersion >}}**、**{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** +* 受支持的的 `kubelet` 版本将包括 **{{< skew latestVersion >}}**、 + **{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** 例如: -* 如果 `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** -* `kubelet` 的受支持版本将是 **{{< skew prevMinorVersion >}}** 和 **{{< skew oldestMinorVersion >}}** -(**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) +* 如果 `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 + **{{< skew prevMinorVersion >}}** +* `kubelet` 的受支持版本将是 **{{< skew prevMinorVersion >}}** 和 + **{{< skew oldestMinorVersion >}}** + (**{{< skew latestVersion >}}** 不再支持,因为它比 + **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) -`kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本不能高于 `kube-apiserver` 版本号。 -最好它们的版本号与 `kube-apiserver` 保持一致,但允许比 `kube-apiserver` 低一个小版本(为了支持在线升级)。 +`kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` +版本不能高于 `kube-apiserver` 版本号。 +最好它们的版本号与 `kube-apiserver` 保持一致,但允许比 `kube-apiserver` +低一个小版本(为了支持在线升级)。 {{< note >}} -如果在 HA 集群中,多个 `kube-apiserver` 实例版本号不一致,他们也可以跟任意一个 `kube-apiserver` 实例通信(例如,通过 load balancer), -但 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本可用范围会相应的减小。 +如果在 HA 集群中,多个 `kube-apiserver` 实例版本号不一致,他们也可以跟 +任意一个 `kube-apiserver` 实例通信(例如,通过 load balancer), +但 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` +版本可用范围会相应的减小。 {{< /note >}} 例如: -* `kube-apiserver` 实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** 版本 -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 可以通过 load balancer 与所有的 `kube-apiserver` 通信 -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 可选版本为 **{{< skew prevMinorVersion >}}** -(**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** 版本的 `kube-apiserver` 更新) +* `kube-apiserver` 实例同时存在 **{{< skew 
latestVersion >}}** 和 + **{{< skew prevMinorVersion >}}** 版本 +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` + 可以通过 load balancer 与所有的 `kube-apiserver` 通信 +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` + 可选版本为 **{{< skew prevMinorVersion >}}** + (**{{< skew latestVersion >}}** 不再支持,因为它比 **{{< skew prevMinorVersion >}}** + 版本的 `kube-apiserver` 更新) ### kubectl @@ -171,7 +191,8 @@ Example: 例如: * 如果 `kube-apiserver` 当前是 **{{< skew latestVersion >}}** 版本 -* `kubectl` 则支持 **{{< skew nextMinorVersion >}}**、**{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** +* `kubectl` 则支持 **{{< skew nextMinorVersion >}}**、**{{< skew latestVersion >}}** + 和 **{{< skew prevMinorVersion >}}** 例如: -* `kube-apiserver` 多个实例同时存在 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}** -* `kubectl` 可选的版本为 **{{< skew latestVersion >}}** 和 **{{< skew prevMinorVersion >}}**(其他版本不再支持,因为它会比其中某个 `kube-apiserver` 实例高或低一个小版本) +* `kube-apiserver` 多个实例同时存在 **{{< skew latestVersion >}}** 和 + **{{< skew prevMinorVersion >}}** +* `kubectl` 可选的版本为 **{{< skew latestVersion >}}** 和 + **{{< skew prevMinorVersion >}}**(其他版本不再支持, + 因为它会比其中某个 `kube-apiserver` 实例高或低一个小版本) 组件之间支持的版本偏差会影响组件升级的顺序。 -本节描述组件从版本 **{{< skew prevMinorVersion >}}** 到 **{{< skew latestVersion >}}** 的升级次序。 +本节描述组件从版本 **{{< skew prevMinorVersion >}}** 到 **{{< skew latestVersion >}}** +的升级次序。 ### kube-apiserver @@ -220,9 +245,14 @@ Pre-requisites: * The webhooks are able to handle any new versions of REST resources that will be sent to them, and any new fields added to existing versions in **{{< skew latestVersion >}}** --> * 单实例集群中,`kube-apiserver` 实例版本号须是 **{{< skew prevMinorVersion >}}** -* 高可用(HA)集群中,所有的 `kube-apiserver` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 **{{< skew latestVersion >}}**(确保满足最新和最旧的实例小版本号相差不大于1) -* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 版本号必须为 **{{< skew prevMinorVersion >}}**(确保不高于 API server 的版本,且版本号相差不大于1) -* `kubelet` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**(确保版本号不高于 API server,且版本号相差不大于2) +* 高可用(HA)集群中,所有的 `kube-apiserver` 实例版本号必须是 + **{{< skew prevMinorVersion >}}** 或 **{{< skew latestVersion >}}** + (确保满足最新和最旧的实例小版本号相差不大于1) +* `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` + 版本号必须为 **{{< skew prevMinorVersion >}}** + (确保不高于 API server 的版本,且版本号相差不大于1) +* `kubelet` 实例版本号必须是 **{{< skew prevMinorVersion >}}** 或 + **{{< skew oldestMinorVersion >}}**(确保版本号不高于 API server,且版本号相差不大于2) * 注册的 admission 插件必须能够处理新的 `kube-apiserver` 实例发送过来的数据: * `ValidatingWebhookConfiguration` 和 `MutatingWebhookConfiguration` 对象必须升级到可以处理 **{{< skew latestVersion >}}** 版本新加的 REST 资源(或使用 1.15 版本提供的 @@ -258,12 +288,14 @@ Pre-requisites: --> 前提条件: -* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** (HA 集群中,所有的`kube-apiserver` 实例必须在组件升级前完成升级) +* `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** + (HA 集群中,所有的`kube-apiserver` 实例必须在组件升级前完成升级) -升级 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` 到 **{{< skew latestVersion >}}** +升级 `kube-controller-manager`、`kube-scheduler` 和 `cloud-controller-manager` +到 **{{< skew latestVersion >}}** ### kubelet @@ -278,15 +310,26 @@ Optionally upgrade `kubelet` instances to **{{< skew latestVersion >}}** (or the * `kube-apiserver` 实例必须为 **{{< skew latestVersion >}}** 版本 -`kubelet` 可以升级到 **{{< skew latestVersion >}}**(或者停留在 **{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**) +`kubelet` 可以升级到 **{{< skew 
latestVersion >}}**(或者停留在 +**{{< skew prevMinorVersion >}}** 或 **{{< skew oldestMinorVersion >}}**) +{{< note >}} + +在对 `kubelet` 执行次版本升级时,先[腾空](/zh/docs/tasks/administer-cluster/safely-drain-node/) +节点上的 Pods。 +目前不支持原地升级 `kubelet` 的次版本。 +{{}} + +{{< warning >}} -{{< warning >}} 集群中 `kubelet` 版本号不建议比 `kube-apiserver` 低两个版本号: * 它们必须升级到与 `kube-apiserver` 相差不超过 1 个小版本,才可以升级其他控制面组件 @@ -318,4 +361,5 @@ If `kube-proxy` version is **{{< skew oldestMinorVersion >}}**: 如果 `kube-proxy` 的版本是 **{{< skew oldestMinorVersion >}}**: * `kubelet` 版本必须相同,也是 **{{< skew oldestMinorVersion >}}** -* `kube-apiserver` 版本必须在 **{{< skew oldestMinorVersion >}}** 到 **{{< skew latestVersion >}}** 之间(闭区间) \ No newline at end of file +* `kube-apiserver` 版本必须在 **{{< skew oldestMinorVersion >}}** 到 + **{{< skew latestVersion >}}** 之间(闭区间) diff --git a/content/zh/docs/tasks/administer-cluster/configure-upgrade-etcd.md b/content/zh/docs/tasks/administer-cluster/configure-upgrade-etcd.md index 749aabc73d5fc..c64815d38b004 100644 --- a/content/zh/docs/tasks/administer-cluster/configure-upgrade-etcd.md +++ b/content/zh/docs/tasks/administer-cluster/configure-upgrade-etcd.md @@ -18,12 +18,8 @@ content_type: task {{< glossary_definition term_id="etcd" length="all" >}} - - - ## {{% heading "prerequisites" %}} - {{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} @@ -35,13 +31,21 @@ content_type: task * Run etcd as a cluster of odd members. -* etcd is a leader-based distributed system. Ensure that the leader periodically send heartbeats on time to all followers to keep the cluster stable. +* etcd is a leader-based distributed system. Ensure that the leader + periodically send heartbeats on time to all followers to keep the cluster + stable. * Ensure that no resource starvation occurs. - Performance and stability of the cluster is sensitive to network and disk IO. Any resource starvation can lead to heartbeat timeout, causing instability of the cluster. An unstable etcd indicates that no leader is elected. Under such circumstances, a cluster cannot make any changes to its current state, which implies no new pods can be scheduled. + Performance and stability of the cluster is sensitive to network and disk + I/O. Any resource starvation can lead to heartbeat timeout, causing instability + of the cluster. An unstable etcd indicates that no leader is elected. Under + such circumstances, a cluster cannot make any changes to its current state, + which implies no new pods can be scheduled. -* Keeping stable etcd clusters is critical to the stability of Kubernetes clusters. Therefore, run etcd clusters on dedicated machines or isolated environments for [guaranteed resource requirements](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/hardware.md#hardware-recommendations). +* Keeping etcd clusters stable is critical to the stability of Kubernetes + clusters. Therefore, run etcd clusters on dedicated machines or isolated + environments for [guaranteed resource requirements](https://etcd.io/docs/current/op-guide/hardware/). * The minimum recommended version of etcd to run in production is `3.2.10+`. 
--> @@ -53,16 +57,23 @@ content_type: task * 确保不发生资源不足。 - 集群的性能和稳定性对网络和磁盘 IO 非常敏感。任何资源匮乏都会导致心跳超时,从而导致集群的不稳定。不稳定的情况表明没有选出任何主节点。在这种情况下,集群不能对其当前状态进行任何更改,这意味着不能调度新的 pod。 + 集群的性能和稳定性对网络和磁盘 I/O 非常敏感。任何资源匮乏都会导致心跳超时, + 从而导致集群的不稳定。不稳定的情况表明没有选出任何主节点。 + 在这种情况下,集群不能对其当前状态进行任何更改,这意味着不能调度新的 pod。 -* 保持稳定的 etcd 集群对 Kubernetes 集群的稳定性至关重要。因此,请在专用机器或隔离环境上运行 etcd 集群,以满足[所需资源需求](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/hardware.md#hardware-recommendations)。 +* 保持 etcd 集群的稳定对 Kubernetes 集群的稳定性至关重要。 + 因此,请在专用机器或隔离环境上运行 etcd 集群,以满足 + [所需资源需求](https://etcd.io/docs/current/op-guide/hardware/)。 * 在生产中运行的 etcd 的最低推荐版本是 `3.2.10+`。 ## 资源要求 -使用有限的资源运行 etcd 只适合测试目的。为了在生产中部署,需要先进的硬件配置。在生产中部署 etcd 之前,请查看[所需资源参考文档](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/hardware.md#example-hardware-configurations)。 +使用有限的资源运行 etcd 只适合测试目的。为了在生产中部署,需要先进的硬件配置。 +在生产中部署 etcd 之前,请查看 +[所需资源参考文档](https://etcd.io/docs/current/op-guide/hardware/#example-hardware-configurations)。 ## 启动 etcd 集群 @@ -83,13 +96,15 @@ Use a single-node etcd cluster only for testing purpose. 1. Run the following: - ```sh - ./etcd --listen-client-urls=http://$PRIVATE_IP:2379 --advertise-client-urls=http://$PRIVATE_IP:2379 - ``` + ```sh + etcd --listen-client-urls=http://$PRIVATE_IP:2379 \ + --advertise-client-urls=http://$PRIVATE_IP:2379 + ``` -2. Start Kubernetes API server with the flag `--etcd-servers=$PRIVATE_IP:2379`. +2. Start the Kubernetes API server with the flag + `--etcd-servers=$PRIVATE_IP:2379`. - Replace `PRIVATE_IP` with your etcd client IP. + Make sure `PRIVATE_IP` is set to your etcd client IP. --> ### 单节点 etcd 集群 @@ -97,53 +112,70 @@ Use a single-node etcd cluster only for testing purpose. 1. 运行以下命令: - ```sh - ./etcd --listen-client-urls=http://$PRIVATE_IP:2379 --advertise-client-urls=http://$PRIVATE_IP:2379 - ``` + ```sh + etcd --listen-client-urls=http://$PRIVATE_IP:2379 \ + --advertise-client-urls=http://$PRIVATE_IP:2379 + ``` 2. 使用参数 `--etcd-servers=$PRIVATE_IP:2379` 启动 Kubernetes API 服务器。 - 使用您 etcd 客户端 IP 替换 `PRIVATE_IP`。 + 确保将 `PRIVATE_IP` 设置为etcd客户端 IP。 ### 多节点 etcd 集群 -为了耐用性和高可用性,在生产中将以多节点集群的方式运行 etcd,并且定期备份。建议在生产中使用五个成员的集群。有关该内容的更多信息,请参阅[常见问题文档](https://github.com/coreos/etcd/blob/master/Documentation/faq.md#what-is-failure-tolerance)。 +为了耐用性和高可用性,在生产中将以多节点集群的方式运行 etcd,并且定期备份。 +建议在生产中使用五个成员的集群。 +有关该内容的更多信息,请参阅 +[常见问题文档](https://etcd.io/docs/current/faq/#what-is-failure-tolerance)。 -可以通过静态成员信息或动态发现的方式配置 etcd 集群。有关集群的详细信息,请参阅 [etcd 集群文档](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/clustering.md)。 +可以通过静态成员信息或动态发现的方式配置 etcd 集群。 +有关集群的详细信息,请参阅 +[etcd 集群文档](https://etcd.io/docs/current/op-guide/clustering/)。 -例如,考虑运行以下客户端 URL 的五个成员的 etcd 集群:`http://$IP1:2379`,`http://$IP2:2379`,`http://$IP3:2379`,`http://$IP4:2379` 和 `http://$IP5:2379`。要启动 Kubernetes API 服务器: +例如,考虑运行以下客户端 URL 的五个成员的 etcd 集群:`http://$IP1:2379`, +`http://$IP2:2379`,`http://$IP3:2379`,`http://$IP4:2379` 和 `http://$IP5:2379`。 +要启动 Kubernetes API 服务器: 1. 运行以下命令: - ```sh - ./etcd --listen-client-urls=http://$IP1:2379, http://$IP2:2379, http://$IP3:2379, http://$IP4:2379, http://$IP5:2379 --advertise-client-urls=http://$IP1:2379, http://$IP2:2379, http://$IP3:2379, http://$IP4:2379, http://$IP5:2379 - ``` + ```shell + etcd --listen-client-urls=http://$IP1:2379,http://$IP2:2379,http://$IP3:2379,http://$IP4:2379,http://$IP5:2379 --advertise-client-urls=http://$IP1:2379,http://$IP2:2379,http://$IP3:2379,http://$IP4:2379,http://$IP5:2379 + ``` -2. 
使用参数 `--etcd-servers=$IP1:2379, $IP2:2379, $IP3:2379, $IP4:2379, $IP5:2379` 启动 Kubernetes API 服务器。 +2. 使用参数 `--etcd-servers=$IP1:2379,$IP2:2379,$IP3:2379,$IP4:2379,$IP5:2379` + 启动 Kubernetes API 服务器。 - 使用您 etcd 客户端 IP 地址替换 `IP`。 + 确保将 `IP` 变量设置为客户端 IP 地址。 ## 安全的 etcd 集群 -对 etcd 的访问相当于集群中的 root 权限,因此理想情况下只有 API 服务器才能访问它。考虑到数据的敏感性,建议只向需要访问 etcd 集群的节点授予权限。 +对 etcd 的访问相当于集群中的 root 权限,因此理想情况下只有 API 服务器才能访问它。 +考虑到数据的敏感性,建议只向需要访问 etcd 集群的节点授予权限。 -想要确保 etcd 的安全,可以设置防火墙规则或使用 etcd 提供的安全特性,这些安全特性依赖于 x509 公钥基础设施(PKI)。首先,通过生成密钥和证书对来建立安全的通信通道。 -例如,使用密钥对 `peer.key` 和 `peer.cert` 来保护 etcd 成员之间的通信,而 `client.cert` 和 `client.cert` 用于保护 etcd 与其客户端之间的通信。请参阅 etcd 项目提供的[示例脚本](https://github.com/coreos/etcd/tree/master/hack/tls-setup),以生成用于客户端身份验证的密钥对和 CA 文件。 +想要确保 etcd 的安全,可以设置防火墙规则或使用 etcd 提供的安全特性,这些安全特性依赖于 x509 公钥基础设施(PKI)。 +首先,通过生成密钥和证书对来建立安全的通信通道。 +例如,使用密钥对 `peer.key` 和 `peer.cert` 来保护 etcd 成员之间的通信, +而 `client.key` 和 `client.cert` 用于保护 etcd 与其客户端之间的通信。 +请参阅 etcd 项目提供的[示例脚本](https://github.com/coreos/etcd/tree/master/hack/tls-setup), +以生成用于客户端身份验证的密钥对和 CA 文件。 ### 安全通信 -若要使用安全对等通信对 etcd 进行配置,请指定参数 `--peer-key-file=peer.key` 和 `--peer-cert-file=peer.cert`,并使用 https 作为 URL 模式。 +若要使用安全对等通信对 etcd 进行配置,请指定参数 `--peer-key-file=peer.key` +和 `--peer-cert-file=peer.cert`,并使用 HTTPS 作为 URL 模式。 -类似地,要使用安全客户端通信对 etcd 进行配置,请指定参数 `--key-file=k8sclient.key` 和 `--cert-file=k8sclient.cert`,并使用 https 作为 URL 模式。 +类似地,要使用安全客户端通信对 etcd 进行配置,请指定参数 `--key-file=k8sclient.key` +和 `--cert-file=k8sclient.cert`,并使用 HTTPS 作为 URL 模式。 +使用安全通信的客户端命令的示例: + +``` +ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 \ + --cert=/etc/kubernetes/pki/etcd/server.crt \ + --key=/etc/kubernetes/pki/etcd/server.key \ + --cacert=/etc/kubernetes/pki/etcd/ca.crt \ + member list +``` ### 限制 etcd 集群的访问 配置安全通信后,将 etcd 集群的访问限制在 Kubernetes API 服务器上。使用 TLS 身份验证来完成此任务。 -例如,考虑由 CA `etcd.ca` 信任的密钥对 `k8sclient.key` 和 `k8sclient.cert`。当 etcd 配置为 `--client-cert-auth` 和 TLS 时,它使用系统 CA 或由 `--trusted-ca-file` 参数传入的 CA 验证来自客户端的证书。 +例如,考虑由 CA `etcd.ca` 信任的密钥对 `k8sclient.key` 和 `k8sclient.cert`。 +当 etcd 配置为 `--client-cert-auth` 和 TLS 时,它使用系统 CA 或由 `--trusted-ca-file` 参数传入的 CA 验证来自客户端的证书。 指定参数 `--client-cert-auth=true` 和 `--trusted-ca-file=etcd.ca` 将限制对具有证书 `k8sclient.cert` 的客户端的访问。 -一旦正确配置了 etcd,只有具有有效证书的客户端才能访问它。要让 Kubernetes API 服务器访问,可以使用参数 `--etcd-certfile=k8sclient.cert`,`--etcd-keyfile=k8sclient.key` 和 `--etcd-cafile=ca.cert` 配置它。 +一旦正确配置了 etcd,只有具有有效证书的客户端才能访问它。要让 Kubernetes API 服务器访问, +可以使用参数 `--etcd-certfile=k8sclient.cert`,`--etcd-keyfile=k8sclient.key` 和 `--etcd-cafile=ca.cert` 配置。 {{< note >}} -Kubernetes 目前不支持 etcd 身份验证。想要了解更多信息,请参阅相关的问题[支持 etcd v2 的基本认证](https://github.com/kubernetes/kubernetes/issues/23398)。 +Kubernetes 目前不支持 etcd 身份验证。 +想要了解更多信息,请参阅相关的问题 +[支持 etcd v2 的基本认证](https://github.com/kubernetes/kubernetes/issues/23398)。 {{< /note >}} ## 替换失败的 etcd 成员 -etcd 集群通过容忍少数成员故障实现高可用性。但是,要改善集群的整体健康状况,请立即替换失败的成员。当多个成员失败时,逐个替换它们。替换失败成员需要两个步骤:删除失败成员和添加新成员。 +etcd 集群通过容忍少数成员故障实现高可用性。 +但是,要改善集群的整体健康状况,请立即替换失败的成员。当多个成员失败时,逐个替换它们。 +替换失败成员需要两个步骤:删除失败成员和添加新成员。 -虽然 etcd 在内部保留唯一的成员 ID,但建议为每个成员使用唯一的名称,以避免人为错误。例如,考虑一个三成员的 etcd 集群。让 URL 为:member1=http://10.0.0.1, member2=http://10.0.0.2 和 member3=http://10.0.0.3。当 member1 失败时,将其替换为 member4=http://10.0.0.4。 +虽然 etcd 在内部保留唯一的成员 ID,但建议为每个成员使用唯一的名称,以避免人为错误。 +例如,考虑一个三成员的 etcd 集群。让 URL 为:`member1=http://10.0.0.1`, `member2=http://10.0.0.2` +和 `member3=http://10.0.0.3`。当 `member1` 失败时,将其替换为 `member4=http://10.0.0.4`。 -1. 获取失败的 member1 的成员 ID: +1. 
获取失败的 `member1` 的成员 ID: - `etcdctl --endpoints=http://10.0.0.2,http://10.0.0.3 member list` + ```shell + etcdctl --endpoints=http://10.0.0.2,http://10.0.0.3 member list + ``` - 显示以下信息: + 显示以下信息: - 8211f1d0f64f3269, started, member1, http://10.0.0.1:2380, http://10.0.0.1:2379 - 91bc3c398fb3c146, started, member2, http://10.0.0.2:2380, http://10.0.0.2:2379 - fd422379fda50e48, started, member3, http://10.0.0.3:2380, http://10.0.0.3:2379 + ```console + 8211f1d0f64f3269, started, member1, http://10.0.0.1:2380, http://10.0.0.1:2379 + 91bc3c398fb3c146, started, member2, http://10.0.0.2:2380, http://10.0.0.2:2379 + fd422379fda50e48, started, member3, http://10.0.0.3:2380, http://10.0.0.3:2379 + ``` 2. 移除失败的成员 - `etcdctl member remove 8211f1d0f64f3269` + ```shell + etcdctl member remove 8211f1d0f64f3269 + ``` - 显示以下信息: + 显示以下信息: - Removed member 8211f1d0f64f3269 from cluster + ```console + Removed member 8211f1d0f64f3269 from cluster + ``` 3. 增加新成员: - `./etcdctl member add member4 --peer-urls=http://10.0.0.4:2380` + ```shell + etcdctl member add member4 --peer-urls=http://10.0.0.4:2380 + ``` - 显示以下信息: + 显示以下信息: - Member 2be1eb8f84b7f63e added to cluster ef37ad9dc622a7c4 + ```console + Member 2be1eb8f84b7f63e added to cluster ef37ad9dc622a7c4 + ``` 4. 在 IP 为 `10.0.0.4` 的机器上启动新增加的成员: - export ETCD_NAME="member4" - export ETCD_INITIAL_CLUSTER="member2=http://10.0.0.2:2380,member3=http://10.0.0.3:2380,member4=http://10.0.0.4:2380" - export ETCD_INITIAL_CLUSTER_STATE=existing - etcd [flags] + ```shell + export ETCD_NAME="member4" + export ETCD_INITIAL_CLUSTER="member2=http://10.0.0.2:2380,member3=http://10.0.0.3:2380,member4=http://10.0.0.4:2380" + export ETCD_INITIAL_CLUSTER_STATE=existing + etcd [flags] + ``` 5. 做以下事情之一: - 1. 更新其 `--etcd-servers` 参数,使 Kubernetes 知道配置进行了更改,然后重新启动 Kubernetes API 服务器。 + 1. 更新 Kubernetes API 服务器的 `--etcd-servers` 参数,使 Kubernetes 知道配置进行了更改,然后重新启动 Kubernetes API 服务器。 2. 
如果在 deployment 中使用了负载均衡,更新负载均衡配置。 -有关集群重新配置的详细信息,请参阅 [etcd 重构文档](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#remove-a-member)。 +有关集群重新配置的详细信息,请参阅 [etcd 重构文档](https://etcd.io/docs/current/op-guide/runtime-configuration/#remove-a-member)。 ## 备份 etcd 集群 -所有 Kubernetes 对象都存储在 etcd 上。定期备份 etcd 集群数据对于在灾难场景(例如丢失所有主节点)下恢复 Kubernetes 集群非常重要。快照文件包含所有 Kubernetes 状态和关键信息。为了保证敏感的 Kubernetes 数据的安全,可以对快照文件进行加密。 +所有 Kubernetes 对象都存储在 etcd 上。定期备份 etcd 集群数据对于在灾难场景(例如丢失所有控制平面节点)下恢复 Kubernetes 集群非常重要。 +快照文件包含所有 Kubernetes 状态和关键信息。为了保证敏感的 Kubernetes 数据的安全,可以对快照文件进行加密。 备份 etcd 集群可以通过两种方式完成:etcd 内置快照和卷快照。 ### 内置快照 -etcd 支持内置快照,因此备份 etcd 集群很容易。快照可以从使用 `etcdctl snapshot save` 命令的活动成员中获取,也可以通过从 etcd [数据目录](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#--data-dir)复制 `member/snap/db` 文件,该 etcd 数据目录目前没有被 etcd 进程使用。获取快照通常不会影响成员的性能。 +etcd 支持内置快照。快照可以从使用 `etcdctl snapshot save` 命令的活动成员中获取, +也可以通过从 etcd [数据目录](https://etcd.io/docs/current/op-guide/configuration/#--data-dir) +复制 `member/snap/db` 文件,该 etcd 数据目录目前没有被 etcd 进程使用。获取快照不会影响成员的性能。 下面是一个示例,用于获取 `$ENDPOINT` 所提供的键空间的快照到文件 `snapshotdb`: -```sh +```shell ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshotdb -# exit 0 +``` -# verify the snapshot +验证快照: + +```shell ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb +``` + +```console +----------+----------+------------+------------+ | HASH | REVISION | TOTAL KEYS | TOTAL SIZE | +----------+----------+------------+------------+ @@ -361,54 +516,137 @@ ETCDCTL_API=3 etcdctl --write-out=table snapshot status snapshotdb ### 卷快照 如果 etcd 运行在支持备份的存储卷(如 Amazon Elastic Block 存储)上,则可以通过获取存储卷的快照来备份 etcd 数据。 +### 使用 etcdctl 选项的快照 + +我们还可以使用 etcdctl 提供的各种选项来拍摄快照。例如: + +```shell +ETCDCTL_API=3 etcdctl -h +``` + +列出 etcdctl 可用的各种选项。例如,你可以通过指定端点,证书等来拍摄快照,如下所示: + +```shell +ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 \ + --cacert= --cert= --key= \ + snapshot save +``` +可以从 etcd Pod 的描述中获得 `trusted-ca-file`, `cert-file` 和 `key-file` 。 + ## 扩展 etcd 集群 -通过交换性能,扩展 etcd 集群可以提高可用性。缩放不会提高集群性能和能力。一般情况下不要扩大或缩小 etcd 集群的集合。不要为 etcd 集群配置任何自动缩放组。强烈建议始终在任何官方支持的规模上运行生产 Kubernetes 集群时使用静态的五成员 etcd 集群。 +通过交换性能,扩展 etcd 集群可以提高可用性。缩放不会提高集群性能和能力。 +一般情况下不要扩大或缩小 etcd 集群的集合。不要为 etcd 集群配置任何自动缩放组。 +强烈建议始终在任何官方支持的规模上运行生产 Kubernetes 集群时使用静态的五成员 etcd 集群。 -合理的扩展是在需要更高可靠性的情况下,将三成员集群升级为五成员集群。请参阅 [etcd 重新配置文档](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#remove-a-member)以了解如何将成员添加到现有集群中的信息。 +合理的扩展是在需要更高可靠性的情况下,将三成员集群升级为五成员集群。 +请参阅 [etcd 重新配置文档](https://etcd.io/docs/current/op-guide/runtime-configuration/#remove-a-member) +以了解如何将成员添加到现有集群中的信息。 ## 恢复 etcd 集群 -etcd 支持从 [major.minor](http://semver.org/) 或其他不同 patch 版本的 etcd 进程中获取的快照进行恢复。还原操作用于恢复失败的集群的数据。 +etcd 支持从 [major.minor](http://semver.org/) 或其他不同 patch 版本的 etcd 进程中获取的快照进行恢复。 +还原操作用于恢复失败的集群的数据。 -在启动还原操作之前,必须有一个快照文件。它可以是来自以前备份操作的快照文件,也可以是来自剩余[数据目录](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#--data-dir)的快照文件。 -有关从快照文件还原集群的详细信息和示例,请参阅 [etcd 灾难恢复文档](https://github.com/coreos/etcd/blob/master/Documentation/op-guide/recovery.md#restoring-a-cluster)。 +在启动还原操作之前,必须有一个快照文件。它可以是来自以前备份操作的快照文件, +也可以是来自剩余[数据目录]( https://etcd.io/docs/current/op-guide/configuration/#--data-dir)的快照文件。 +例如: + +```shell +ETCDCTL_API=3 etcdctl --endpoints 10.2.0.9:2379 snapshot restore snapshotdb +``` + +有关从快照文件还原集群的详细信息和示例,请参阅 +[etcd 灾难恢复文档](https://etcd.io/docs/current/op-guide/recovery/#restoring-a-cluster)。 如果还原的集群的访问 URL 
与前一个集群不同,则必须相应地重新配置 Kubernetes API 服务器。 在本例中,使用参数 `--etcd-servers=$NEW_ETCD_CLUSTER` 而不是参数 `--etcd-servers=$OLD_ETCD_CLUSTER` 重新启动 Kubernetes API 服务器。 @@ -420,82 +658,11 @@ etcd 支持从 [major.minor](http://semver.org/) 或其他不同 patch 版本的 {{< note >}} 如果集群中正在运行任何 API 服务器,则不应尝试还原 etcd 的实例。相反,请按照以下步骤还原 etcd: -- 停止 *所有* kube-apiserver 实例 +- 停止 *所有* API 服务实例 - 在所有 etcd 实例中恢复状态 -- 重启所有 kube-apiserver 实例 +- 重启所有 API 服务实例 -我们还建议重启所有组件(例如 kube-scheduler、kube-controller-manager、kubelet),以确保它们不会 +我们还建议重启所有组件(例如 `kube-scheduler`、`kube-controller-manager`、`kubelet`),以确保它们不会 依赖一些过时的数据。请注意,实际中还原会花费一些时间。 在还原过程中,关键组件将丢失领导锁并自行重启。 -{{< note >}} - - -## 升级和回滚 etcd 集群 - -从 Kubernetes v1.13.0 开始,不在支持 etcd2 作为新的或现有 Kubernetes 集群的后端。Kubernetes 支持 etcd2 和 etcd3 的时间表如下: - -- Kubernetes v1.0: 仅限 etcd2 -- Kubernetes v1.5.1: 添加了 etcd3 支持,新的集群仍默认为 etcd2 -- Kubernetes v1.6.0: 使用 `kube-up.sh` 创建的新集群默认为 etcd3,而 `kube-apiserver` 默认为 etcd3 -- Kubernetes v1.9.0: 宣布弃用 etcd2 存储后端 -- Kubernetes v1.13.0: 删除了 etcd2 存储后端,`kube-apiserver` 将拒绝以 `--storage-backend = etcd2` 开头,消息 `etcd2 不再是支持的存储后端` - -在使用 `--storage-backend = etcd2` 升级 v1.12.x kube-apiserver 到 v1.13.x 之前,etcd v2 数据必须迁移到 v3 存储后端,并且 kube-apiserver 调用改为使用 `--storage-backend=etcd3`。 - -从 etcd2 迁移到 etcd3 的过程在很大程度上取决于部署和配置 etcd 集群的方式,以及如何部署和配置 Kubernetes 集群。 我们建议您查阅集群提供商的文档,以了解是否存在预定义的解决方案。 - -如果您的集群是通过 `kube-up.sh` 创建的并且仍然使用 etcd2 作为其存储后端,请参阅 [Kubernetes v1.12 etcd 集群升级文档](https://v1-12.docs.kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#upgrading-and-rolling-back-etcd-clusters) - - - -## 已知问题:具有安全端点的 etcd 客户端均衡器 - -在 etcd v3.3.13 或更早版本的 etcd v3 客户端有一个[严重的错误](https://github.com/kubernetes/kubernetes/issues/72102),会影响 kube-apiserver 和 HA 部署。etcd 客户端平衡器故障转移不适用于安全端点。结果是,etcd 服务器可能会失败或短暂地与 kube-apiserver 断开连接。这会影响 kube-apiserver HA 的部署。 - -该修复程序是在 [etcd v3.4](https://github.com/etcd-io/etcd/pull/10911) 中进行的(并反向移植到 v3.3.14 或更高版本):现在,新客户端将创建自己的凭证捆绑包,以在拨号功能中正确设置授权目标。 - -因为此修复程序要求将 gRPC 依赖升级(到 v1.23.0 ),因此,下游 Kubernetes [未反向移植 etcd 升级](https://github.com/kubernetes/kubernetes/issues/72102#issuecomment-526645978)。这意味着只能从 Kubernetes 1.16 获得 [kube-apiserver 中的 etcd 修复](https://github.com/etcd-io/etcd/pull/10911/commits/db61ee106ca9363ba3f188ecf27d1a8843da33ab)。 - -要紧急修复 Kubernetes 1.15 或更早版本的此错误,请构建一个自定义的 kube-apiserver 。 您可以使用[`vendor/google.golang.org/grpc/credentials/credentials.go`](https://github.com/kubernetes/kubernetes/blob/7b85be021cd2943167cd3d6b7020f44735d9d90b/vendor/google.golang.org/grpc/credentials/credentials.go#L135) 和 [etcd@db61ee106](https://github.com/etcd-io/etcd/pull/10911/commits/db61ee106ca9363ba3f188ecf27d1a8843da33ab) 来进行本地更改。 - -请看 ["kube-apiserver 1.13.x refuses to work when first etcd-server is not available"](https://github.com/kubernetes/kubernetes/issues/72102). 
- +{{< note >}} \ No newline at end of file diff --git a/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md b/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md new file mode 100644 index 0000000000000..f8ed2cdb4a856 --- /dev/null +++ b/content/zh/docs/tasks/administer-cluster/controller-manager-leader-migration.md @@ -0,0 +1,293 @@ +--- +title: "将重复的控制平面迁至云控制器管理器" +linkTitle: "将重复的控制平面迁至云控制器管理器" +content_type: task +--- + + + + + +{{< feature-state state="alpha" for_k8s_version="v1.21" >}} + +{{< glossary_definition term_id="cloud-controller-manager" length="all" prepend="云管理控制器是">}} + + +## 背景 +作为[云驱动提取工作](https://kubernetes.io/blog/2019/04/17/the-future-of-cloud-providers-in-kubernetes/) +的一部分,所有特定于云的控制器都必须移出 `kube-controller-manager`。 +所有在 `kube-controller-manager` 中运行云控制器的现有集群必须迁移到云驱动特定的 `cloud-controller-manager` 中运行控制器。 + +领导者迁移提供了一种机制,使得 HA 集群可以通过两个组件之间的共享资源锁定, +安全地将“特定于云”的控制器从 `kube-controller-manager` 和迁移到`cloud-controller-manager`, +同时升级复制的控制平面。 +对于单节点控制平面,或者在升级过程中可以容忍控制器管理器不可用的情况,则不需要领导者迁移,并且可以忽略本指南。 + + +领导者迁移是一项 Alpha 阶段功能,默认情况下处于禁用状态,它需要设置控制器管理器的 `--enable-leader-migration` 参数。 +可以通过在 `kube-controller-manager` 或 `cloud-controller-manager` 上设置特性门控 +`ControllerManagerLeaderMigration` 和 `--enable-leader-migration` 来启用。 +领导者迁移仅在升级期间适用,并且可以安全地禁用,也可以在升级完成后保持启用状态。 + +本指南将引导你手动将控制平面从内置的云驱动的 `kube-controller-manager` 升级为 +同时运行 `kube-controller-manager` 和 `cloud-controller-manager`。 +如果使用工具来管理群集,请参阅对应工具和云驱动的文档以获取更多详细信息。 + +## {{% heading "prerequisites" %}} + + +假定控制平面正在运行 Kubernetes N 版本,并且要升级到 N+1 版本。 +尽管可以在同一版本中进行迁移,但理想情况下,迁移应作为升级的一部分执行,以便可以更改配置与发布保持一致。 +N 和 N+1的确切版本取决于各个云驱动。例如,如果云驱动构建了一个可与 Kubernetes 1.22 配合使用的 `cloud-controller-manager`, +则 N 可以为 1.21,N+1 可以为 1.22。 + +控制平面节点应运行 `kube-controller-manager`,并通过 `--leader-elect=true` 启用领导者选举。 +从版本 N 开始,树内云驱动必须设置 `--cloud-provider` 标志,而且 `cloud-controller-manager` 尚未部署。 + + +树外云驱动必须已经构建了一个实现领导者迁移的 `cloud-controller-manager`。 +如果云驱动导入了 v0.21.0 或更高版本的 `k8s.io/cloud-provider` 和 `k8s.io/controller-manager`, +则可以进行领导者迁移。 + +本指南假定每个控制平面节点的 kubelet 以静态 pod 的形式启动 `kube-controller-manager` +和 `cloud-controller-manager`,静态 pod 的定义在清单文件中。 +如果组件以其他设置运行,请相应地调整步骤。 + +为了获得授权,本指南假定集群使用 RBAC。 +如果其他授权模式授予 `kube-controller-manager` 和 `cloud-controller-manager` 组件权限, +请以与该模式匹配的方式授予所需的访问权限。 + + + + +### 授予访问迁移 Lease 的权限 + +控制器管理器的默认权限仅允许访问其主 Lease 对象。为了使迁移正常进行,需要访问其他 Lease 对象。 + +你可以通过修改 `system::leader-locking-kube-controller-manager` 角色来授予 +`kube-controller-manager` 对 Lease API 的完全访问权限。 +本任务指南假定迁移 Lease 的名称为 `cloud-provider-extraction-migration`。 + +`kubectl patch -n kube-system role 'system::leader-locking-kube-controller-manager' -p '{"rules": [ {"apiGroups":[ "coordination.k8s.io"], "resources": ["leases"], "resourceNames": ["cloud-provider-extraction-migration"], "verbs": ["create", "list", "get", "update"] } ]}' --type=merge` + +对 `system::leader-locking-cloud-controller-manager` 角色执行相同的操作。 + +`kubectl patch -n kube-system role 'system::leader-locking-cloud-controller-manager' -p '{"rules": [ {"apiGroups":[ "coordination.k8s.io"], "resources": ["leases"], "resourceNames": ["cloud-provider-extraction-migration"], "verbs": ["create", "list", "get", "update"] } ]}' --type=merge` + + +### 初始领导者迁移配置 + +领导者迁移需要一个表示控制器到管理器分配状态的配置文件。 +目前,对于树内云驱动,`kube-controller-manager` 运行 `route`、`service` 和 `cloud-node-lifecycle`。 +以下示例配置显示了分配。 + +```yaml +kind: LeaderMigrationConfiguration +apiVersion: controllermanager.config.k8s.io/v1alpha1 +leaderName: cloud-provider-extraction-migration +resourceLock: leases 
+controllerLeaders: + - name: route + component: kube-controller-manager + - name: service + component: kube-controller-manager + - name: cloud-node-lifecycle + component: kube-controller-manager +``` + + +在每个控制平面节点上,将内容保存到 `/etc/leadermigration.conf` 中, +并更新 `kube-controller-manager` 清单,以便将文件安装在容器内的同一位置。 +另外,更新相同的清单,添加以下参数: + +- `--feature-gates=ControllerManagerLeaderMigration=true` 启用领导者迁移(这是 Alpha 版功能) +- `--enable-leader-migration` 在控制器管理器上启用领导者迁移 +- `--leader-migration-config=/etc/leadermigration.conf` 设置配置文件 + +在每个节点上重新启动 `kube-controller-manager`。这时,`kube-controller-manager` +已启用领导者迁移,并准备进行迁移。 + + +### 部署云控制器管理器 + +在 N+1 版本中,控制器到管理器分配的期望状态可以由新的配置文件表示,如下所示。 +请注意,每个 `controllerLeaders` 的 `component` 字段从 `kube-controller-manager` 更改为 `cloud-controller-manager`。 + +```yaml +kind: LeaderMigrationConfiguration +apiVersion: controllermanager.config.k8s.io/v1alpha1 +leaderName: cloud-provider-extraction-migration +resourceLock: leases +controllerLeaders: + - name: route + component: cloud-controller-manager + - name: service + component: cloud-controller-manager + - name: cloud-node-lifecycle + component: cloud-controller-manager +``` + + + +当创建 N+1 版本的控制平面节点时,应将内容部署到 `/etc/leadermigration.conf`。 +应该更新 `cloud-controller-manager` 清单,以与 N 版本的 `kube-controller-manager` 相同的方式挂载配置文件。 +类似地,添加 `--feature-gates=ControllerManagerLeaderMigration=true`、`--enable-leader-migration` +和 `--leader-migration-config=/etc/leadermigration.conf` 到 `cloud-controller-manager` 的参数中。 + +使用已更新的 `cloud-controller-manager` 清单创建一个新的 N+1 版本的控制平面节点。 +并且没有设置 `kube-controller-manager` 的 `--cloud-provider` 标志。 +N+1 版本的 `kube-controller-manager` 不能启用领导者迁移, +因为在使用外部云驱动的情况下,它不再运行已迁移的控制器,因此不参与迁移。 + +请参阅[云控制器管理器管理](/zh/docs/tasks/administer-cluster/running-cloud-controller/) +了解有关如何部署 `cloud-controller-manager` 的更多细节。 + + +### 升级控制平面 + +现在,控制平面包含 N 和 N+1 版本的节点。 +N 版本的节点仅运行 `kube-controller-manager`,而 N+1 版本的节点同时运行 +`kube-controller-manager` 和 `cloud-controller-manager`。 +根据配置所指定,已迁移的控制器在 N 版本的 `kube-controller-manager` 或 N+1 版本的 +`cloud-controller-manager` 下运行, +具体取决于哪个控制器管理器拥有迁移 Lease 对象。任何时候都不存在一个控制器在两个控制器管理器下运行。 + +以滚动的方式创建一个新的版本为 N+1 的控制平面节点,并将 N+1 版本中的一个关闭, +直到控制平面仅包含版本为 N+1 的节点。 +如果需要从 N+1 版本回滚到 N 版本,则将启用了领导者迁移的 `kube-controller-manager` +且版本为 N 的节点添加回控制平面,每次替换 N+1 版本的一个,直到只有 N 版本的节点为止。 + + +### (可选)禁用领导者迁移 {#disable-leader-migration} + +现在,控制平面已经升级,可以同时运行 N+1 版本的 `kube-controller-manager` 和 `cloud-controller-manager` 了。 +领导者迁移已经完成工作,可以安全地禁用以节省一个 Lease 资源。 +在将来可以安全地重新启用领导者迁移以完成回滚。 + +在滚动管理器中,更新 `cloud-controller-manager` 的清单以同时取消设置 `--enable-leader-migration` +和 `--leader-migration-config=` 标志,并删除 `/etc/leadermigration.conf` 的挂载。 +最后删除 `/etc/leadermigration.conf`。 +要重新启用领导者迁移,请重新创建配置文件,并将其挂载和启用领导者迁移的标志添加回到 `cloud-controller-manager`。 + +## {{% heading "whatsnext" %}} + +- 阅读[领导者迁移控制器管理器](https://github.com/kubernetes/enhancements/tree/master/keps/sig-cloud-provider/2436-controller-manager-leader-migration)改进建议 \ No newline at end of file diff --git a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md index ccf573b155ac1..6de803ec91fe3 100644 --- a/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md +++ b/content/zh/docs/tasks/job/automated-tasks-with-cron-jobs.md @@ -2,7 +2,7 @@ title: 使用 CronJob 运行自动化任务 content_type: task weight: 10 -min-kubernetes-server-version: v1.8 +min-kubernetes-server-version: v1.21 --- +在Kubernetes v1.21 版本中,CronJob 被提升为通用版本。如果你使用的是旧版本的 Kubernetes,请参考你正在使用的 Kubernetes 版本的文档,这样你就能看到准确的信息。旧的 Kubernetes 版本不支持`batch/v1` 
CronJob API。 你可以利用 [CronJobs](/zh/docs/concepts/workloads/controllers/cron-jobs) 执行基于时间调度的任务。这些自动化任务和 Linux 或者 Unix 系统的 [Cron](https://en.wikipedia.org/wiki/Cron) 任务类似。 CronJobs 在创建周期性以及重复性的任务时很有帮助,例如执行备份操作或者发送邮件。CronJobs 也可以在特定时间调度单个任务,例如你想调度低活跃周期的任务。 @@ -283,25 +289,17 @@ If this field is not specified, the jobs have no deadline. 不满足这种最后期限的任务会被统计为失败任务。如果该域没有声明,那任务就没有最后期限。 -CronJob 控制器会统计错过了多少次调度。如果错过了100次以上的调度,CronJob 就不再调度了。 -当没有设置 `.spec.startingDeadlineSeconds` 时,CronJob 控制器统计从 -`status.lastScheduleTime` 到当前的调度错过次数。 -例如一个 CronJob 期望每分钟执行一次,`status.lastScheduleTime`是 `5:00am`, -但现在是 `7:00am`。那意味着 120 次调度被错过了,所以 CronJob 将不再被调度。 -如果设置了 `.spec.startingDeadlineSeconds` 域(非空),CronJob 控制器统计从 -`.spec.startingDeadlineSeconds` 到当前时间错过了多少次任务。 -例如设置了 `200`,它会统计过去 200 秒内错过了多少次调度。 -在那种情况下,如果过去 200 秒内错过了超过 100 次的调度,CronJob 就不再调度。 +如果`.spec.startingDeadlineSeconds`字段被设置(非空),CronJob 控制器会计算从预期创建 Job 到当前时间的时间差。 +如果时间差大于该限制,则跳过此次执行。 + +例如,如果将其设置为 `200`,则 Job 控制器允许在实际调度之后最多 200 秒内创建 Job。 + +## {{% heading "prerequisites" %}} + + +kubectl 版本和集群版本之间的差异必须在一个小版本号内。 +例如:v1.2 版本的客户端只能与 v1.1、v1.2 和 v1.3 版本的集群一起工作。 +用最新版的 kubectl 有助于避免不可预见的问题。 + + +## 在 Windows 上安装 kubectl {#install-kubectl-on-windows} + + +在 Windows 系统中安装 kubectl 有如下几种方法: + +- [用 curl 在 Windows 上安装 kubectl](#install-kubectl-binary-with-curl-on-windows) +- [用 PowerShell 从 PSGallery 安装](#install-with-powershell-from-psgallery) +- [在 Windows 上用 Chocolatey 或 Scoop 安装](#install-on-windows-using-chocolatey-or-scoop) +- [作为谷歌云 SDK 的一部分,在 Windows 上安装](#install-on-windows-as-part-of-the-google-cloud-sdk) + + +### 用 curl 在 Windows 上安装 kubectl {#install-kubectl-binary-with-curl-on-windows} + + +1. 下载 [最新发行版 {{< param "fullversion" >}}](https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe)。 + + 如果你已安装了 `curl`,也可以使用此命令: + + ```powershell + curl -LO https://dl.k8s.io/release/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe + ``` + + + {{< note >}} + 要想找到最新稳定的版本(例如:为了编写脚本),可以看看这里 [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt)。 + {{< /note >}} + + +1. 验证该可执行文件(可选步骤) + + 下载 kubectl 校验和文件: + + ```powershell + curl -LO https://dl.k8s.io/{{< param "fullversion" >}}/bin/windows/amd64/kubectl.exe.sha256 + ``` + + + 基于校验和文件,验证 kubectl 的可执行文件: + + + - 在命令行环境中,手工对比 `CertUtil` 命令的输出与校验和文件: + + ```cmd + CertUtil -hashfile kubectl.exe SHA256 + type kubectl.exe.sha256 + ``` + + + - 用 PowerShell 自动验证,用运算符 `-eq` 来直接取得 `True` 或 `False` 的结果: + + ```powershell + $($(CertUtil -hashfile .\kubectl.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl.exe.sha256) + ``` + + +1. 将可执行文件的路径添加到 `PATH`。 + +1. 测试一下,确保此 `kubectl` 的版本和期望版本一致: + + ```cmd + kubectl version --client + ``` + + +{{< note >}} +[Windows 版的 Docker Desktop](https://docs.docker.com/docker-for-windows/#kubernetes) +将其自带版本的 `kubectl` 添加到 `PATH`。 +如果你之前安装过 Docker Desktop,可能需要把此 `PATH` 条目置于 Docker Desktop 安装的条目之前, +或者直接删掉 Docker Desktop 的 `kubectl`。 +{{< /note >}} + + +### 用 PowerShell 从 PSGallery 安装 {#install-with-powershell-from-psgallery} + + +如果你工作在 Windows 平台上,且使用 [PowerShell Gallery](https://www.powershellgallery.com/) 包管理器, +则可以用 PowerShell 安装、更新 kubectl。 + + +1. 运行安装命令(确保提供了参数 `DownloadLocation`): + + ```powershell + Install-Script -Name 'install-kubectl' -Scope CurrentUser -Force + install-kubectl.ps1 [-DownloadLocation ] + ``` + + + {{< note >}} + 如果没有指定 `DownloadLocation`,`kubectl` 则会被安装到用户的 `temp` 目录下。 + {{< /note >}} + + + 安装程序创建 `$HOME/.kube`,并指示其创建配置文件。 + + +1. 
测试一下,确保你安装的是最新版本: + + ```powershell + kubectl version --client + ``` + + +{{< note >}} +更新安装是通过重新运行步骤 1 中的两个命令而实现。 +{{< /note >}} + + +### 在 Windows 上用 Chocolatey 或 Scoop 安装 {#install-on-windows-using-chocolatey-or-scoop} + + +1. 要在 Windows 上安装 kubectl,你可以使用包管理器 [Chocolatey](https://chocolatey.org) + 或是命令行安装器 [Scoop](https://scoop.sh)。 + + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} + ```powershell + choco install kubernetes-cli + ``` + {{% /tab %}} + {{% tab name="scoop" %}} + ```powershell + scoop install kubectl + ``` + {{% /tab %}} + {{< /tabs >}} + + +2. 测试一下,确保安装的是最新版本: + + ```powershell + kubectl version --client + ``` + + +3. 导航到你的 home 目录: + + + ```powershell + # 当你用 cmd.exe 时,则运行: cd %USERPROFILE% + cd ~ + ``` + + +4. 创建目录 `.kube`: + + ```powershell + mkdir .kube + ``` + + +5. 切换到新创建的目录 `.kube` + + ```powershell + cd .kube + ``` + + +6. 配置 kubectl,以接入远程的 Kubernetes 集群: + + ```powershell + New-Item config -type file + ``` + + +{{< note >}} +编辑配置文件,你需要先选择一个文本编辑器,比如 Notepad。 +{{< /note >}} + + +### 作为谷歌云 SDK 的一部分,在 Windows 上安装 {#install-on-windows-as-part-of-the-google-cloud-sdk} + +{{< include "included/install-kubectl-gcloud.md" >}} + + +## 验证 kubectl 配置 {#verify-kubectl-configration} + +{{< include "included/verify-kubectl.md" >}} + + +## kubectl 可选配置 {#optional-kubectl-configurations} + +### 启用 shell 自动补全功能 {#enable-shell-autocompletion} + + +kubectl 为 Bash 和 Zsh 提供自动补全功能,可以减轻许多输入的负担。 + +下面是设置 Zsh 自动补全功能的操作步骤,前提是你在 Windows 上面运行的是 Zsh。 + +{{< include "included/optional-kubectl-configs-zsh.md" >}} + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} \ No newline at end of file diff --git a/content/zh/docs/tutorials/_index.md b/content/zh/docs/tutorials/_index.md index 42415cafe2bc4..821855b712812 100644 --- a/content/zh/docs/tutorials/_index.md +++ b/content/zh/docs/tutorials/_index.md @@ -51,10 +51,14 @@ Kubernetes 文档的这一部分包含教程。每个教程展示了如何完成 ## 配置 +* [示例:配置 Java 微服务](/zh/docs/tutorials/configuration/configure-java-microservice/) + * [使用一个 ConfigMap 配置 Redis](/zh/docs/tutorials/configuration/configure-redis-using-configmap/) @@ -17,11 +19,15 @@ content_type: tutorial -Apparmor 是一个 Linux 内核安全模块,它补充了标准的基于 Linux 用户和组的安全模块将程序限制为有限资源集的权限。AppArmor 可以配置为任何应用程序减少潜在的攻击面,并且提供更加深入的防御。AppArmor 是通过配置文件进行配置的,这些配置文件被调整为报名单,列出了特定程序或者容器所需要的访问权限,如 Linux 功能、网络访问、文件权限等。每个配置文件都可以在*强制*模式(阻止访问不允许的资源)或*投诉*模式(仅报告冲突)下运行。 +Apparmor 是一个 Linux 内核安全模块,它补充了标准的基于 Linux 用户和组的安全模块将程序限制为有限资源集的权限。 +AppArmor 可以配置为任何应用程序减少潜在的攻击面,并且提供更加深入的防御。 +AppArmor 是通过配置文件进行配置的,这些配置文件被调整为允许特定程序或者容器访问,如 Linux 功能、网络访问、文件权限等。 +每个配置文件都可以在*强制(enforcing)*模式(阻止访问不允许的资源)或*投诉(complain)*模式 +(仅报告冲突)下运行。 @@ -244,9 +250,8 @@ k8s-apparmor-example-deny-write (enforce) *本例假设您已经使用 AppArmor 支持设置了一个集群。* - -首先,我们需要将要使用的配置文件加载到节点上。我们将使用的配置文件仅拒绝所有文件写入: + +首先,我们需要将要使用的配置文件加载到节点上。配置文件拒绝所有文件写入: ```shell #include @@ -259,9 +264,12 @@ profile k8s-apparmor-example-deny-write flags=(attach_disconnected) { ``` -由于我们不知道 Pod 将被安排在那里,我们需要在所有节点上加载配置文件。在本例中,我们将只使用 SSH 来安装概要文件,但是在[使用配置文件设置节点](#setting-up-nodes-with-profiles)中讨论了其他方法。 +nodes. For this example we'll use SSH to install the profiles, but other approaches are +discussed in [Setting up nodes with profiles](#setting-up-nodes-with-profiles). 
+--> +由于我们不知道 Pod 将被调度到哪里,我们需要在所有节点上加载配置文件。 +在本例中,我们将使用 SSH 来安装概要文件,但是在[使用配置文件设置节点](#setting-up-nodes-with-profiles) +中讨论了其他方法。 ```shell NODES=( @@ -403,9 +411,9 @@ Events: 23s 23s 1 {kubelet e2e-test-stclair-node-pool-t1f5} Warning AppArmor Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" is not loaded ``` - -注意 pod 呈现失败状态,并且显示一条有用的错误信息:`Pod Cannot enforce AppArmor: profile +注意 pod 呈现 Pending 状态,并且显示一条有用的错误信息:`Pod Cannot enforce AppArmor: profile "k8s-apparmor-example-allow-write" 未加载`。还用相同的消息记录了一个事件。 diff --git a/content/zh/docs/tutorials/clusters/seccomp.md b/content/zh/docs/tutorials/clusters/seccomp.md index 693eb576457ea..9f8d7dc2f3a7e 100644 --- a/content/zh/docs/tutorials/clusters/seccomp.md +++ b/content/zh/docs/tutorials/clusters/seccomp.md @@ -52,14 +52,14 @@ Kubernetes 允许你将加载到节点上的 seccomp 配置文件自动应用于 为了完成本教程中的所有步骤,你必须安装 [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) -和 [kubectl](/zh/docs/tasks/tools/install-kubectl/)。本教程将显示同时具有 alpha(v1.19 之前的版本) +和 [kubectl](/zh/docs/tasks/tools/)。本教程将显示同时具有 alpha(v1.19 之前的版本) 和通常可用的 seccomp 功能的示例,因此请确保为所使用的版本[正确配置](https://kind.sigs.k8s.io/docs/user/quick-start/#setting-kubernetes-version)了集群。 @@ -91,8 +91,8 @@ into the cluster. For simplicity, [kind](https://kind.sigs.k8s.io/) can be used to create a single node cluster with the seccomp profiles loaded. Kind runs Kubernetes in Docker, -so each node of the cluster is actually just a container. This allows for files -to be mounted in the filesystem of each container just as one might load files +so each node of the cluster is a container. This allows for files +to be mounted in the filesystem of each container similar to loading files onto a node. Download the example above, and save it to a file named `kind.yaml`. Then create @@ -101,8 +101,8 @@ the cluster with the configuration. ## 使用 Kind 创建一个本地 Kubernetes 集群 为简单起见,可以使用 [kind](https://kind.sigs.k8s.io/) 创建一个已经加载 seccomp 配置文件的单节点集群。 -Kind 在 Docker 中运行 Kubernetes,因此集群的每个节点实际上只是一个容器。这允许将文件挂载到每个容器的文件系统中, -就像将文件挂载到节点上一样。 +Kind 在 Docker 中运行 Kubernetes,因此集群的每个节点都是一个容器。这允许将文件挂载到每个容器的文件系统中, +类似于将文件挂载到节点上。 {{< codenew file="pods/security/seccomp/kind.yaml" >}}
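
Editorial illustration (not part of the diff above): the `pods/security/seccomp/kind.yaml` file referenced by the shortcode is not reproduced in this change. As a rough, hypothetical sketch only — the `./profiles` host directory and the mount target are assumptions, not the actual contents of that file — a kind configuration that pre-loads seccomp profiles onto the single node container could look something like this:

```yaml
# Hypothetical sketch of a kind cluster config that mounts a local directory
# of seccomp profile JSON files into the node container, so that Pods can
# later reference them via seccompProfile.type: Localhost.
# "./profiles" and the container path are illustrative assumptions; the
# kubelet's default seccomp root is /var/lib/kubelet/seccomp.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraMounts:
  - hostPath: "./profiles"
    containerPath: "/var/lib/kubelet/seccomp/profiles"
```

Saved as `kind.yaml`, a command such as `kind create cluster --config=kind.yaml` would then start a node whose filesystem already contains the profiles, much like copying them onto a regular node over SSH.
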
diff --git a/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md b/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md index f57514b47ebfa..027771a28c309 100644 --- a/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md +++ b/content/zh/docs/tutorials/configuration/configure-redis-using-configmap.md @@ -19,17 +19,13 @@ This page provides a real world example of how to configure Redis using a Config -* * 创建一个包含以下内容的 `kustomization.yaml` 文件: - * 一个 ConfigMap 生成器 - * 一个使用 ConfigMap 的 Pod 资源配置 -* 使用 `kubectl apply -k ./` 应用整个路径的配置 +* 使用 Redis 配置的值创建一个 ConfigMap +* 创建一个 Redis Pod,挂载并使用创建的 ConfigMap * 验证配置已经被正确应用。 @@ -55,105 +51,311 @@ This page provides a real world example of how to configure Redis using a Config ## 真实世界的案例:使用 ConfigMap 来配置 Redis -按照下面的步骤,您可以使用ConfigMap中的数据来配置Redis缓存。 +按照下面的步骤,使用 ConfigMap 中的数据来配置 Redis 缓存。 -1. 根据`docs/user-guide/configmap/redis/redis-config`来创建一个ConfigMap: +首先创建一个配置模块为空的 ConfigMap: +```shell +cat <./example-redis-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-redis-config +data: + redis-config: "" +EOF +``` -{{< codenew file="pods/config/redis-config" >}} + +应用上面创建的 ConfigMap 以及 Redis pod 清单: ```shell -curl -OL https://k8s.io/examples/pods/config/redis-config - -cat <./kustomization.yaml -configMapGenerator: -- name: example-redis-config - files: - - redis-config -EOF +kubectl apply -f example-redis-config.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml ``` -将 pod 的资源配置添加到 `kustomization.yaml` 文件中: +检查 Redis pod 清单的内容,并注意以下几点: + +* 由 `spec.volumes[1]` 创建一个名为 `config` 的卷。 +* `spec.volumes[1].items[0]` 下的 `key` 和 `path` 会将来自 `example-redis-config` + ConfigMap 中的 `redis-config` 密钥公开在 `config` 卷上一个名为 `redis-config` 的文件中。 +* 然后 `config` 卷被 `spec.containers[0].volumeMounts[1]` 挂载在 `/redis-master`。 + +这样做的最终效果是将上面 `example-redis-config` 配置中 `data.redis-config` 的数据作为 Pod 中的 `/redis-master/redis.conf` 公开。 {{< codenew file="pods/config/redis-pod.yaml" >}} + +检查创建的对象: + ```shell -curl -OL https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml +kubectl get pod/redis configmap/example-redis-config +``` -cat <>./kustomization.yaml -resources: -- redis-pod.yaml -EOF + +你应该可以看到以下输出: + +```shell +NAME READY STATUS RESTARTS AGE +pod/redis 1/1 Running 0 8s + +NAME DATA AGE +configmap/example-redis-config 1 14s ``` -应用整个 kustomization 文件夹以创建 ConfigMap 和 Pod 对象: +回顾一下,我们在 `example-redis-config` ConfigMap 保留了空的 `redis-config` 键: ```shell -kubectl apply -k . +kubectl describe configmap/example-redis-config ``` -使用以下命令检查创建的对象 +你应该可以看到一个空的 `redis-config` 键: ```shell -> kubectl get -k . 
-NAME DATA AGE -configmap/example-redis-config-dgh9dg555m 1 52s +Name: example-redis-config +Namespace: default +Labels: +Annotations: + +Data +==== +redis-config: +``` -NAME READY STATUS RESTARTS AGE -pod/redis 1/1 Running 0 52s + +使用 `kubectl exec` 进入 pod,运行 `redis-cli` 工具检查当前配置: + +```shell +kubectl exec -it redis -- redis-cli +``` + + +查看 `maxmemory`: + +```shell +127.0.0.1:6379> CONFIG GET maxmemory +``` + + +它应该显示默认值 0: + +```shell +1) "maxmemory" +2) "0" +``` + + +同样,查看 `maxmemory-policy`: + +```shell +127.0.0.1:6379> CONFIG GET maxmemory-policy +``` + + +它也应该显示默认值 `noeviction`: + +```shell +1) "maxmemory-policy" +2) "noeviction" +``` + + +现在,向 `example-redis-config` ConfigMap 添加一些配置: + +{{< codenew file="pods/config/example-redis-config.yaml" >}} + + +应用更新的 ConfigMap: + +```shell +kubectl apply -f example-redis-config.yaml ``` -在示例中,配置卷挂载在 `/redis-master` 下。 -它使用 `path` 将 `redis-config` 密钥添加到名为 `redis.conf` 的文件中。 -因此,redis配置的文件路径为 `/redis-master/redis.conf`。 -这是镜像将在其中查找 redis master 的配置文件的位置。 +确认 ConfigMap 已更新: + +```shell +kubectl describe configmap/example-redis-config +``` -使用 `kubectl exec` 进入 pod 并运行 `redis-cli` 工具来验证配置已正确应用: +你应该可以看到我们刚刚添加的配置: + +```shell +Name: example-redis-config +Namespace: default +Labels: +Annotations: + +Data +==== +redis-config: +---- +maxmemory 2mb +maxmemory-policy allkeys-lru +``` + + +通过 `kubectl exec` 使用 `redis-cli` 再次检查 Redis Pod,查看是否已应用配置: ```shell kubectl exec -it redis -- redis-cli +``` + + +查看 `maxmemory`: + +```shell 127.0.0.1:6379> CONFIG GET maxmemory +``` + + +它保持默认值 0: + +```shell 1) "maxmemory" -2) "2097152" +2) "0" +``` + + +同样,`maxmemory-policy` 保留为默认设置 `noeviction`: + +```shell 127.0.0.1:6379> CONFIG GET maxmemory-policy +``` + + +返回: + +```shell 1) "maxmemory-policy" -2) "allkeys-lru" +2) "noeviction" ``` -删除创建的 pod: +配置值未更改,因为需要重新启动 Pod 才能从关联的 ConfigMap 中获取更新的值。 +让我们删除并重新创建 Pod: + ```shell kubectl delete pod redis +kubectl apply -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/pods/config/redis-pod.yaml ``` + +现在,最后一次重新检查配置值: + +```shell +kubectl exec -it redis -- redis-cli +``` + +查看 `maxmemory`: + +```shell +127.0.0.1:6379> CONFIG GET maxmemory +``` + + +现在,它应该返回更新后的值 2097152: + +```shell +1) "maxmemory" +2) "2097152" +``` + + +同样,`maxmemory-policy` 也已更新: + +```shell +127.0.0.1:6379> CONFIG GET maxmemory-policy +``` + + +现在它反映了期望值 `allkeys-lru`: + +```shell +1) "maxmemory-policy" +2) "allkeys-lru" +``` + + +删除创建的资源,清理你的工作: + +```shell +kubectl delete pod/redis configmap/example-redis-config +``` ## {{% heading "whatsnext" %}} diff --git a/content/zh/docs/tutorials/hello-minikube.md b/content/zh/docs/tutorials/hello-minikube.md index c07f99998b116..0fc6b828d7511 100644 --- a/content/zh/docs/tutorials/hello-minikube.md +++ b/content/zh/docs/tutorials/hello-minikube.md @@ -112,7 +112,7 @@ This tutorial provides a container image that uses NGINX to echo back all the re @@ -120,7 +120,7 @@ To stop the proxy, run `Ctrl+C` to exit the process. The dashboard remains runni `dashboard` 命令启用仪表板插件,并在默认的 Web 浏览器中打开代理。你可以在仪表板上创建 Kubernetes 资源,例如 Deployment 和 Service。 如果你以 root 用户身份在环境中运行, -请参见[使用 URL 打开仪表板](/zh/docs/tutorials/hello-minikube#open-dashboard-with-url)。 +请参见[使用 URL 打开仪表板](#open-dashboard-with-url)。 要停止代理,请运行 `Ctrl+C` 退出该进程。仪表板仍在运行中。 {{< /note >}} @@ -273,9 +273,9 @@ Kubernetes [*Service*](/docs/concepts/services-networking/service/). 如果你用 `kubectl expose` 暴露了其它的端口,客户端将不能访问其它端口。 -2. 查看你刚刚创建的 Service: +2. 
查看你创建的 Service: ```shell kubectl get services @@ -391,9 +391,9 @@ Minikube 有一组内置的 {{< glossary_tooltip text="插件" term_id="addons" ``` -3. 查看刚才创建的 Pod 和 Service: +3. 查看创建的 Pod 和 Service: ```shell kubectl get pod,svc -n kube-system diff --git a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md index 0dd5ddcf1a2ce..4fe30daeceb5a 100644 --- a/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md +++ b/content/zh/docs/tutorials/stateful-application/basic-stateful-set.md @@ -120,6 +120,8 @@ Headless Service and StatefulSet defined in `web.yaml`. ```shell kubectl apply -f web.yaml +``` +``` service/nginx created statefulset.apps/web created ``` @@ -134,10 +136,19 @@ The command above creates two Pods, each running an ```shell kubectl get service nginx +``` +``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx ClusterIP None 80/TCP 12s - +``` + +...然后获取 `web` StatefulSet,以验证两者均已成功创建: +```shell kubectl get statefulset web +``` +``` NAME DESIRED CURRENT AGE web 2 1 20s ``` @@ -159,6 +170,8 @@ look like the example below. ```shell kubectl get pods -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 Pending 0 0s web-0 0/1 Pending 0 0s @@ -200,10 +213,11 @@ StatefulSet 中的 Pod 拥有一个唯一的顺序索引和稳定的网络身份 ```shell kubectl get pods -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 1m web-1 1/1 Running 0 1m - ``` +这将启动一个新的 shell。在新 shell 中,运行: +```shell +# Run this in the dns-test container shell nslookup web-0.nginx +``` + +输出类似于: +``` Server: 10.0.0.10 Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local @@ -284,6 +313,8 @@ the Pods in the StatefulSet. ```shell kubectl delete pod -l app=nginx +``` +``` pod "web-0" deleted pod "web-1" deleted ``` @@ -297,6 +328,8 @@ Running and Ready. ```shell kubectl get pod -w -l app=nginx +``` +``` NAME READY STATUS RESTARTS AGE web-0 0/1 ContainerCreating 0 0s NAME READY STATUS RESTARTS AGE @@ -316,11 +349,32 @@ DNS entries. ```shell for i in 0 1; do kubectl exec web-$i -- sh -c 'hostname'; done +``` +``` web-0 web-1 - +``` + +然后,运行: +``` kubectl run -i --tty --image busybox:1.28 dns-test --restart=Never --rm /bin/sh +``` + +这将启动一个新的 shell。在新 shell 中,运行: +```shell +# Run this in the dns-test container shell nslookup web-0.nginx +``` + +输出类似于: +``` Server: 10.0.0.10 Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local @@ -377,6 +431,12 @@ Get the PersistentVolumeClaims for `web-0` and `web-1`. 
```shell kubectl get pvc -l app=nginx +``` + +输出类似于: +``` NAME STATUS VOLUME CAPACITY ACCESSMODES AGE www-web-0 Bound pvc-15c268c7-b507-11e6-932f-42010a800002 1Gi RWO 48s www-web-1 Bound pvc-15c79307-b507-11e6-932f-42010a800002 1Gi RWO 48s @@ -405,30 +465,35 @@ NGINX web 服务器默认会加载位于 `/usr/share/nginx/html/index.html` 的 将 Pod 的主机名写入它们的`index.html`文件并验证 NGINX web 服务器使用该主机名提供服务。 ```shell -for i in 0 1; do kubectl exec web-$i -- sh -c 'echo $(hostname) > /usr/share/nginx/html/index.html'; done +for i in 0 1; do kubectl exec "web-$i" -- sh -c 'echo "$(hostname)" > /usr/share/nginx/html/index.html'; done -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done +``` +``` web-0 web-1 ``` {{< note >}} -请注意,如果你看见上面的 curl 命令返回了 403 Forbidden 的响应,你需要像这样修复使用 `volumeMounts`(due to a [bug when using hostPath volumes](https://github.com/kubernetes/kubernetes/issues/2630))挂载的目录的权限: +请注意,如果你看见上面的 curl 命令返回了 **403 Forbidden** 的响应,你需要像这样修复使用 `volumeMounts` +(原因归咎于[使用 hostPath 卷时存在的缺陷](https://github.com/kubernetes/kubernetes/issues/2630)) +挂载的目录的权限 +运行: + +`for i in 0 1; do kubectl exec web-$i -- chmod 755 /usr/share/nginx/html; done` -```shell -for i in 0 1; do kubectl exec web-$i -- chmod 755 /usr/share/nginx/html; done -``` -在你重新尝试上面的 curl 命令之前。 +在你重新尝试上面的 `curl` 命令之前。 {{< /note >}} ```shell kubectl scale sts web --replicas=5 +``` +``` statefulset.apps/web scaled ``` +输出类似于: +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 7m web-1 1/1 Running 0 7m @@ -761,7 +850,9 @@ StatefulSet 里的 Pod 采用和序号相反的顺序更新。在更新下一个 获取 Pod 来查看他们的容器镜像。 ```shell -for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +``` +``` k8s.gcr.io/nginx-slim:0.8 k8s.gcr.io/nginx-slim:0.8 k8s.gcr.io/nginx-slim:0.8 @@ -798,6 +889,8 @@ Patch `web` StatefulSet 来对 `updateStrategy` 字段添加一个分区。 ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":3}}}}' +``` +``` statefulset.apps/web patched ``` @@ -809,6 +902,8 @@ Patch the StatefulSet again to change the container's image. ```shell kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]' +``` +``` statefulset.apps/web patched ``` @@ -819,7 +914,9 @@ Delete a Pod in the StatefulSet. 删除 StatefulSet 中的 Pod。 ```shell -kubectl delete po web-2 +kubectl delete pod web-2 +``` +``` pod "web-2" deleted ``` @@ -830,7 +927,9 @@ Wait for the Pod to be Running and Ready. 等待 Pod 变成 Running 和 Ready。 ```shell -kubectl get po -lapp=nginx -w +kubectl get pod -l app=nginx -w +``` +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 4m web-1 1/1 Running 0 4m @@ -845,10 +944,10 @@ Get the Pod's container. 获取 Pod 的容器。 ```shell -kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +kubectl get pod web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +``` +``` k8s.gcr.io/nginx-slim:0.8 - - ``` +输出类似于: +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 6m web-1 0/1 Terminating 0 6m @@ -952,7 +1065,9 @@ Get the `web-1` Pods container. 
获取 `web-1` Pod 的容器。 ```shell -kubectl get po web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +kubectl get pod web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}' +``` +``` k8s.gcr.io/nginx-slim:0.8 ``` @@ -986,6 +1101,8 @@ The partition is currently set to `2`. Set the partition to `0`. ```shell kubectl patch statefulset web -p '{"spec":{"updateStrategy":{"type":"RollingUpdate","rollingUpdate":{"partition":0}}}}' +``` +``` statefulset.apps/web patched ``` @@ -996,7 +1113,13 @@ Wait for all of the Pods in the StatefulSet to become Running and Ready. 等待 StatefulSet 中的所有 Pod 变成 Running 和 Ready。 ```shell -kubectl get po -lapp=nginx -w +kubectl get pod -l app=nginx -w +``` + +输出类似于: +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 3m web-1 0/1 ContainerCreating 0 11s @@ -1020,11 +1143,12 @@ Get the Pod's containers. 获取 Pod 的容器。 ```shell -for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +for p in 0 1 2; do kubectl get pod "web-$p" --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done +``` +``` k8s.gcr.io/nginx-slim:0.7 k8s.gcr.io/nginx-slim:0.7 k8s.gcr.io/nginx-slim:0.7 - ``` -当重新创建 `web` StatefulSet 时,`web-0`被第一个重新启动。由于 `web-1` 已经处于 Running 和 Ready 状态,当 `web-0` 变成 Running 和 Ready 时,StatefulSet 会直接接收这个 Pod。由于你重新创建的 StatefulSet 的 `replicas` 等于 2,一旦 `web-0` 被重新创建并且 `web-1` 被认为已经处于 Running 和 Ready 状态时,`web-2`将会被终止。 +当重新创建 `web` StatefulSet 时,`web-0` 被第一个重新启动。 +由于 `web-1` 已经处于 Running 和 Ready 状态,当 `web-0` 变成 Running 和 Ready 时, +StatefulSet 会接收这个 Pod。由于你重新创建的 StatefulSet 的 `replicas` 等于 2, +一旦 `web-0` 被重新创建并且 `web-1` 被认为已经处于 Running 和 Ready 状态时,`web-2` 将会被终止。 -让我们再看看被 Pod 的 web 服务器加载的 `index.html` 的内容。 +让我们再看看被 Pod 的 web 服务器加载的 `index.html` 的内容: ```shell -for i in 0 1; do kubectl exec -it web-$i -- curl localhost; done +for i in 0 1; do kubectl exec -i -t "web-$i" -- curl http://localhost/; done +``` + +``` web-0 web-1 ``` @@ -1229,11 +1370,17 @@ In one terminal window, watch the Pods in the StatefulSet. kubectl get pods -w -l app=nginx ``` + 在另一个窗口中再次删除这个 StatefulSet。这次省略 `--cascade=false` 参数。 ```shell kubectl delete statefulset web +``` + +``` statefulset.apps "web" deleted ``` @@ -1246,6 +1393,9 @@ and wait for all of the Pods to transition to Terminating. ```shell kubectl get pods -w -l app=nginx +``` + +``` NAME READY STATUS RESTARTS AGE web-0 1/1 Running 0 11m web-1 1/1 Running 0 27m @@ -1279,6 +1429,9 @@ must delete the `nginx` Service manually. ```shell kubectl delete service nginx +``` + +``` service "nginx" deleted ``` @@ -1290,9 +1443,11 @@ Recreate the StatefulSet and Headless Service one more time. ```shell kubectl apply -f web.yaml +``` + +``` service/nginx created statefulset.apps/web created - ``` ## Pod 管理策略 -对于某些分布式系统来说,StatefulSet 的顺序性保证是不必要和/或者不应该的。这些系统仅仅要求唯一性和身份标志。为了解决这个问题,在 Kubernetes 1.7 中我们针对 StatefulSet API Object 引入了 `.spec.podManagementPolicy`。 - +对于某些分布式系统来说,StatefulSet 的顺序性保证是不必要和/或者不应该的。 +这些系统仅仅要求唯一性和身份标志。为了解决这个问题,在 Kubernetes 1.7 中 +我们针对 StatefulSet API 对象引入了 `.spec.podManagementPolicy`。 +此选项仅影响扩缩操作的行为。更新不受影响。 ### OrderedReady Pod 管理策略 @@ -1372,7 +1536,8 @@ Pod. 
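A small aside that is not part of the upstream tutorial text: if you want to confirm which pod management policy a StatefulSet is actually using, a one-line `kubectl` query along the following lines should suffice. It is a sketch that assumes the `web` StatefulSet created earlier on this page; for an object that does not set the field explicitly, the API server defaults it to `OrderedReady`.

```shell
# Print the pod management policy of the `web` StatefulSet from this tutorial.
# For a StatefulSet that does not set the field explicitly, the stored object
# carries the default value, OrderedReady.
kubectl get statefulset web -o jsonpath='{.spec.podManagementPolicy}'
```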
### Parallel Pod 管理策略 -`Parallel` pod 管理策略告诉 StatefulSet 控制器并行的终止所有 Pod,在启动或终止另一个 Pod 前,不必等待这些 Pod 变成 Running 和 Ready 或者完全终止状态。 +`Parallel` pod 管理策略告诉 StatefulSet 控制器并行的终止所有 Pod, +在启动或终止另一个 Pod 前,不必等待这些 Pod 变成 Running 和 Ready 或者完全终止状态。 {{< codenew file="application/web/web-parallel.yaml" >}} @@ -1398,16 +1563,17 @@ kubectl get po -lapp=nginx -w ``` -在另一个终端窗口创建清单中的 StatefulSet 和 Service。 +在另一个终端窗口创建清单中的 StatefulSet 和 Service: ```shell kubectl apply -f web-parallel.yaml +``` +``` service/nginx created statefulset.apps/web created - ``` -StatefulSet 控制器启动了两个新的 Pod,而且在启动第二个之前并没有等待第一个变成 Running 和 Ready 状态。 +StatefulSet 启动了两个新的 Pod,而且在启动第二个之前并没有等待第一个变成 Running 和 Ready 状态。 -保持这个终端打开,并在另一个终端删除 `web` StatefulSet。 +## {{% heading "cleanup" %}} + +您应该打开两个终端,准备在清理过程中运行 `kubectl` 命令。 ```shell kubectl delete sts web +# sts is an abbreviation for statefulset ``` -在另一个终端里再次检查 `kubectl get` 命令的输出。 +你可以监测 `kubectl get` 来查看那些 Pod 被删除 ```shell +kubectl get pod -l app=nginx -w +``` +``` web-3 1/1 Terminating 0 9m web-2 1/1 Terminating 0 9m web-3 1/1 Terminating 0 9m @@ -1530,10 +1709,12 @@ kubectl delete svc nginx 你需要删除本教程中用到的 PersistentVolumes 的持久化存储介质。基于你的环境、存储配置和提供方式,按照必须的步骤保证回收所有的存储。 diff --git a/content/zh/docs/tutorials/stateful-application/cassandra.md b/content/zh/docs/tutorials/stateful-application/cassandra.md index 98d0d6ab44016..461e3d16325e2 100644 --- a/content/zh/docs/tutorials/stateful-application/cassandra.md +++ b/content/zh/docs/tutorials/stateful-application/cassandra.md @@ -1,807 +1,439 @@ --- -title: "示例:使用 Stateful Sets 部署 Cassandra" +title: "示例:使用 StatefulSet 部署 Cassandra" +content_type: tutorial +weight: 30 --- -## 目录 - - - [准备工作](#prerequisites) - - [Cassandra docker 镜像](#cassandra-docker) - - [快速入门](#quickstart) - - [步骤1:创建 Cassandra Headless Service](#step-1-create-a-cassandra-headless-service) - - [步骤2:使用 StatefulSet 创建 Cassandra Ring 环](#step-2-use-a-statefulset-to-create-cassandra-ring) - - [步骤3:验证并修改 Cassandra StatefulSet](#step-3-validate-and-modify-the-cassandra-statefulset) - - [步骤4:删除 Cassandra StatefulSet](#step-4-delete-cassandra-statefulset) - - [步骤5:使用 Replication Controller 创建 Cassandra 节点 pods](#step-5-use-a-replication-controller-to-create-cassandra-node-pods) - - [步骤6:Cassandra 集群扩容](#step-6-scale-up-the-cassandra-cluster) - - [步骤7:删除 Replication Controller](#step-7-delete-the-replication-controller) - - [步骤8:使用 DaemonSet 替换 Replication Controller](#step-8-use-a-daemonset-instead-of-a-replication-controller) - - [步骤9:资源清理](#step-9-resource-cleanup) - - [Seed Provider Source](#seed-provider-source) - - -下文描述了在 Kubernetes 上部署一个_云原生_ [Cassandra](http://cassandra.apache.org/) 的过程。当我们说_云原生_时,指的是一个应用能够理解它运行在一个集群管理器内部,并且使用这个集群的管理基础设施来帮助实现这个应用。特别的,本例使用了一个自定义的 Cassandra `SeedProvider` 帮助 Cassandra 发现新加入集群 Cassandra 节点。 - - -本示例也使用了Kubernetes的一些核心组件: - -- [_Pods_](/zh/docs/user-guide/pods) -- [ _Services_](/zh/docs/user-guide/services) -- [_Replication Controllers_](/zh/docs/user-guide/replication-controller) -- [_Stateful Sets_](/zh/docs/concepts/workloads/controllers/statefulset/) -- [_Daemon Sets_](/zh/docs/admin/daemons) - - - -## 准备工作 - - -本示例假设你已经安装运行了一个 Kubernetes集群(版本 >=1.2),并且还在某个路径下安装了 [`kubectl`](/zh/docs/tasks/tools/install-kubectl/) 命令行工具。请查看 [getting started guides](/zh/docs/getting-started-guides/) 获取关于你的平台的安装说明。 - - -本示例还需要一些代码和配置文件。为了避免手动输入,你可以 `git clone` Kubernetes 源到你本地。 - - -## Cassandra Docker 镜像 - - -Pod 使用来自 Google [容器仓库](https://cloud.google.com/container-registry/docs/) 的 
[```gcr.io/google-samples/cassandra:v12```](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) 镜像。这个 docker 镜像基于 `debian:jessie` 并包含 OpenJDK 8。该镜像包含一个从 Apache Debian 源中安装的标准 Cassandra。你可以通过使用环境变量改变插入到 `cassandra.yaml` 文件中的参数值。 - -| ENV VAR | DEFAULT VALUE | -| ---------------------- | :------------: | -| CASSANDRA_CLUSTER_NAME | 'Test Cluster' | -| CASSANDRA_NUM_TOKENS | 32 | -| CASSANDRA_RPC_ADDRESS | 0.0.0.0 | - - -## 快速入门 - - -{{< codenew file="application/cassandra/cassandra-service.yaml" >}} - -如果你希望直接跳到我们使用的命令,以下是全部步骤: - + - -```sh - -kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-service.yaml -``` - -{{< codenew file="application/cassandra/cassandra-statefulset.yaml" >}} - -``` -# 创建 statefulset -kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml - -# 验证 Cassandra 集群。替换一个 pod 的名称。 -kubectl exec -ti cassandra-0 -- nodetool status - -# 清理 -grace=$(kubectl get po cassandra-0 -o=jsonpath='{.spec.terminationGracePeriodSeconds}') \ - && kubectl delete statefulset,po -l app=cassandra \ - && echo "Sleeping $grace" \ - && sleep $grace \ - && kubectl delete pvc -l app=cassandra - -# -# 资源控制器示例 -# - -# 创建一个副本控制器来复制 cassandra 节点 -kubectl create -f cassandra/cassandra-controller.yaml - -# 验证 Cassandra 集群。替换一个 pod 的名称。 -kubectl exec -ti cassandra-xxxxx -- nodetool status - -# 扩大 Cassandra 集群 -kubectl scale rc cassandra --replicas=4 - -# 删除副本控制器 -kubectl delete rc cassandra - -# -# 创建一个 DaemonSet,在每个 kubernetes 节点上放置一个 cassandra 节点 -# - -kubectl create -f cassandra/cassandra-daemonset.yaml --validate=false - -# 资源清理 -kubectl delete service -l app=cassandra -kubectl delete daemonset cassandra -``` +本教程描述拉如何在 Kubernetes 上运行 [Apache Cassandra](https://cassandra.apache.org/)。 +数据库 Cassandra 需要永久性存储提供数据持久性(应用 _状态_)。 +在此示例中,自定义 Cassandra seed provider 使数据库在加入 Cassandra 集群时发现新的 Cassandra 实例。 +使用 *StatefulSets* 可以更轻松地将有状态的应用程序部署到你的 Kubernetes 集群中。 +有关本教程中使用的功能的更多信息, +参阅 [StatefulSet](/zh/docs/concepts/workloads/controllers/statefulset/)。 -## 步骤 1:创建 Cassandra Headless Service + +{{< note >}} +Cassandra 和 Kubernetes 都使用术语 _node_ 来表示集群的成员。 +在本教程中,属于 StatefulSet 的 Pod 是 Cassandra 节点,并且是 Cassandra 集群的成员(称为 _ring_)。 +当这些 Pod 在你的 Kubernetes 集群中运行时,Kubernetes 控制平面会将这些 Pod 调度到 Kubernetes 的 +{{< glossary_tooltip text="节点" term_id="node" >}}上。 + +当 Cassandra 节点启动时,使用 _seed列表_ 来引导发现 ring 中其他节点。 +本教程部署了一个自定义的 Cassandra seed provider,使数据库可以发现新的 Cassandra Pod 出现在 Kubernetes 集群中。 +{{< /note >}} -Kubernetes _[Service](/zh/docs/user-guide/services)_ 描述一组执行同样任务的 [_Pod_](/zh/docs/user-guide/pods)。在 Kubernetes 中,一个应用的原子调度单位是一个 Pod:一个或多个_必须_调度到相同主机上的容器。 +## {{% heading "objectives" %}} -这个 Service 用于在 Kubernetes 集群内部进行 Cassandra 客户端和 Cassandra Pod 之间的 DNS 查找。 + +* 创建并验证 Cassandra 无头(headless){{< glossary_tooltip text="Service" term_id="service" >}}.. +* 使用 {{< glossary_tooltip term_id="StatefulSet" >}} 创建一个 Cassandra ring。 +* 验证 StatefulSet。 +* 修改 StatefulSet。 +* 删除 StatefulSet 及其 {{< glossary_tooltip text="Pod" term_id="pod" >}}. 
-以下为这个 service 的描述: -```yaml -apiVersion: v1 -kind: Service -metadata: - labels: - app: cassandra - name: cassandra -spec: - clusterIP: None - ports: - - port: 9042 - selector: - app: cassandra -``` +## {{% heading "prerequisites" %}} +{{< include "task-tutorial-prereqs.md" >}} -Download [`cassandra-service.yaml`](/examples/application/cassandra/cassandra-service.yaml) -and [`cassandra-statefulset.yaml`](/examples/application/cassandra/cassandra-statefulset.yaml) + +要完成本教程,你应该已经熟悉 {{< glossary_tooltip text="Pod" term_id="pod" >}}, +{{< glossary_tooltip text="Service" term_id="service" >}}和 {{< glossary_tooltip text="StatefulSet" term_id="StatefulSet" >}}。 -为 StatefulSet 创建 service + +### 额外的 Minikube 设置说明 -以下命令显示了 service 是否被成功创建。 +{{< caution >}} +[Minikube](https://minikube.sigs.k8s.io/docs/)默认为 1024MiB 内存和 1 个 CPU。 +在本教程中,使用默认资源配置运行 Minikube 会导致资源不足的错误。为避免这些错误,请使用以下设置启动 Minikube: -```console -$ kubectl get svc cassandra +```shell +minikube start --memory 5120 --cpus=4 ``` +{{< /caution >}} -命令的响应应该像这样: - -```console -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -cassandra None 9042/TCP 45s -``` - -如果返回错误则表示 service 创建失败。 + + +## 为 Cassandra 创建无头(headless) Services {#creating-a-cassandra-headless-service} +在 Kubernetes 中,一个 {{< glossary_tooltip text="Service" term_id="service" >}} +描述了一组执行相同任务的 {{< glossary_tooltip text="Pod" term_id="pod" >}}。 -本示例使用了 GCE Storage Class,请根据你运行的云平台做适当的修改。 +以下 Service 用于在 Cassandra Pod 和集群中的客户端之间进行 DNS 查找: -```yaml -apiVersion: "apps/v1beta1" -kind: StatefulSet -metadata: - name: cassandra -spec: - serviceName: cassandra - replicas: 3 - template: - metadata: - labels: - app: cassandra - spec: - containers: - - name: cassandra - image: gcr.io/google-samples/cassandra:v12 - imagePullPolicy: Always - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - resources: - limits: - cpu: "500m" - memory: 1Gi - requests: - cpu: "500m" - memory: 1Gi - securityContext: - capabilities: - add: - - IPC_LOCK - lifecycle: - preStop: - exec: - command: ["/bin/sh", "-c", "PID=$(pidof java) && kill $PID && while ps -p $PID > /dev/null; do sleep 1; done"] - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEEDS - value: "cassandra-0.cassandra.default.svc.cluster.local" - - name: CASSANDRA_CLUSTER_NAME - value: "K8Demo" - - name: CASSANDRA_DC - value: "DC1-K8Demo" - - name: CASSANDRA_RACK - value: "Rack1-K8Demo" - - name: CASSANDRA_AUTO_BOOTSTRAP - value: "false" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - readinessProbe: - exec: - command: - - /bin/bash - - -c - - /ready-probe.sh - initialDelaySeconds: 15 - timeoutSeconds: 5 - # These volume mounts are persistent. They are like inline claims, - # but not exactly because the names need to match exactly one of - # the stateful pod volumes. - volumeMounts: - - name: cassandra-data - mountPath: /cassandra_data - # These are converted to volume claims by the controller - # and mounted at the paths mentioned above. 
- # do not use these in production until ssd GCEPersistentDisk or other ssd pd - volumeClaimTemplates: - - metadata: - name: cassandra-data - annotations: - volume.beta.kubernetes.io/storage-class: fast - spec: - accessModes: [ "ReadWriteOnce" ] - resources: - requests: - storage: 1Gi ---- -kind: StorageClass -apiVersion: storage.k8s.io/v1beta1 -metadata: - name: fast -provisioner: kubernetes.io/gce-pd -parameters: - type: pd-ssd -``` +{{< codenew file="application/cassandra/cassandra-service.yaml" >}} -创建 Cassandra StatefulSet 如下: +创建一个 Service 来跟踪 `cassandra-service.yaml` 文件中的所有 Cassandra StatefulSet: -```console -kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml +```shell +kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-service.yaml ``` -## 步骤 3:验证和修改 Cassandra StatefulSet - -这个 StatefulSet 的部署展示了 StatefulSets 提供的两个新特性: - -1. Pod 的名称已知 -2. Pod 以递增顺序部署 + +### 验证(可选) {#validating} -首先,运行下面的 `kubectl` 命令,验证 StatefulSet 已经被成功部署。 +获取 Cassandra Service。 -```console -$ kubectl get statefulset cassandra +```shell +kubectl get svc cassandra ``` -这个命令的响应应该像这样: + +响应是: -```console -NAME DESIRED CURRENT AGE -cassandra 3 3 13s ``` - -接下来观察 Cassandra pod 以一个接一个的形式部署。StatefulSet 资源按照数字序号的模式部署 pod:1, 2, 3 等。如果在 pod 部署前执行下面的命令,你就能够看到这种顺序的创建过程。 - -```console -$ kubectl get pods -l="app=cassandra" -NAME READY STATUS RESTARTS AGE -cassandra-0 1/1 Running 0 1m -cassandra-1 0/1 ContainerCreating 0 8s +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +cassandra ClusterIP None 9042/TCP 45s ``` -上面的示例显示了三个 Cassandra StatefulSet pod 中的两个已经部署。一旦所有的 pod 都部署成功,相同的命令会显示一个完整的 StatefulSet。 - -```console -$ kubectl get pods -l="app=cassandra" -NAME READY STATUS RESTARTS AGE -cassandra-0 1/1 Running 0 10m -cassandra-1 1/1 Running 0 9m -cassandra-2 1/1 Running 0 8m -``` + +如果没有看到名为 `cassandra` 的服务,则表示创建失败。 +请阅读[Debug Services](/zh/docs/tasks/debug-application-cluster/debug-service/),以解决常见问题。 -运行 Cassandra 工具 `nodetool` 将显示 ring 环的状态。 - -```console -$ kubectl exec cassandra-0 -- nodetool status -Datacenter: DC1-K8Demo -====================== -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.4.2.4 65.26 KiB 32 63.7% a9d27f81-6783-461d-8583-87de2589133e Rack1-K8Demo -UN 10.4.0.4 102.04 KiB 32 66.7% 5559a58c-8b03-47ad-bc32-c621708dc2e4 Rack1-K8Demo -UN 10.4.1.4 83.06 KiB 32 69.6% 9dce943c-581d-4c0e-9543-f519969cc805 Rack1-K8Demo -``` + +## 使用 StatefulSet 创建 Cassandra Ring -system_traces system_schema system_auth system system_distributed -``` +下面包含的 StatefulSet 清单创建了一个由三个 Pod 组成的 Cassandra ring。 -你需要使用 `kubectl edit` 来增加或减小 Cassandra StatefulSet 的大小。你可以在[文档](/zh/docs/user-guide/kubectl/kubectl_edit) 中找到更多关于 `edit` 命令的信息。 +{{< note >}} +本示例使用 Minikube 的默认配置程序。 +请为正在使用的云更新以下 StatefulSet。 +{{< /note >}} -使用以下命令编辑 StatefulSet。 +{{< codenew file="application/cassandra/cassandra-statefulset.yaml" >}} -```console -$ kubectl edit statefulset cassandra -``` + +使用 `cassandra-statefulset.yaml` 文件创建 Cassandra StatefulSet : -这会在你的命令行中创建一个编辑器。你需要修改的行是 `replicas`。这个例子没有包含终端窗口的所有内容,下面示例中的最后一行就是你希望改变的 replicas 行。 - -```console -# Please edit the object below. Lines beginning with a '#' will be ignored, -# and an empty file will abort the edit. If an error occurs while saving this file will be -# reopened with the relevant failures. 
-# -apiVersion: apps/v1beta1 -kind: StatefulSet -metadata: - creationTimestamp: 2016-08-13T18:40:58Z - generation: 1 - labels: - app: cassandra - name: cassandra - namespace: default - resourceVersion: "323" - uid: 7a219483-6185-11e6-a910-42010a8a0fc0 -spec: - replicas: 3 +```shell +# 如果你能未经修改地 apply cassandra-statefulset.yaml,请使用此命令 +kubectl apply -f https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml ``` + +如果你为了适合你的集群需要修改 `cassandra-statefulset.yaml`, +下载 https://k8s.io/examples/application/cassandra/cassandra-statefulset.yaml, +然后 apply 修改后的清单。 -按下面的示例修改清单文件并保存。 - -```console -spec: - replicas: 4 +```shell +# 如果使用本地的 cassandra-statefulset.yaml ,请使用此命令 +kubectl apply -f cassandra-statefulset.yaml ``` -这个 StatefulSet 现在将包含四个 pod。 + +## 验证 Cassandra StatefulSet -这个command的响应应该像这样: +1.获取 Cassandra StatefulSet: -```console -NAME DESIRED CURRENT AGE -cassandra 4 4 36m -``` + ```shell + kubectl get statefulset cassandra + ``` + + + 响应应该与此类似: + ``` + NAME DESIRED CURRENT AGE + cassandra 3 0 13s + ``` -对于 Kubernetes 1.5 发布版,beta StatefulSet 资源没有像 Deployment, ReplicaSet, Replication Controller 或者 Job 一样,包含 `kubectl scale` 功能, + + `StatefulSet` 资源会按顺序部署 Pod。 -## 步骤 4:删除 Cassandra StatefulSet +2.获取 Pod 查看已排序的创建状态: + + ```shell + kubectl get pods -l="app=cassandra" + ``` + + 响应应该与此类似: -删除或者缩容 StatefulSet 时不会删除与之关联的 volumes。这样做是为了优先保证安全。你的数据比其它会被自动清除的 StatefulSet 关联资源更宝贵。删除 Persistent Volume Claims 可能会导致关联的 volumes 被删除,这种行为依赖 storage class 和 reclaim policy。永远不要期望能在 claim 删除后访问一个 volume。 + ```shell + NAME READY STATUS RESTARTS AGE + cassandra-0 1/1 Running 0 1m + cassandra-1 0/1 ContainerCreating 0 8s + ``` + + 这三个 Pod 要花几分钟的时间才能部署。部署之后,相同的命令将返回类似于以下的输出: + + ``` + NAME READY STATUS RESTARTS AGE + cassandra-0 1/1 Running 0 10m + cassandra-1 1/1 Running 0 9m + cassandra-2 1/1 Running 0 8m + ``` + +3.运行第一个 Pod 中的 Cassandra [nodetool](https://cwiki.apache.org/confluence/display/CASSANDRA2/NodeTool),以显示 ring 的状态。 -使用如下命令删除 StatefulSet。 + ```shell + kubectl exec -it cassandra-0 -- nodetool status + ``` -```console -$ grace=$(kubectl get po cassandra-0 -o=jsonpath='{.spec.terminationGracePeriodSeconds}') \ - && kubectl delete statefulset -l app=cassandra \ - && echo "Sleeping $grace" \ - && sleep $grace \ - && kubectl delete pvc -l app=cassandra -``` + + 响应应该与此类似: + + ``` + Datacenter: DC1-K8Demo + ====================== + Status=Up/Down + |/ State=Normal/Leaving/Joining/Moving + -- Address Load Tokens Owns (effective) Host ID Rack + UN 172.17.0.5 83.57 KiB 32 74.0% e2dd09e6-d9d3-477e-96c5-45094c08db0f Rack1-K8Demo + UN 172.17.0.4 101.04 KiB 32 58.8% f89d6835-3a42-4419-92b3-0e62cae1479c Rack1-K8Demo + UN 172.17.0.6 84.74 KiB 32 67.1% a6a1e8c2-3dc5-4417-b1a0-26507af2aaad Rack1-K8Demo + ``` + +## 修改 Cassandra StatefulSet -Kubernetes _[Replication Controller](/zh/docs/user-guide/replication-controller)_ 负责复制一个完全相同的 pod 集合。像 Service 一样,它具有一个 selector query,用来识别它的集合成员。和 Service 不一样的是,它还具有一个期望的副本数,并且会通过创建或删除 Pod 来保证 Pod 的数量满足它期望的状态。 +使用 `kubectl edit` 修改 Cassandra StatefulSet 的大小。 -和我们刚才定义的 Service 一起,Replication Controller 能够让我们轻松的构建一个复制的、可扩展的 Cassandra 集群。 +1.运行以下命令: -让我们创建一个具有两个初始副本的 replication controller。 + ```shell + kubectl edit statefulset cassandra + ``` -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: cassandra - # The labels will be applied automatically - # from the labels in the pod template, if not set - # labels: - # app: cassandra -spec: - replicas: 2 - # The selector will be applied automatically - # from the labels in the pod template, if not set. 
- # selector: - # app: cassandra - template: + + 此命令你的终端中打开一个编辑器。需要更改的是 `replicas` 字段。下面是 StatefulSet 文件的片段示例: + + ```yaml + # Please edit the object below. Lines beginning with a '#' will be ignored, + # and an empty file will abort the edit. If an error occurs while saving this file will be + # reopened with the relevant failures. + # + apiVersion: apps/v1 + kind: StatefulSet metadata: + creationTimestamp: 2016-08-13T18:40:58Z + generation: 1 labels: - app: cassandra - spec: - containers: - - command: - - /run.sh - resources: - limits: - cpu: 0.5 - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEED_PROVIDER - value: "io.k8s.cassandra.KubernetesSeedProvider" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: gcr.io/google-samples/cassandra:v12 - name: cassandra - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - volumeMounts: - - mountPath: /cassandra_data - name: data - volumes: - - name: data - emptyDir: {} -``` - -[下载示例](https://raw.githubusercontent.com/kubernetes/examples/master/cassandra-controller.yaml) - -在这个描述中需要注意几件事情。 - -`selector` 属性包含了控制器的 selector query。它能够被显式指定,或者在没有设置时,像此处一样从 pod 模板中的 labels 中自动应用。 - -Pod 模板的标签 `app:cassandra` 匹配步骤1中的 Service selector。这就是 Service 如何选择 replication controller 创建的 pod 的原理。 - -`replicas` 属性指明了期望的副本数量,在本例中最开始为 2。我们很快将要扩容更多数量。 - -创建 Replication Controller: - -```console - -$ kubectl create -f cassandra/cassandra-controller.yaml - -``` - -你可以列出新建的 controller: - -```console - -$ kubectl get rc -o wide -NAME DESIRED CURRENT AGE CONTAINER(S) IMAGE(S) SELECTOR -cassandra 2 2 11s cassandra gcr.io/google-samples/cassandra:v12 app=cassandra - -``` - -现在,如果你列出集群中的 pod,并且使用 `app=cassandra` 标签过滤,你应该能够看到两个 Cassandra pod。(`wide` 参数使你能够看到 pod 被调度到了哪个 Kubernetes 节点上) - -```console -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-21qyy 1/1 Running 0 1m kubernetes-minion-b286 -cassandra-q6sz7 1/1 Running 0 1m kubernetes-minion-9ye5 -``` - - -因为这些 pod 拥有 `app=cassandra` 标签,它们被映射给了我们在步骤 1 中创建的 service。 - -你可以使用下面的 service endpoint 查询命令来检查 Pod 是否对 Service 可用。 - -```console - -$ kubectl get endpoints cassandra -o yaml -apiVersion: v1 -kind: Endpoints -metadata: - creationTimestamp: 2015-06-21T22:34:12Z - labels: - app: cassandra - name: cassandra - namespace: default - resourceVersion: "944373" - uid: a3d6c25f-1865-11e5-a34e-42010af01bcc -subsets: -- addresses: - - ip: 10.244.3.15 - targetRef: - kind: Pod + app: cassandra name: cassandra namespace: default - resourceVersion: "944372" - uid: 9ef9895d-1865-11e5-a34e-42010af01bcc - ports: - - port: 9042 - protocol: TCP - -``` - - -为了显示 `SeedProvider` 逻辑是按设想在运行,你可以使用 `nodetool` 命令来检查 Cassandra 集群的状态。为此,请使用 `kubectl exec` 命令,这样你就能在一个 Cassandra pod 上运行 `nodetool`。同样的,请替换 `cassandra-xxxxx` 为任意一个 pods的真实名字。 - -```console - -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.5 74.09 KB 256 100.0% 86feda0f-f070-4a5b-bda1-2eeb0ad08b77 rack1 -UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e7e80dce2b rack1 - -``` - - -## 步骤 6:Cassandra 集群扩容 - - -现在,让我们把 Cassandra 集群扩展到 4 个 pod。我们通过告诉 Replication Controller 现在我们需要 4 
个副本来完成。 - -```sh - -$ kubectl scale rc cassandra --replicas=4 - -``` - -你可以看到列出了新的 pod: - -```console - -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-21qyy 1/1 Running 0 6m kubernetes-minion-b286 -cassandra-81m2l 1/1 Running 0 47s kubernetes-minion-b286 -cassandra-8qoyp 1/1 Running 0 47s kubernetes-minion-9ye5 -cassandra-q6sz7 1/1 Running 0 6m kubernetes-minion-9ye5 - -``` - - -一会儿你就能再次检查 Cassandra 集群的状态,你可以看到新的 pod 已经被自定义的 `SeedProvider` 检测到: - -```console - -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.6 51.67 KB 256 48.9% d07b23a5-56a1-4b0b-952d-68ab95869163 rack1 -UN 10.244.1.5 84.71 KB 256 50.7% e060df1f-faa2-470c-923d-ca049b0f3f38 rack1 -UN 10.244.1.6 84.71 KB 256 47.0% 83ca1580-4f3c-4ec5-9b38-75036b7a297f rack1 -UN 10.244.0.5 68.2 KB 256 53.4% 72ca27e2-c72c-402a-9313-1e4b61c2f839 rack1 - -``` - - -## 步骤 7:删除 Replication Controller - - -在你开始步骤 5 之前, __删除__你在上面创建的 __replication controller__。 - -```sh - -$ kubectl delete rc cassandra - -``` - -## 步骤 8:使用 DaemonSet 替换 Replication Controller - - -在 Kubernetes中,[_DaemonSet_](/zh/docs/admin/daemons) 能够将 pod 一对一的分布到 Kubernetes 节点上。和 _ReplicationController_ 相同的是它也有一个用于识别它的集合成员的 selector query。但和 _ReplicationController_ 不同的是,它拥有一个节点 selector,用于限制基于模板的 pod 可以调度的节点。并且 pod 的复制不是基于一个设置的数量,而是为每一个节点分配一个 pod。 - -示范用例:当部署到云平台时,预期情况是实例是短暂的并且随时可能终止。Cassandra 被搭建成为在各个节点间复制数据以便于实现数据冗余。这样的话,即使一个实例终止了,存储在它上面的数据却没有,并且集群会通过重新复制数据到其它运行节点来作为响应。 - -`DaemonSet` 设计为在 Kubernetes 集群中的每个节点上放置一个 pod。那样就会给我们带来数据冗余度。让我们创建一个 DaemonSet 来启动我们的存储集群: - - -```yaml -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - name: cassandra - name: cassandra -spec: - template: - metadata: - labels: - app: cassandra + resourceVersion: "323" + uid: 7a219483-6185-11e6-a910-42010a8a0fc0 spec: - # Filter to specific nodes: - # nodeSelector: - # app: cassandra - containers: - - command: - - /run.sh - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: CASSANDRA_SEED_PROVIDER - value: "io.k8s.cassandra.KubernetesSeedProvider" - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: gcr.io/google-samples/cassandra:v12 - name: cassandra - ports: - - containerPort: 7000 - name: intra-node - - containerPort: 7001 - name: tls-intra-node - - containerPort: 7199 - name: jmx - - containerPort: 9042 - name: cql - # If you need it, it will go away in C* 4.0. 
- #- containerPort: 9160 - # name: thrift - resources: - requests: - cpu: 0.5 - volumeMounts: - - mountPath: /cassandra_data - name: data - volumes: - - name: data - emptyDir: {} -``` - - -[下载示例](https://raw.githubusercontent.com/kubernetes/examples/master/cassandra-daemonset.yaml) - - -这个 DaemonSet 绝大部分的定义和上面的 ReplicationController 完全相同;它只是简单的给 daemonset 一个创建新的 Cassandra pod 的方法,并且以集群中所有的 Cassandra 节点为目标。 - - -不同之处在于 `nodeSelector` 属性,它允许 DaemonSet 以全部节点的一个子集为目标(你可以向其他资源一样标记节点),并且没有 `replicas` 属性,因为它使用1对1的 node-pod 关系。 - - -创建这个 DaemonSet: - -```console - -$ kubectl create -f cassandra/cassandra-daemonset.yaml - -``` - - -你可能需要禁用配置文件检查,像这样: + replicas: 3 + ``` -```console - -$ kubectl create -f cassandra/cassandra-daemonset.yaml --validate=false - -``` - - -你可以看到 DaemonSet 已经在运行: - -```console - -$ kubectl get daemonset -NAME DESIRED CURRENT NODE-SELECTOR -cassandra 3 3 - -``` - - -现在,如果你列出集群中的 pods,并且使用 `app=cassandra` 标签过滤,你应该能够看到你的网络中的每一个节点上都有一个(且只有一个)新的 cassandra pod。 - -```console - -$ kubectl get pods -l="app=cassandra" -o wide -NAME READY STATUS RESTARTS AGE NODE -cassandra-ico4r 1/1 Running 0 4s kubernetes-minion-rpo1 -cassandra-kitfh 1/1 Running 0 1s kubernetes-minion-9ye5 -cassandra-tzw89 1/1 Running 0 2s kubernetes-minion-b286 + +2.将副本数 (replicas) 更改为 4,然后保存清单。 -为了证明这是按设想的在工作,你可以再次使用 `nodetool` 命令来检查集群的状态。为此,请使用 `kubectl exec` 命令在任何一个新建的 cassandra pod 上运行 `nodetool`。 + StatefulSet 现在可以扩展到运行 4 个 Pod。 -```console +3.获取 Cassandra StatefulSet 验证更改: -$ kubectl exec -ti cassandra-xxxxx -- nodetool status -Datacenter: datacenter1 -======================= -Status=Up/Down -|/ State=Normal/Leaving/Joining/Moving --- Address Load Tokens Owns (effective) Host ID Rack -UN 10.244.0.5 74.09 KB 256 100.0% 86feda0f-f070-4a5b-bda1-2eeb0ad08b77 rack1 -UN 10.244.4.2 32.45 KB 256 100.0% 0b1be71a-6ffb-4895-ac3e-b9791299c141 rack1 -UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e7e80dce2b rack1 + ```shell + kubectl get statefulset cassandra + ``` -``` + + 响应应该与此类似: + ``` + NAME DESIRED CURRENT AGE + cassandra 4 4 36m + ``` -**注意**:这个示例让你在创建 DaemonSet 前删除了 cassandra 的 Replication Controller。这是因为为了保持示例的简单,RC 和 DaemonSet 使用了相同的 `app=cassandra` 标签(如此它们的 pod 映射到了我们创建的 service,这样 SeedProvider 就能识别它们)。 +## {{% heading "cleanup" %}} + +删除或缩小 StatefulSet 不会删除与 StatefulSet 关联的卷。 +这个设置是出于安全考虑,因为你的数据比自动清除所有相关的 StatefulSet 资源更有价值。 -## 步骤 9:资源清理 +{{< warning >}} +根据存储类和回收策略,删除 *PersistentVolumeClaims* 可能导致关联的卷也被删除。 +千万不要认为其容量声明被删除,你就能访问数据。 +{{< /warning >}} +1.运行以下命令(连在一起成为一个单独的命令)删除 Cassandra StatefulSet 中的所有内容: -当你准备删除你的资源时,按以下执行: + ```shell + grace=$(kubectl get pod cassandra-0 -o=jsonpath='{.spec.terminationGracePeriodSeconds}') \ + && kubectl delete statefulset -l app=cassandra \ + && echo "Sleeping ${grace} seconds" 1>&2 \ + && sleep $grace \ + && kubectl delete persistentvolumeclaim -l app=cassandra + ``` -```console + +2.运行以下命令,删除你为 Cassandra 设置的 Service: -$ kubectl delete service -l app=cassandra -$ kubectl delete daemonset cassandra + ```shell + kubectl delete service -l app=cassandra + ``` -``` + +## Cassandra 容器环境变量 +本教程中的 Pod 使用来自 Google [container registry](https://cloud.google.com/container-registry/docs/) +的 [`gcr.io/google-samples/cassandra:v13`](https://github.com/kubernetes/examples/blob/master/cassandra/image/Dockerfile) 镜像。 +上面的 Docker 镜像基于 [debian-base](https://github.com/kubernetes/kubernetes/tree/master/build/debian-base),并且包含 OpenJDK 8。 +该映像包括来自 Apache Debian 存储库的标准 Cassandra 安装。 +通过使用环境变量,您可以更改插入到 `cassandra.yaml` 中的值。 -我们使用了一个自定义的 
[`SeedProvider`](https://gitbox.apache.org/repos/asf?p=cassandra.git;a=blob;f=src/java/org/apache/cassandra/locator/SeedProvider.java;h=7efa9e050a4604c2cffcb953c3c023a2095524fe;hb=c2e11bd4224b2110abe6aa84c8882e85980e3491) 来在 Kubernetes 之上运行 Cassandra。仅当你通过 replication control 或者 daemonset 部署 Cassandra 时才需要使用自定义的 seed provider。在 Cassandra 中,`SeedProvider` 引导 Cassandra 使用 gossip 协议来查找其它 Cassandra 节点。Seed 地址是被视为连接端点的主机。Cassandra 实例使用 seed 列表来查找彼此并学习 ring 环拓扑。[`KubernetesSeedProvider`](https://github.com/kubernetes/examples/blob/master/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java) 通过 Kubernetes API 发现 Cassandra seeds IP 地址,那些 Cassandra 实例在 Cassandra Service 中定义。 +| Environment variable | Default value | +| ------------------------ |:---------------: | +| `CASSANDRA_CLUSTER_NAME` | `'Test Cluster'` | +| `CASSANDRA_NUM_TOKENS` | `32` | +| `CASSANDRA_RPC_ADDRESS` | `0.0.0.0` | -请查阅自定义 seed provider 的 [README](https://git.k8s.io/examples/cassandra/java/README.md) 文档,获取 `KubernetesSeedProvider` 进阶配置。对于本示例来说,你应该不需要自定义 Seed Provider 的配置。 -查看本示例的 [image](https://github.com/kubernetes/examples/tree/master/cassandra/image) 目录,了解如何构建容器的 docker 镜像及其内容。 -你可能还注意到我们设置了一些 Cassandra 参数(`MAX_HEAP_SIZE`和`HEAP_NEWSIZE`),并且增加了关于 [namespace](/zh/docs/user-guide/namespaces) 的信息。我们还告诉 Kubernetes 容器暴露了 `CQL` 和 `Thrift` API 端口。最后,我们告诉集群管理器我们需要 0.1 cpu(0.1 核)。 +## {{% heading "whatsnext" %}} -[!Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cassandra/README.md?pixel)]() + +* 了解如何[扩缩 StatefulSet](/docs/tasks/run-application/scale-stateful-set/)。 +* 了解有关 [*KubernetesSeedProvider*](https://github.com/kubernetes/examples/blob/master/cassandra/java/src/main/java/io/k8s/cassandra/KubernetesSeedProvider.java) 的更多信息 +* 查看更多自定义 [Seed Provider Configurations](https://git.k8s.io/examples/cassandra/java/README.md) diff --git a/content/zh/docs/tutorials/stateless-application/expose-external-ip-address.md b/content/zh/docs/tutorials/stateless-application/expose-external-ip-address.md index 0e38afc1d6c14..8fe79a9a5d010 100644 --- a/content/zh/docs/tutorials/stateless-application/expose-external-ip-address.md +++ b/content/zh/docs/tutorials/stateless-application/expose-external-ip-address.md @@ -20,7 +20,7 @@ external IP address. ## {{% heading "prerequisites" %}} - * 安装 [kubectl](/zh/docs/tasks/tools/install-kubectl/). + * 安装 [kubectl](/zh/docs/tasks/tools/). * 使用 Google Kubernetes Engine 或 Amazon Web Services 等云供应商创建 Kubernetes 集群。 本教程创建了一个[外部负载均衡器](/zh/docs/tasks/access-application-cluster/create-external-load-balancer/), 需要云供应商。 diff --git a/content/zh/docs/tutorials/stateless-application/guestbook.md b/content/zh/docs/tutorials/stateless-application/guestbook.md index b7ef978490fa6..cae93e2ff9833 100644 --- a/content/zh/docs/tutorials/stateless-application/guestbook.md +++ b/content/zh/docs/tutorials/stateless-application/guestbook.md @@ -100,16 +100,15 @@ The manifest file, included below, specifies a Deployment controller that runs a 1. 在下载清单文件的目录中启动终端窗口。 2. 从 `mongo-deployment.yaml` 文件中应用 MongoDB Deployment: + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-deployment.yaml ``` - - - @@ -156,15 +155,15 @@ The guestbook application needs to communicate to the MongoDB to write its data. --> 1. 
使用下面的 `mongo-service.yaml` 文件创建 MongoDB 的服务: + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/mongo-service.yaml ``` - - @@ -182,7 +181,7 @@ kubectl apply -f ./content/en/examples/application/guestbook/mongo-service.yaml ```shell NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.0.0.1 443/TCP 1m - mongo ClusterIP 10.0.0.151 6379/TCP 8s + mongo ClusterIP 10.0.0.151 27017/TCP 8s ``` 1. 从 `frontend-deployment.yaml` 应用前端 Deployment 文件: + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-deployment.yaml ``` - - @@ -253,10 +252,11 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-deployment ### 创建前端服务 应用的 `mongo` 服务只能在 Kubernetes 集群中访问,因为服务的默认类型是 -[ClusterIP](/zh/docs/concepts/services-networking/service/#publishing-services---service-types)。`ClusterIP` 为服务指向的 Pod 集提供一个 IP 地址。这个 IP 地址只能在集群中访问。 +[ClusterIP](/zh/docs/concepts/services-networking/service/#publishing-services-service-types)。 +`ClusterIP` 为服务指向的 Pod 集提供一个 IP 地址。这个 IP 地址只能在集群中访问。 1. 从 `frontend-service.yaml` 文件中应用前端服务: + + ```shell kubectl apply -f https://k8s.io/examples/application/guestbook/frontend-service.yaml ``` - - @@ -303,7 +303,7 @@ kubectl apply -f ./content/en/examples/application/guestbook/frontend-service.ya ``` NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.0.0.112 80/TCP 6s + frontend ClusterIP 10.0.0.112 80/TCP 6s kubernetes ClusterIP 10.0.0.1 443/TCP 4m mongo ClusterIP 10.0.0.151 6379/TCP 2m ``` @@ -364,8 +364,8 @@ If you deployed the `frontend-service.yaml` manifest with type: `LoadBalancer` y 响应应该与此类似: ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - frontend ClusterIP 10.51.242.136 109.197.92.229 80:32372/TCP 1m + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.51.242.136 109.197.92.229 80:32372/TCP 1m ```
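As a convenience sketch that goes beyond the original guestbook walkthrough: once the `EXTERNAL-IP` above stops showing `<pending>`, you can read it programmatically and hit the frontend. The Service name `frontend` and the jsonpath below assume the manifests used in this tutorial and a cloud provider that reports an IP rather than a hostname.

```shell
# Read the load balancer ingress IP of the frontend Service and request the
# guestbook page. If your provider returns a hostname instead of an IP,
# use '{.status.loadBalancer.ingress[0].hostname}' instead.
EXTERNAL_IP=$(kubectl get service frontend \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
curl "http://${EXTERNAL_IP}/"
```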
Field | Description
-format*
+format [Required]
string
-Format Flag specifies the structure of log messages.
+Format Flag specifies the structure of log messages. default value of format is `text`
-sanitization*
+sanitization [Required]
bool
-[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens).
+[Experimental] When enabled prevents logging of fields tagged as sensitive (passwords, keys, tokens). Runtime log sanitization may introduce significant computation overhead and therefore should not be enabled in production.
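For orientation only: the `format` field above surfaces on Kubernetes components as a command-line flag. The sketch below is illustrative rather than a complete invocation; it assumes a component (here the kubelet) at a version where structured logging is available, and `text` remains the default when the flag is not passed.

```shell
# Illustrative invocation: ask the kubelet to emit JSON-structured logs.
# Omitting --logging-format keeps the default "text" format described above.
kubelet --logging-format=json
```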