diff --git a/OWNERS b/OWNERS index 0258c32f387a7..02a5269ff2efe 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,6 @@ # Reviewers can /lgtm /approve but not sufficient for auto-merge without an # approver reviewers: -- tengqm - zhangxiaoyu-zidif - xiangpengzhao # Approvers have all the ability of reviewers but their /approve makes @@ -15,3 +14,4 @@ approvers: - zacharysarah - chenopis - mistyhacks +- tengqm diff --git a/content/en/_index.html b/content/en/_index.html index abeb666558a2f..f957d245bbb36 100644 --- a/content/en/_index.html +++ b/content/en/_index.html @@ -42,12 +42,17 @@

Run Anywhere

-

Kubernetes: Finally… A True Cloud Platform

-

Sam Ghods, Co-Founder and Services Architect at Box, gives a passionate talk at KubeCon Seattle 2016 showing that with Kubernetes, we have for the first time a universal interface that one can build real deployment tooling against.

+

The Challenges of Migrating 150+ Microservices to Kubernetes

+

By Sarah Wells, Technical Director for Operations and Reliability, Financial Times



- Attend KubeCon in Copenhagen on May 2-4, 2018 + Attend KubeCon in Shanghai on Nov. 14-15, 2018 +
+
+
+
+ Attend KubeCon in Seattle on Dec. 11-13, 2018
@@ -163,6 +168,6 @@
- + diff --git a/content/en/blog/_posts/2018-04-11-migrating-the-kubernetes-blog.md b/content/en/blog/_posts/2018-04-11-migrating-the-kubernetes-blog.md index 7862fbb5676cc..ae37b649c5c6f 100644 --- a/content/en/blog/_posts/2018-04-11-migrating-the-kubernetes-blog.md +++ b/content/en/blog/_posts/2018-04-11-migrating-the-kubernetes-blog.md @@ -23,7 +23,7 @@ We hope that making blog submissions more accessible will encourage greater comm You can submit a blog post for consideration one of two ways: * Submit a Google Doc through the [blog submission form](https://docs.google.com/forms/d/e/1FAIpQLSch_phFYMTYlrTDuYziURP6nLMijoXx_f7sLABEU5gWBtxJHQ/viewform) -* Open a pull request against the [website repository](https://github.com/kubernetes/website/tree/master/blog) as described [here](/docs/home/contribute/create-pull-request/) +* Open a pull request against the [website repository](https://github.com/kubernetes/website/tree/master/content/en/blog/_posts) as described [here](/docs/home/contribute/create-pull-request/) If you have a post that you want to remain confidential until your publish date, please submit your post via the Google form. Otherwise, you can choose your submission process based on your comfort level and preferred workflow. diff --git a/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md b/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md index 334446f79ccae..0c867dc9ee6fe 100644 --- a/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md +++ b/content/en/blog/_posts/2018-04-13-local-persistent-volumes-beta.md @@ -23,8 +23,9 @@ For those reasons, most applications should continue to use highly available, re Some use cases that are suitable for local storage include: * Caching of datasets that can leverage data gravity for fast processing -* Distributed storage systems that shard or replicate data across multiple nodes - Examples include distributed datastores like Cassandra, or distributed file systems like Gluster or Ceph. +* Distributed storage systems that shard or replicate data across multiple + nodes. Examples include distributed datastores like Cassandra, or distributed + file systems like Gluster or Ceph. Suitable workloads are tolerant of node failures, data unavailability, and data loss. They provide critical, latency-sensitive infrastructure services to the rest of the cluster, and should run with high priority compared to other workloads. @@ -73,7 +74,7 @@ spec: operator: In values: - my-node -``` +``` Note that there’s a new nodeAffinity field in the PersistentVolume object: this is how the Kubernetes scheduler understands that this PersistentVolume is tied to a specific node. nodeAffinity is a required field for local PersistentVolumes. 
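To see that field in context, here is a minimal sketch of a complete local PersistentVolume; the volume name, capacity, node hostname, and disk path are placeholders, and the `local-storage` StorageClass is assumed to exist with no dynamic provisioner:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-local-pv          # placeholder name
spec:
  capacity:
    storage: 100Gi                # placeholder capacity
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage # assumed StorageClass without a dynamic provisioner
  local:
    path: /mnt/disks/ssd1         # placeholder path to the disk on the node
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - my-node               # the node this volume is tied to
```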
diff --git a/content/en/blog/_posts/2018-05-24-kubernetes-containerd-integration-goes-ga.md b/content/en/blog/_posts/2018-05-24-kubernetes-containerd-integration-goes-ga.md new file mode 100644 index 0000000000000..6af3099662491 --- /dev/null +++ b/content/en/blog/_posts/2018-05-24-kubernetes-containerd-integration-goes-ga.md @@ -0,0 +1,127 @@ +--- +layout: blog +title: Kubernetes Containerd Integration Goes GA +date: Thursday, May 24, 2018 +--- +# Kubernetes Containerd Integration Goes GA +**Authors**: Lantao Liu, Software Engineer, Google and Mike Brown, Open Source Developer Advocate, IBM + +In a previous blog - [Containerd Brings More Container Runtime Options for Kubernetes](https://kubernetes.io/blog/2017/11/containerd-container-runtime-options-kubernetes), we introduced the alpha version of the Kubernetes containerd integration. With another 6 months of development, the integration with containerd is now generally available! You can now use [containerd 1.1](https://github.com/containerd/containerd/releases/tag/v1.1.0) as the container runtime for production Kubernetes clusters! + +Containerd 1.1 works with Kubernetes 1.10 and above, and supports all Kubernetes features. The test coverage of containerd integration on [Google Cloud Platform](https://cloud.google.com/) in Kubernetes test infrastructure is now equivalent to the Docker integration (See: [test dashboard)](https://k8s-testgrid.appspot.com/sig-node-containerd). + +_We're very glad to see containerd rapidly grow to this big milestone. Alibaba Cloud started to use containerd actively since its first day, and thanks to the simplicity and robustness emphasise, make it a perfect container engine running in our Serverless Kubernetes product, which has high qualification on performance and stability. No doubt, containerd will be a core engine of container era, and continue to driving innovation forward._ + +

+— Xinwei, Staff Engineer at Alibaba Cloud

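For readers wondering what this looks like on a node, one minimal sketch is to point the kubelet at containerd's CRI socket. The exact flags and socket path depend on how the node was set up, so treat these values as assumptions and follow the installation guides linked later in this post for real deployments:

```shell
# Illustrative only: kubelet flags for a containerd-backed node.
# The socket path is containerd's default and may differ on your distribution.
kubelet --container-runtime=remote \
        --container-runtime-endpoint=unix:///run/containerd/containerd.sock
```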
+ +# Architecture Improvements +The Kubernetes containerd integration architecture has evolved twice. Each evolution has made the stack more stable and efficient. + +## Containerd 1.0 - CRI-Containerd (end of life) +cri-containerd architecture + +For containerd 1.0, a daemon called cri-containerd was required to operate between Kubelet and containerd. Cri-containerd handled the [Container Runtime Interface (CRI)](https://kubernetes.io/blog/2016/12/container-runtime-interface-cri-in-kubernetes/) service requests from Kubelet and used containerd to manage containers and container images correspondingly. Compared to the Docker CRI implementation ([dockershim](https://github.com/kubernetes/kubernetes/tree/v1.10.2/pkg/kubelet/dockershim)), this eliminated one extra hop in the stack. + +However, cri-containerd and containerd 1.0 were still 2 different daemons which interacted via grpc. The extra daemon in the loop made it more complex for users to understand and deploy, and introduced unnecessary communication overhead. + +## Containerd 1.1 - CRI Plugin (current) +containerd architecture + +In containerd 1.1, the cri-containerd daemon is now refactored to be a containerd CRI plugin. The CRI plugin is built into containerd 1.1, and enabled by default. Unlike cri-containerd, the CRI plugin interacts with containerd through direct function calls. This new architecture makes the integration more stable and efficient, and eliminates another grpc hop in the stack. Users can now use Kubernetes with containerd 1.1 directly. The cri-containerd daemon is no longer needed. + +# Performance +Improving performance was one of the major focus items for the containerd 1.1 release. Performance was optimized in terms of pod startup latency and daemon resource usage. + +The following results are a comparison between containerd 1.1 and Docker 18.03 CE. The containerd 1.1 integration uses the CRI plugin built into containerd; and the Docker 18.03 CE integration uses the dockershim. + +The results were generated using the Kubernetes node performance benchmark, which is part of [Kubernetes node e2e test](https://github.com/kubernetes/community/blob/master/contributors/devel/e2e-node-tests.md). Most of the containerd benchmark data is publicly accessible on the [node performance dashboard](http://node-perf-dash.k8s.io/). + +### Pod Startup Latency +The "105 pod batch startup benchmark" results show that the containerd 1.1 integration has lower pod startup latency than Docker 18.03 CE integration with dockershim (lower is better). + +

latency

+ +### CPU and Memory +At the steady state, with 105 pods, the containerd 1.1 integration consumes less CPU and memory overall compared to Docker 18.03 CE integration with dockershim. The results vary with the number of pods running on the node, 105 is chosen because it is the current default for the maximum number of user pods per node. + +As shown in the figures below, compared to Docker 18.03 CE integration with dockershim, the containerd 1.1 integration has 30.89% lower kubelet cpu usage, 68.13% lower container runtime cpu usage, 11.30% lower kubelet resident set size (RSS) memory usage, 12.78% lower container runtime RSS memory usage. + +cpumemory + +# crictl +Container runtime command-line interface (CLI) is a useful tool for system and application troubleshooting. When using Docker as the container runtime for Kubernetes, system administrators sometimes login to the Kubernetes node to run Docker commands for collecting system and/or application information. For example, one may use _docker ps_ and _docker inspect_ to check application process status, _docker images_ to list images on the node, and _docker info_ to identify container runtime configuration, etc. + +For containerd and all other CRI-compatible container runtimes, e.g. dockershim, we recommend using _crictl_ as a replacement CLI over the Docker CLI for troubleshooting pods, containers, and container images on Kubernetes nodes. + +_crictl_ is a tool providing a similar experience to the Docker CLI for Kubernetes node troubleshooting and _crictl_ works consistently across all CRI-compatible container runtimes. It is hosted in the [kubernetes-incubator/cri-tools](https://github.com/kubernetes-incubator/cri-tools) repository and the current version is [v1.0.0-beta.1](https://github.com/kubernetes-incubator/cri-tools/releases/tag/v1.0.0-beta.1). _crictl_ is designed to resemble the Docker CLI to offer a better transition experience for users, but it is not exactly the same. There are a few important differences, explained below. + +## Limited Scope - crictl is a Troubleshooting Tool +The scope of _crictl_ is limited to troubleshooting, it is not a replacement to docker or kubectl. Docker's CLI provides a rich set of commands, making it a very useful development tool. But it is not the best fit for troubleshooting on Kubernetes nodes. Some Docker commands are not useful to Kubernetes, such as _docker network_ and _docker build_; and some may even break the system, such as _docker rename_. _crictl_ provides just enough commands for node troubleshooting, which is arguably safer to use on production nodes. + +## Kubernetes Oriented +_crictl_ offers a more kubernetes-friendly view of containers. Docker CLI lacks core Kubernetes concepts, e.g. _pod_ and _[namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/)_, so it can't provide a clear view of containers and pods. One example is that _docker ps_ shows somewhat obscure, long Docker container names, and shows pause containers and application containers together: + +docker ps + +However, [pause containers](https://www.ianlewis.org/en/almighty-pause-container) are a pod implementation detail, where one pause container is used for each pod, and thus should not be shown when listing containers that are members of pods. + +_crictl_, by contrast, is designed for Kubernetes. It has different sets of commands for pods and containers. For example, _crictl pods_ lists pod information, and _crictl ps_ only lists application container information. 
All information is well formatted into table columns. + +crictl pods +crictl ps + +As another example, _crictl pods_ includes a _--namespace_ option for filtering pods by the namespaces specified in Kubernetes. + +crictl pods filter + +For more details about how to use _crictl_ with containerd: + +* [Document](https://github.com/containerd/cri/blob/master/docs/crictl.md) +* [Demo video](https://asciinema.org/a/179047) + +# What about Docker Engine? +"Does switching to containerd mean I can't use Docker Engine anymore?" We hear this question a lot, the short answer is NO. + +Docker Engine is built on top of containerd. The next release of [Docker Community Edition (Docker CE)](https://www.docker.com/community-edition) will use containerd version 1.1. Of course, it will have the CRI plugin built-in and enabled by default. This means users will have the option to continue using Docker Engine for other purposes typical for Docker users, while also being able to configure Kubernetes to use the underlying containerd that came with and is simultaneously being used by Docker Engine on the same node. See the architecture figure below showing the same containerd being used by Docker Engine and Kubelet: + +

docker-ce

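As a concrete illustration of the crictl troubleshooting workflow described earlier, the commands below combine the subcommands mentioned in this post; the namespace and image names are placeholder values:

```shell
# List pod sandboxes, filtered by a Kubernetes namespace (placeholder value)
crictl pods --namespace kube-system

# List application containers only; pause containers are not shown
crictl ps

# List images known to the CRI runtime, and pull one explicitly (placeholder image)
crictl images
crictl pull busybox
```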
+ +Since containerd is being used by both Kubelet and Docker Engine, this means users who choose the containerd integration will not just get new Kubernetes features, performance, and stability improvements, they will also have the option of keeping Docker Engine around for other use cases. + +A containerd [namespace](https://github.com/containerd/containerd/blob/master/docs/namespaces.md) mechanism is employed to guarantee that Kubelet and Docker Engine won't see or have access to containers and images created by each other. This makes sure they won't interfere with each other. This also means that: + +* Users won't see Kubernetes created containers with the _docker ps_ command. Please use _crictl ps_ instead. And vice versa, users won't see Docker CLI created containers in Kubernetes or with _crictl ps_ command. The _crictl create_ and _crictl runp_ commands are only for troubleshooting. Manually starting pod or container with _crictl_ on production nodes is not recommended. +* Users won't see Kubernetes pulled images with the _docker images_ command. Please use the _crictl images_ command instead. And vice versa, Kubernetes won't see images created by _docker pull_, _docker load_ or _docker build_ commands. Please use the _crictl pull_ command instead, and _[ctr](https://github.com/containerd/containerd/blob/master/docs/man/ctr.1.md) cri load_ if you have to load an image. + +# Summary +* Containerd 1.1 natively supports CRI. It can be used directly by Kubernetes. +* Containerd 1.1 is production ready. +* Containerd 1.1 has good performance in terms of pod startup latency and system resource utilization. +* _crictl_ is the CLI tool to talk with containerd 1.1 and other CRI-conformant container runtimes for node troubleshooting. +* The next stable release of Docker CE will include containerd 1.1. Users have the option to continue using Docker for use cases not specific to Kubernetes, and configure Kubernetes to use the same underlying containerd that comes with Docker. + +We'd like to thank all the contributors from Google, IBM, Docker, ZTE, ZJU and many other individuals who made this happen! + +For a detailed list of changes in the containerd 1.1 release, please see the release notes here: [https://github.com/containerd/containerd/releases/tag/v1.1.0](https://github.com/containerd/containerd/releases/tag/v1.1.0) + +# Try it out +To setup a Kubernetes cluster using containerd as the container runtime: + +* For a production quality cluster on GCE brought up with kube-up.sh, see [here](https://github.com/containerd/cri/blob/v1.0.0/docs/kube-up.md). +* For a multi-node cluster installer and bring up steps using ansible and kubeadm, see [here](https://github.com/containerd/cri/blob/v1.0.0/contrib/ansible/README.md). +* For creating a cluster from scratch on Google Cloud, see [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way). +* For a custom installation from release tarball, see [here](https://github.com/containerd/cri/blob/v1.0.0/docs/installation.md). +* To install using LinuxKit on a local VM, see [here](https://github.com/linuxkit/linuxkit/tree/master/projects/kubernetes). + +# Contribute +The containerd CRI plugin is an open source github project within containerd [https://github.com/containerd/cri](https://github.com/containerd/cri). Any contributions in terms of ideas, issues, and/or fixes are welcome. 
The [getting started guide for developers](https://github.com/containerd/cri#getting-started-for-developers) is a good place to start for contributors. + +# Community +The project is developed and maintained jointly by members of the Kubernetes SIG-Node community and the containerd community. We'd love to hear feedback from you. To join the communities: + +* [sig-node community site](https://github.com/kubernetes/community/tree/master/sig-node) +* Slack: + * #sig-node channel in [kubernetes.slack.com](http://kubernetes.slack.com) + * #containerd channel in [https://dockr.ly/community](https://dockr.ly/community) +* Mailing List: [https://groups.google.com/forum/#!forum/kubernetes-sig-node](https://groups.google.com/forum/#!forum/kubernetes-sig-node) diff --git a/content/en/blog/_posts/2018-05-29-announcing-kustomize.md b/content/en/blog/_posts/2018-05-29-announcing-kustomize.md new file mode 100644 index 0000000000000..3ca0b54401ee8 --- /dev/null +++ b/content/en/blog/_posts/2018-05-29-announcing-kustomize.md @@ -0,0 +1,241 @@ +--- +layout: blog +title: Introducing kustomize; Template-free Configuration Customization for Kubernetes +date: 2018-05-29 +--- + +**Authors:** Jeff Regan (Google), Phil Wittrock (Google) + +[**kustomize**]: https://github.com/kubernetes-sigs/kustomize +[hello world]: https://github.com/kubernetes-sigs/kustomize/blob/master/examples/helloWorld +[kustomization]: https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#kustomization +[mailing list]: https://groups.google.com/forum/#!forum/kustomize +[open an issue]: https://github.com/kubernetes-sigs/kustomize/issues/new +[subproject]: https://github.com/kubernetes/community/blob/master/keps/sig-cli/0008-kustomize.md +[SIG-CLI]: https://github.com/kubernetes/community/tree/master/sig-cli +[workflow]: https://github.com/kubernetes-sigs/kustomize/blob/master/docs/workflows.md + +If you run a Kubernetes environment, chances are you’ve +customized a Kubernetes configuration — you've copied +some API object YAML files and editted them to suit +your needs. + +But there are drawbacks to this approach — it can be +hard to go back to the source material and incorporate +any improvements that were made to it. Today Google is +announcing [**kustomize**], a command-line tool +contributed as a [subproject] of [SIG-CLI]. The tool +provides a new, purely *declarative* approach to +configuration customization that adheres to and +leverages the familiar and carefully designed +Kubernetes API. + +Here’s a common scenario. Somewhere on the internet you +find someone’s Kubernetes configuration for a content +management system. It's a set of files containing YAML +specifications of Kubernetes API objects. Then, in some +corner of your own company you find a configuration for +a database to back that CMS — a database you prefer +because you know it well. + +You want to use these together, somehow. Further, you +want to customize the files so that your resource +instances appear in the cluster with a label that +distinguishes them from a colleague’s resources who’s +doing the same thing in the same cluster. +You also want to set appropriate values for CPU, memory +and replica count. + +Additionally, you’ll want *multiple variants* of the +entire configuration: a small variant (in terms of +computing resources used) devoted to testing and +experimentation, and a much larger variant devoted to +serving outside users in production. Likewise, other +teams will want their own variants. + +This raises all sorts of questions. 
Do you copy your +configuration to multiple locations and edit them +independently? What if you have dozens of development +teams who need slightly different variations of the +stack? How do you maintain and upgrade the aspects of +configuration that they share in common? Workflows +using **kustomize** provide answers to these questions. + +## Customization is reuse + +Kubernetes configurations aren't code (being YAML +specifications of API objects, they are more strictly +viewed as data), but configuration lifecycle has many +similarities to code lifecycle. + +You should keep configurations in version +control. Configuration owners aren’t necessarily the +same set of people as configuration +users. Configurations may be used as parts of a larger +whole. Users will want to *reuse* configurations for +different purposes. + +One approach to configuration reuse, as with code +reuse, is to simply copy it all and customize the +copy. As with code, severing the connection to the +source material makes it difficult to benefit from +ongoing improvements to the source material. Taking +this approach with many teams or environments, each +with their own variants of a configuration, makes a +simple upgrade intractable. + +Another approach to reuse is to express the source +material as a parameterized template. A tool processes +the template—executing any embedded scripting and +replacing parameters with desired values—to generate +the configuration. Reuse comes from using different +sets of values with the same template. The challenge +here is that the templates and value files are not +specifications of Kubernetes API resources. They are, +necessarily, a new thing, a new language, that wraps +the Kubernetes API. And yes, they can be powerful, but +bring with them learning and tooling costs. Different +teams want different changes—so almost every +specification that you can include in a YAML file +becomes a parameter that needs a value. As a result, +the value sets get large, since all parameters (that +don't have trusted defaults) must be specified for +replacement. This defeats one of the goals of +reuse—keeping the differences between the variants +small in size and easy to understand in the absence of +a full resource declaration. + +## A new option for configuration customization + +Compare that to **kustomize**, where the tool’s +behavior is determined by declarative specifications +expressed in a file called `kustomization.yaml`. + +The **kustomize** program reads the file and the +Kubernetes API resource files it references, then emits +complete resources to standard output. This text output +can be further processed by other tools, or streamed +directly to **kubectl** for application to a cluster. + +For example, if a file called `kustomization.yaml` +containing + +``` + commonLabels: + app: hello + resources: + - deployment.yaml + - configMap.yaml + - service.yaml +``` + +is in the current working directory, along with +the three resource files it mentions, then running + +``` +kustomize build +``` + +emits a YAML stream that includes the three given +resources, and adds a common label `app: hello` to +each resource. + +Similarly, you can use a *commonAnnotations* field to +add an annotation to all resources, and a *namePrefix* +field to add a common prefix to all resource +names. This trivial yet common customization is just +the beginning. 
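For illustration, a sketch of a `kustomization.yaml` that combines these fields might look like the following; the label, annotation, and prefix values are made up:

```
commonLabels:
  app: hello
commonAnnotations:
  note: example-annotation
namePrefix: staging-
resources:
- deployment.yaml
- configMap.yaml
- service.yaml
```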
+ +A more common use case is that you’ll need multiple +variants of a common set of resources, e.g., a +*development*, *staging* and *production* variant. + +For this purpose, **kustomize** supports the idea of an +*overlay* and a *base*. Both are represented by a +kustomization file. The base declares things that the +variants share in common (both resources and a common +customization of those resources), and the overlays +declare the differences. + +Here’s a file system layout to manage a *staging* and +*production* variant of a given cluster app: + +``` + someapp/ + ├── base/ + │ ├── kustomization.yaml + │ ├── deployment.yaml + │ ├── configMap.yaml + │ └── service.yaml + └── overlays/ + ├── production/ + │ └── kustomization.yaml + │ ├── replica_count.yaml + └── staging/ + ├── kustomization.yaml + └── cpu_count.yaml +``` + +The file `someapp/base/kustomization.yaml` specifies the +common resources and common customizations to those +resources (e.g., they all get some label, name prefix +and annotation). + +The contents of +`someapp/overlays/production/kustomization.yaml` could +be + +``` + commonLabels: + env: production + bases: + - ../../base + patches: + - replica_count.yaml +``` + +This kustomization specifies a *patch* file +`replica_count.yaml`, which could be: + +``` + apiVersion: apps/v1 + kind: Deployment + metadata: + name: the-deployment + spec: + replicas: 100 +``` + +A patch is a partial resource declaration, in this case +a patch of the deployment in +`someapp/base/deployment.yaml`, modifying only the +*replicas* count to handle production traffic. + +The patch, being a partial deployment spec, has a clear +context and purpose and can be validated even if it’s +read in isolation from the remaining +configuration. It’s not just a context free *{parameter +name, value}* tuple. + +To create the resources for the production variant, run + +``` +kustomize build someapp/overlays/production +``` + +The result is printed to stdout as a set of complete +resources, ready to be applied to a cluster. A +similar command defines the staging environment. + +## In summary + +With **kustomize**, you can manage an arbitrary number +of distinctly customized Kubernetes configurations +using only Kubernetes API resource files. Every +artifact that **kustomize** uses is plain YAML and can +be validated and processed as such. kustomize encourages +a fork/modify/rebase [workflow]. + +To get started, try the [hello world] example. +For discussion and feedback, join the [mailing list] or +[open an issue]. Pull requests are welcome. diff --git a/content/en/blog/_posts/2018-05-30-say-hello-to-discuss-kubernetes.md b/content/en/blog/_posts/2018-05-30-say-hello-to-discuss-kubernetes.md new file mode 100644 index 0000000000000..23ad393515c59 --- /dev/null +++ b/content/en/blog/_posts/2018-05-30-say-hello-to-discuss-kubernetes.md @@ -0,0 +1,24 @@ +--- +layout: blog +title: Say Hello to Discuss Kubernetes +date: Wednesday, May 30, 2018 +--- + +**Author**: Jorge Castro (Heptio) + +Communication is key when it comes to engaging a community of over 35,000 people in a global and remote environment. Keeping track of everything in the Kubernetes community can be an overwhelming task. On one hand we have our official resources, like Stack Overflow, GitHub, and the mailing lists, and on the other we have more ephemeral resources like Slack, where you can hop in, chat with someone, and then go on your merry way. 
+ +Slack is great for casual and timely conversations and keeping up with other community members, but communication can't be easily referenced in the future. Plus it can be hard to raise your hand in a room filled with 35,000 participants and find a voice. Mailing lists are useful when trying to reach a specific group of people with a particular ask and want to keep track of responses on the thread, but can be daunting with a large amount of people. Stack Overflow and GitHub are ideal for collaborating on projects or questions that involve code and need to be searchable in the future, but certain topics like "What's your favorite CI/CD tool" or "[Kubectl tips and tricks](https://discuss.kubernetes.io/t/kubectl-tips-and-tricks/192)" are offtopic there. + +While our current assortment of communication channels are valuable in their own rights, we found that there was still a gap between email and real time chat. Across the rest of the web, many other open source projects like Docker, Mozilla, Swift, Ghost, and Chef have had success building communities on top of [Discourse](https://www.discourse.org/features), an open source discussion platform. So what if we could use this tool to bring our discussions together under a modern roof, with an open API, and perhaps not let so much of our information fade into the ether? There's only one way to find out: Welcome to [discuss.kubernetes.io](https://discuss.kubernetes.io) + +![discuss_screenshot](/images/blog/2018-05-30-say-hello-to-discuss-kubernetes.png) + + +Right off the bat we have categories that users can browse. Checking and posting in these categories allow users to participate in things they might be interested in without having to commit to subscribing to a list. Granular notification controls allow the users to subscribe to just the category or tag they want, and allow for responding to topics via email. + +Ecosystem partners and developers now have a place where they can [announce projects](https://discuss.kubernetes.io/c/announcements) that they're working on to users without wondering if it would be offtopic on an official list. We can make this place be not just about core Kubernetes, but about the hundreds of wonderful tools our community is building. + +This new community forum gives people a place to go where they can discuss Kubernetes, and a sounding board for developers to make announcements of things happening around Kubernetes, all while being searchable and easily accessible to a wider audience. + +Hop in and take a look. We're just getting started, so you might want to begin by [introducing yourself](https://discuss.kubernetes.io/t/introduce-yourself-here/56) and then browsing around. Apps are also available for [Android ](https://play.google.com/store/apps/details?id=com.discourse&hl=en_US&rdid=com.discourse&pli=1)and [iOS](https://itunes.apple.com/us/app/discourse-app/id1173672076?mt=8). 
diff --git a/content/en/docs/concepts/architecture/cloud-controller.md b/content/en/docs/concepts/architecture/cloud-controller.md index 1d76435975b39..81b1012579692 100644 --- a/content/en/docs/concepts/architecture/cloud-controller.md +++ b/content/en/docs/concepts/architecture/cloud-controller.md @@ -244,7 +244,7 @@ rules: The following cloud providers have implemented CCMs: * Digital Ocean -* Oracle +* [Oracle](https://github.com/oracle/oci-cloud-controller-manager) * Azure * GCE * AWS diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index f15d9659404ae..9fd80a3837961 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -43,7 +43,7 @@ The `conditions` field describes the status of all `Running` nodes. | Node Condition | Description | |----------------|-------------| | `OutOfDisk` | `True` if there is insufficient free space on the node for adding new pods, otherwise `False` | -| `Ready` | `True` if the node is healthy and ready to accept pods, `False` if the node is not healthy and is not accepting pods, and `Unknown` if the node controller has not heard from the node in the last 40 seconds | +| `Ready` | `True` if the node is healthy and ready to accept pods, `False` if the node is not healthy and is not accepting pods, and `Unknown` if the node controller has not heard from the node in the last `node-monitor-grace-period` (default is 40 seconds) | | `MemoryPressure` | `True` if pressure exists on the node memory -- that is, if the node memory is low; otherwise `False` | | `PIDPressure` | `True` if pressure exists on the processes -- that is, if there are too many processes on the node; otherwise `False` | | `DiskPressure` | `True` if pressure exists on the disk size -- that is, if the disk capacity is low; otherwise `False` | diff --git a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md index 5f6c7e167a644..489c26ffc40e7 100644 --- a/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md +++ b/content/en/docs/concepts/cluster-administration/cluster-administration-overview.md @@ -66,7 +66,7 @@ If you are using a guide involving Salt, see [Configuring Kubernetes with Salt]( ## Optional Cluster Services -* [DNS Integration with SkyDNS](/docs/concepts/services-networking/dns-pod-service/) describes how to resolve a DNS name directly to a Kubernetes service. +* [DNS Integration](/docs/concepts/services-networking/dns-pod-service/) describes how to resolve a DNS name directly to a Kubernetes service. * [Logging and Monitoring Cluster Activity](/docs/concepts/cluster-administration/logging/) explains how logging in Kubernetes works and how to implement it. diff --git a/content/en/docs/concepts/cluster-administration/federation.md b/content/en/docs/concepts/cluster-administration/federation.md index 92467f70c6adf..45c05da8e5000 100644 --- a/content/en/docs/concepts/cluster-administration/federation.md +++ b/content/en/docs/concepts/cluster-administration/federation.md @@ -4,6 +4,9 @@ content_template: templates/concept --- {{% capture overview %}} + +{{< include "federation-current-state.md" >}} + This page explains why and how to manage multiple Kubernetes clusters using federation. 
{{% /capture %}} @@ -96,7 +99,7 @@ The following guides explain some of the resources in detail: * [Services](/docs/concepts/cluster-administration/federation-service-discovery/) -The [API reference docs](/docs/reference/generated/federation/) list all the +The [API reference docs](/docs/reference/federation/) list all the resources supported by federation apiserver. ## Cascading deletion diff --git a/content/en/docs/concepts/cluster-administration/networking.md b/content/en/docs/concepts/cluster-administration/networking.md index 607716b68e783..61457b45469bd 100644 --- a/content/en/docs/concepts/cluster-administration/networking.md +++ b/content/en/docs/concepts/cluster-administration/networking.md @@ -106,6 +106,18 @@ imply any preferential status. [Cisco Application Centric Infrastructure](https://www.cisco.com/c/en/us/solutions/data-center-virtualization/application-centric-infrastructure/index.html) offers an integrated overlay and underlay SDN solution that supports containers, virtual machines, and bare metal servers. [ACI](https://www.github.com/noironetworks/aci-containers) provides container networking integration for ACI. An overview of the integration is provided [here](https://www.cisco.com/c/dam/en/us/solutions/collateral/data-center-virtualization/application-centric-infrastructure/solution-overview-c22-739493.pdf). +### AOS from Apstra + +[AOS](http://www.apstra.com/products/aos/) is an Intent-Based Networking system that creates and manages complex datacenter environments from a simple integrated platform. AOS leverages a highly scalable distributed design to eliminate network outages while minimizing costs. + +The AOS Reference Design currently supports Layer-3 connected hosts that eliminate legacy Layer-2 switching problems. These Layer-3 hosts can be Linux servers (Debian, Ubuntu, CentOS) that create BGP neighbor relationships directly with the top of rack switches (TORs). AOS automates the routing adjacencies and then provides fine grained control over the route health injections (RHI) that are common in a Kubernetes deployment. + +AOS has a rich set of REST API endpoints that enable Kubernetes to quickly change the network policy based on application requirements. Further enhancements will integrate the AOS Graph model used for the network design with the workload provisioning, enabling an end to end management system for both private and public clouds. + +AOS supports the use of common vendor equipment from manufacturers including Cisco, Arista, Dell, Mellanox, HPE, and a large number of white-box systems and open network operating systems like Microsoft SONiC, Dell OPX, and Cumulus Linux. + +Details on how the AOS system works can be accessed here: http://www.apstra.com/products/how-it-works/ + ### Big Cloud Fabric from Big Switch Networks [Big Cloud Fabric](https://www.bigswitch.com/container-network-automation) is a cloud native networking architecture, designed to run Kubernetes in private cloud/on-premise environments. Using unified physical & virtual SDN, Big Cloud Fabric tackles inherent container networking problems such as load balancing, visibility, troubleshooting, security policies & container traffic monitoring. @@ -122,6 +134,12 @@ containers. Cilium is L7/HTTP aware and can enforce network policies on L3-L7 using an identity based security model that is decoupled from network addressing. 
+### CNI-Genie from Huawei + +[CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) is a CNI plugin that enables Kubernetes to [simultaneously have access to different implementations](https://github.com/Huawei-PaaS/CNI-Genie/blob/master/docs/multiple-cni-plugins/README.md#what-cni-genie-feature-1-multiple-cni-plugins-enables) of the [Kubernetes network model](https://git.k8s.io/website/docs/concepts/cluster-administration/networking.md#kubernetes-model) in runtime. This includes any implementation that runs as a [CNI plugin](https://github.com/containernetworking/cni#3rd-party-plugins), such as [Flannel](https://github.com/coreos/flannel#flannel), [Calico](http://docs.projectcalico.org/), [Romana](http://romana.io), [Weave-net](https://www.weave.works/products/weave-net/). + +CNI-Genie also supports [assigning multiple IP addresses to a pod](https://github.com/Huawei-PaaS/CNI-Genie/blob/master/docs/multiple-ips/README.md#feature-2-extension-cni-genie-multiple-ip-addresses-per-pod), each from a different CNI plugin. + ### Contiv [Contiv](https://github.com/contiv/netplugin) provides configurable networking (native l3 using BGP, overlay using vxlan, classic l2, or Cisco-SDN/ACI) for various use cases. [Contiv](http://contiv.io) is all open sourced. @@ -247,11 +265,6 @@ Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-pl or stand-alone. In either version, it doesn't require any configuration or extra code to run, and in both cases, the network provides one IP address per pod - as is standard for Kubernetes. -### CNI-Genie from Huawei - -[CNI-Genie](https://github.com/Huawei-PaaS/CNI-Genie) is a CNI plugin that enables Kubernetes to [simultaneously have access to different implementations](https://github.com/Huawei-PaaS/CNI-Genie/blob/master/docs/multiple-cni-plugins/README.md#what-cni-genie-feature-1-multiple-cni-plugins-enables) of the [Kubernetes network model](https://git.k8s.io/website/docs/concepts/cluster-administration/networking.md#kubernetes-model) in runtime. This includes any implementation that runs as a [CNI plugin](https://github.com/containernetworking/cni#3rd-party-plugins), such as [Flannel](https://github.com/coreos/flannel#flannel), [Calico](http://docs.projectcalico.org/), [Romana](http://romana.io), [Weave-net](https://www.weave.works/products/weave-net/). - -CNI-Genie also supports [assigning multiple IP addresses to a pod](https://github.com/Huawei-PaaS/CNI-Genie/blob/master/docs/multiple-ips/README.md#feature-2-extension-cni-genie-multiple-ip-addresses-per-pod), each from a different CNI plugin. ## Other reading diff --git a/content/en/docs/concepts/configuration/assign-pod-node.md b/content/en/docs/concepts/configuration/assign-pod-node.md index 2e21fb59cc46c..4b0a310483013 100644 --- a/content/en/docs/concepts/configuration/assign-pod-node.md +++ b/content/en/docs/concepts/configuration/assign-pod-node.md @@ -16,7 +16,7 @@ that a pod ends up on a machine with an SSD attached to it, or to co-locate pods services that communicate a lot into the same availability zone. You can find all the files for these examples [in our docs -repo here](https://github.com/kubernetes/website/tree/{{< param "docsbranch" >}}/docs/user-guide/node-selection). +repo here](https://github.com/kubernetes/website/tree/{{< param "docsbranch" >}}/docs/concepts/configuration/). {{< toc >}} @@ -134,7 +134,7 @@ value is `another-node-label-value` should be preferred. You can see the operator `In` being used in the example. 
The new node affinity syntax supports the following operators: `In`, `NotIn`, `Exists`, `DoesNotExist`, `Gt`, `Lt`. You can use `NotIn` and `DoesNotExist` to achieve node anti-affinity behavior, or use -[node taints](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/) to repel pods from specific nodes. +[node taints](/docs/concepts/configuration/taint-and-toleration/) to repel pods from specific nodes. If you specify both `nodeSelector` and `nodeAffinity`, *both* must be satisfied for the pod to be scheduled onto a candidate node. @@ -323,7 +323,7 @@ web-server-1287567482-s330j 1/1 Running 0 7m 10.192.3 The above example uses `PodAntiAffinity` rule with `topologyKey: "kubernetes.io/hostname"` to deploy the redis cluster so that no two instances are located on the same host. -See [ZooKeeper tutorial](https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/#tolerating-node-failure) +See [ZooKeeper tutorial](/docs/tutorials/stateful-application/zookeeper/#tolerating-node-failure) for an example of a StatefulSet configured with anti-affinity for high availability, using the same technique. For more information on inter-pod affinity/anti-affinity, see the diff --git a/content/en/docs/concepts/configuration/manage-compute-resources-container.md b/content/en/docs/concepts/configuration/manage-compute-resources-container.md index 2db15bf3f32ea..b1ba81939b90a 100644 --- a/content/en/docs/concepts/configuration/manage-compute-resources-container.md +++ b/content/en/docs/concepts/configuration/manage-compute-resources-container.md @@ -446,7 +446,7 @@ extender. { "urlPrefix":"", "bindVerb": "bind", - "ManagedResources": [ + "managedResources": [ { "name": "example.com/foo", "ignoredByScheduler": true diff --git a/content/en/docs/concepts/configuration/pod-priority-preemption.md b/content/en/docs/concepts/configuration/pod-priority-preemption.md index 13df076be8d32..bedb276737634 100644 --- a/content/en/docs/concepts/configuration/pod-priority-preemption.md +++ b/content/en/docs/concepts/configuration/pod-priority-preemption.md @@ -193,7 +193,7 @@ Pod P. By doing this, scheduler makes Pod P eligible to preempt Pods on another #### Graceful termination of preemption victims When Pods are preempted, the victims get their -[graceful termination period](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods). +[graceful termination period](/docs/concepts/workloads/pods/pod/#termination-of-pods). They have that much time to finish their work and exit. If they don't, they are killed. This graceful termination period creates a time gap between the point that the scheduler preempts Pods and the time when the pending Pod (P) can be @@ -206,7 +206,7 @@ to zero or a small number. #### PodDisruptionBudget is supported, but not guaranteed! -A [Pod Disruption Budget (PDB)](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/) +A [Pod Disruption Budget (PDB)](/docs/concepts/workloads/pods/disruptions/) allows application owners to limit the number Pods of a replicated application that are down simultaneously from voluntary disruptions. Kubernetes 1.9 supports PDB when preempting Pods, but respecting PDB is best effort. 
The Scheduler tries to diff --git a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md index 13cfba4d8b5ce..d8c66a4520206 100644 --- a/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md +++ b/content/en/docs/concepts/extend-kubernetes/api-extension/custom-resources.md @@ -15,7 +15,7 @@ This page explains [*custom resources*](/docs/concepts/api-extension/custom-reso {{% capture body %}} ## Custom resources -A *resource* is an endpoint in the [Kubernetes API](/docs/reference/api-overview/) that stores a collection of [API objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) of a certain kind. For example, the built-in *pods* resource contains a collection of Pod objects. +A *resource* is an endpoint in the [Kubernetes API](/docs/reference/using-api/api-overview/) that stores a collection of [API objects](/docs/concepts/overview/working-with-objects/kubernetes-objects/) of a certain kind. For example, the built-in *pods* resource contains a collection of Pod objects. A *custom resource* is an extension of the Kubernetes API that is not necessarily available on every Kubernetes cluster. @@ -165,15 +165,17 @@ Aggregated APIs offer more advanced API features and customization of other feat | Feature | Description | CRDs | Aggregated API | | ------- | ----------- | ---- | -------------- | -| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Beta feature of CRDs in v1.9. Checks limited to what is supported by OpenAPI v3.0. | Yes, arbitrary validation checks | -| Defaulting | See above | No, but can achieve the same effect with an Initializer (requires programming) | Yes | -| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | No | Yes | +| Validation | Help users prevent errors and allow you to evolve your API independently of your clients. These features are most useful when there are many clients who can't all update at the same time. | Yes. Most validation can be specified in the CRD using [OpenAPI v3.0 validation](/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation). Any other validations supported by addition of a Validating Webhook. | Yes, arbitrary validation checks | +| Defaulting | See above | Yes, via a Mutating Webhook; Planned, via CRD OpenAPI schema. | Yes | +| Multi-versioning | Allows serving the same object through two API versions. Can help ease API changes like renaming fields. Less important if you control your client versions. | No, but planned | Yes | | Custom Storage | If you need storage with a different performance mode (for example, time-series database instead of key-value store) or isolation for security (for example, encryption secrets or different | No | Yes | -| Custom Business Logic | Perform arbitrary checks or actions when creating, reading, updating or deleting an object | No, but can get some of the same effects with Initializers or Finalizers (requires programming) | Yes | -| Subresources |
  • Add extra operations other than CRUD, such as "scale" or "exec"
  • Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget interact with your new resource
  • Finer-grained access control: user writes spec section, controller writes status section.
  • Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource)
| No but planned | Yes, any Subresource | -| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No | Yes | +| Custom Business Logic | Perform arbitrary checks or actions when creating, reading, updating or deleting an object | Yes, using Webhooks. | Yes | +| Scale Subresource | Allows systems like HorizontalPodAutoscaler and PodDisruptionBudget interact with your new resource | [Yes](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#scale-subresource) | Yes | +| Status Subresource |
  • Finer-grained access control: user writes spec section, controller writes status section.
  • Allows incrementing object Generation on custom resource data mutation (requires separate spec and status sections in the resource)
| [Yes](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#status-subresource) | Yes | +| Other Subresources | Add operations other than CRUD, such as "logs" or "exec". | No | Yes | +| strategic-merge-patch | The new endpoints support PATCH with `Content-Type: application/strategic-merge-patch+json`. Useful for updating objects that may be modified both locally, and by the server. For more information, see ["Update API Objects in Place Using kubectl patch"](/docs/tasks/run-application/update-api-object-kubectl-patch/) | No, but similar functionality planned | Yes | | Protocol Buffers | The new resource supports clients that want to use Protocol Buffers | No | Yes | -| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | No but planned | Yes | +| OpenAPI Schema | Is there an OpenAPI (swagger) schema for the types that can be dynamically fetched from the server? Is the user protected from misspelling field names by ensuring only allowed fields are set? Are types enforced (in other words, don't put an `int` in a `string` field?) | No, but planned | Yes | #### Common Features @@ -222,7 +224,7 @@ Aggregated API servers may or may not use the same authentication, authorization ## Accessing a custom resource -Kubernetes [client libraries](/docs/reference/client-libraries/) can be used to access custom resources. Not all client libraries support custom resources. The go and python client libraries do. +Kubernetes [client libraries](/docs/reference/using-api/client-libraries/) can be used to access custom resources. Not all client libraries support custom resources. The go and python client libraries do. When you add a custom resource, you can access it using: diff --git a/content/en/docs/concepts/overview/kubernetes-api.md b/content/en/docs/concepts/overview/kubernetes-api.md index c745cd6086da4..77606a1e38e0c 100644 --- a/content/en/docs/concepts/overview/kubernetes-api.md +++ b/content/en/docs/concepts/overview/kubernetes-api.md @@ -18,7 +18,7 @@ Kubernetes itself is decomposed into multiple components, which interact through ## API changes -In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following the [API deprecation policy](https://kubernetes.io/docs/reference/deprecation-policy/). +In our experience, any system that is successful needs to grow and change as new use cases emerge or existing ones change. Therefore, we expect the Kubernetes API to continuously change and grow. However, we intend to not break compatibility with existing clients, for an extended period of time. In general, new API resources and new resource fields can be expected to be added frequently. Elimination of resources or fields will require following the [API deprecation policy](/docs/reference/using-api/deprecation-policy/). 
What constitutes a compatible change and how to change the API are detailed by the [API change document](https://git.k8s.io/community/contributors/devel/api_changes.md). diff --git a/content/en/docs/concepts/overview/object-management-kubectl/declarative-config.md b/content/en/docs/concepts/overview/object-management-kubectl/declarative-config.md index 223cdbb8e7e85..098a7974810a7 100644 --- a/content/en/docs/concepts/overview/object-management-kubectl/declarative-config.md +++ b/content/en/docs/concepts/overview/object-management-kubectl/declarative-config.md @@ -691,7 +691,7 @@ is lost. The API server sets certain fields to default values in the live configuration if they are not specified when the object is created. -Here's a configuration file for a Deployment. The file does not specify `strategy` or `selector`: +Here's a configuration file for a Deployment. The file does not specify `strategy`: {{< code file="simple_deployment.yaml" >}} @@ -721,7 +721,7 @@ spec: minReadySeconds: 5 replicas: 1 # defaulted by apiserver selector: - matchLabels: # defaulted by apiserver - derived from template.metadata.labels + matchLabels: app: nginx strategy: rollingUpdate: # defaulted by apiserver - derived from strategy.type @@ -750,10 +750,6 @@ spec: # ... ``` -**Note:** Some of the fields' default values have been derived from -the values of other fields that were specified in the configuration file, -such as the `selector` field. - In a patch request, defaulted fields are not re-defaulted unless they are explicitly cleared as part of a patch request. This can cause unexpected behavior for fields that are defaulted based diff --git a/content/en/docs/concepts/overview/what-is-kubernetes.md b/content/en/docs/concepts/overview/what-is-kubernetes.md index 90da314b087f5..3adf53a238c1f 100644 --- a/content/en/docs/concepts/overview/what-is-kubernetes.md +++ b/content/en/docs/concepts/overview/what-is-kubernetes.md @@ -25,9 +25,10 @@ best-of-breed ideas and practices from the community. ## Why do I need Kubernetes and what can it do? Kubernetes has a number of features. It can be thought of as: -* a container platform -* a microservices platform -* a portable cloud platform + +- a container platform +- a microservices platform +- a portable cloud platform and a lot more. Kubernetes provides a **container-centric** management environment. It @@ -57,7 +58,7 @@ tools to checkpoint state. Additionally, the [Kubernetes control plane](/docs/concepts/overview/components/) is built upon the same -[APIs](/docs/reference/api-overview/) that are available to developers +[APIs](/docs/reference/using-api/api-overview/) that are available to developers and users. Users can write their own controllers, such as [schedulers](https://github.com/kubernetes/community/blob/{{< param "githubbranch" >}}/contributors/devel/scheduler.md), with [their own diff --git a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md index 71f3f74be0d2d..0962242625f45 100644 --- a/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md +++ b/content/en/docs/concepts/overview/working-with-objects/kubernetes-objects.md @@ -18,7 +18,7 @@ This page explains how Kubernetes objects are represented in the Kubernetes API, A Kubernetes object is a "record of intent"--once you create the object, the Kubernetes system will constantly work to ensure that object exists. 
By creating an object, you're effectively telling the Kubernetes system what you want your cluster's workload to look like; this is your cluster's **desired state**. -To work with Kubernetes objects--whether to create, modify, or delete them--you'll need to use the [Kubernetes API](/docs/concepts/overview/kubernetes-api/). When you use the `kubectl` command-line interface, for example, the CLI makes the necessary Kubernetes API calls for you. You can also use the Kubernetes API directly in your own programs using one of the [Client Libraries](/docs/reference/client-libraries/). +To work with Kubernetes objects--whether to create, modify, or delete them--you'll need to use the [Kubernetes API](/docs/concepts/overview/kubernetes-api/). When you use the `kubectl` command-line interface, for example, the CLI makes the necessary Kubernetes API calls for you. You can also use the Kubernetes API directly in your own programs using one of the [Client Libraries](/docs/reference/using-api/client-libraries/). ### Object Spec and Status diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index d2ca05c90ff63..c1621e0ce4ba5 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -134,7 +134,7 @@ KUBERNETES_SERVICE_PORT_HTTPS=443 ### DNS -Kubernetes offers a DNS cluster addon Service that uses skydns to automatically assign dns names to other Services. You can check if it's running on your cluster: +Kubernetes offers a DNS cluster addon Service that automatically assigns dns names to other Services. You can check if it's running on your cluster: ```shell $ kubectl get services kube-dns --namespace=kube-system diff --git a/content/en/docs/concepts/services-networking/ingress.md b/content/en/docs/concepts/services-networking/ingress.md index 639cae6aed5d6..adef0d3278bf7 100644 --- a/content/en/docs/concepts/services-networking/ingress.md +++ b/content/en/docs/concepts/services-networking/ingress.md @@ -86,7 +86,11 @@ __Global Parameters__: For the sake of simplicity the example Ingress has no glo ## Ingress controllers -In order for the Ingress resource to work, the cluster must have an Ingress controller running. This is unlike other types of controllers, which typically run as part of the `kube-controller-manager` binary, and which are typically started automatically as part of cluster creation. You need to choose the ingress controller implementation that is the best fit for your cluster, or implement one. We currently support and maintain [GCE](https://git.k8s.io/ingress-gce/README.md) and [nginx](https://git.k8s.io/ingress-nginx/README.md) controllers. F5 Networks provides [support and maintenance](https://support.f5.com/csp/article/K86859508) for the [F5 BIG-IP Controller for Kubernetes](http://clouddocs.f5.com/products/connectors/k8s-bigip-ctlr/latest). +In order for the Ingress resource to work, the cluster must have an Ingress controller running. This is unlike other types of controllers, which typically run as part of the `kube-controller-manager` binary, and which are typically started automatically as part of cluster creation. Choose the ingress controller implementation that best fits your cluster, or implement a new ingress controller. 
+ +* Kubernetes currently supports and maintains [GCE](https://git.k8s.io/ingress-gce/README.md) and [nginx](https://git.k8s.io/ingress-nginx/README.md) controllers. +* F5 Networks provides [support and maintenance](https://support.f5.com/csp/article/K86859508) for the [F5 BIG-IP Controller for Kubernetes](http://clouddocs.f5.com/products/connectors/k8s-bigip-ctlr/latest). +* [Kong](https://konghq.com/) offers [community](https://discuss.konghq.com/c/kubernetes) or [commercial](https://konghq.com/api-customer-success/) support and maintenance for the [Kong Ingress Controller for Kubernetes](https://konghq.com/blog/kubernetes-ingress-controller-for-kong/) {{< note >}} Review the documentation for your controller to find its specific support policy. @@ -233,7 +237,7 @@ Note that there is a gap between TLS features supported by various Ingress contr ### Loadbalancing -An Ingress controller is bootstrapped with some load balancing policy settings that it applies to all Ingress, such as the load balancing algorithm, backend weight scheme, and others. More advanced load balancing concepts (e.g.: persistent sessions, dynamic weights) are not yet exposed through the Ingress. You can still get these features through the [service loadbalancer](https://github.com/kubernetes/ingress-nginx/blob/master/docs/catalog.md). With time, we plan to distill load balancing patterns that are applicable cross platform into the Ingress resource. +An Ingress controller is bootstrapped with some load balancing policy settings that it applies to all Ingress, such as the load balancing algorithm, backend weight scheme, and others. More advanced load balancing concepts (e.g.: persistent sessions, dynamic weights) are not yet exposed through the Ingress. You can still get these features through the [service loadbalancer](https://github.com/kubernetes/ingress-nginx/blob/master/docs/ingress-controller-catalog.md). With time, we plan to distill load balancing patterns that are applicable cross platform into the Ingress resource. It's also worth noting that even though health checks are not exposed directly through the Ingress, there exist parallel concepts in Kubernetes such as [readiness probes](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) which allow you to achieve the same end result. Please review the controller specific docs to see how they handle health checks ([nginx](https://git.k8s.io/ingress-nginx/README.md), [GCE](https://git.k8s.io/ingress-gce/README.md#health-checks)). diff --git a/content/en/docs/concepts/services-networking/service.md b/content/en/docs/concepts/services-networking/service.md index a9fbb84d3e2b4..9a152cd853598 100644 --- a/content/en/docs/concepts/services-networking/service.md +++ b/content/en/docs/concepts/services-networking/service.md @@ -400,7 +400,7 @@ The default is `ClusterIP`. ### Type NodePort -If you set the `type` field to `"NodePort"`, the Kubernetes master will +If you set the `type` field to `NodePort`, the Kubernetes master will allocate a port from a flag-configured range (default: 30000-32767), and each Node will proxy that port (the same port number on every Node) into your `Service`. That port will be reported in your `Service`'s `spec.ports[*].nodePort` field. @@ -422,7 +422,7 @@ and `spec.clusterIP:spec.ports[*].port`. (If the `--nodeport-addresses` flag in ### Type LoadBalancer On cloud providers which support external load balancers, setting the `type` -field to `"LoadBalancer"` will provision a load balancer for your `Service`. 
+field to `LoadBalancer` will provision a load balancer for your `Service`. The actual creation of the load balancer happens asynchronously, and information about the provisioned balancer will be published in the `Service`'s `status.loadBalancer` field. For example: diff --git a/content/en/docs/concepts/storage/storage-classes.md b/content/en/docs/concepts/storage/storage-classes.md index 76bf11dc72053..82bf103c8e7a5 100644 --- a/content/en/docs/concepts/storage/storage-classes.md +++ b/content/en/docs/concepts/storage/storage-classes.md @@ -167,6 +167,7 @@ provisioner: kubernetes.io/gce-pd parameters: type: pd-standard zones: us-central1-a, us-central1-b + replication-type: none ``` * `type`: `pd-standard` or `pd-ssd`. Default: `pd-standard` @@ -177,6 +178,18 @@ parameters: is specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. `zone` and `zones` parameters must not be used at the same time. +* `replication-type`: `none` or `regional-pd`. Default: `none`. + +If `replication-type` is set to `none`, a regular (zonal) PD will be provisioned. + +If `replication-type` is set to `regional-pd`, a +[Regional Persistent Disk](https://cloud.google.com/compute/docs/disks/#repds) +will be provisioned. In this case, users must use `zones` instead of `zone` to +specify the desired replication zones. If exactly two zones are specified, the +Regional PD will be provisioned in those zones. If more than two zones are +specified, Kubernetes will arbitrarily choose among the specified zones. If the +`zones` parameter is omitted, Kubernetes will arbitrarily choose among zones +managed by the cluster. ### Glusterfs diff --git a/content/en/docs/concepts/storage/volumes.md b/content/en/docs/concepts/storage/volumes.md index ef48a0c421a40..3011e897416c7 100644 --- a/content/en/docs/concepts/storage/volumes.md +++ b/content/en/docs/concepts/storage/volumes.md @@ -368,6 +368,38 @@ spec: fsType: ext4 ``` +#### Regional Persistent Disks +{{< feature-state for_k8s_version="v1.10" state="beta" >}} + +The [Regional Persistent Disks](https://cloud.google.com/compute/docs/disks/#repds) feature allows the creation of Persistent Disks that are available in two zones within the same region. In order to use this feature, the volume must be provisioned as a PersistentVolume; referencing the volume directly from a pod is not supported. + +#### Manually provisioning a Regional PD PersistentVolume +Dynamic provisioning is possible using a [StorageClass for GCE PD](/docs/concepts/storage/storage-classes/#gce). +Before creating a PersistentVolume, you must create the PD: +```shell +gcloud beta compute disks create --size=500GB my-data-disk + --region us-central1 + --replica-zones us-central1-a,us-central1-b +``` +Example PersistentVolume spec: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: test-volume + labels: + failure-domain.beta.kubernetes.io/zone: us-central1-a__us-central1-b +spec: + capacity: + storage: 400Gi + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: my-data-disk + fsType: ext4 +``` + ### gitRepo A `gitRepo` volume is an example of what can be done as a volume plugin. It @@ -527,7 +559,7 @@ durability characteristics of the underlying disk. 
The following is an example PersistentVolume spec using a `local` volume and `nodeAffinity`: -``` yaml +```yaml apiVersion: v1 kind: PersistentVolume metadata: @@ -885,7 +917,7 @@ spec: ``` For more information including Dynamic Provisioning and Persistent Volume Claims, please see the -[StorageOS examples](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/storageos). +[StorageOS examples](https://github.com/kubernetes/examples/blob/master/staging/volumes/storageos). ### vsphereVolume diff --git a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md index a916ac7ded14c..54dc3afef8791 100644 --- a/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md +++ b/content/en/docs/concepts/workloads/controllers/jobs-run-to-completion.md @@ -205,8 +205,7 @@ policy for the embedded template to "`Never`". ## Job Termination and Cleanup -When a Job completes, no more Pods are created, but the Pods are not deleted either. Since they are terminated, -they don't show up with `kubectl get pods`, but they will show up with `kubectl get pods -a`. Keeping them around +When a Job completes, no more Pods are created, but the Pods are not deleted either. Keeping them around allows you to still view the logs of completed pods to check for errors, warnings, or other diagnostic output. The job object also remains after it is completed so that you can view its status. It is up to the user to delete old jobs after noting their status. Delete the job with `kubectl` (e.g. `kubectl delete jobs/pi` or `kubectl delete -f ./job.yaml`). When you delete the job using `kubectl`, all the pods it created are deleted too. diff --git a/content/en/docs/concepts/workloads/pods/disruptions.md b/content/en/docs/concepts/workloads/pods/disruptions.md index 1b472317bbe6d..eaa250cc424af 100644 --- a/content/en/docs/concepts/workloads/pods/disruptions.md +++ b/content/en/docs/concepts/workloads/pods/disruptions.md @@ -112,7 +112,7 @@ and the tool periodically retries all failed requests until all pods are terminated, or until a configurable timeout is reached. A PDB specifies the number of replicas that an application can tolerate having, relative to how -many it is intended to have. For example, a Deployment which has a `spec.replicas: 5` is +many it is intended to have. For example, a Deployment which has a `.spec.replicas: 5` is supposed to have 5 pods at any given time. If its PDB allows for there to be 4 at a time, then the Eviction API will allow voluntary disruption of one, but not two pods, at a time. diff --git a/content/en/docs/concepts/workloads/pods/init-containers.md b/content/en/docs/concepts/workloads/pods/init-containers.md index d04d8909e8bce..056b11c851fcc 100644 --- a/content/en/docs/concepts/workloads/pods/init-containers.md +++ b/content/en/docs/concepts/workloads/pods/init-containers.md @@ -37,8 +37,8 @@ To specify a Container as an Init Container, add the `initContainers` field on t a JSON array of objects of type [Container](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#container-v1-core) alongside the app `containers` array. 
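For orientation, a minimal sketch of a Pod spec that declares one init container alongside its app container might look like the following; the images and the `myservice` dependency are purely illustrative and not taken from this page:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  initContainers:
  - name: wait-for-myservice
    image: busybox
    # Blocks Pod startup until the illustrative "myservice" DNS name resolves.
    command: ['sh', '-c', 'until nslookup myservice; do echo waiting; sleep 2; done']
  containers:
  - name: app
    image: nginx
    ports:
    - containerPort: 80
```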
-The status of the init containers is returned in `status.initContainerStatuses` -field as an array of the container statuses (similar to the `status.containerStatuses` +The status of the init containers is returned in `.status.initContainerStatuses` +field as an array of the container statuses (similar to the `.status.containerStatuses` field). ### Differences from regular Containers @@ -310,15 +310,15 @@ reasons: ## Support and compatibility A cluster with Apiserver version 1.6.0 or greater supports Init Containers -using the `spec.initContainers` field. Previous versions support Init Containers -using the alpha or beta annotations. The `spec.initContainers` field is also mirrored +using the `.spec.initContainers` field. Previous versions support Init Containers +using the alpha or beta annotations. The `.spec.initContainers` field is also mirrored into alpha and beta annotations so that Kubelets version 1.3.0 or greater can execute Init Containers, and so that a version 1.6 apiserver can safely be rolled back to version 1.5.x without losing Init Container functionality for existing created pods. In Apiserver and Kubelet versions 1.8.0 or greater, support for the alpha and beta annotations is removed, requiring a conversion from the deprecated annotations to the -`spec.initContainers` field. +`.spec.initContainers` field. {{% /capture %}} diff --git a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md index 59ddd17dbc3db..98125e37583d3 100644 --- a/content/en/docs/concepts/workloads/pods/pod-lifecycle.md +++ b/content/en/docs/concepts/workloads/pods/pod-lifecycle.md @@ -159,7 +159,7 @@ once bound to a node, a Pod will never be rebound to another node. In general, Pods do not disappear until someone destroys them. This might be a human or a controller. The only exception to this rule is that Pods with a `phase` of Succeeded or Failed for more than some -duration (determined by the master) will expire and be automatically destroyed. +duration (determined by `terminated-pod-gc-threshold` in the master) will expire and be automatically destroyed. Three types of controllers are available: diff --git a/content/en/docs/concepts/workloads/pods/podpreset.md b/content/en/docs/concepts/workloads/pods/podpreset.md index 74fd2dd75076b..4cd991e536e80 100644 --- a/content/en/docs/concepts/workloads/pods/podpreset.md +++ b/content/en/docs/concepts/workloads/pods/podpreset.md @@ -51,7 +51,7 @@ Pods, Kubernetes modifies the Pod Spec. For changes to `Env`, `EnvFrom`, and the Pod; for changes to `Volume`, Kubernetes modifies the Pod Spec. {{< note >}} -**Note:** A Pod Preset is capable of modifying the `spec.containers` field in a +**Note:** A Pod Preset is capable of modifying the `.spec.containers` field in a Pod spec when appropriate. *No* resource definition from the Pod Preset will be applied to the `initContainers` field. {{< /note >}} diff --git a/content/en/docs/getting-started-guides/clc.md b/content/en/docs/getting-started-guides/clc.md index e4792f40c835d..69488fadc5842 100644 --- a/content/en/docs/getting-started-guides/clc.md +++ b/content/en/docs/getting-started-guides/clc.md @@ -252,7 +252,7 @@ kubectl cluster-info ### Accessing the cluster programmatically -It's possible to use the locally stored client certificates to access the apiserver. 
For example, you may want to use any of the [Kubernetes API client libraries](/docs/reference/client-libraries/) to program against your Kubernetes cluster in the programming language of your choice. +It's possible to use the locally stored client certificates to access the apiserver. For example, you may want to use any of the [Kubernetes API client libraries](/docs/reference/using-api/client-libraries/) to program against your Kubernetes cluster in the programming language of your choice. To demonstrate how to use these locally stored certificates, we provide the following example of using ```curl``` to communicate to the master apiserver via https: diff --git a/content/en/docs/getting-started-guides/minikube.md b/content/en/docs/getting-started-guides/minikube.md index 585baa00f93b8..c7ce7e5674a53 100644 --- a/content/en/docs/getting-started-guides/minikube.md +++ b/content/en/docs/getting-started-guides/minikube.md @@ -46,7 +46,7 @@ Running pre-create checks... Creating machine... Starting local Kubernetes cluster... -$ kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.4 --port=8080 +$ kubectl run hello-minikube --image=k8s.gcr.io/echoserver:1.10 --port=8080 deployment "hello-minikube" created $ kubectl expose deployment hello-minikube --type=NodePort service "hello-minikube" exposed diff --git a/content/en/docs/getting-started-guides/ubuntu/operational-considerations.md b/content/en/docs/getting-started-guides/ubuntu/operational-considerations.md index a2128eb09ede5..c93b81cc59058 100644 --- a/content/en/docs/getting-started-guides/ubuntu/operational-considerations.md +++ b/content/en/docs/getting-started-guides/ubuntu/operational-considerations.md @@ -25,7 +25,7 @@ The Juju Controller: To bootstrap a controller with constraints run the following command: ``` -juju bootstrap --contraints "mem=8GB cpu-cores=4 root-disk=128G" +juju bootstrap --constraints "mem=8GB cpu-cores=4 root-disk=128G" ``` Juju will select the cheapest instance type matching your constraints on your target cloud. You can also use the ```instance-type``` constraint in conjunction with ```root-disk``` for strict control. For more information about the constraints available, refer to the [official documentation](https://jujucharms.com/docs/stable/reference-constraints) diff --git a/content/en/docs/getting-started-guides/windows/_index.md b/content/en/docs/getting-started-guides/windows/_index.md index 0799cce3ad244..b09f5ae90fdfa 100644 --- a/content/en/docs/getting-started-guides/windows/_index.md +++ b/content/en/docs/getting-started-guides/windows/_index.md @@ -496,6 +496,40 @@ spec: - containerPort: 80 ``` +### Kubelet and kube-proxy can now run as Windows services + +Starting with kubernetes v1.11, kubelet and kube-proxy can run as Windows services. + +This means that you can now register them as Windows services via `sc` command. More details about how to create Windows services with `sc` can be found [here](https://support.microsoft.com/en-us/help/251192/how-to-create-a-windows-service-by-using-sc-exe). + +**Examples:** + +To create the service: +``` +PS > sc.exe create binPath= " --service " +CMD > sc create binPath= " --service " +``` +Please note that if the arguments contain spaces, it must be escaped. 
Example: +``` +PS > sc.exe create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' " +CMD > sc create kubelet binPath= "C:\kubelet.exe --service --hostname-override 'minion' " +``` +To start the service: +``` +PS > Start-Service kubelet; Start-Service kube-proxy +CMD > net start kubelet && net start kube-proxy +``` +To stop the service: +``` +PS > Stop-Service kubelet (-Force); Stop-Service kube-proxy (-Force) +CMD > net stop kubelet && net stop kube-proxy +``` +To query the service: +``` +PS > Get-Service kubelet; Get-Service kube-proxy; +CMD > sc.exe queryex kubelet && sc qc kubelet && sc.exe queryex kube-proxy && sc.exe qc kube-proxy +``` + ## Known Limitations for Windows Server Containers with v1.9 Some of these limitations will be addressed by the community in future releases of Kubernetes: diff --git a/content/en/docs/home/contribute/create-pull-request.md b/content/en/docs/home/contribute/create-pull-request.md index d6c6a57516417..61e7c23a36039 100644 --- a/content/en/docs/home/contribute/create-pull-request.md +++ b/content/en/docs/home/contribute/create-pull-request.md @@ -47,11 +47,25 @@ that is the best fit for your content. ## Viewing your changes locally -When you submit a pull request, you can see a preview of your changes at -[Netlify](https://www.netlify.com/). If you prefer to see a preview of your changes -before you submit a pull request, you can build a preview locally. For more information, see -[Staging locally](/docs/home/contribute/stage-documentation-changes/#staging-locally-without-docker). +You can use Hugo to see a preview of your changes locally. + +1. [Install Hugo](https://gohugo.io/getting-started/installing/) +version 0.40.3 or later. + +1. Go to the root directory of your clone of the Kubernetes docs, and +enter this command: + + hugo server +1. In your browser's address bar, enter `localhost:1313`. + +## Viewing you changes in the Netlify preview + +When you submit a pull request, you can see a preview of your changes at +[Netlify](https://www.netlify.com/). In your pull request, at the bottom, +to the right of **deploy/netlify**, click **Details**. Also, there is often +a link to the Netlify preview in the pull request comments. + ## Submitting a pull request to the master branch (Current Release) If you want your change to be published in the released version Kubernetes docs, diff --git a/content/en/docs/home/contribute/stage-documentation-changes.md b/content/en/docs/home/contribute/stage-documentation-changes.md deleted file mode 100644 index ca66897fcc764..0000000000000 --- a/content/en/docs/home/contribute/stage-documentation-changes.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: Staging Your Documentation Changes -content_template: templates/task ---- - -{{% capture overview %}} -This page shows how to stage content that you want to contribute -to the Kubernetes documentation. -{{% /capture %}} - -{{% capture prerequisites %}} -Create a fork of the Kubernetes documentation repository as described in -[Creating a Documentation Pull Request](/docs/home/contribute/create-pull-request/). -{{% /capture %}} - -{{% capture steps %}} - -## Staging a pull request - -When you create a pull request, either against the master or <vnext> -branch, your changes are staged in a custom subdomain on Netlify so that -you can see your changes in rendered form before the pull request is merged. - -1. In your GitHub account, in your new branch, submit a pull request to the -kubernetes/website repository. 
This opens a page that shows the -status of your pull request. - -1. Scroll down to the list of automated checks. Click **Show all checks**. -Wait for the **deploy/netlify** check to complete. To the right of -**deploy/netlify**, click **Details**. This opens a staging site where you -can see your changes. - -## Staging locally using Docker - -You can use the k8sdocs Docker image to run a local staging server. If you're -interested, you can view the -
Dockerfile -for this image. - -1. Install Docker if you don't already have it. - -1. Clone your fork to your local development machine. - -1. In the root of your cloned repository, enter this command to start a local -web server: - - make stage - - This will run the following command: - - docker run -ti --rm -v "$PWD":/k8sdocs -p 4000:4000 gcr.io/google-samples/k8sdocs:1.1 - -1. View your staged content at `http://localhost:4000`. - -## Staging locally without Docker - -1. Install Ruby 2.2 or later. - -1. Install Ruby Gems. - -1. Verify that Ruby and RubyGems are installed: - - gem --version - -1. Install the GitHub Pages package, which includes Jekyll: - - gem install github-pages - -1. Clone your fork to your local development machine. - -1. In the root of your cloned repository, enter this command to start a local -web server: - - jekyll serve - -1. View your staged content at `http://localhost:4000`. - -{{< note >}} -**Note:** If you do not want Jekyll to interfere with your other globally installed gems, you can use `bundler`:

```gem install bundler```
```bundle install```
```bundler exec jekyll serve```

Regardless of whether you use `bundler` or not, your copy of the site will then be viewable at: http://localhost:4000 -{{< /note >}} - -{{% /capture %}} - -{{% capture whatsnext %}} -* Learn about [writing a new topic](/docs/home/contribute/write-new-topic/). -* Learn about [using page templates](/docs/home/contribute/page-templates/). -* Learn about [creating a pull request](/docs/home/contribute/create-pull-request/). -{{% /capture %}} - - diff --git a/content/en/docs/imported/release/notes.md b/content/en/docs/imported/release/notes.md index 838d56ba1ac06..750afc7f04974 100644 --- a/content/en/docs/imported/release/notes.md +++ b/content/en/docs/imported/release/notes.md @@ -3,106 +3,6 @@ title: v1.10 Release Notes --- -- [v1.10.0](#v1100) - - [Downloads for v1.10.0](#downloads-for-v1100) - - [Client Binaries](#client-binaries) - - [Server Binaries](#server-binaries) - - [Node Binaries](#node-binaries) - - [Major Themes](#major-themes) - - [Node](#node) - - [Storage](#storage) - - [Windows](#windows) - - [OpenStack](#openstack) - - [API-machinery](#api-machinery) - - [Auth](#auth) - - [Azure](#azure) - - [CLI](#cli) - - [Cluster Lifecycle](#cluster-lifecycle) - - [Network](#network) - - [Before Upgrading](#before-upgrading) - - [Known Issues](#known-issues) - - [Deprecations](#deprecations) - - [Other Notable Changes](#other-notable-changes) - - [Apps](#apps) - - [AWS](#aws) - - [Auth](#auth-1) - - [CLI](#cli-1) - - [Cluster Lifecycle](#cluster-lifecycle-1) - - [GCP](#gcp) - - [Instrumentation](#instrumentation) - - [Node](#node-1) - - [OpenStack](#openstack-1) - - [Scalability](#scalability) - - [Storage](#storage-1) - - [Windows](#windows-1) - - [Autoscaling](#autoscaling) - - [API-Machinery](#api-machinery-1) - - [Network](#network-1) - - [Azure](#azure-1) - - [Scheduling](#scheduling) - - [Other changes](#other-changes) - - [Non-user-facing Changes](#non-user-facing-changes) - - [External Dependencies](#external-dependencies) -- [v1.10.0-rc.1](#v1100-rc1) - - [Downloads for v1.10.0-rc.1](#downloads-for-v1100-rc1) - - [Client Binaries](#client-binaries-1) - - [Server Binaries](#server-binaries-1) - - [Node Binaries](#node-binaries-1) - - [Changelog since v1.10.0-beta.4](#changelog-since-v1100-beta4) - - [Other notable changes](#other-notable-changes-1) -- [v1.10.0-beta.4](#v1100-beta4) - - [Downloads for v1.10.0-beta.4](#downloads-for-v1100-beta4) - - [Client Binaries](#client-binaries-2) - - [Server Binaries](#server-binaries-2) - - [Node Binaries](#node-binaries-2) - - [Changelog since v1.10.0-beta.3](#changelog-since-v1100-beta3) - - [Other notable changes](#other-notable-changes-2) -- [v1.10.0-beta.3](#v1100-beta3) - - [Downloads for v1.10.0-beta.3](#downloads-for-v1100-beta3) - - [Client Binaries](#client-binaries-3) - - [Server Binaries](#server-binaries-3) - - [Node Binaries](#node-binaries-3) - - [Changelog since v1.10.0-beta.2](#changelog-since-v1100-beta2) - - [Other notable changes](#other-notable-changes-3) -- [v1.10.0-beta.2](#v1100-beta2) - - [Downloads for v1.10.0-beta.2](#downloads-for-v1100-beta2) - - [Client Binaries](#client-binaries-4) - - [Server Binaries](#server-binaries-4) - - [Node Binaries](#node-binaries-4) - - [Changelog since v1.10.0-beta.1](#changelog-since-v1100-beta1) - - [Action Required](#action-required) - - [Other notable changes](#other-notable-changes-4) -- [v1.10.0-beta.1](#v1100-beta1) - - [Downloads for v1.10.0-beta.1](#downloads-for-v1100-beta1) - - [Client Binaries](#client-binaries-5) - - [Server Binaries](#server-binaries-5) - - [Node 
Binaries](#node-binaries-5) - - [Changelog since v1.10.0-alpha.3](#changelog-since-v1100-alpha3) - - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-5) -- [v1.10.0-alpha.3](#v1100-alpha3) - - [Downloads for v1.10.0-alpha.3](#downloads-for-v1100-alpha3) - - [Client Binaries](#client-binaries-6) - - [Server Binaries](#server-binaries-6) - - [Node Binaries](#node-binaries-6) - - [Changelog since v1.10.0-alpha.2](#changelog-since-v1100-alpha2) - - [Other notable changes](#other-notable-changes-6) -- [v1.10.0-alpha.2](#v1100-alpha2) - - [Downloads for v1.10.0-alpha.2](#downloads-for-v1100-alpha2) - - [Client Binaries](#client-binaries-7) - - [Server Binaries](#server-binaries-7) - - [Node Binaries](#node-binaries-7) - - [Changelog since v1.10.0-alpha.1](#changelog-since-v1100-alpha1) - - [Action Required](#action-required-2) - - [Other notable changes](#other-notable-changes-7) -- [v1.10.0-alpha.1](#v1100-alpha1) - - [Downloads for v1.10.0-alpha.1](#downloads-for-v1100-alpha1) - - [Client Binaries](#client-binaries-8) - - [Server Binaries](#server-binaries-8) - - [Node Binaries](#node-binaries-8) - - [Changelog since v1.9.0](#changelog-since-v190) - - [Action Required](#action-required-3) - - [Other notable changes](#other-notable-changes-8) @@ -2034,6 +1934,7 @@ filename | sha256 hash See the [Releases Page](https://github.com/kubernetes/kubernetes/releases) for older releases. Release notes of older releases can be found in: + - [CHANGELOG-1.2.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.2.md) - [CHANGELOG-1.3.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.3.md) - [CHANGELOG-1.4.md](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.4.md) diff --git a/content/en/docs/reference/_index.md b/content/en/docs/reference/_index.md index 5c79c55d94924..3926ed9a1681c 100644 --- a/content/en/docs/reference/_index.md +++ b/content/en/docs/reference/_index.md @@ -9,7 +9,7 @@ weight: 70 ## API Reference -* [Kubernetes API Overview](/docs/reference/api-overview/) - Overview of the API for Kubernetes. +* [Kubernetes API Overview](/docs/reference/using-api/api-overview/) - Overview of the API for Kubernetes. * Kubernetes API Versions * [1.10](/docs/reference/generated/kubernetes-api/v1.10/) * [1.9](https://v1-9.docs.kubernetes.io/docs/reference/) @@ -21,7 +21,7 @@ weight: 70 ## API Client Libraries To call the Kubernetes API from a programming language, you can use -[client libraries](/docs/reference/client-libraries/). Officially supported +[client libraries](/docs/reference/using-api/client-libraries/). Officially supported client libraries: - [Kubernetes Go client library](https://github.com/kubernetes/client-go/) diff --git a/content/en/docs/reference/access-authn-authz/authentication.md b/content/en/docs/reference/access-authn-authz/authentication.md index e477cf1a7246d..81be4a8dfb69d 100644 --- a/content/en/docs/reference/access-authn-authz/authentication.md +++ b/content/en/docs/reference/access-authn-authz/authentication.md @@ -568,7 +568,7 @@ to the impersonated user info. * A user makes an API call with their credentials _and_ impersonation headers. * API server authenticates the user. -* API server ensures the authenticated users has impersonation privileges. +* API server ensures the authenticated users have impersonation privileges. * Request user info is replaced with impersonation values. * Request is evaluated, authorization acts on impersonated user info. 
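As a rough illustration of this flow, a caller that holds impersonation privileges can let `kubectl` set the impersonation headers on its behalf; the user and group names below are placeholders:

```shell
# kubectl adds the Impersonate-User and Impersonate-Group headers to the request;
# authorization is then evaluated against the impersonated identity.
kubectl get pods --as=jane --as-group=developers
```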
diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 669ec31a33fb3..baa0a9a37eba5 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -33,9 +33,9 @@ This page describes how to use Admission Webhooks and Initializers. Admission webhooks are HTTP callbacks that receive admission requests and do something with them. You can define two types of admission webhooks, -[validating admission Webhook](/docs/admin/admission-controllers.md#validatingadmissionwebhook-alpha-in-18-beta-in-19) +[validating admission Webhook](/docs/admin/admission-controllers/#validatingadmissionwebhook-alpha-in-18-beta-in-19) and -[mutating admission webhook](/docs/admin/admission-controllers.md#mutatingadmissionwebhook-beta-in-19). +[mutating admission webhook](/docs/admin/admission-controllers/#mutatingadmissionwebhook-beta-in-19). With validating admission Webhooks, you may reject requests to enforce custom admission policies. With mutating admission Webhooks, you may change requests to enforce custom defaults. diff --git a/content/en/docs/reference/command-line-tools-reference/feature-gates.md b/content/en/docs/reference/command-line-tools-reference/feature-gates.md index 603dbc0837596..fb4041e7938a1 100644 --- a/content/en/docs/reference/command-line-tools-reference/feature-gates.md +++ b/content/en/docs/reference/command-line-tools-reference/feature-gates.md @@ -1,14 +1,12 @@ --- title: Feature Gates weight: 10 +notitle: true --- -{{% capture overview %}} This page contains an overview of the various feature gates an administrator can specify on different Kubernetes components. -{{% /capture %}} -{{% capture body %}} ## Overview @@ -123,9 +121,10 @@ A *Beta* feature means: incompatible changes in subsequent releases. If you have multiple clusters that can be upgraded independently, you may be able to relax this restriction. +{{< note >}} **Note:** Please do try *Beta* features and give feedback on them! After they exit beta, it may not be practical for us to make more changes. -{: .note} +{{< /note >}} A *GA* feature is also referred to as a *stable* feature. It means: @@ -223,5 +222,4 @@ Each feature gate is designed for enabling/disabling a specific feature: enables the usage of [`local`](/docs/concepts/storage/volumes/#local) volume type when used together with the `PersistentLocalVolumes` feature gate. -{{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md index 6aa0877ec7415..99ca404fdbf01 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md +++ b/content/en/docs/reference/setup-tools/kubeadm/implementation-details.md @@ -65,10 +65,10 @@ in a majority of cases, and the most intuitive location; other constants paths a ## kubeadm init workflow internal design -The `kubeadm init` [internal workflow](kubeadm-init.md/#init-workflow) consists of a sequence of atomic work tasks to perform, +The `kubeadm init` [internal workflow](/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-workflow) consists of a sequence of atomic work tasks to perform, as described in `kubeadm init`. 
-The [`kubeadm alpha phase`](kubeadm-alpha.md) command allows users to invoke individually each task, and ultimately offers a reusable and composable +The [`kubeadm alpha phase`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/) command allows users to invoke individually each task, and ultimately offers a reusable and composable API/toolbox that can be used by other Kubernetes bootstrap tools, by any IT automation tool or by advanced user for creating custom clusters. @@ -119,7 +119,7 @@ In any case the user can skip specific preflight checks (or eventually all prefl Please note that: -1. Preflight checks can be invoked individually with the [`kubeadm alpha phase preflight`](kubeadm-alpha.md/#cmd-phase-preflight) command +1. Preflight checks can be invoked individually with the [`kubeadm alpha phase preflight`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-preflight) command ### Generate the necessary certificates @@ -148,14 +148,14 @@ Certificates are stored by default in `/etc/kubernetes/pki`, but this directory 1. If a given certificate and private key pair both exist, and its content is evaluated compliant with the above specs, the existing files will be used and the generation phase for the given certificate skipped. This means the user can, for example, copy an existing CA to `/etc/kubernetes/pki/ca.{crt,key}`, and then kubeadm will use those files for signing the rest of the certs. - See also [using custom certificates](kubeadm-init.md/#custom-certificates) + See also [using custom certificates](/docs/reference/setup-tools/kubeadm/kubeadm-init/#custom-certificates) 2. Only for the CA, it is possible to provide the `ca.crt` file but not the `ca.key` file, if all other certificates and kubeconfig files already are in place kubeadm recognize this condition and activates the ExternalCA , which also implies the `csrsigner`controller in controller-manager won't be started -3. If kubeadm is running in [ExternalCA mode](kubeadm-init.md/#external-ca-mode); all the certificates must be provided by the user, +3. If kubeadm is running in [ExternalCA mode](/docs/reference/setup-tools/kubeadm/kubeadm-init/#external-ca-mode); all the certificates must be provided by the user, because kubeadm cannot generate them by itself 4. In case of kubeadm is executed in the `--dry-run` mode, certificates files are written in a temporary folder -5. Certificate generation can be invoked individually with the [`kubeadm alpha phase certs all`](kubeadm-alpha.md/#cmd-phase-certs) command +5. Certificate generation can be invoked individually with the [`kubeadm alpha phase certs all`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-certs) command ### Generate kubeconfig files for control plane components @@ -181,9 +181,9 @@ Please note that: 1. `ca.crt` certificate is embedded in all the kubeconfig files. 2. If a given kubeconfig file exists, and its content is evaluated compliant with the above specs, the existing file will be used and the generation phase for the given kubeconfig skipped -3. If kubeadm is running in [ExternalCA mode](kubeadm-init.md/#external-ca-mode), all the required kubeconfig must be provided by the user as well, because kubeadm cannot generate any of them by itself +3. If kubeadm is running in [ExternalCA mode](/docs/reference/setup-tools/kubeadm/kubeadm-init/#external-ca-mode), all the required kubeconfig must be provided by the user as well, because kubeadm cannot generate any of them by itself 4. 
In case of kubeadm is executed in the `--dry-run` mode, kubeconfig files are written in a temporary folder -5. Kubeconfig files generation can be invoked individually with the [`kubeadm alpha phase kubeconfig all`](kubeadm-alpha.md/#cmd-phase-kubeconfig) command +5. Kubeconfig files generation can be invoked individually with the [`kubeadm alpha phase kubeconfig all`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-kubeconfig) command ### Generate static Pod manifests for control plane components @@ -200,17 +200,17 @@ Static Pod manifest share a set of common properties: * If using a local etcd server, `etcd-servers` address will be set to `127.0.0.1:2379` - Leader election is enabled for both the controller-manager and the scheduler - Controller-manager and the scheduler will reference kubeconfig files with their respective, unique identities -- All static Pods gets any extra flags specified by the user as described in [passing custom arguments to control plane components](kubeadm-init.md/#custom-args) +- All static Pods gets any extra flags specified by the user as described in [passing custom arguments to control plane components](/docs/reference/setup-tools/kubeadm/kubeadm-init/#custom-args) - All static Pods gets any extra Volumes specified by the user (Host path) Please note that: 1. All the images, for the `--kubernetes-version`/current architecture, will be pulled from `k8s.gcr.io`; In case an alternative image repository or CI image repository is specified this one will be used; In case a specific container image - should be used for all control plane components, this one will be used. see [using custom images](kubeadm-init.md/#custom-images) + should be used for all control plane components, this one will be used. see [using custom images](/docs/reference/setup-tools/kubeadm/kubeadm-init/#custom-images) for more details 2. In case of kubeadm is executed in the `--dry-run` mode, static Pods files are written in a temporary folder -3. Static Pod manifest generation for master components can be invoked individually with the [`kubeadm alpha phase controlplane all`](kubeadm-alpha.md/#cmd-phase-controlplane) command +3. Static Pod manifest generation for master components can be invoked individually with the [`kubeadm alpha phase controlplane all`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-controlplane) command #### API server @@ -303,9 +303,9 @@ a local etcd instance running in a Pod with following attributes: Please note that: 1. The etcd image will be pulled from `k8s.gcr.io`. In case an alternative image repository is specified this one will be used; - In case an alternative image name is specified, this one will be used. see [using custom images](kubeadm-init.md/#custom-images) for more details + In case an alternative image name is specified, this one will be used. see [using custom images](/docs/reference/setup-tools/kubeadm/kubeadm-init/#custom-images) for more details 2. in case of kubeadm is executed in the `--dry-run` mode, the etcd static Pod manifest is written in a temporary folder -3. Static Pod manifest generation for local etcd can be invoked individually with the [`kubeadm alpha phase etcd local`](kubeadm-alpha.md/#cmd-phase-etcd) command +3. Static Pod manifest generation for local etcd can be invoked individually with the [`kubeadm alpha phase etcd local`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-etcd) command ### (optional and alpha in v1.9) Write init kubelet configuration @@ -321,7 +321,7 @@ Please note that: 1. 
To make dynamic kubelet configuration work, flag `--dynamic-config-dir=/var/lib/kubelet/config/dynamic` should be specified in `/etc/systemd/system/kubelet.service.d/10-kubeadm.conf` 1. Kubelet init configuration can be changed by using kubeadm MasterConfiguration file by setting `.kubeletConfiguration.baseConfig`. - See [using kubeadm init with a configuration file](kubeadm-init.md/#config-file) for more detail + See [using kubeadm init with a configuration file](/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file) for more detail ### Wait for the control plane to come up @@ -353,9 +353,9 @@ state and make new decisions based on that data. Please note that: 1. Before uploading, sensitive information like e.g. the token are stripped from the configuration -2. Upload of master configuration can be invoked individually with the [`kubeadm alpha phase upload-config`](kubeadm-alpha.md/#cmd-phase-upload-config) command +2. Upload of master configuration can be invoked individually with the [`kubeadm alpha phase upload-config`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-upload-config) command 3. If you initialized your cluster using kubeadm v1.7.x or lower, you must create manually the master configuration ConfigMap - before `kubeadm upgrade` to v1.8 . In order to facilitate this task, the [`kubeadm config upload (from-flags|from-file)`](kubeadm-config.md) + before `kubeadm upgrade` to v1.8 . In order to facilitate this task, the [`kubeadm config upload (from-flags|from-file)`](/docs/reference/setup-tools/kubeadm/kubeadm-config/) was implemented ### Mark master @@ -367,7 +367,7 @@ As soon as the control plane is available, kubeadm executes following actions: Please note that: -1. Mark master phase can be invoked individually with the [`kubeadm alpha phase mark-master`](kubeadm-alpha.md/#cmd-phase-mark-master) command +1. Mark master phase can be invoked individually with the [`kubeadm alpha phase mark-master`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-mark-master) command ### Configure TLS-Bootstrapping for node joining @@ -378,7 +378,7 @@ existing cluster; for more details see also [design proposal](https://github.com setting API server and controller flags as already described in previous paragraphs. Please note that: -1. TLS bootstrapping for nodes can be configured with the [`kubeadm alpha phase bootstrap-token all`](kubeadm-alpha.md/#cmd-phase-bootstrap-token) +1. TLS bootstrapping for nodes can be configured with the [`kubeadm alpha phase bootstrap-token all`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-bootstrap-token) command, executing all the configuration steps described in following paragraphs; alternatively, each step can be invoked individually #### Create a bootstrap token @@ -390,7 +390,7 @@ Please note that: 1. The default token created by `kubeadm init` will be used to validate temporary user during TLS bootstrap process; those users will be member of `system:bootstrappers:kubeadm:default-node-token` group 2. The token has a limited validity, default 24 hours (the interval may be changed with the `—token-ttl` flag) -3. Additional tokens can be created with the [`kubeadm token`](kubeadm-token.md) command, that provide as well other useful functions +3. 
Additional tokens can be created with the [`kubeadm token`](/docs/reference/setup-tools/kubeadm/kubeadm-token/) command, that provide as well other useful functions for token management #### Allow joining nodes to call CSR API @@ -436,7 +436,7 @@ can handle to serving the `cluster-info` ConfigMap. Kubeadm installs the internal DNS server and the kube-proxy addon components via the API server. Please note that: -1. This phase can be invoked individually with the [`kubeadm alpha phase addon all`](kubeadm-alpha.md/#cmd-phase-addon) command. +1. This phase can be invoked individually with the [`kubeadm alpha phase addon all`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-addon) command. #### proxy @@ -479,7 +479,7 @@ following procedure for API server, scheduler and controller manager static Pods Please note that: 1. Self hosting is not yet resilient to node restarts; this can be fixed with external checkpointing or with kubelet checkpointing - for the control plane Pods. See [self-hosting](kubeadm-init.md/#self-hosting) for more details. + for the control plane Pods. See [self-hosting](/docs/reference/setup-tools/kubeadm/kubeadm-init/#self-hosting) for more details. 2. If invoked with `—features-gates=StoreCertsInSecrets` following additional steps will be executed @@ -489,7 +489,7 @@ Please note that: - Creation of `schedler.conf` and `controller-manager.conf` secrets in`kube-system` namespace with respective kubeconfig files - Mutation of all the Pod specs by replacing host path volumes with projected volumes from the secrets above -3. This phase can be invoked individually with the [`kubeadm alpha phase selfhosting convert-from-staticpods`](kubeadm-alpha.md/#cmd-phase-self-hosting) command. +3. This phase can be invoked individually with the [`kubeadm alpha phase selfhosting convert-from-staticpods`](/docs/reference/setup-tools/kubeadm/kubeadm-alpha/#cmd-phase-self-hosting) command. ## kubeadm join phases internal design @@ -542,7 +542,7 @@ Please note that: If `kubeadm join` is invoked with `--discovery-file`, file discovery is used; this file can be a local file or downloaded via an HTTPS URL; in case of HTTPS, the host installed CA bundle is used to verify the connection. With file discovery, the cluster CA certificates is provided into the file itself; in fact, the discovery file is a kubeconfig -file with only `server` and `certificate-authority-data` attributes set, as described in [`kubeadm join`](/kubeadm-join.md/#file-or-https-based-discovery) reference doc; +file with only `server` and `certificate-authority-data` attributes set, as described in [`kubeadm join`](/docs/reference/setup-tools/kubeadm/kubeadm-join/#file-or-https-based-discovery) reference doc; when the connection with the cluster is established, kubeadm try to access the `cluster-info` ConfigMap, and if available, uses it. ## TLS Bootstrap diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md index a667744370dd0..ae8afd71dff1a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-alpha.md @@ -13,20 +13,9 @@ weight: 90 In v1.8.0, kubeadm introduced the `kubeadm alpha phase` command with the aim of making kubeadm more modular. This modularity enables you to invoke atomic sub-steps of the bootstrap process; you can let kubeadm do some parts and fill in yourself where you need customizations. 
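For instance, a subset of the bootstrap could be driven by hand with the phase commands documented below; this is only a sketch and assumes the kubeadm defaults are acceptable for your environment:

```shell
# Run individual bootstrap steps instead of a full `kubeadm init`.
kubeadm alpha phase certs all
kubeadm alpha phase kubeconfig all
kubeadm alpha phase controlplane all
kubeadm alpha phase mark-master
```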
-`kubeadm alpha phase` is consistent with [kubeadm init workflow](kubeadm-init.md#init-workflow), +`kubeadm alpha phase` is consistent with [kubeadm init workflow](/docs/reference/setup-tools/kubeadm/kubeadm-init/#init-workflow), and behind the scene both use the same code. -* [kubeadm alpha phase preflight](#cmd-phase-preflight) -* [kubeadm alpha phase certs](#cmd-phase-certs) -* [kubeadm alpha phase kubeconfig](#cmd-phase-kubeconfig) -* [kubeadm alpha phase controlplane](#cmd-phase-controlplane) -* [kubeadm alpha phase etcd](#cmd-phase-etcd) -* [kubeadm alpha phase mark-master](#cmd-phase-mark-master) -* [kubeadm alpha phase bootstrap-token](#cmd-phase-bootstrap-token) -* [kubeadm alpha phase upload-config](#cmd-phase-upload-config) -* [kubeadm alpha phase addon](#cmd-phase-addon) -* [kubeadm alpha phase selfhosting](#cmd-phase-self-hosting) - ## kubeadm alpha phase preflight {#cmd-phase-preflight} You can execute preflight checks both for the master node, like in `kubeadm init`, or for the worker node @@ -117,7 +106,7 @@ or selectively configure single elements. ## kubeadm alpha phase upload-config {#cmd-phase-upload-config} You can use this command to upload the kubeadm configuration to your cluster. -Alternatively, you can use [kubeadm config](kubeadm-config.md). +Alternatively, you can use [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/). {{< tabs name="upload-config" >}} {{< tab name="mark-master" include="generated/kubeadm_alpha_phase_upload-config.md" />}} @@ -143,7 +132,7 @@ install them selectively. ## kubeadm alpha phase self-hosting {#cmd-phase-self-hosting} {{< caution >}} -**Caution:** Self-hosting is an alpha feature. See [kubeadm init](kubeadm-init.md) documentation for self-hosting limitations. +**Caution:** Self-hosting is an alpha feature. See [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) documentation for self-hosting limitations. {{< /caution >}} {{< tabs name="tab-self-hosting" >}} @@ -152,6 +141,6 @@ install them selectively. ## What's next -* [kubeadm init](kubeadm-init.md) to bootstrap a Kubernetes master node -* [kubeadm join](kubeadm-join.md) to connect a node to the cluster -* [kubeadm reset](kubeadm-reset.md) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes master node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to connect a node to the cluster +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md index 7c053943f9bb4..3d095830c36df 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-config.md @@ -30,5 +30,5 @@ may use `kubeadm upgrade`. 
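A sketch of that upload, assuming the original configuration was kept in a file (the path and the use of a `--config` flag here are illustrative, not prescriptive):

```shell
# Hypothetical path; upload the original kubeadm configuration so that
# `kubeadm upgrade` can later read it from the cluster.
kubeadm config upload from-file --config=/etc/kubernetes/kubeadm-config.yaml
```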
{{% /capture %}} {{% capture whatsnext %}} -* [kubeadm upgrade](kubeadm-upgrade.md) to upgrade a Kubernetes cluster to a newer version +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade/) to upgrade a Kubernetes cluster to a newer version {{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md index 83789810186bd..863d0c7d613c8 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-join.md @@ -232,7 +232,7 @@ discoveryTokenUnsafeSkipCAVerification: {{% /capture %}} {{% capture whatsnext %}} -* [kubeadm init](kubeadm-init.md) to bootstrap a Kubernetes master node -* [kubeadm token](kubeadm-token.md) to manage tokens for `kubeadm join` -* [kubeadm reset](kubeadm-reset.md) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes master node +* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token/) to manage tokens for `kubeadm join` +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` {{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md index d751ab10a2fac..611c722666437 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-reset.md @@ -28,6 +28,6 @@ See the [etcd documentation](https://github.com/coreos/etcd/tree/master/etcdctl) {{% /capture %}} {{% capture whatsnext %}} -* [kubeadm init](kubeadm-init.md) to bootstrap a Kubernetes master node -* [kubeadm join](kubeadm-join.md) to bootstrap a Kubernetes worker node and join it to the cluster +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init/) to bootstrap a Kubernetes master node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster {{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md index 8c9cfeddad64a..8c8e94efbb17a 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-token.md @@ -32,5 +32,5 @@ such a token and also to create and manage new ones. {{% /capture %}} {{% capture whatsnext %}} -* [kubeadm join](kubeadm-join.md) to bootstrap a Kubernetes worker node and join it to the cluster +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster {{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md index a95c65abdc404..29b56605edc28 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md @@ -42,5 +42,5 @@ applied to static pod manifests. 
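For reference, a typical invocation might look like the sketch below; the target version is only an example:

```shell
# Show the available upgrade targets, then upgrade the control plane to one of them.
kubeadm upgrade plan
kubeadm upgrade apply v1.10.3
```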
{{% /capture %}} {{% capture whatsnext %}} -* [kubeadm config](kubeadm-config.md) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config/) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` {{% /capture %}} diff --git a/content/en/docs/reference/setup-tools/kubeadm/kubeadm.md b/content/en/docs/reference/setup-tools/kubeadm/kubeadm.md index 3a3c2a3edc5bd..c680eb5c408cf 100644 --- a/content/en/docs/reference/setup-tools/kubeadm/kubeadm.md +++ b/content/en/docs/reference/setup-tools/kubeadm/kubeadm.md @@ -14,9 +14,11 @@ Instead, we expect higher-level and more tailored tooling to be built on top of ## What's next -* [kubeadm init](../kubeadm-init/) to bootstrap a Kubernetes master node -* [kubeadm join](../kubeadm-join/) to bootstrap a Kubernetes worker node and join it to the cluster -* [kubeadm upgrade](../kubeadm-upgrade/) to upgrade a Kubernetes cluster to a newer version -* [kubeadm config](../kubeadm-config/) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` -* [kubeadm token](../kubeadm-token/) to manage tokens for `kubeadm join` -* [kubeadm reset](../kubeadm-reset/) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm init](/docs/reference/setup-tools/kubeadm/kubeadm-init) to bootstrap a Kubernetes master node +* [kubeadm join](/docs/reference/setup-tools/kubeadm/kubeadm-join) to bootstrap a Kubernetes worker node and join it to the cluster +* [kubeadm upgrade](/docs/reference/setup-tools/kubeadm/kubeadm-upgrade) to upgrade a Kubernetes cluster to a newer version +* [kubeadm config](/docs/reference/setup-tools/kubeadm/kubeadm-config) if you initialized your cluster using kubeadm v1.7.x or lower, to configure your cluster for `kubeadm upgrade` +* [kubeadm token](/docs/reference/setup-tools/kubeadm/kubeadm-token) to manage tokens for `kubeadm join` +* [kubeadm reset](/docs/reference/setup-tools/kubeadm/kubeadm-reset) to revert any changes made to this host by `kubeadm init` or `kubeadm join` +* [kubeadm version](/docs/reference/setup-tools/kubeadm/kubeadm-version) to print the kubeadm version +* [kubeadm alpha](/docs/reference/setup-tools/kubeadm/kubeadm-alpha) to preview a set of features made available for gathering feedback from the community diff --git a/content/en/docs/reference/using-api/api-overview.md b/content/en/docs/reference/using-api/api-overview.md index 27e0275c443a1..e20c7aaf7bcb6 100644 --- a/content/en/docs/reference/using-api/api-overview.md +++ b/content/en/docs/reference/using-api/api-overview.md @@ -23,7 +23,7 @@ Most operations can be performed through the command-line tools, such as [kubeadm](/docs/reference/setup-tools/kubeadm/kubeadm/), which in turn use the API. However, you can also access the API directly using REST calls. -Consider using one of the [client libraries](/docs/reference/client-libraries/) +Consider using one of the [client libraries](/docs/reference/using-api/client-libraries/) if you are writing an application using the Kubernetes API. 
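If you just want to explore the REST endpoints directly, a quick sketch is to run a local proxy and issue plain HTTP calls against it:

```shell
# kubectl handles authentication and exposes the API server on localhost.
kubectl proxy --port=8080 &
curl http://localhost:8080/api/v1/namespaces/default/pods
```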
## API versioning diff --git a/content/en/docs/reference/using-api/client-libraries.md b/content/en/docs/reference/using-api/client-libraries.md index 00902f55ad02b..1cd609a2b4a18 100644 --- a/content/en/docs/reference/using-api/client-libraries.md +++ b/content/en/docs/reference/using-api/client-libraries.md @@ -12,7 +12,7 @@ API from various programming languages. {{% /capture %}} {{% capture body %}} -To write applications using the [Kubernetes REST API](/docs/reference/api-overview/), +To write applications using the [Kubernetes REST API](/docs/reference/using-api/api-overview/), you do not need to implement the API calls and request/response types yourself. You can use a client library for the programming language you are using. diff --git a/content/en/docs/reference/using-api/deprecation-policy.md b/content/en/docs/reference/using-api/deprecation-policy.md index 8e8e2e105295f..2bf41f3b4e127 100644 --- a/content/en/docs/reference/using-api/deprecation-policy.md +++ b/content/en/docs/reference/using-api/deprecation-policy.md @@ -20,7 +20,7 @@ This document details the deprecation policy for various facets of the system. Since Kubernetes is an API-driven system, the API has evolved over time to reflect the evolving understanding of the problem space. The Kubernetes API is actually a set of APIs, called "API groups", and each API group is -independently versioned. [API versions](/docs/reference/api-overview/#api-versioning) fall +independently versioned. [API versions](/docs/reference/using-api/api-overview/#api-versioning) fall into 3 main tracks, each of which has different policies for deprecation: | Example | Track | @@ -37,6 +37,8 @@ includes: * REST resources (aka API objects) * Fields of REST resources + * Annotations on REST resources, including "beta" annotations but not + including "alpha" annotations. * Enumerated or constant values * Component config structures @@ -75,15 +77,20 @@ versions. Beta API versions *may not* replace GA API versions. versions must be supported after their announced deprecation for a duration of no less than:** - * **GA: 1 year or 2 releases (whichever is longer)** - * **Beta: 6 months or 2 releases (whichever is longer)** + * **GA: 12 months or 3 releases (whichever is longer)** + * **Beta: 9 months or 3 releases (whichever is longer)** * **Alpha: 0 releases** +This covers the maximum supported version skew of 2 releases. + NOTE: Until [#52185](https://github.com/kubernetes/kubernetes/issues/52185) is -resolved, no API versions may be removed. +resolved, no API versions that have been persisted to storage may be removed. +Serving REST endpoints for those versions may be disabled (subject to the +deprecation timelines in this document), but the API server must remain capable +of decoding/converting previously persisted data from storage. **Rule #4b: The "preferred" API version and the "storage version" for a given -group may not advance util after a release has been made that supports both the +group may not advance until after a release has been made that supports both the new version and the previous version** Users must be able to upgrade to a new release of Kubernetes and then roll back @@ -151,11 +158,10 @@ API versions are supported in a series of subsequent releases. X+5 - v1, v1beta2 (deprecated) + v1, v1beta1 (deprecated), v1beta2 (deprecated) v1beta2 @@ -166,12 +172,19 @@ API versions are supported in a series of subsequent releases. 
v1 X+7 + v1, v1beta2 (deprecated) v1 + + + + X+8 + v2alpha1, v1 v1 - - X+8 - v2alpha1, v1 - v1 - - X+9 v2alpha2, v1 @@ -228,6 +235,12 @@ API versions are supported in a series of subsequent releases. X+13 + v2, v2beta1 (deprecated), v2beta2 (deprecated), v1 (deprecated) + v2 + + + + X+14 v2, v2beta2 (deprecated), v1 (deprecated) v2 @@ -237,7 +250,7 @@ API versions are supported in a series of subsequent releases. - X+14 + X+15 v2, v1 (deprecated) v2 @@ -246,12 +259,6 @@ API versions are supported in a series of subsequent releases. - - X+15 - v2, v1 (deprecated) - v2 - - X+16 v2, v1 (deprecated) @@ -322,7 +329,7 @@ follows: **Rule #5a: CLI elements of user-facing components (e.g. kubectl) must function after their announced deprecation for no less than:** - * **GA: 1 year or 2 releases (whichever is longer)** + * **GA: 12 months or 2 releases (whichever is longer)** * **Beta: 3 months or 1 release (whichever is longer)** * **Alpha: 0 releases** diff --git a/content/en/docs/setup/independent/create-cluster-kubeadm.md b/content/en/docs/setup/independent/create-cluster-kubeadm.md index ef8d5d35cb0a2..b7ded03deda8e 100644 --- a/content/en/docs/setup/independent/create-cluster-kubeadm.md +++ b/content/en/docs/setup/independent/create-cluster-kubeadm.md @@ -86,6 +86,15 @@ timeframe; which also applies to `kubeadm`. 1. 2 CPUs or more on the master 1. Full network connectivity between all machines in the cluster (public or private network is fine) + +{{< note >}} +**Note:** This guide results in a Kubernetes cluster with one master and a +number of nodes that you decide. A single master is not highly available. If +you want to set up a multi-master cluster for high availability, you can +follow +[this guide instead](https://kubernetes.io/docs/setup/independent/high-availability/). +{{< /note >}} + {{% /capture %}} {{% capture steps %}} @@ -249,7 +258,7 @@ kubectl apply -f Please select one of the tabs to see installation instructions for the respective third-party Pod Network Provider. {{% /tab %}} {{% tab name="Calico" %}} -Refer to the Calico documentation for a [kubeadm quickstart](https://docs.projectcalico.org/latest/getting-started/kubernetes/), a [kubeadm installation guide](http://docs.projectcalico.org/latest/getting-started/kubernetes/installation/hosted/kubeadm/), and other resources. +For more information about using Calico, see [Quickstart for Calico on Kubernetes](https://docs.projectcalico.org/latest/getting-started/kubernetes/), [Installing Calico for policy and networking](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico), and other related resources. **Note:** @@ -257,11 +266,12 @@ Refer to the Calico documentation for a [kubeadm quickstart](https://docs.projec - Calico works on `amd64` only. ```shell -kubectl apply -f https://docs.projectcalico.org/v3.0/getting-started/kubernetes/installation/hosted/kubeadm/1.7/calico.yaml +kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml +kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml ``` {{% /tab %}} {{% tab name="Canal" %}} -The official Canal set-up guide is [here](https://github.com/projectcalico/canal/tree/master/k8s-install). +Canal uses Calico for policy and Flannel for networking. 
Refer to the Calico documentation for the [official getting started guide](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/flannel). **Note:** @@ -269,8 +279,8 @@ The official Canal set-up guide is [here](https://github.com/projectcalico/canal - Canal works on `amd64` only. ```shell -kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.7/rbac.yaml -kubectl apply -f https://raw.githubusercontent.com/projectcalico/canal/master/k8s-install/1.7/canal.yaml +kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/rbac.yaml +kubectl apply -f https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/hosted/canal/canal.yaml ``` {{% /tab %}} {{% tab name="Flannel" %}} @@ -325,9 +335,7 @@ Weave Net sets hairpin mode by default. This allows Pods to access themselves vi if they don't know their PodIP. ```shell -export kubever=$(kubectl version | base64 | tr -d ' -') -kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever" +kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" ``` {{% /tab %}} {{< /tabs >}} diff --git a/content/en/docs/setup/independent/high-availability.md b/content/en/docs/setup/independent/high-availability.md index 22200fc2f6f5f..6eda806baabb9 100644 --- a/content/en/docs/setup/independent/high-availability.md +++ b/content/en/docs/setup/independent/high-availability.md @@ -55,102 +55,98 @@ For **Option 2**: you can skip to the next step. Any reference to `etcd0`, `etcd 1. Install `cfssl` and `cfssljson`: - ```shell - curl -o /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 - curl -o /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 - chmod +x /usr/local/bin/cfssl* - ``` + ```bash + curl -o /usr/local/bin/cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 + curl -o /usr/local/bin/cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 + chmod +x /usr/local/bin/cfssl* + ``` 1. SSH into `etcd0` and run the following: - ```shell - mkdir -p /etc/kubernetes/pki/etcd - cd /etc/kubernetes/pki/etcd - ``` - ```shell - cat >ca-config.json <ca-csr.json <}} - **Optional:** You can modify `ca-csr.json` to add a section for `names`. - See [the CFSSL wiki](https://github.com/cloudflare/cfssl/wiki/Creating-a-new-CSR) for an example. - {{< /note >}} - -1. Next, generate the CA certs like so: - - ```shell - cfssl gencert -initca ca-csr.json | cfssljson -bare ca - - ``` - -### Generate etcd client certs + ```bash + mkdir -p /etc/kubernetes/pki/etcd + cd /etc/kubernetes/pki/etcd + + cat >ca-config.json <ca-csr.json <}} + **Optional:** You can modify `ca-csr.json` to add a section for `names`. + See [the CFSSL wiki](https://github.com/cloudflare/cfssl/wiki/Creating-a-new-CSR) for an example. + {{< /note >}} - While on `etcd0`, run the following: +1. Next, generate the CA certs: + ```bash + cfssl gencert -initca ca-csr.json | cfssljson -bare ca - + ``` - ```shell - cat >client.json <client.json <" + ``` - ```shell - ssh-keygen -t rsa -b 4096 -C "" - ``` + Make sure to replace `` with your email, a placeholder, or an empty string. Keep hitting enter until files exist in `~/.ssh`. - Make sure to replace `` with your email, a placeholder, or an empty string. Keep hitting enter until files exist in `~/.ssh`. - -1. Output the contents of the public key file for `etcd1` and `etcd2`, like so: - - ```shell - cat ~/.ssh/id_rsa.pub - ``` +1. 
Output the contents of the public key file for `etcd1` and `etcd2`: + ```bash + cat ~/.ssh/id_rsa.pub + ``` 1. Finally, copy the output for each and paste them into `etcd0`'s `~/.ssh/authorized_keys` file. This will permit `etcd1` and `etcd2` to SSH in to the machine. ### Generate etcd server and peer certs 1. In order to generate certs, each etcd machine needs the root CA generated by `etcd0`. On `etcd1` and `etcd2`, run the following: - - ```shell - mkdir -p /etc/kubernetes/pki/etcd - cd /etc/kubernetes/pki/etcd - scp root@:/etc/kubernetes/pki/etcd/ca.pem . - scp root@:/etc/kubernetes/pki/etcd/ca-key.pem . - scp root@:/etc/kubernetes/pki/etcd/client.pem . - scp root@:/etc/kubernetes/pki/etcd/client-key.pem . - scp root@:/etc/kubernetes/pki/etcd/ca-config.json . - ``` - - Where `` corresponds to the public or private IPv4 of `etcd0`. + ```bash + mkdir -p /etc/kubernetes/pki/etcd + cd /etc/kubernetes/pki/etcd + scp root@:/etc/kubernetes/pki/etcd/ca.pem . + scp root@:/etc/kubernetes/pki/etcd/ca-key.pem . + scp root@:/etc/kubernetes/pki/etcd/client.pem . + scp root@:/etc/kubernetes/pki/etcd/client-key.pem . + scp root@:/etc/kubernetes/pki/etcd/ca-config.json . + ``` + + Where `` corresponds to the public or private IPv4 of `etcd0`. 1. Once this is done, run the following on all etcd machines: - - ```shell - cfssl print-defaults csr > config.json - sed -i '0,/CN/{s/example\.net/'"$PEER_NAME"'/}' config.json - sed -i 's/www\.example\.net/'"$PRIVATE_IP"'/' config.json - sed -i 's/example\.net/'"$PEER_NAME"'/' config.json - - cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | cfssljson -bare server - cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer config.json | cfssljson -bare peer - ``` - - The above will replace the default configuration with your machine's hostname as the peer name, and its IP addresses. Make sure - these are correct before generating the certs. If you found an error, reconfigure `config.json` and re-run the `cfssl` commands. - -This will result in the following files: `peer.pem`, `peer-key.pem`, `server.pem`, `server-key.pem`. + ```bash + cfssl print-defaults csr > config.json + sed -i '0,/CN/{s/example\.net/'"$PEER_NAME"'/}' config.json + sed -i 's/www\.example\.net/'"$PRIVATE_IP"'/' config.json + sed -i 's/example\.net/'"$PEER_NAME"'/' config.json + ``` + ```bash + cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | cfssljson -bare server + cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer config.json | cfssljson -bare peer + ``` + + The above will replace the default configuration with your machine's hostname as the peer name, and its IP addresses. Make sure + these are correct before generating the certs. If you found an error, reconfigure `config.json` and re-run the `cfssl` commands. + +This results in the following files: `peer.pem`, `peer-key.pem`, `server.pem`, `server-key.pem`. ### {{< tabs name="etcd_mode" >}} {{% tab name="Choose one..." %}} Please select one of the tabs to see installation instructions for the respective way to run etcd. {{% /tab %}} {{% tab name="systemd" %}} -1. First you will install etcd binaries like so: +1. 
First, install etcd binaries: + ```bash + ETCD_VERSION="v3.1.12" curl -sSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xzv --strip-components=1 -C /usr/local/bin/ + ``` - ```shell - ETCD_VERSION="v3.1.12"; curl -sSL https://github.com/coreos/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz | tar -xzv --strip-components=1 -C /usr/local/bin/ - ``` + It is worth noting that etcd v3.1.12 is the preferred version for Kubernetes v1.10. For other versions of Kubernetes please consult [the changelog](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md). - It is worth noting that etcd v3.1.12 is the preferred version for Kubernetes v1.10. For other versions of Kubernetes please consult [the changelog](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md). - - Also, please realize that most distributions of Linux already have a version of etcd installed, so you will be replacing the system default. + Also, please realize that most distributions of Linux already have a version of etcd installed, so you will be replacing the system default. 1. Next, generate the environment file that systemd will use: - - ``` - touch /etc/etcd.env - echo "PEER_NAME=${PEER_NAME}" >> /etc/etcd.env - echo "PRIVATE_IP=${PRIVATE_IP}" >> /etc/etcd.env - ``` - -1. Now copy the systemd unit file like so: - - ```shell - cat >/etc/systemd/system/etcd.service < --data-dir /var/lib/etcd --listen-client-urls http://localhost:2379 --advertise-client-urls http://localhost:2379 --listen-peer-urls http://localhost:2380 --initial-advertise-peer-urls http://localhost:2380 --cert-file=/etc/kubernetes/pki/etcd/server.pem --key-file=/etc/kubernetes/pki/etcd/server-key.pem --client-cert-auth --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem --peer-cert-file=/etc/kubernetes/pki/etcd/peer.pem --peer-key-file=/etc/kubernetes/pki/etcd/peer-key.pem --peer-client-cert-auth --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem --initial-cluster =https://:2380,=https://:2380,=https://:2380 --initial-cluster-token my-etcd-token --initial-cluster-state new - - [Install] - WantedBy=multi-user.target - EOF - ``` - - Make sure you replace ``, `` and `` with the appropriate IPv4 addresses. Replace `` with the name of this etcd member. Modify the values of `--listen-client-urls`, `--advertise-client-urls`, `--listen-peer-urls` and `--initial-advertise-peer-urls` if needed. Replace ``, `` and `` with real hostnames of each machine. These machines must be able to reach every other using DNS or make sure that records are added to `/etc/hosts`. - -1. Finally, launch etcd like so: - - ```shell - systemctl daemon-reload - systemctl start etcd - ``` + ```bash + touch /etc/etcd.env + echo "PEER_NAME=${PEER_NAME}" >> /etc/etcd.env + echo "PRIVATE_IP=${PRIVATE_IP}" >> /etc/etcd.env + ``` + +1. 
Now copy the systemd unit file: + ```none + cat >/etc/systemd/system/etcd.service < --data-dir /var/lib/etcd --listen-client-urls http://localhost:2379 --advertise-client-urls http://localhost:2379 --listen-peer-urls http://localhost:2380 --initial-advertise-peer-urls http://localhost:2380 --cert-file=/etc/kubernetes/pki/etcd/server.pem --key-file=/etc/kubernetes/pki/etcd/server-key.pem --client-cert-auth --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem --peer-cert-file=/etc/kubernetes/pki/etcd/peer.pem --peer-key-file=/etc/kubernetes/pki/etcd/peer-key.pem --peer-client-cert-auth --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem --initial-cluster =https://:2380,=https://:2380,=https://:2380 --initial-cluster-token my-etcd-token --initial-cluster-state new + + [Install] + WantedBy=multi-user.target + EOF + ``` + + Make sure you replace ``, `` and `` with the appropriate IPv4 addresses. Replace `` with the name of this etcd member. Modify the values of `--listen-client-urls`, `--advertise-client-urls`, `--listen-peer-urls` and `--initial-advertise-peer-urls` if needed. Replace ``, `` and `` with real hostnames of each machine. These machines must be able to reach every other using DNS or make sure that records are added to `/etc/hosts`. + +1. Finally, launch etcd: + ```bash + systemctl daemon-reload + systemctl start etcd + ``` 1. Check that it launched successfully: + ```bash + systemctl status etcd + ``` - ```shell - systemctl status etcd - ``` {{% /tab %}} {{% tab name="Static Pods" %}} **Note**: This is only supported on nodes that have the all dependencies for the kubelet installed. If you are hosting etcd on the master nodes, this has already been set up. If you are hosting etcd on dedicated nodes, you should either use systemd or run the [installation guide](/docs/setup/independent/install-kubeadm/) on each dedicated etcd machine. -1. The first step is to run the following to generate the manifest file: - - ```shell - cat >/etc/kubernetes/manifests/etcd.yaml < - namespace: kube-system - spec: - containers: - - command: - - etcd --name - --data-dir /var/lib/etcd - --listen-client-urls http://localhost:2379 - --advertise-client-urls http://localhost:2379 - --listen-peer-urls http://localhost:2380 - --initial-advertise-peer-urls http://localhost:2380 - --cert-file=/certs/server.pem - --key-file=/certs/server-key.pem - --client-cert-auth - --trusted-ca-file=/certs/ca.pem - --peer-cert-file=/certs/peer.pem - --peer-key-file=/certs/peer-key.pem - --peer-client-cert-auth - --peer-trusted-ca-file=/certs/ca.pem - --initial-cluster etcd0=https://:2380,etcd1=https://:2380,etcd2=https://:2380 - --initial-cluster-token my-etcd-token - --initial-cluster-state new - image: k8s.gcr.io/etcd-amd64:3.1.10 - livenessProbe: - httpGet: - path: /health - port: 2379 - scheme: HTTP - initialDelaySeconds: 15 - timeoutSeconds: 15 - name: etcd - env: - - name: PUBLIC_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PRIVATE_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: PEER_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - mountPath: /var/lib/etcd - name: etcd - - mountPath: /certs - name: certs - hostNetwork: true - volumes: - - hostPath: - path: /var/lib/etcd - type: DirectoryOrCreate - name: etcd - - hostPath: - path: /etc/kubernetes/pki/etcd - name: certs - EOF - ``` - - Make sure you replace: - * `` with the name of the node you're running on (e.g. 
`etcd0`, `etcd1` or `etcd2`) - * ``, `` and `` with the public IPv4s of the other machines that host etcd. +Run the following to generate the manifest file: + + + cat >/etc/kubernetes/manifests/etcd.yaml < + namespace: kube-system + spec: + containers: + - command: + - etcd --name + - --data-dir /var/lib/etcd + - --listen-client-urls http://localhost:2379 + - --advertise-client-urls http://localhost:2379 + - --listen-peer-urls http://localhost:2380 + - --initial-advertise-peer-urls http://localhost:2380 + - --cert-file=/certs/server.pem + - --key-file=/certs/server-key.pem + - --client-cert-auth + - --trusted-ca-file=/certs/ca.pem + - --peer-cert-file=/certs/peer.pem + - --peer-key-file=/certs/peer-key.pem + - --peer-client-cert-auth + - --peer-trusted-ca-file=/certs/ca.pem + - --initial-cluster etcd0=https://:2380,etcd1=https://:2380,etcd2=https://:2380 + - --initial-cluster-token my-etcd-token + - --initial-cluster-state new + image: k8s.gcr.io/etcd-amd64:3.1.10 + livenessProbe: + httpGet: + path: /health + port: 2379 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: etcd + env: + - name: PUBLIC_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PRIVATE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: PEER_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - mountPath: /var/lib/etcd + name: etcd + - mountPath: /certs + name: certs + hostNetwork: true + volumes: + - hostPath: + path: /var/lib/etcd + type: DirectoryOrCreate + name: etcd + - hostPath: + path: /etc/kubernetes/pki/etcd + name: certs + EOF + +Make sure you replace: +* `` with the name of the node you're running on (e.g. `etcd0`, `etcd1` or `etcd2`) +* ``, `` and `` with the public IPv4s of the other machines that host etcd. {{% /tab %}} {{< /tabs >}} @@ -365,22 +369,21 @@ As an example we outline a simple setup based on keepalived. Depending on enviro 1. Install keepalived, e.g. using your distribution's package manager. The configuration shown here works with version `1.3.5` but is expected to work with may other versions. Make sure to have it enabled (chkconfig, systemd, ...) so that it starts automatically when the respective node comes up. 2. Create the following configuration file _/etc/keepalived/keepalived.conf_ on all master nodes: - - ```shell - ! Configuration File for keepalived - global_defs { + ```none + ! Configuration File for keepalived + global_defs { router_id LVS_DEVEL - } + } - vrrp_script check_apiserver { + vrrp_script check_apiserver { script "/etc/keepalived/check_apiserver.sh" interval 3 weight -2 fall 10 rise 2 - } + } - vrrp_instance VI_1 { + vrrp_instance VI_1 { state interface virtual_router_id 51 @@ -395,8 +398,8 @@ As an example we outline a simple setup based on keepalived. Depending on enviro track_script { check_apiserver } - } - ``` + } + ``` In the section `vrrp_instance VI_1`, change few lines depending on your setup: @@ -407,20 +410,19 @@ As an example we outline a simple setup based on keepalived. Depending on enviro * `virtual_ipaddresses` should contain the virtual IP for the master nodes. 3. 
Install the following health check script to _/etc/keepalived/check_apiserver.sh_ on all master nodes: + ```bash + #!/bin/sh - ```shell - #!/bin/sh - - errorExit() { + errorExit() { echo "*** $*" 1>&2 exit 1 - } + } - curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/" - if ip addr | grep -q ; then + curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/" + if ip addr | grep -q ; then curl --silent --max-time 2 --insecure https://:6443/ -o /dev/null || errorExit "Error GET https://:6443/" - fi - ``` + fi + ``` Replace the `` by your chosen virtual IP. @@ -436,55 +438,52 @@ Only follow this step if your etcd is hosted on dedicated nodes (**Option 1**). 1. Generate SSH keys for each of the master nodes by following the steps in the [create ssh access](#create-ssh-access) section. After doing this, each master will have an SSH key in `~/.ssh/id_rsa.pub` and an entry in `etcd0`'s `~/.ssh/authorized_keys` file. 1. Run the following: - - ```shell - mkdir -p /etc/kubernetes/pki/etcd - scp root@:/etc/kubernetes/pki/etcd/ca.pem /etc/kubernetes/pki/etcd - scp root@:/etc/kubernetes/pki/etcd/client.pem /etc/kubernetes/pki/etcd - scp root@:/etc/kubernetes/pki/etcd/client-key.pem /etc/kubernetes/pki/etcd - ``` + ```bash + mkdir -p /etc/kubernetes/pki/etcd + scp root@:/etc/kubernetes/pki/etcd/ca.pem /etc/kubernetes/pki/etcd + scp root@:/etc/kubernetes/pki/etcd/client.pem /etc/kubernetes/pki/etcd + scp root@:/etc/kubernetes/pki/etcd/client-key.pem /etc/kubernetes/pki/etcd + ``` ## Run `kubeadm init` on `master0` {#kubeadm-init-master0} 1. In order for kubeadm to run, you first need to write a configuration file: - - ```shell - cat >config.yaml < - etcd: - endpoints: - - https://:2379 - - https://:2379 - - https://:2379 - caFile: /etc/kubernetes/pki/etcd/ca.pem - certFile: /etc/kubernetes/pki/etcd/client.pem - keyFile: /etc/kubernetes/pki/etcd/client-key.pem - networking: - podSubnet: - apiServerCertSANs: - - - apiServerExtraArgs: - apiserver-count: "3" - EOF - ``` - - Ensure that the following placeholders are replaced: - - - `` with the private IPv4 of the master server. - - ``, `` and `` with the IP addresses of your three etcd nodes - - `` with your Pod CIDR. Please read the [CNI network section](/docs/setup/independent/create-cluster-kubeadm/#pod-network) of the docs for more information. Some CNI providers do not require a value to be set. - - `` with the virtual IP set up in the load balancer. Please read [setting up a master load balancer](/docs/setup/independent/high-availability/#set-up-master-load-balancer) section of the docs for more information. - - **Note:** If you are using Kubernetes 1.9+, you can replace the `apiserver-count: 3` extra argument with `endpoint-reconciler-type: lease`. For more information, see [the documentation](/docs/admin/high-availability/#endpoint-reconciler). - -1. When this is done, run kubeadm like so: - - ```shell - kubeadm init --config=config.yaml - ``` + ```bash + cat >config.yaml < + etcd: + endpoints: + - https://:2379 + - https://:2379 + - https://:2379 + caFile: /etc/kubernetes/pki/etcd/ca.pem + certFile: /etc/kubernetes/pki/etcd/client.pem + keyFile: /etc/kubernetes/pki/etcd/client-key.pem + networking: + podSubnet: + apiServerCertSANs: + - + apiServerExtraArgs: + apiserver-count: "3" + EOF + ``` + + Ensure that the following placeholders are replaced: + + - `` with the private IPv4 of the master server. 
+ - ``, `` and `` with the IP addresses of your three etcd nodes + - `` with your Pod CIDR. Please read the [CNI network section](/docs/setup/independent/create-cluster-kubeadm/#pod-network) of the docs for more information. Some CNI providers do not require a value to be set. + - `` with the virtual IP set up in the load balancer. Please read [setting up a master load balancer](/docs/setup/independent/high-availability/#set-up-master-load-balancer) section of the docs for more information. + + **Note:** If you are using Kubernetes 1.9+, you can replace the `apiserver-count: 3` extra argument with `endpoint-reconciler-type: lease`. For more information, see [the documentation](/docs/admin/high-availability/#endpoint-reconciler). + +1. When this is done, run kubeadm: + ```bash + kubeadm init --config=config.yaml + ``` ## Run `kubeadm init` on `master1` and `master2` @@ -494,15 +493,14 @@ Before running kubeadm on the other masters, you need to first copy the K8s CA c 1. Follow the steps in the [create ssh access](#create-ssh-access) section, but instead of adding to `etcd0`'s `authorized_keys` file, add them to `master0`. 1. Once you've done this, run: - - ```shell - scp root@:/etc/kubernetes/pki/* /etc/kubernetes/pki - rm apiserver.* - ``` + ```bash + scp root@:/etc/kubernetes/pki/* /etc/kubernetes/pki + rm apiserver.* + ``` #### Option 2: Copy paste -1. Copy the contents of `/etc/kubernetes/pki/ca.crt`, `/etc/kubernetes/pki/ca.key`, `/etc/kubernetes/pki/sa.key` and `/etc/kubernetes/pki/sa.pub` and create these files manually on `master1` and `master2`. +Copy the contents of `/etc/kubernetes/pki/ca.crt`, `/etc/kubernetes/pki/ca.key`, `/etc/kubernetes/pki/sa.key` and `/etc/kubernetes/pki/sa.pub` and create these files manually on `master1` and `master2`. When this is done, you can follow the [previous step](#kubeadm-init-master0) to install the control plane with kubeadm. @@ -523,22 +521,16 @@ Next provision and set up the worker nodes. To do this, you will need to provisi ## Configure workers 1. Reconfigure kube-proxy to access kube-apiserver via the load balancer: - - ```shell - kubectl get configmap -n kube-system kube-proxy -o yaml > kube-proxy-cm.yaml - sed -i 's#server:.*#server: https://:6443#g' kube-proxy-cm.yaml - kubectl apply -f kube-proxy-cm.yaml --force - # restart all kube-proxy pods to ensure that they load the new configmap - kubectl delete pod -n kube-system -l k8s-app=kube-proxy - ``` + ```bash + kubectl get configmap -n kube-system kube-proxy -o yaml > kube-proxy-cm.yaml + sed -i 's#server:.*#server: https://:6443#g' kube-proxy-cm.yaml + kubectl apply -f kube-proxy-cm.yaml --force + # restart all kube-proxy pods to ensure that they load the new configmap + kubectl delete pod -n kube-system -l k8s-app=kube-proxy + ``` 1. Reconfigure the kubelet to access kube-apiserver via the load balancer: - - ```shell - sudo sed -i 's#server:.*#server: https://:6443#g' /etc/kubernetes/kubelet.conf - sudo systemctl restart kubelet - ``` - - - - + ```bash + sudo sed -i 's#server:.*#server: https://:6443#g' /etc/kubernetes/kubelet.conf + sudo systemctl restart kubelet + ``` diff --git a/content/en/docs/setup/pick-right-solution.md b/content/en/docs/setup/pick-right-solution.md index 3a1337e8d75d4..b820560aeb2e7 100644 --- a/content/en/docs/setup/pick-right-solution.md +++ b/content/en/docs/setup/pick-right-solution.md @@ -28,7 +28,7 @@ a Kubernetes cluster from scratch. 
* [Minikube](/docs/getting-started-guides/minikube/) is the recommended method for creating a local, single-node Kubernetes cluster for development and testing. Setup is completely automated and doesn't require a cloud provider account. -* [Kubeadm-dind](https://github.com/Mirantis/kubeadm-dind-cluster) is a multi-node (while minikube is single-node) Kubernetes cluster which only requires a docker daemon. It uses docker-in-docker technique to spawn the Kubernetes cluster. +* [Kubeadm-dind](https://github.com/kubernetes-sigs/kubeadm-dind-cluster) is a multi-node (while minikube is single-node) Kubernetes cluster which only requires a docker daemon. It uses docker-in-docker technique to spawn the Kubernetes cluster. * [Ubuntu on LXD](/docs/getting-started-guides/ubuntu/local/) supports a nine-instance deployment on localhost. @@ -64,6 +64,8 @@ a Kubernetes cluster from scratch. * [Pivotal Container Service](https://pivotal.io/platform/pivotal-container-service) provides enterprise-grade Kubernetes for both on-premises and public clouds. PKS enables on-demand provisioning of Kubernetes clusters, multi-tenancy and fully automated day-2 operations. +* [Oracle Container Engine for Kubernetes](https://docs.us-phoenix-1.oraclecloud.com/Content/ContEng/Concepts/contengoverview.htm) is a fully-managed, scalable, and highly available service that you can use to deploy your containerized applications to the cloud. + # Turnkey Cloud Solutions These solutions allow you to create Kubernetes clusters on a range of Cloud IaaS providers with only a @@ -80,6 +82,7 @@ few commands. These solutions are actively developed and have active community s * [Madcore.Ai](https://madcore.ai/) * [Kubermatic](https://cloud.kubermatic.io) * [Rancher 2.0](https://rancher.com/docs/rancher/v2.x/en/) +* [Oracle Container Engine for K8s](https://docs.us-phoenix-1.oraclecloud.com/Content/ContEng/Concepts/contengprerequisites.htm) # On-Premises turnkey cloud solutions These solutions allow you to create Kubernetes clusters on your internal, secure, cloud network with only a diff --git a/content/en/docs/tasks/access-application-cluster/access-cluster.md b/content/en/docs/tasks/access-application-cluster/access-cluster.md index 0f6963f207166..062787551d6bf 100644 --- a/content/en/docs/tasks/access-application-cluster/access-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/access-cluster.md @@ -142,7 +142,7 @@ as the kubectl CLI does to locate and authenticate to the apiserver. See this [e #### Other languages -There are [client libraries](/docs/reference/client-libraries/) for accessing the API from other languages. +There are [client libraries](/docs/reference/using-api/client-libraries/) for accessing the API from other languages. See documentation for other libraries for how they authenticate. ### Accessing the API from a Pod diff --git a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md index 8ed6b00027758..9c67c4f628cdb 100644 --- a/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md +++ b/content/en/docs/tasks/access-application-cluster/service-access-application-cluster.md @@ -34,9 +34,9 @@ provides load balancing for an application that has two running instances. ## Creating a service for an application running in two pods 1. 
Run a Hello World application in your cluster: - - kubectl run hello-world --replicas=2 --labels="run=load-balancer-example" --image=gcr.io/google-samples/node-hello:1.0 --port=8080 - + ```shell + kubectl run hello-world --replicas=2 --labels="run=load-balancer-example" --image=gcr.io/google-samples/node-hello:1.0 --port=8080 + ``` The preceding command creates a [Deployment](/docs/concepts/workloads/controllers/deployment/) object and an associated @@ -46,77 +46,81 @@ provides load balancing for an application that has two running instances. each of which runs the Hello World application. 1. Display information about the Deployment: - - kubectl get deployments hello-world - kubectl describe deployments hello-world + ```shell + kubectl get deployments hello-world + kubectl describe deployments hello-world + ``` 1. Display information about your ReplicaSet objects: - - kubectl get replicasets - kubectl describe replicasets + ```shell + kubectl get replicasets + kubectl describe replicasets + ``` 1. Create a Service object that exposes the deployment: - - kubectl expose deployment hello-world --type=NodePort --name=example-service + ```shell + kubectl expose deployment hello-world --type=NodePort --name=example-service + ``` 1. Display information about the Service: - - kubectl describe services example-service - - The output is similar to this: - - Name: example-service - Namespace: default - Labels: run=load-balancer-example - Annotations: - Selector: run=load-balancer-example - Type: NodePort - IP: 10.32.0.16 - Port: 8080/TCP - TargetPort: 8080/TCP - NodePort: 31496/TCP - Endpoints: 10.200.1.4:8080,10.200.2.5:8080 - Session Affinity: None - Events: - - Make a note of the NodePort value for the service. For example, - in the preceding output, the NodePort value is 31496. + ```shell + kubectl describe services example-service + ``` + The output is similar to this: + ```shell + Name: example-service + Namespace: default + Labels: run=load-balancer-example + Annotations: + Selector: run=load-balancer-example + Type: NodePort + IP: 10.32.0.16 + Port: 8080/TCP + TargetPort: 8080/TCP + NodePort: 31496/TCP + Endpoints: 10.200.1.4:8080,10.200.2.5:8080 + Session Affinity: None + Events: + ``` + Make a note of the NodePort value for the service. For example, + in the preceding output, the NodePort value is 31496. 1. List the pods that are running the Hello World application: - - kubectl get pods --selector="run=load-balancer-example" --output=wide - - The output is similar to this: - - NAME READY STATUS ... IP NODE - hello-world-2895499144-bsbk5 1/1 Running ... 10.200.1.4 worker1 - hello-world-2895499144-m1pwt 1/1 Running ... 10.200.2.5 worker2 - + ```shell + kubectl get pods --selector="run=load-balancer-example" --output=wide + ``` + The output is similar to this: + ```shell + NAME READY STATUS ... IP NODE + hello-world-2895499144-bsbk5 1/1 Running ... 10.200.1.4 worker1 + hello-world-2895499144-m1pwt 1/1 Running ... 10.200.2.5 worker2 + ``` 1. Get the public IP address of one of your nodes that is running a Hello World pod. How you get this address depends on how you set up your cluster. For example, if you are using Minikube, you can see the node address by running `kubectl cluster-info`. If you are using Google Compute Engine instances, you can use the `gcloud compute instances list` command to see the public addresses of your - nodes. For more information about this command, see the [GCE documentation](https://cloud.google.com/sdk/gcloud/reference/compute/instances/list). + nodes. 
For more information about this command, see the [GCE documentation](https://cloud.google.com/sdk/gcloud/ +reference/compute/instances/list). 1. On your chosen node, create a firewall rule that allows TCP traffic on your node port. For example, if your Service has a NodePort value of - 31568, create a firewall rule that allows TCP traffic on port 31568. Different - cloud providers offer different ways of configuring firewall rules. See [the - GCE documentation on firewall rules](https://cloud.google.com/compute/docs/vpc/firewalls), + 31568, create a firewall rule that allows TCP traffic on port 31568. Different + cloud providers offer different ways of configuring firewall rules. See [the + GCE documentation on firewall rules](https://cloud.google.com/compute/docs/vpc/firewalls), for example. 1. Use the node address and node port to access the Hello World application: - - curl http://: - - where `` is the public IP address of your node, - and `` is the NodePort value for your service. - - The response to a successful request is a hello message: - - Hello Kubernetes! + ```shell + curl http://: + ``` + where `` is the public IP address of your node, + and `` is the NodePort value for your service. The + response to a successful request is a hello message: + ```shell + Hello Kubernetes! + ``` ## Using a service configuration file @@ -146,6 +150,3 @@ the Hello World application, enter this command: Learn more about [connecting applications with services](/docs/concepts/services-networking/connect-applications-service/). {{% /capture %}} - - - diff --git a/content/en/docs/tasks/administer-cluster/access-cluster-api.md b/content/en/docs/tasks/administer-cluster/access-cluster-api.md index e49d488e2f8fc..e05b459aebd16 100644 --- a/content/en/docs/tasks/administer-cluster/access-cluster-api.md +++ b/content/en/docs/tasks/administer-cluster/access-cluster-api.md @@ -165,7 +165,7 @@ for i in ret.items: #### Other languages -There are [client libraries](/docs/reference/client-libraries/) for accessing the API from other languages. See documentation for other libraries for how they authenticate. +There are [client libraries](/docs/reference/using-api/client-libraries/) for accessing the API from other languages. See documentation for other libraries for how they authenticate. ### Accessing the API from a Pod @@ -173,7 +173,7 @@ When accessing the API from a Pod, locating and authenticating to the API server are somewhat different. The easiest way to use the Kubernetes API from a Pod is to use -one of the official [client libraries](/docs/reference/client-libraries/). These +one of the official [client libraries](/docs/reference/using-api/client-libraries/). These libraries can automatically discover the API server and authenticate. While running in a Pod, the Kubernetes apiserver is accessible via a Service named @@ -196,7 +196,7 @@ at `/var/run/secrets/kubernetes.io/serviceaccount/namespace` in each container. From within a Pod, the recommended ways to connect to the Kubernetes API are: - - Use one of the official [client libraries](/docs/reference/client-libraries/) + - Use one of the official [client libraries](/docs/reference/using-api/client-libraries/) as they handle API host discovery and authentication automatically. For Go client, the `rest.InClusterConfig()` function assists with this. See [an example here](https://git.k8s.io/client-go/examples/in-cluster-client-configuration/main.go). 
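For cases where a client library is not available inside the Pod, a rough sketch of the raw HTTP approach using the automatically mounted service account credentials looks like the following; the `kubernetes.default.svc` hostname and the mount path are the conventional in-cluster defaults:

```shell
# Run from inside a Pod; the service account token and CA bundle are mounted automatically
APISERVER=https://kubernetes.default.svc
SA_DIR=/var/run/secrets/kubernetes.io/serviceaccount
TOKEN=$(cat ${SA_DIR}/token)
NAMESPACE=$(cat ${SA_DIR}/namespace)

# List Pods in the Pod's own namespace, validating the apiserver with the cluster CA
curl --cacert ${SA_DIR}/ca.crt \
     --header "Authorization: Bearer ${TOKEN}" \
     ${APISERVER}/api/v1/namespaces/${NAMESPACE}/pods
```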
diff --git a/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md b/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md index 64f72a2f14347..bad469431f9de 100644 --- a/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md +++ b/content/en/docs/tasks/administer-cluster/configure-multiple-schedulers.md @@ -44,7 +44,7 @@ For more details, please read the GCR [documentation](https://cloud.google.com/container-registry/docs/). ```shell -docker build -t my-kube-scheduler:1.0 . +docker build -t gcr.io/my-gcp-project/my-kube-scheduler:1.0 . gcloud docker -- push gcr.io/my-gcp-project/my-kube-scheduler:1.0 ``` @@ -63,6 +63,9 @@ config. Save it as `my-scheduler.yaml`: An important thing to note here is that the name of the scheduler specified as an argument to the scheduler command in the container spec should be unique. This is the name that is matched against the value of the optional `spec.schedulerName` on pods, to determine whether this scheduler is responsible for scheduling a particular pod. +Note also that we created a dedicated service account `my-scheduler` and bind the cluster role +`system:kube-scheduler` to it so that it can acquire the same privileges as `kube-scheduler`. + Please see the [kube-scheduler documentation](/docs/admin/kube-scheduler/) for detailed description of other command line arguments. diff --git a/content/en/docs/tasks/administer-cluster/my-scheduler.yaml b/content/en/docs/tasks/administer-cluster/my-scheduler.yaml index b9f653e1d2845..ab0c385cd667f 100644 --- a/content/en/docs/tasks/administer-cluster/my-scheduler.yaml +++ b/content/en/docs/tasks/administer-cluster/my-scheduler.yaml @@ -1,3 +1,22 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: my-scheduler + namespace: kube-system +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: my-scheduler-as-kube-scheduler +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +roleRef: + kind: ClusterRole + name: kube-scheduler + apiGroup: rbac.authorization.k8s.io +--- apiVersion: apps/v1 kind: Deployment metadata: @@ -19,6 +38,7 @@ spec: tier: control-plane version: second spec: + serviceAccountName: my-scheduler containers: - command: - /usr/local/bin/kube-scheduler diff --git a/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md b/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md index 9223a9e0a081d..0111f6c21f8ab 100644 --- a/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md +++ b/content/en/docs/tasks/administer-cluster/network-policy-provider/kube-router-network-policy.md @@ -16,7 +16,7 @@ You need to have a Kubernetes cluster running. If you do not already have a clus {{% capture steps %}} ## Installing Kube-router addon -The Kube-router Addon comes with a Network Policy Controller that watches Kubernetes API server for any NetworkPolicy and pods updated and configures iptables rules and ipsets to allow or block traffic as directed by the policies. Please follow the [trying Kube-router with cluster installers](https://github.com/cloudnativelabs/kube-router/tree/master/Documentation#try-kube-router-with-cluster-installers) guide to install Kube-router addon. 
+The Kube-router Addon comes with a Network Policy Controller that watches Kubernetes API server for any NetworkPolicy and pods updated and configures iptables rules and ipsets to allow or block traffic as directed by the policies. Please follow the [trying Kube-router with cluster installers](https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers) guide to install Kube-router addon. {{% /capture %}} {{% capture whatsnext %}} diff --git a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md index d6af91637372b..b14f641a529df 100644 --- a/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md +++ b/content/en/docs/tasks/administer-cluster/reconfigure-kubelet.md @@ -295,7 +295,7 @@ look for the `KubeletConfigOK` condition in `status.conditions`. You should see `using current: /api/v1/namespaces/kube-system/configmaps/${NEW_CONFIG_MAP_NAME}` when the Kubelet starts using the new configuration. -### Deauthorize your Node fom reading the old ConfigMap +### Deauthorize your Node from reading the old ConfigMap Once you know your Node is using the new configuration and are confident that the new configuration has not caused any problems, it is a good idea to @@ -331,7 +331,7 @@ remove the `spec.configSource` subfield. After removing this subfield, you should eventually observe that the KubeletConfigOK condition's message reverts to `using current: local`. -### Deauthorize your Node fom reading the old ConfigMap +### Deauthorize your Node from reading the old ConfigMap Once you know your Node is using the default configuration again, it is a good idea to deauthorize the node from reading the old ConfigMap. Run the following diff --git a/content/en/docs/tasks/administer-cluster/setup-ha-etcd-with-kubeadm.md b/content/en/docs/tasks/administer-cluster/setup-ha-etcd-with-kubeadm.md index 3bee8f29322a4..3a05548706794 100644 --- a/content/en/docs/tasks/administer-cluster/setup-ha-etcd-with-kubeadm.md +++ b/content/en/docs/tasks/administer-cluster/setup-ha-etcd-with-kubeadm.md @@ -31,7 +31,9 @@ when using kubeadm to set up a kubernetes cluster. {{% capture steps %}} The general approach is to generate all certs on one node and only distribute -the *necessary* files to the other nodes. +the *necessary* files to the other nodes. Note that kubeadm contains all the necessary +crytographic machinery to generate the certificates described below; no other cryptographic tooling +is required for this exercise. ## Create configuration files for kubeadm @@ -196,7 +198,7 @@ cluster is healthy Once your have a working 3 member etcd cluster, you can continue [setting up an HA control plane using -kubeadm](/docs/tasks/administer-cluster/highly-available-master/). +kubeadm](/docs/setup/independent/high-availability/). {{% /capture %}} diff --git a/content/en/docs/tasks/administer-federation/cluster.md b/content/en/docs/tasks/administer-federation/cluster.md index 16c7bf4235e03..d45aa76f1fe9a 100644 --- a/content/en/docs/tasks/administer-federation/cluster.md +++ b/content/en/docs/tasks/administer-federation/cluster.md @@ -110,7 +110,7 @@ Currently, only integers are supported with `Gt` or `Lt`. ## Clusters API reference The full clusters API reference is currently in `federation/v1beta1` and more details can be found in the -[Federation API reference page](/docs/reference/generated/federation/). +[Federation API reference page](/docs/reference/federation/). 
{{% /capture %}} diff --git a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md index 53487e7c56fd9..c572b2ff2644e 100644 --- a/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md +++ b/content/en/docs/tasks/configure-pod-container/assign-pods-nodes.md @@ -21,7 +21,7 @@ Kubernetes cluster. 1. List the nodes in your cluster: - kubectl get nodes + kubectl get nodes The output is similar to this: @@ -32,7 +32,7 @@ Kubernetes cluster. 1. Chose one of your nodes, and add a label to it: - kubectl label nodes disktype=ssd + kubectl label nodes disktype=ssd where `` is the name of your chosen node. @@ -62,11 +62,11 @@ a `disktype=ssd` label. 1. Use the configuration file to create a pod that will get scheduled on your chosen node: - kubectl create -f https://k8s.io/docs/tasks/configure-pod-container/pod.yaml + kubectl create -f https://k8s.io/docs/tasks/configure-pod-container/pod.yaml 1. Verify that the pod is running on your chosen node: - kubectl get pods --output=wide + kubectl get pods --output=wide The output is similar to this: diff --git a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md index 750af757b4c38..8a8768302e0b7 100644 --- a/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md +++ b/content/en/docs/tasks/configure-pod-container/configure-liveness-readiness-probes.md @@ -195,6 +195,18 @@ starts. Just like the readiness probe, this will attempt to connect to the `goproxy` container on port 8080. If the liveness probe fails, the container will be restarted. +To try the TCP liveness check, create a Pod: + +```shell +kubectl create -f https://k8s.io/docs/tasks/configure-pod-container/tcp-liveness-readiness.yaml +``` + +After 15 seconds, view Pod events to verify that liveness probes: + +```shell +kubectl describe pod goproxy +``` + ## Use a named port You can use a named diff --git a/content/en/docs/tasks/federation/policy.rego b/content/en/docs/tasks/federation/policy.rego index 9ec64d37c929b..49827b6ae96e2 100644 --- a/content/en/docs/tasks/federation/policy.rego +++ b/content/en/docs/tasks/federation/policy.rego @@ -1,5 +1,5 @@ # OPA supports a high-level declarative language named Rego for authoring and -# enforcing policies. For more infomration on Rego, visit +# enforcing policies. For more information on Rego, visit # http://openpolicyagent.org. # Rego policies are namespaced by the "package" directive. @@ -15,7 +15,7 @@ import data.kubernetes.clusters # rule. # # The SchedulingPolicy Admission Controller running inside the Federation API -# server will merge these annotatiosn into incoming Federated resources. By +# server will merge these annotations into incoming Federated resources. By # setting replica-set-preferences, we can control the placement of Federated # ReplicaSets. # diff --git a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md index 4c188960e6c1b..df2ca34a9c244 100644 --- a/content/en/docs/tasks/inject-data-application/define-command-argument-container.md +++ b/content/en/docs/tasks/inject-data-application/define-command-argument-container.md @@ -41,11 +41,11 @@ file for the Pod defines a command and two arguments: 1. 
Create a Pod based on the YAML configuration file: - kubectl create -f https://k8s.io/docs/tasks/inject-data-application/commands.yaml + kubectl create -f https://k8s.io/docs/tasks/inject-data-application/commands.yaml 1. List the running Pods: - kubectl get pods + kubectl get pods The output shows that the container that ran in the command-demo Pod has completed. @@ -53,7 +53,7 @@ file for the Pod defines a command and two arguments: 1. To see the output of the command that ran in the container, view the logs from the Pod: - kubectl logs command-demo + kubectl logs command-demo The output shows the values of the HOSTNAME and KUBERNETES_PORT environment variables: diff --git a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md index 86550571ff960..7b7820ecca0a8 100644 --- a/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md +++ b/content/en/docs/tasks/inject-data-application/define-environment-variable-container.md @@ -36,11 +36,11 @@ Pod: 1. Create a Pod based on the YAML configuration file: - kubectl create -f https://k8s.io/docs/tasks/inject-data-application/envars.yaml + kubectl create -f https://k8s.io/docs/tasks/inject-data-application/envars.yaml 1. List the running Pods: - kubectl get pods -l purpose=demonstrate-envars + kubectl get pods -l purpose=demonstrate-envars The output is similar to this: @@ -49,11 +49,11 @@ Pod: 1. Get a shell to the container running in your Pod: - kubectl exec -it envar-demo -- /bin/bash + kubectl exec -it envar-demo -- /bin/bash 1. In your shell, run the `printenv` command to list the environment variables. - root@envar-demo:/# printenv + root@envar-demo:/# printenv The output is similar to this: diff --git a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md index 182a5b31ed434..61b2f08f3d619 100644 --- a/content/en/docs/tasks/run-application/run-stateless-application-deployment.md +++ b/content/en/docs/tasks/run-application/run-stateless-application-deployment.md @@ -41,11 +41,11 @@ a Deployment that runs the nginx:1.7.9 Docker image: 1. Create a Deployment based on the YAML file: - kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment.yaml + kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment.yaml 1. Display information about the Deployment: - kubectl describe deployment nginx-deployment + kubectl describe deployment nginx-deployment The output is similar to this: @@ -80,7 +80,7 @@ a Deployment that runs the nginx:1.7.9 Docker image: 1. List the pods created by the deployment: - kubectl get pods -l app=nginx + kubectl get pods -l app=nginx The output is similar to this: @@ -90,7 +90,7 @@ a Deployment that runs the nginx:1.7.9 Docker image: 1. Display information about a pod: - kubectl describe pod + kubectl describe pod where `` is the name of one of your pods. @@ -103,11 +103,11 @@ specifies that the deployment should be updated to use nginx 1.8. 1. Apply the new YAML file: - kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment-update.yaml + kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment-update.yaml 1. 
Watch the deployment create pods with new names and delete the old pods: - kubectl get pods -l app=nginx + kubectl get pods -l app=nginx ## Scaling the application by increasing the replica count @@ -119,11 +119,11 @@ should have four pods: 1. Apply the new YAML file: - kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment-scale.yaml + kubectl apply -f https://k8s.io/docs/tasks/run-application/deployment-scale.yaml 1. Verify that the Deployment has four pods: - kubectl get pods -l app=nginx + kubectl get pods -l app=nginx The output is similar to this: diff --git a/content/en/docs/tutorials/clusters/apparmor.md b/content/en/docs/tutorials/clusters/apparmor.md index 6140a470e2330..4b59bc1d7235c 100644 --- a/content/en/docs/tutorials/clusters/apparmor.md +++ b/content/en/docs/tutorials/clusters/apparmor.md @@ -375,7 +375,7 @@ tools to help with that: * `aa-genprof` and `aa-logprof` generate profile rules by monitoring an application's activity and logs, and admitting the actions it takes. Further instructions are provided by the - [AppArmor documentation](http://wiki.apparmor.net/index.php/Profiling_with_tools). + [AppArmor documentation](https://gitlab.com/apparmor/apparmor/wikis/Profiling_with_tools). * [bane](https://github.com/jfrazelle/bane) is an AppArmor profile generator for Docker that uses a simplified profile language. diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 8e69db3ff2a5a..b037d637a48fc 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -90,7 +90,8 @@

Deploying your first app on Kubernetes

-

For our first Deployment, we'll use a Node.js application packaged in a Docker container. The source code and the Dockerfile are available in the GitHub repository for the Kubernetes Basics.

+ +

For our first Deployment, we'll use a Node.js application packaged in a Docker container. The source code and the Dockerfile are available in the GitHub repository for the Kubernetes Basics.

Now that you know what Deployments are, let's go to the online tutorial and deploy our first app!
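For readers who would rather try the same flow from a terminal, a minimal sketch (assuming the `gcr.io/google-samples/kubernetes-bootcamp:v1` image used by the Kubernetes Basics tutorial; the name `kubernetes-bootcamp` is just a label) might be:

```shell
# Create a Deployment running the sample Node.js app
kubectl run kubernetes-bootcamp --image=gcr.io/google-samples/kubernetes-bootcamp:v1 --port=8080

# Confirm the Deployment and its Pod exist
kubectl get deployments
kubectl get pods
```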

diff --git a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html index 1cbeccdc1b80d..785503f8a8b9d 100644 --- a/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -86,7 +86,7 @@

Scaling overview

-

Scaling out a Deployment will ensure new Pods are created and scheduled to Nodes with available resources. Scaling in will reduce the number of Pods to the new desired state. Kubernetes also supports autoscaling of Pods, but it is outside of the scope of this tutorial. Scaling to zero is also possible, and it will terminate all Pods of the specified Deployment.

+

Scaling out a Deployment will ensure new Pods are created and scheduled to Nodes with available resources. Scaling in will reduce the number of Pods to the new desired state. Kubernetes also supports autoscaling of Pods, but it is outside of the scope of this tutorial. Scaling to zero is also possible, and it will terminate all Pods of the specified Deployment.

Running multiple instances of an application will require a way to distribute the traffic to all of them. Services have an integrated load-balancer that will distribute network traffic to all Pods of an exposed Deployment. Services will continuously monitor the running Pods using endpoints, to ensure that traffic is sent only to available Pods.
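As a small sketch of the scale-out and scale-in described above (the Deployment name `kubernetes-bootcamp` is only a placeholder):

```shell
# Scale the Deployment out to 4 replicas
kubectl scale deployments/kubernetes-bootcamp --replicas=4

# Verify the desired and available replica counts
kubectl get deployments

# Scale back in to 2 replicas; scaling to 0 would terminate all of its Pods
kubectl scale deployments/kubernetes-bootcamp --replicas=2
```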

diff --git a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md index a47c589055c59..398b3b9e143cf 100644 --- a/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md +++ b/content/en/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume.md @@ -68,26 +68,29 @@ When a PersistentVolumeClaim is created, a PersistentVolume is dynamically provi A [Secret](/docs/concepts/configuration/secret/) is an object that stores a piece of sensitive data like a password or key. The manifest files are already configured to use a Secret, but you have to create your own Secret. -1. Create the Secret object from the following command: +1. Create the Secret object from the following command. You will need to replace + `YOUR_PASSWORD` with the password you want to use. - kubectl create secret generic mysql-pass --from-literal=password=YOUR_PASSWORD + ``` + kubectl create secret generic mysql-pass --from-literal=password=YOUR_PASSWORD + ``` - {{< note >}} - **Note:** Replace `YOUR_PASSWORD` with the password you want to apply. - {{< /note >}} - 2. Verify that the Secret exists by running the following command: - kubectl get secrets + ``` + kubectl get secrets + ``` - The response should be like this: + The response should be like this: - NAME TYPE DATA AGE - mysql-pass Opaque 1 42s + ``` + NAME TYPE DATA AGE + mysql-pass Opaque 1 42s + ``` - {{< note >}} - **Note:** To protect the Secret from exposure, neither `get` nor `describe` show its contents. - {{< /note >}} +{{< note >}} +**Note:** To protect the Secret from exposure, neither `get` nor `describe` show its contents. +{{< /note >}} ## Deploy MySQL @@ -97,77 +100,96 @@ The following manifest describes a single-instance MySQL Deployment. The MySQL c 1. Deploy MySQL from the `mysql-deployment.yaml` file: - kubectl create -f mysql-deployment.yaml - -2. Verify that a PersistentVolume got dynamically provisioned: + ``` + kubectl create -f mysql-deployment.yaml + ``` - kubectl get pvc +2. Verify that a PersistentVolume got dynamically provisioned. Note that it can + It can take up to a few minutes for the PVs to be provisioned and bound. - {{< note >}} - **Note:** It can take up to a few minutes for the PVs to be provisioned and bound. - {{< /note >}} + ``` + kubectl get pvc + ``` - The response should be like this: + The response should be like this: - NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE - mysql-pv-claim Bound pvc-91e44fbf-d477-11e7-ac6a-42010a800002 20Gi RWO standard 29s + ``` + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + mysql-pv-claim Bound pvc-91e44fbf-d477-11e7-ac6a-42010a800002 20Gi RWO standard 29s + ``` 3. Verify that the Pod is running by running the following command: - kubectl get pods + ``` + kubectl get pods + ``` - {{< note >}} - **Note:** It can take up to a few minutes for the Pod's Status to be `RUNNING`. - {{< /note >}} + **Note:** It can take up to a few minutes for the Pod's Status to be `RUNNING`. - The response should be like this: + The response should be like this: - NAME READY STATUS RESTARTS AGE - wordpress-mysql-1894417608-x5dzt 1/1 Running 0 40s + ``` + NAME READY STATUS RESTARTS AGE + wordpress-mysql-1894417608-x5dzt 1/1 Running 0 40s + ``` ## Deploy WordPress -The following manifest describes a single-instance WordPress Deployment and Service. 
It uses many of the same features like a PVC for persistent storage and a Secret for the password. But it also uses a different setting: `type: NodePort`. This setting exposes WordPress to traffic from outside of the cluster. +The following manifest describes a single-instance WordPress Deployment and Service. It uses many of the same features like a PVC for persistent storage and a Secret for the password. But it also uses a different setting: `type: LoadBalancer`. This setting exposes WordPress to traffic from outside of the cluster. {{< code file="mysql-wordpress-persistent-volume/wordpress-deployment.yaml" >}} 1. Create a WordPress Service and Deployment from the `wordpress-deployment.yaml` file: - kubectl create -f wordpress-deployment.yaml + ``` + kubectl create -f wordpress-deployment.yaml + ``` 2. Verify that a PersistentVolume got dynamically provisioned: - kubectl get pvc + ``` + kubectl get pvc + ``` - {{< note >}} - **Note:** It can take up to a few minutes for the PVs to be provisioned and bound. - {{< /note >}} + **Note:** It can take up to a few minutes for the PVs to be provisioned and bound. - The response should be like this: + The response should be like this: - NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE - wp-pv-claim Bound pvc-e69d834d-d477-11e7-ac6a-42010a800002 20Gi RWO standard 7s + ``` + NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE + wp-pv-claim Bound pvc-e69d834d-d477-11e7-ac6a-42010a800002 20Gi RWO standard 7s + ``` 3. Verify that the Service is running by running the following command: - kubectl get services wordpress + ``` + kubectl get services wordpress + ``` - The response should be like this: + The response should be like this: - NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE - wordpress 10.0.0.89 80:32406/TCP 4m + ``` + NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE + wordpress 10.0.0.89 80:32406/TCP 4m + ``` - {{< note >}} - **Note:** Minikube can only expose Services through `NodePort`.

The `EXTERNAL-IP` is always ``. - {{< /note >}} + **Note:** Minikube can only expose Services through `NodePort`. + + ``` + The EXTERNAL-IP is always . + ``` 4. Run the following command to get the IP Address for the WordPress Service: - minikube service wordpress --url + ``` + minikube service wordpress --url + ``` - The response should be like this: + The response should be like this: - http://1.2.3.4:32406 + ``` + http://1.2.3.4:32406 + ``` 5. Copy the IP address, and load the page in your browser to view your site. @@ -175,9 +197,9 @@ The following manifest describes a single-instance WordPress Deployment and Serv ![wordpress-init](https://raw.githubusercontent.com/kubernetes/examples/master/mysql-wordpress-pd/WordPress.png) - {{< warning >}} - **Warning:** Do not leave your WordPress installation on this page. If another user finds it, they can set up a website on your instance and use it to serve malicious content.

Either install WordPress by creating a username and password or delete your instance. - {{< /warning >}} +{{< warning >}} +**Warning:** Do not leave your WordPress installation on this page. If another user finds it, they can set up a website on your instance and use it to serve malicious content.

Either install WordPress by creating a username and password or delete your instance. +{{< /warning >}} {{% /capture %}} @@ -185,16 +207,22 @@ The following manifest describes a single-instance WordPress Deployment and Serv 1. Run the following command to delete your Secret: - kubectl delete secret mysql-pass + ``` + kubectl delete secret mysql-pass + ``` 2. Run the following commands to delete all Deployments and Services: - kubectl delete deployment -l app=wordpress - kubectl delete service -l app=wordpress + ``` + kubectl delete deployment -l app=wordpress + kubectl delete service -l app=wordpress + ``` 3. Run the following commands to delete the PersistentVolumeClaims. The dynamically provisioned PersistentVolumes will be automatically deleted. - kubectl delete pvc -l app=wordpress + ``` + kubectl delete pvc -l app=wordpress + ``` {{% /capture %}} diff --git a/content/en/docs/user-guide/walkthrough/_index.md b/content/en/docs/user-guide/walkthrough/_index.md index 91268edd08852..06eaddf2386db 100644 --- a/content/en/docs/user-guide/walkthrough/_index.md +++ b/content/en/docs/user-guide/walkthrough/_index.md @@ -19,7 +19,7 @@ In order for the kubectl usage examples to work, make sure you have an example d ## Kubectl CLI -The easiest way to interact with Kubernetes is through the [kubectl](/docs/reference/kubectl/overview/) command-line interface. +The easiest way to interact with Kubernetes is through the kubectl command-line interface. For more info about kubectl, including its usage, commands, and parameters, see [Overview of kubectl](/docs/reference/kubectl/overview/). diff --git a/content/en/docs/user-journeys/users/application-developer/advanced.md b/content/en/docs/user-journeys/users/application-developer/advanced.md index 9bcd65440f81a..dde720f1b6425 100644 --- a/content/en/docs/user-journeys/users/application-developer/advanced.md +++ b/content/en/docs/user-journeys/users/application-developer/advanced.md @@ -106,7 +106,7 @@ If you do not have a {{< glossary_tooltip text="cluster operator" term_id="clust The following topics are also useful for building more complex applications: * {{< link text="Other points of extensibility within Kubernetes" url="/docs/concepts/overview/extending/" >}} - A conceptual overview of where you can hook into the Kubernetes architecture. -* {{< link text="Kubernetes Client Libraries" url="/docs/reference/client-libraries/" >}} - Useful for building apps that need to interact heavily with the Kubernetes API. +* {{< link text="Kubernetes Client Libraries" url="/docs/reference/using-api/client-libraries/" >}} - Useful for building apps that need to interact heavily with the Kubernetes API. #### What's next Congrats on completing the Application Developer user journey! You've covered the majority of features that Kubernetes has to offer. What now? diff --git a/content/en/includes/federation-current-state.md b/content/en/includes/federation-current-state.md index 56e4decdf3c2e..a1c59f2a1d300 100644 --- a/content/en/includes/federation-current-state.md +++ b/content/en/includes/federation-current-state.md @@ -1,7 +1 @@ -**Note:** `Federation V1`, the current Kubernetes federation API which reuses the Kubernetes API -resources 'as is', is currently considered alpha for many of its features, and there is no clear -path to evolve the API to GA. However, there is a `Federation V2` effort in progress to implement -a dedicated federation API apart from the Kubernetes API. 
The details can be found at -[sig-multicluster community page](https://github.com/kubernetes/community/tree/master/sig-multicluster). -{: .note} - +**Note:** `Federation V1`, the current Kubernetes federation API which reuses the Kubernetes API resources 'as is', is currently considered alpha for many of its features, and there is no clear path to evolve the API to GA. However, there is a `Federation V2` effort in progress to implement a dedicated federation API apart from the Kubernetes API. The details can be found at [sig-multicluster community page](https://github.com/kubernetes/community/tree/master/sig-multicluster). diff --git a/layouts/docs/docsportal_home.html b/layouts/docs/docsportal_home.html index cfd6d4bec45e4..ae0de8edcc119 100644 --- a/layouts/docs/docsportal_home.html +++ b/layouts/docs/docsportal_home.html @@ -36,8 +36,10 @@

{{ .Title }}

- Kubernetes is an open source system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications. - The open source project is hosted by the Cloud Native Computing Foundation (CNCF). +

+ Kubernetes is an open source system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications. + The open source project is hosted by the Cloud Native Computing Foundation (CNCF). +

diff --git a/layouts/partials/templates/blocks.html b/layouts/partials/templates/blocks.html index 280aa6aa5d14b..765b884ac70c2 100644 --- a/layouts/partials/templates/blocks.html +++ b/layouts/partials/templates/blocks.html @@ -9,8 +9,10 @@ {{ $section := $.ctx.Scratch.Get "section" }} {{ $headers := findRE "(.|\n)*?
" $section }} {{ range $headers }} -{{ $header := . | replaceRE "" "" | htmlUnescape }} -
  • {{ $header }}
  • +{{ $id := . | strings.TrimPrefix "

    .*" "" }} +{{ $header := . | replaceRE "" "" | htmlUnescape | safeHTML }} +
  • {{ $header }}
  • {{ end }} {{ $.ctx.Scratch.Add "sections" $section }} {{ end }} diff --git a/static/_redirects b/static/_redirects index 841fb91380d39..1e2782722e712 100644 --- a/static/_redirects +++ b/static/_redirects @@ -174,8 +174,10 @@ /docs/getting-started-guides/centos/* /docs/setup/independent/create-cluster-kubeadm/ 301 /docs/hellonode/ /docs/tutorials/stateless-application/hello-minikube/ 301 +/docs/home/contribute/stage-documentation-changes/ /docs/home/contribute/create-pull-request/ 301 /docs/home/coreos/ /docs/getting-started-guides/coreos/ 301 -/docs/home/deprecation-policy/ /docs/reference/deprecation-policy/ 301 +/docs/home/deprecation-policy/ /docs/reference/using-api/deprecation-policy/ 301 +/docs/reference/deprecation-policy/ /docs/reference/using-api/deprecation-policy/ 301 /docs/reference/federation/v1beta1/definitions/ /docs/reference/federation/extensions/v1beta1/definitions/ 301 /docs/reference/federation/v1beta1/operations/ /docs/reference/federation/extensions/v1beta1/operations/ 301 @@ -196,8 +198,8 @@ /docs/reference/generated/kubefed_options/ /docs/reference/setup-tools/kubefed/kubefed-options/ 301 /docs/reference/generated/kubefed_unjoin/ /docs/reference/setup-tools/kubefed/kubefed-unjoin/ 301 /docs/reference/generated/kubefed_version/ /docs/reference/setup-tools/kubefed/kubefed-version/ 301 - /docs/reference/kubectl/kubectl/kubectl_*.md /docs/reference/generated/kubectl/kubectl-commands#:splat 301 +/docs/reference/workloads-18-19/ https://v1-9.docs.kubernetes.io/docs/reference/workloads-18-19/ 301 /docs/reporting-security-issues/ /security/ 301 diff --git a/static/css/styles.css b/static/css/styles.css index 6857fb7fcc084..2ef22b423f08a 100644 --- a/static/css/styles.css +++ b/static/css/styles.css @@ -508,7 +508,7 @@ html.search #docsContent h1 { margin-bottom: 0; border-bottom: 0; padding-bottom #video { height: 200px; } -#video { width: 100%; position: relative; background-image: url(/images/kub_video_banner_box.jpg); background-position: center center; background-size: cover; } +#video { width: 100%; position: relative; background-image: url(/images/kub_video_banner_homepage.jpg); background-position: center center; background-size: cover; } #video > .light-text { display: none; position: absolute; top: 50%; left: 75%; width: 525px; padding-right: 80px; transform: translate(-50%, -50%); color: white; } @@ -859,7 +859,7 @@ html.search #docsContent h1 { margin-bottom: 0; border-bottom: 0; padding-bottom #oceanNodes main:nth-child(1) h3, #oceanNodes main:nth-child(1) p { text-align: left; } #oceanNodes main:nth-child(1) .image-wrapper { position: absolute; max-width: 48%; transform: translateY(-50%); } #oceanNodes main:nth-child(1) .image-wrapper img { max-width: 425px; } - #video { height: 550px; position: relative; background-image: url(../images/kub_video_banner_box.jpg); background-position: center center; background-size: cover; } + #video { height: 550px; position: relative; background-image: url(../images/kub_video_banner_homepage.jpg); background-position: center center; background-size: cover; } #talkToUs h4 br { display: block; } #talkToUs #bigSocial div { width: calc(25% - 18px); } #talkToUs #bigSocial div + div { margin-left: 20px; } diff --git a/static/docs/reference/generated/kubectl/scroll.js b/static/docs/reference/generated/kubectl/scroll.js index 67fee8729e449..6639f62895616 100644 --- a/static/docs/reference/generated/kubectl/scroll.js +++ b/static/docs/reference/generated/kubectl/scroll.js @@ -187,10 +187,10 @@ $(document).ready(function() { var 
scrollPosition = $(window).scrollTop(); scrollActions(scrollPosition); checkActiveElement(flatToc, scrollPosition); - // TODO: prevent scroll on sidebar from propogating to window + // TODO: prevent scroll on sidebar from propagating to window $(window).on('scroll', function(event) { var scrollPosition = $(window).scrollTop(); var activeSectionTokens = scrollActions(scrollPosition); var activeElemToken = checkActiveElement(flatToc, scrollPosition); }); -}); \ No newline at end of file +}); diff --git a/static/docs/reference/generated/kubernetes-api/v1.10/scroll.js b/static/docs/reference/generated/kubernetes-api/v1.10/scroll.js index 67fee8729e449..6639f62895616 100644 --- a/static/docs/reference/generated/kubernetes-api/v1.10/scroll.js +++ b/static/docs/reference/generated/kubernetes-api/v1.10/scroll.js @@ -187,10 +187,10 @@ $(document).ready(function() { var scrollPosition = $(window).scrollTop(); scrollActions(scrollPosition); checkActiveElement(flatToc, scrollPosition); - // TODO: prevent scroll on sidebar from propogating to window + // TODO: prevent scroll on sidebar from propagating to window $(window).on('scroll', function(event) { var scrollPosition = $(window).scrollTop(); var activeSectionTokens = scrollActions(scrollPosition); var activeElemToken = checkActiveElement(flatToc, scrollPosition); }); -}); \ No newline at end of file +}); diff --git a/static/images/KubeCon_EU_Community.jpg b/static/images/KubeCon_EU_Community.jpg index 51584150ae264..5b50d2b18a0b8 100644 Binary files a/static/images/KubeCon_EU_Community.jpg and b/static/images/KubeCon_EU_Community.jpg differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/containerd.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/containerd.png new file mode 100644 index 0000000000000..5d74665792d47 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/containerd.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cpu.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cpu.png new file mode 100644 index 0000000000000..a0f55b47b6224 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cpu.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png new file mode 100644 index 0000000000000..1d4131f169616 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/cri-containerd.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods-filter.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods-filter.png new file mode 100644 index 0000000000000..a8ada1e59a470 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods-filter.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods.png new file mode 100644 index 0000000000000..cc28e1e1d8b26 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-pods.png differ diff --git 
a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-ps.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-ps.png new file mode 100644 index 0000000000000..0643f3dcb0bc0 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/crictl-ps.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ce.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ce.png new file mode 100644 index 0000000000000..9233e2a2c3510 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ce.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ps.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ps.png new file mode 100644 index 0000000000000..ee2ff07ceab39 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/docker-ps.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/latency.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/latency.png new file mode 100644 index 0000000000000..67a4b0a4caea8 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/latency.png differ diff --git a/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/memory.png b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/memory.png new file mode 100644 index 0000000000000..bc0d3302ac975 Binary files /dev/null and b/static/images/blog/2018-05-24-kubernetes-containerd-integration-goes-ga/memory.png differ diff --git a/static/images/blog/2018-05-30-say-hello-to-discuss-kubernetes.png b/static/images/blog/2018-05-30-say-hello-to-discuss-kubernetes.png new file mode 100755 index 0000000000000..03243ffcdf1c2 Binary files /dev/null and b/static/images/blog/2018-05-30-say-hello-to-discuss-kubernetes.png differ diff --git a/static/images/kub_video_banner_box.jpg b/static/images/kub_video_banner_box.jpg deleted file mode 100644 index bdc024a1e3f62..0000000000000 Binary files a/static/images/kub_video_banner_box.jpg and /dev/null differ diff --git a/static/images/kub_video_banner_homepage.jpg b/static/images/kub_video_banner_homepage.jpg new file mode 100644 index 0000000000000..d70306b9c5497 Binary files /dev/null and b/static/images/kub_video_banner_homepage.jpg differ diff --git a/test/examples_test.go b/test/examples_test.go index bc94dec7eccd0..55d0fa6a92f63 100644 --- a/test/examples_test.go +++ b/test/examples_test.go @@ -369,7 +369,7 @@ func TestExampleObjectSchemas(t *testing.T) { "memory-defaults-pod": {&api.Pod{}}, "memory-defaults-pod-2": {&api.Pod{}}, "memory-defaults-pod-3": {&api.Pod{}}, - "my-scheduler": {&extensions.Deployment{}}, + "my-scheduler": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}}, "namespace-dev": {&api.Namespace{}}, "namespace-prod": {&api.Namespace{}}, "persistent-volume-label-initializer-config": {&admissionregistration.InitializerConfiguration{}}, @@ -421,15 +421,15 @@ func TestExampleObjectSchemas(t *testing.T) { "tcp-liveness-readiness": {&api.Pod{}}, }, "docs/tasks/debug-application-cluster": { - "counter-pod": {&api.Pod{}}, - "event-exporter-deploy": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}}, - "fluentd-gcp-configmap": 
{&api.ConfigMap{}}, - "fluentd-gcp-ds": {&extensions.DaemonSet{}}, - "nginx-dep": {&extensions.Deployment{}}, - "node-problem-detector": {&extensions.DaemonSet{}}, - "node-problem-detector-configmap": {&extensions.DaemonSet{}}, - "shell-demo": {&api.Pod{}}, - "termination": {&api.Pod{}}, + "counter-pod": {&api.Pod{}}, + "event-exporter-deploy": {&api.ServiceAccount{}, &rbac.ClusterRoleBinding{}, &extensions.Deployment{}}, + "fluentd-gcp-configmap": {&api.ConfigMap{}}, + "fluentd-gcp-ds": {&extensions.DaemonSet{}}, + "nginx-dep": {&extensions.Deployment{}}, + "node-problem-detector": {&extensions.DaemonSet{}}, + "node-problem-detector-configmap": {&extensions.DaemonSet{}}, + "shell-demo": {&api.Pod{}}, + "termination": {&api.Pod{}}, }, // TODO: decide whether federation examples should be added "docs/tasks/inject-data-application": { @@ -456,8 +456,8 @@ func TestExampleObjectSchemas(t *testing.T) { "secret-pod": {&api.Pod{}}, }, "docs/tasks/job": { - "cronjob": {&batch.CronJob{}}, - "job": {&batch.Job{}}, + "cronjob": {&batch.CronJob{}}, + "job": {&batch.Job{}}, }, "docs/tasks/job/coarse-parallel-processing-work-queue": { "job": {&batch.Job{}}, @@ -620,12 +620,19 @@ func TestExampleObjectSchemas(t *testing.T) { var sampleRegexp = regexp.MustCompile("(?ms)^```(?:(?Pyaml)\\w*\\n(?P.+?)|\\w*\\n(?P\\{.+?\\}))\\n^```") var subsetRegexp = regexp.MustCompile("(?ms)\\.{3}") +// Validates examples embedded in Markdown files. func TestReadme(t *testing.T) { + // BlockVolume required for local volume example + utilfeature.DefaultFeatureGate.Set("BlockVolume=true") + paths := []struct { file string - expectedType []runtime.Object + expectedType []runtime.Object // List of all valid types for the whole doc }{ - {"../content/en/docs/concepts/storage/volumes.md", []runtime.Object{&api.Pod{}}}, + {"../content/en/docs/concepts/storage/volumes.md", []runtime.Object{ + &api.Pod{}, + &api.PersistentVolume{}, + }}, } for _, path := range paths { @@ -639,7 +646,6 @@ func TestReadme(t *testing.T) { if matches == nil { continue } - ix := 0 for _, match := range matches { var content, subtype string for i, name := range sampleRegexp.SubexpNames() { @@ -655,21 +661,23 @@ func TestReadme(t *testing.T) { continue } - var expectedType runtime.Object - if len(path.expectedType) == 1 { - expectedType = path.expectedType[0] - } else { - expectedType = path.expectedType[ix] - ix++ - } json, err := yaml.ToJSON([]byte(content)) if err != nil { t.Errorf("%s could not be converted to JSON: %v\n%s", path, err, string(content)) } - if err := runtime.DecodeInto(testapi.Default.Codec(), json, expectedType); err != nil { + + var expectedType runtime.Object + for _, expectedType = range path.expectedType { + err = runtime.DecodeInto(testapi.Default.Codec(), json, expectedType) + if err == nil { + break + } + } + if err != nil { t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(content)) continue } + if errors := validateObject(expectedType); len(errors) > 0 { t.Errorf("%s did not validate correctly: %v", path, errors) }