diff --git a/404.md b/404.md
index 71497ddcbe151..bf053c1e3b6cc 100644
--- a/404.md
+++ b/404.md
@@ -65,7 +65,4 @@ $( document ).ready(function() {
});
-Sorry, this page was not found. :(
-
-You can let us know by filling out the "I wish this page" text field at
-the bottom of this page. Maybe try: "I wish this page _existed_."
+Sorry, this page was not found. :(
diff --git a/_includes/footer.html b/_includes/footer.html
index 00ee5554fc0d1..3fb22e0613322 100644
--- a/_includes/footer.html
+++ b/_includes/footer.html
@@ -18,8 +18,6 @@
Events Calendar
"permalink" : "http://kubernetes.github.io{{page.url}}"
};
(function(d,c,j){if(!document.getElementById(j)){var pd=d.createElement(c),s;pd.id=j;pd.src=('https:'==document.location.protocol)?'https://polldaddy.com/js/rating/rating.js':'http://i0.poll.fm/js/rating/rating.js';s=document.getElementsByTagName(c)[0];s.parentNode.insertBefore(pd,s);}}(document,'script','pd-rating-js'));
- {% endif %}
+
+ Create Issue
+ {% endif %}
diff --git a/_sass/_base.sass b/_sass/_base.sass
index c3270865619ca..afa136b5898f2 100644
--- a/_sass/_base.sass
+++ b/_sass/_base.sass
@@ -874,6 +874,12 @@ dd
img
max-width: 100%
+ a.button
+ border-radius: 2px
+
+ a.issue
+ margin-left: 20px
+
.fixed footer
position: fixed
bottom: 0
diff --git a/docs/admin/audit.md b/docs/admin/audit.md
new file mode 100644
index 0000000000000..a033330372cad
--- /dev/null
+++ b/docs/admin/audit.md
@@ -0,0 +1,55 @@
+---
+assignees:
+- soltysh
+- sttts
+
+---
+
+* TOC
+{:toc}
+
+## Audit in Kubernetes
+
+Kubernetes currently provides only basic audit capabilities; there is still a lot
+of work going on to provide fully featured auditing (see https://github.com/kubernetes/features/issues/22).
+
+Kubernetes audit is part of the [kube-apiserver](/docs/admin/kube-apiserver), logging all requests
+processed by the server. Each request produces two audit log entries:
+
+1. The request line, containing:
+ - a unique id allowing you to match the response line (see 2)
+ - the source IP of the request
+ - the HTTP method being invoked
+ - the original user invoking the operation
+ - the impersonated user for the operation
+ - the namespace of the request, or `<none>`
+ - the URI as requested
+2. The response line, containing:
+ - the unique id from 1
+ - the response code
+
+Example output for user `admin` asking for a list of pods:
+
+```
+2016-09-07T13:03:57.400333046Z AUDIT: id="5c3b8227-4af9-4322-8a71-542231c3887b" ip="127.0.0.1" method="GET" user="admin" as="" namespace="default" uri="/api/v1/namespaces/default/pods"
+2016-09-07T13:03:57.400710987Z AUDIT: id="5c3b8227-4af9-4322-8a71-542231c3887b" response="200"
+```
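+
+Because both lines share the same id, a request can be matched with its response, e.g. (a sketch; the log path is whatever you pass via `audit-log-path`, see Configuration below):
+
+```shell
+grep '5c3b8227-4af9-4322-8a71-542231c3887b' /var/log/kube-apiserver/audit.log
+```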
+
+NOTE: The audit capabilities are available *only* for the secured endpoint of the API server.
+
+## Configuration
+
+[Kube-apiserver](/docs/admin/kube-apiserver) provides the following options, which control
+where and how audit logs are handled:
+
+- `audit-log-path` - enables the audit log, pointing to a file where requests are logged.
+- `audit-log-maxage` - specifies the maximum number of days to retain old audit log files, based on the timestamp encoded in their filename.
+- `audit-log-maxbackup` - specifies the maximum number of old audit log files to retain.
+- `audit-log-maxsize` - specifies the maximum size in megabytes of the audit log file before it gets rotated. Defaults to 100MB.
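+
+As a minimal sketch of wiring these options together (the log path and retention values below are illustrative assumptions, not recommendations):
+
+```shell
+# Illustrative invocation; combine with your usual kube-apiserver flags.
+kube-apiserver \
+  --audit-log-path=/var/log/kube-apiserver/audit.log \
+  --audit-log-maxage=30 \
+  --audit-log-maxbackup=10 \
+  --audit-log-maxsize=100
+```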
+
+If the audit log file already exists, new entries are appended to it; otherwise a new
+file is created at the given location. If the file size exceeds `audit-log-maxsize`, the
+file is rotated: it is renamed by appending the current timestamp to the file name
+(before the file's extension), and logging continues in a new file.
+Whenever a new log file is created, old log files may be deleted, as configured by the
+`audit-log-maxbackup` and `audit-log-maxage` flags.
diff --git a/docs/admin/index.md b/docs/admin/index.md
index 4df7fb3375d22..b3ccf4b3f42f7 100644
--- a/docs/admin/index.md
+++ b/docs/admin/index.md
@@ -79,4 +79,6 @@ project](/docs/admin/salt).
* **Authorization** [authorization](/docs/admin/authorization)
-* **Admission Controllers** [admission_controllers](/docs/admin/admission-controllers)
\ No newline at end of file
+* **Admission Controllers** [admission controllers](/docs/admin/admission-controllers)
+
+* **Audit** [audit](/docs/admin/audit)
diff --git a/docs/admin/networking.md b/docs/admin/networking.md
index cd8aaa09cfb38..9275b88565357 100644
--- a/docs/admin/networking.md
+++ b/docs/admin/networking.md
@@ -160,7 +160,7 @@ Lars Kellogg-Stedman.
### Weave Net from Weaveworks
-[Weave Net](https://www.weave.works/documentation/net-1-6-0-introducing-weave/) is a
+[Weave Net](https://www.weave.works/products/weave-net/) is a
resilient and simple to use network for Kubernetes and its hosted applications.
Weave Net runs as a [CNI plug-in](https://www.weave.works/docs/net/latest/cni-plugin/)
or stand-alone. In either version, it doesn’t require any configuration or extra code
diff --git a/docs/admin/node-problem.md b/docs/admin/node-problem.md
index fdb557311cc36..abb08abeff2e7 100644
--- a/docs/admin/node-problem.md
+++ b/docs/admin/node-problem.md
@@ -36,8 +36,7 @@ it to [support other log format](/docs/admin/node-problem/#support-other-log-for
## Enable/Disable in GCE cluster
-Node problem detector is [running as a cluster
-addon](/docs/admin/cluster-large/#addon-resources) enabled by default in the
+Node problem detector is [running as a cluster addon](/docs/admin/cluster-large/#addon-resources) enabled by default in the
gce cluster.
You can enable/disable it by setting the environment variable
diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md
index bb01c8641d433..19adf516927d9 100644
--- a/docs/getting-started-guides/azure.md
+++ b/docs/getting-started-guides/azure.md
@@ -43,7 +43,7 @@ export AZURE_SUBSCRIPTION_ID=""
export AZURE_TENANT_ID="" # only needed for Kubernetes < v1.3.0.
```
-These values can be overriden by setting them in `cluster/azure/config-default.sh` or as environment variables. They are shown here with their default values:
+These values can be overridden by setting them in `cluster/azure/config-default.sh` or as environment variables. They are shown here with their default values:
```shell
export AZURE_DEPLOY_ID="" # autogenerated if blank
diff --git a/docs/getting-started-guides/clc.md b/docs/getting-started-guides/clc.md
index 11470ca47503f..8ec4a3281a9bc 100644
--- a/docs/getting-started-guides/clc.md
+++ b/docs/getting-started-guides/clc.md
@@ -251,9 +251,9 @@ kubectl cluster-info
### Accessing the cluster programmatically
-It's possible to use the locally-stored client certificates to access the api server. For example, you may want to use any of the [Kubernetes API client libraries](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/client-libraries.md) to program against your Kubernetes cluster in the programming language of your choice.
+It's possible to use the locally stored client certificates to access the API server. For example, you may want to use any of the [Kubernetes API client libraries](https://github.com/kubernetes/kubernetes/blob/master/docs/devel/client-libraries.md) to program against your Kubernetes cluster in the programming language of your choice.
-To demostrate how to use these locally stored certificates, we provide the folowing example of using ```curl``` to communicate to the master api server via https:
+To demonstrate how to use these locally stored certificates, we provide the following example of using `curl` to communicate with the master API server via HTTPS:
```shell
curl \
@@ -267,7 +267,7 @@ distributed with OSX.
### Accessing the cluster with a browser
-We install two UIs on Kubernetes. The orginal KubeUI and [the newer kube
+We install two UIs on Kubernetes: the original KubeUI and [the newer kube
dashboard](/docs/user-guide/ui/). When you create a cluster, the script should output URLs for these
interfaces like this:
diff --git a/docs/getting-started-guides/coreos/bare_metal_calico.md b/docs/getting-started-guides/coreos/bare_metal_calico.md
index a8e6fe6be3019..9cbdd89b17171 100644
--- a/docs/getting-started-guides/coreos/bare_metal_calico.md
+++ b/docs/getting-started-guides/coreos/bare_metal_calico.md
@@ -39,7 +39,7 @@ Download the stable CoreOS bootable ISO from the [CoreOS website](https://coreos
1. Once you've downloaded the ISO image, burn the ISO to a CD/DVD/USB key and boot from it (if using a virtual machine you can boot directly from the ISO). Once booted, you should be automatically logged in as the `core` user at the terminal. At this point CoreOS is running from the ISO and it hasn't been installed yet.
-2. *On another machine*, download the the [master cloud-config template](https://raw.githubusercontent.com/projectcalico/calico-cni/k8s-1.1-docs/samples/kubernetes/cloud-config/master-config-template.yaml) and save it as `master-config.yaml`.
+2. *On another machine*, download the [master cloud-config template](https://raw.githubusercontent.com/projectcalico/calico-cni/k8s-1.1-docs/samples/kubernetes/cloud-config/master-config-template.yaml) and save it as `master-config.yaml`.
3. Replace the following variables in the `master-config.yaml` file.
diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md
index e513831f4e318..cf62fe3439b5b 100644
--- a/docs/getting-started-guides/coreos/bare_metal_offline.md
+++ b/docs/getting-started-guides/coreos/bare_metal_offline.md
@@ -23,7 +23,7 @@ Deploy a CoreOS running Kubernetes environment. This particular guide is made to
* /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file
2. Update per install the link for pxelinux
3. Update the DHCP config to reflect the host needing deployment
-4. Setup nodes to deploy CoreOS creating a etcd cluster.
+4. Set up nodes to deploy CoreOS, creating an etcd cluster.
5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/).
6. Installing the CoreOS slaves to become Kubernetes nodes.
@@ -98,7 +98,7 @@ Now you should have a working PXELINUX setup to image CoreOS nodes. You can veri
This section describes how to set up the CoreOS images to live alongside a pre-existing PXELINUX environment.
-1. Find or create the TFTP root directory that everything will be based off of.
+1. Find or create the TFTP root directory that everything will be based on.
* For this document we will assume `/tftpboot/` is our root directory.
2. Once we know and have our tftp root directory we will create a new directory structure for our CoreOS images.
3. Download the CoreOS PXE files provided by the CoreOS team.
diff --git a/docs/getting-started-guides/logging-elasticsearch.md b/docs/getting-started-guides/logging-elasticsearch.md
index 2141cfbc6d758..3200bba62508f 100644
--- a/docs/getting-started-guides/logging-elasticsearch.md
+++ b/docs/getting-started-guides/logging-elasticsearch.md
@@ -93,7 +93,7 @@ asks you to configure your view of the ingested logs. Select the option for
timeseries values and select `@timestamp`. On the following page select the
`Discover` tab and then you should be able to see the ingested logs.
You can set the refresh interval to 5 seconds to have the logs
-regulary refreshed.
+regularly refreshed.
Here is a typical view of ingested logs from the Kibana viewer:
diff --git a/docs/getting-started-guides/meanstack.md b/docs/getting-started-guides/meanstack.md
index 46fee21ca0ad2..7287168bcd324 100644
--- a/docs/getting-started-guides/meanstack.md
+++ b/docs/getting-started-guides/meanstack.md
@@ -64,7 +64,7 @@ RUN npm install
CMD ["node", "app.js"]
```
-A `Dockerfile` is pretty self explanatory, and this one is dead simple.
+A `Dockerfile` is pretty self-explanatory, and this one is dead simple.
First, it uses the official Node.js LTS image as the base image.
diff --git a/docs/getting-started-guides/openstack-heat.md b/docs/getting-started-guides/openstack-heat.md
index 3a151c6bf5450..25a7264ced7d1 100644
--- a/docs/getting-started-guides/openstack-heat.md
+++ b/docs/getting-started-guides/openstack-heat.md
@@ -86,7 +86,7 @@ If you do not have your environment variables set, or do not want them consumed,
- **[config-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/config-default.sh)** Sets all parameters needed for heat template.
- **[config-image.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/config-image.sh)** Sets parameters needed to download and create new OpenStack image via glance.
- **[openrc-default.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-default.sh)** Sets environment variables for communicating to OpenStack. These are consumed by the cli tools (heat, glance, swift, nova).
-- **[openrc-swift.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-swift.sh)** Some OpenStack setups require the use of seperate swift credentials. Put those credentials in this file.
+- **[openrc-swift.sh](http://releases.k8s.io/{{page.githubbranch}}/cluster/openstack-heat/openrc-swift.sh)** Some OpenStack setups require the use of separate swift credentials. Put those credentials in this file.
Please see the contents of these files for documentation regarding each variable's function.
diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md
index cfa7554d07b9f..96e0727ccbda9 100644
--- a/docs/getting-started-guides/ubuntu.md
+++ b/docs/getting-started-guides/ubuntu.md
@@ -59,7 +59,7 @@ $ export ETCD_VERSION=2.2.0
For users who want to bring up a cluster with k8s version v1.1.1, `controller manager` may fail to start
due to [a known issue](https://github.com/kubernetes/kubernetes/issues/17109). You could raise it
up manually by using the following command on the remote master server. Note that
-you should do this only after `api-server` is up. Moreover this issue is fixed in v1.1.2 and later.
+you should do this only after `api-server` is up. Moreover, this issue is fixed in v1.1.2 and later.
```shell
$ sudo service kube-controller-manager start
diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md
index da6b299ea2260..b62dcf4e56387 100644
--- a/docs/user-guide/compute-resources.md
+++ b/docs/user-guide/compute-resources.md
@@ -122,11 +122,11 @@ runner (Docker or rkt).
When using Docker:
- The `spec.container[].resources.requests.cpu` is converted to its core value (potentially fractional),
- and multipled by 1024, and used as the value of the [`--cpu-shares`](
+ and multiplied by 1024, and used as the value of the [`--cpu-shares`](
https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag to the `docker run`
command.
- The `spec.container[].resources.limits.cpu` is converted to its millicore value,
- multipled by 100000, and then divided by 1000, and used as the value of the [`--cpu-quota`](
+ multiplied by 100000, and then divided by 1000, and used as the value of the [`--cpu-quota`](
https://docs.docker.com/reference/run/#runtime-constraints-on-resources) flag to the `docker run`
command. The [`--cpu-period`] flag is set to 100000 which represents the default 100ms period
for measuring quota usage. The kubelet enforces cpu limits if it was started with the
diff --git a/docs/user-guide/configuring-containers.md b/docs/user-guide/configuring-containers.md
index 7a57d86ae8543..6b7b4472897bc 100644
--- a/docs/user-guide/configuring-containers.md
+++ b/docs/user-guide/configuring-containers.md
@@ -10,7 +10,7 @@ assignees:
## Configuration in Kubernetes
-In addition to the imperative-style commands, such as `kubectl run` and `kubectl expose`, described [elsewhere](/docs/user-guide/quick-start), Kubernetes supports declarative configuration. Often times, configuration files are preferable to imperative commands, since they can be checked into version control and changes to the files can be code reviewed, which is especially important for more complex configurations, producing a more robust, reliable and archival system.
+In addition to the imperative-style commands, such as `kubectl run` and `kubectl expose`, described [elsewhere](/docs/user-guide/quick-start), Kubernetes supports declarative configuration. Oftentimes, configuration files are preferable to imperative commands, since they can be checked into version control and changes to the files can be code reviewed, which is especially important for more complex configurations, producing a more robust, reliable and archival system.
In the declarative style, all configuration is stored in YAML or JSON configuration files using Kubernetes's API resource schemas as the configuration schemas. `kubectl` can create, update, delete, and get API resources. The `apiVersion` (currently 'v1'?), resource `kind`, and resource `name` are used by `kubectl` to construct the appropriate API path to invoke for the specified operation.
diff --git a/docs/user-guide/deployments.md b/docs/user-guide/deployments.md
index 287317e0604b3..1fae4ddd7d76b 100644
--- a/docs/user-guide/deployments.md
+++ b/docs/user-guide/deployments.md
@@ -78,7 +78,7 @@ nginx-deployment-2035384211-qqcnn 1/1 Running 0 18s app
The created Replica Set will ensure that there are three nginx Pods at all times.
-**Note:** You must specify appropriate selector and pod template labels of a Deployment (in this case, `app = nginx`), i.e. don't overlap with other controllers (including Deployments, Replica Sets, Replication Controllers, etc.) Kubernetes won't stop you from doing that, and if you end up with multiple controllers that have overlapping selectors, those controllers will fight with each others and won't behave correctly.
+**Note:** You must specify an appropriate selector and pod template labels for a Deployment (in this case, `app = nginx`), i.e. ones that don't overlap with other controllers (including Deployments, Replica Sets, Replication Controllers, etc.). Kubernetes won't stop you from doing that, and if you end up with multiple controllers that have overlapping selectors, those controllers will fight with each other and won't behave correctly.
## The Status of a Deployment
@@ -503,7 +503,7 @@ number of Pods are less than the desired number.
Note that you should not create other pods whose labels match this selector, either directly, via another Deployment or via another controller such as Replica Sets or Replication Controllers. Otherwise, the Deployment will think that those pods were created by it. Kubernetes will not stop you from doing this.
-If you have multiple controllers that have overlapping selectors, the controllers will fight with each others and won't behave correctly.
+If you have multiple controllers that have overlapping selectors, the controllers will fight with each other and won't behave correctly.
### Strategy
diff --git a/docs/user-guide/federation/federated-services.md b/docs/user-guide/federation/federated-services.md
index 29137faddbb5f..1734d0af29951 100644
--- a/docs/user-guide/federation/federated-services.md
+++ b/docs/user-guide/federation/federated-services.md
@@ -7,7 +7,7 @@ assignees:
This guide explains how to use Kubernetes Federated Services to deploy
a common Service across multiple Kubernetes clusters. This makes it
-easy to achieve cross-cluster service discovery and availibility zone
+easy to achieve cross-cluster service discovery and availability zone
fault tolerance for your Kubernetes applications.
@@ -42,7 +42,7 @@ Once created, the Federated Service automatically:
1. creates matching Kubernetes Services in every cluster underlying your Cluster Federation,
2. monitors the health of those service "shards" (and the clusters in which they reside), and
-3. manages a set of DNS records in a public DNS provder (like Google Cloud DNS, or AWS Route 53), thus ensuring that clients
+3. manages a set of DNS records in a public DNS provider (like Google Cloud DNS, or AWS Route 53), thus ensuring that clients
of your federated service can seamlessly locate an appropriate healthy service endpoint at all times, even in the event of cluster,
availability zone or regional outages.
@@ -200,7 +200,7 @@ nginx.mynamespace.myfederation.svc.asia-east1-b.example.com. CNAME 180 ngin
nginx.mynamespace.myfederation.svc.asia-east1-c.example.com. A 180 130.211.56.221
nginx.mynamespace.myfederation.svc.asia-east1.example.com. A 180 130.211.57.243, 130.211.56.221
nginx.mynamespace.myfederation.svc.europe-west1.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.example.com.
-nginx.mynamespace.myfederation.svc.europe-west1-d.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.europe-west1.example.com.
+nginx.mynamespace.myfederation.svc.europe-west1-d.example.com. CNAME 180 nginx.mynamespace.myfederation.svc.europe-west1.example.com.
... etc.
```
@@ -224,7 +224,7 @@ due to caching by intermediate DNS servers.
### Some notes about the above example
-1. Notice that there is a normal ('A') record for each service shard that has at least one healthy backend endpoint. For example in us-central1-a, 104.197.247.191 is the external IP address of the service shard in that zone, and in asia-east1-a the address is 130.211.56.221.
+1. Notice that there is a normal ('A') record for each service shard that has at least one healthy backend endpoint. For example, in us-central1-a, 104.197.247.191 is the external IP address of the service shard in that zone, and in asia-east1-a the address is 130.211.56.221.
2. Similarly, there are regional 'A' records which include all healthy shards in that region. For example, 'us-central1'. These regional records are useful for clients which do not have a particular zone preference, and as a building block for the automated locality and failover mechanism described below.
2. For zones where there are currently no healthy backend endpoints, a CNAME ('Canonical Name') record is used to alias (automatically redirect) those queries to the next closest healthy zone. In the example, the service shard in us-central1-f currently has no healthy backend endpoints (i.e. Pods), so a CNAME record has been created to automatically redirect queries to other shards in that region (us-central1 in this case).
3. Similarly, if no healthy shards exist in the enclosing region, the search progresses further afield. In the europe-west1-d availability zone, there are no healthy backends, so queries are redirected to the broader europe-west1 region (which also has no healthy backends), and onward to the global set of healthy addresses (' nginx.mynamespace.myfederation.svc.example.com.')
@@ -295,7 +295,7 @@ availability zones and regions other than the ones local to a Pod by
specifying the appropriate DNS names explicitly, and not relying on
automatic DNS expansion. For example,
"nginx.mynamespace.myfederation.svc.europe-west1.example.com" will
-resolve to all of the currently healthy service shards in Europe, even
+resolve to all of the currently healthy service shards in Europe, even
if the Pod issuing the lookup is located in the U.S., and irrespective
of whether or not there are healthy shards of the service in the U.S.
This is useful for remote monitoring and other similar applications.
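+
+For example, a quick way to inspect what such a name resolves to (a sketch, assuming `dig` is installed and your resolver can see the federation's DNS zone):
+
+```shell
+dig +short nginx.mynamespace.myfederation.svc.europe-west1.example.com
+```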
@@ -366,7 +366,7 @@ Check that:
1. Your federation name, DNS provider, DNS domain name are configured correctly. Consult the [federation admin guide](/docs/admin/federation/) or [tutorial](https://github.com/kelseyhightower/kubernetes-cluster-federation) to learn
how to configure your Cluster Federation system's DNS provider (or have your cluster administrator do this for you).
2. Confirm that the Cluster Federation's service-controller is successfully connecting to and authenticating against your selected DNS provider (look for `service-controller` errors or successes in the output of `kubectl logs federation-controller-manager --namespace federation`)
-3. Confirm that the Cluster Federation's service-controller is successfully creating DNS records in your DNS provider (or outputting errors in it's logs explaining in more detail what's failing).
+3. Confirm that the Cluster Federation's service-controller is successfully creating DNS records in your DNS provider (or outputting errors in its logs explaining in more detail what's failing).
#### Matching DNS records are created in my DNS provider, but clients are unable to resolve against those names
Check that:
diff --git a/docs/user-guide/jobs.md b/docs/user-guide/jobs.md
index fb050a2dd8b0d..59e09e7bcd125 100644
--- a/docs/user-guide/jobs.md
+++ b/docs/user-guide/jobs.md
@@ -167,7 +167,7 @@ parallelism, for a variety or reasons:
A Container in a Pod may fail for a number of reasons, such as because the process in it exited with
a non-zero exit code, or the Container was killed for exceeding a memory limit, etc. If this
happens, and the `.spec.template.containers[].restartPolicy = "OnFailure"`, then the Pod stays
-on the node, but the Container is re-run. Therefore, your program needs to handle the the case when it is
+on the node, but the Container is re-run. Therefore, your program needs to handle the case when it is
restarted locally, or else specify `.spec.template.containers[].restartPolicy = "Never"`.
See [pods-states](/docs/user-guide/pod-states) for more information on `restartPolicy`.
diff --git a/docs/user-guide/jobs/expansions/index.md b/docs/user-guide/jobs/expansions/index.md
index 91e916844fcf8..c955bbf124bde 100644
--- a/docs/user-guide/jobs/expansions/index.md
+++ b/docs/user-guide/jobs/expansions/index.md
@@ -54,7 +54,7 @@ job-banana.yaml
job-cherry.yaml
```
-Here, we used `sed` to replace the string `$ITEM` with the the loop variable.
+Here, we used `sed` to replace the string `$ITEM` with the loop variable.
You could use any type of template language (jinja2, erb) or write a program
to generate the Job objects.
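+
+As a sketch of that `sed` step (assuming the template is saved as `job-tmpl.yaml` and contains the string `$ITEM`):
+
+```shell
+# Generates job-apple.yaml, job-banana.yaml and job-cherry.yaml.
+for item in apple banana cherry; do
+  sed "s/\$ITEM/$item/g" job-tmpl.yaml > job-$item.yaml
+done
+```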
diff --git a/docs/user-guide/jobs/work-queue-1/index.md b/docs/user-guide/jobs/work-queue-1/index.md
index 7f2ba0b6ac886..e579f4b29591e 100644
--- a/docs/user-guide/jobs/work-queue-1/index.md
+++ b/docs/user-guide/jobs/work-queue-1/index.md
@@ -122,8 +122,7 @@ root@temp-loe07:/#
```
In the last command, the `amqp-consume` tool takes one message (`-c 1`)
-from the queue, and passes that message to the standard input of an
-an arbitrary command. In this case, the program `cat` is just printing
+from the queue, and passes that message to the standard input of an arbitrary command. In this case, the program `cat` is just printing
out what it gets on the standard input, and the echo is just to add a carriage
return so the example is readable.
@@ -169,7 +168,7 @@ example program:
{% include code.html language="python" file="worker.py" ghlink="/docs/user-guide/job/work-queue-1/worker.py" %}
-Now, build an an image. If you are working in the source
+Now, build an image. If you are working in the source
tree, then change directory to `examples/job/work-queue-1`.
Otherwise, make a temporary directory, change to it,
download the [Dockerfile](Dockerfile?raw=true),
@@ -275,7 +274,7 @@ not all items will be processed.
If the number of completions is set to more than the number of items in the queue,
then the Job will not appear to be completed, even though all items in the queue
have been processed. It will start additional pods which will block waiting
-for a mesage.
+for a message.
There is an unlikely race with this pattern. If the container is killed in between the time
that the message is acknowledged by the amqp-consume command and the time that the container
diff --git a/docs/user-guide/jobs/work-queue-2/index.md b/docs/user-guide/jobs/work-queue-2/index.md
index 434859093b709..328ece5a641cb 100644
--- a/docs/user-guide/jobs/work-queue-2/index.md
+++ b/docs/user-guide/jobs/work-queue-2/index.md
@@ -31,7 +31,7 @@ Here is an overview of the steps in this example:
For this example, for simplicity, we will start a single instance of Redis.
See the [Redis Example](https://github.com/kubernetes/kubernetes/tree/{{page.githubbranch}}/examples/redis/README.md) for an example
-of deploying Redis scaleably and redundantly.
+of deploying Redis scalably and redundantly.
Start a temporary Pod running Redis and a service so we can find it.
diff --git a/docs/user-guide/kubeconfig-file.md b/docs/user-guide/kubeconfig-file.md
index 1271b49d505be..043b6cc002c46 100644
--- a/docs/user-guide/kubeconfig-file.md
+++ b/docs/user-guide/kubeconfig-file.md
@@ -16,7 +16,7 @@ So in order to easily switch between multiple clusters, for multiple users, a ku
This file contains a series of authentication mechanisms and cluster connection information associated with nicknames. It also introduces the concept of a tuple of authentication information (user) and cluster connection information called a context that is also associated with a nickname.
-Multiple kubeconfig files are allowed, if specified explicitly. At runtime they are loaded and merged together along with override options specified from the command line (see [rules](#loading-and-merging) below).
+Multiple kubeconfig files are allowed, if specified explicitly. At runtime they are loaded and merged along with override options specified from the command line (see [rules](#loading-and-merging) below).
## Related discussion
diff --git a/docs/user-guide/kubectl-overview.md b/docs/user-guide/kubectl-overview.md
index 4c0304f142156..b0a6c5cc5a017 100644
--- a/docs/user-guide/kubectl-overview.md
+++ b/docs/user-guide/kubectl-overview.md
@@ -266,7 +266,7 @@ $ kubectl exec -ti /bin/bash
// Return a snapshot of the logs from pod <pod-name>.
$ kubectl logs <pod-name>
-// Start streaming the logs from pod <pod-name>. This is similiar to the 'tail -f' Linux command.
+// Start streaming the logs from pod <pod-name>. This is similar to the 'tail -f' Linux command.
$ kubectl logs -f <pod-name>
```
diff --git a/docs/user-guide/kubectl/kubectl_autoscale.md b/docs/user-guide/kubectl/kubectl_autoscale.md
index 9e9982df2fc3c..7cc24a1feefd4 100644
--- a/docs/user-guide/kubectl/kubectl_autoscale.md
+++ b/docs/user-guide/kubectl/kubectl_autoscale.md
@@ -20,10 +20,10 @@ kubectl autoscale (-f FILENAME | TYPE NAME | TYPE/NAME) [--min=MINPODS] --max=MA
### Examples
```
-# Auto scale a deployment "foo", with the number of pods between 2 to 10, no target CPU utilization specfied so a default autoscaling policy will be used:
+# Auto scale a deployment "foo", with the number of pods between 2 and 10, no target CPU utilization specified, so a default autoscaling policy will be used:
kubectl autoscale deployment foo --min=2 --max=10
-# Auto scale a replication controller "foo", with the number of pods between 1 to 5, target CPU utilization at 80%:
+# Auto scale a replication controller "foo", with the number of pods between 1 and 5, target CPU utilization at 80%:
kubectl autoscale rc foo --max=5 --cpu-percent=80
```
diff --git a/docs/user-guide/kubectl/kubectl_config.md b/docs/user-guide/kubectl/kubectl_config.md
index 5aed2a043cf61..8adac868b6f11 100644
--- a/docs/user-guide/kubectl/kubectl_config.md
+++ b/docs/user-guide/kubectl/kubectl_config.md
@@ -12,7 +12,7 @@ config modifies kubeconfig files using subcommands like "kubectl config set curr
The loading order follows these rules:
1. If the --kubeconfig flag is set, then only that file is loaded. The flag may only be set once and no merging takes place.
-2. If $KUBECONFIG environment variable is set, then it is used a list of paths (normal path delimitting rules for your system). These paths are merged together. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
+2. If the $KUBECONFIG environment variable is set, then it is used as a list of paths (following the normal path-delimiting rules for your system). These paths are merged. When a value is modified, it is modified in the file that defines the stanza. When a value is created, it is created in the first file that exists. If no files in the chain exist, then it creates the last file in the list.
3. Otherwise, ${HOME}/.kube/config is used and no merging takes place.
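+
+As an illustration of rule 2 (file names here are hypothetical), on a Unix-like system the list is colon-delimited:
+
+```shell
+# Merge a personal kubeconfig with a team-shared one; when the same value
+# appears in both, the first file that defines it wins.
+export KUBECONFIG=$HOME/.kube/config:$HOME/.kube/team-config
+kubectl config view   # displays the merged result
+```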
diff --git a/docs/user-guide/kubectl/kubectl_config_set-cluster.md b/docs/user-guide/kubectl/kubectl_config_set-cluster.md
index 6751cb7fa8174..457531be77c8b 100644
--- a/docs/user-guide/kubectl/kubectl_config_set-cluster.md
+++ b/docs/user-guide/kubectl/kubectl_config_set-cluster.md
@@ -12,7 +12,7 @@ Sets a cluster entry in kubeconfig.
Specifying a name that already exists will merge new fields on top of existing values for those fields.
```
-kubectl config set-cluster NAME [--server=server] [--certificate-authority=path/to/certficate/authority] [--insecure-skip-tls-verify=true]
+kubectl config set-cluster NAME [--server=server] [--certificate-authority=path/to/certificate/authority] [--insecure-skip-tls-verify=true]
```
### Examples
diff --git a/docs/user-guide/kubectl/kubectl_config_set.md b/docs/user-guide/kubectl/kubectl_config_set.md
index 96aa933e04eb4..fafbc9ab39a97 100644
--- a/docs/user-guide/kubectl/kubectl_config_set.md
+++ b/docs/user-guide/kubectl/kubectl_config_set.md
@@ -9,7 +9,7 @@ Sets an individual value in a kubeconfig file
Sets an individual value in a kubeconfig file
-PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots.
+PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.
PROPERTY_VALUE is the new value you wish to set. Binary fields such as 'certificate-authority-data' expect a base64 encoded string unless the --set-raw-bytes flag is used.
```
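+
+For instance (the context name "dev" and the namespace value are hypothetical), a dotted PROPERTY_NAME can be used like this:
+
+```shell
+# Set the default namespace recorded for the context named "dev".
+kubectl config set contexts.dev.namespace staging
+```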
diff --git a/docs/user-guide/kubectl/kubectl_config_unset.md b/docs/user-guide/kubectl/kubectl_config_unset.md
index c7d28e2d8ddf5..3f1fe7ebdef3c 100644
--- a/docs/user-guide/kubectl/kubectl_config_unset.md
+++ b/docs/user-guide/kubectl/kubectl_config_unset.md
@@ -9,7 +9,7 @@ Unsets an individual value in a kubeconfig file
Unsets an individual value in a kubeconfig file
-PROPERTY_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots.
+PROPERTY_NAME is a dot delimited name where each token represents either an attribute name or a map key. Map keys may not contain dots.
```
kubectl config unset PROPERTY_NAME
diff --git a/docs/user-guide/kubectl/kubectl_config_view.md b/docs/user-guide/kubectl/kubectl_config_view.md
index 38c862cb9221a..122c2f8de5338 100644
--- a/docs/user-guide/kubectl/kubectl_config_view.md
+++ b/docs/user-guide/kubectl/kubectl_config_view.md
@@ -29,8 +29,8 @@ kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}'
### Options
```
- --flatten[=false]: flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files)
- --merge[=true]: merge together the full hierarchy of kubeconfig files
+ --flatten[=false]: flatten the resulting kubeconfig file into self-contained output (useful for creating portable kubeconfig files)
+ --merge[=true]: merge the full hierarchy of kubeconfig files
--minify[=false]: remove all information not used by current-context from the output
--no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.3/docs/user-guide/jsonpath.md].
diff --git a/docs/user-guide/kubectl/kubectl_expose.md b/docs/user-guide/kubectl/kubectl_expose.md
index 8721819ff5189..5247fd884cef9 100644
--- a/docs/user-guide/kubectl/kubectl_expose.md
+++ b/docs/user-guide/kubectl/kubectl_expose.md
@@ -59,7 +59,7 @@ kubectl expose deployment nginx --port=80 --target-port=8000
-f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service
--generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'.
-l, --labels="": Labels to apply to the service created by this call.
- --load-balancer-ip="": IP to assign to to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
+ --load-balancer-ip="": IP to assign to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).
--name="": The name for the newly created object.
--no-headers[=false]: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/release-1.3/docs/user-guide/jsonpath.md].
diff --git a/docs/user-guide/labels.md b/docs/user-guide/labels.md
index 20d34d2626047..91befcd68194a 100644
--- a/docs/user-guide/labels.md
+++ b/docs/user-guide/labels.md
@@ -104,7 +104,7 @@ LIST and WATCH operations may specify label selectors to filter the sets of obje
* _equality-based_ requirements: `?labelSelector=environment%3Dproduction,tier%3Dfrontend`
* _set-based_ requirements: `?labelSelector=environment+in+%28production%2Cqa%29%2Ctier+in+%28frontend%29`
-Both label selector styles can be used to list or watch resources via a REST client. For example targeting `apiserver` with `kubectl` and using _equality-based_ one may write:
+Both label selector styles can be used to list or watch resources via a REST client. For example, targeting `apiserver` with `kubectl` and using _equality-based_ one may write:
```shell
$ kubectl get pods -l environment=production,tier=frontend
diff --git a/docs/user-guide/petset.md b/docs/user-guide/petset.md
index 47213c188d860..7694ca3d87c39 100644
--- a/docs/user-guide/petset.md
+++ b/docs/user-guide/petset.md
@@ -226,7 +226,7 @@ It's not necessary to "discover" the governing Service of a Pet Set, since it's
Usually pets also need to find their peers. In the previous nginx example, we just used `kubectl` to get the names of existing pods, and as humans, we could tell which ones belonged to a given Pet Set. Another way to find peers is by contacting the API server, just like `kubectl`, but that has several disadvantages (you end up implementing a Kubernetes specific init system that runs as pid 1 in your application container).
-Pet Set gives you a way to disover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container).
+Pet Set gives you a way to discover your peers using DNS records. To illustrate this we can use the previous example (note: one usually doesn't `apt-get` in a container).
```shell
$ kubectl exec -it web-0 /bin/sh
diff --git a/docs/user-guide/petset/bootstrapping/index.md b/docs/user-guide/petset/bootstrapping/index.md
index 8a4924e3033d7..7ad12cc8e8b4d 100644
--- a/docs/user-guide/petset/bootstrapping/index.md
+++ b/docs/user-guide/petset/bootstrapping/index.md
@@ -19,7 +19,7 @@ This example shows you how to "carry over" runtime state across Pet restart by s
### Background
-Applications that incrementally build state usually need strong guarantees that they will not restart for extended durations. This is tricky to achieve with containers, so instead, we will ensure that the results of previous computations are trasferred to future pets. Doing so is straight-forward using vanilla Persistent Volumes (which Pet Set already gives you), unless the volume mount point itself needs to be initialized for the Pet to start. This is exactly the case with "virtual machine" docker images, like those based on ubuntu or fedora. Such images embed the entier rootfs of the distro, including package managers like `apt-get` that assume a certain layout of the filesystem. Meaning:
+Applications that incrementally build state usually need strong guarantees that they will not restart for extended durations. This is tricky to achieve with containers, so instead, we will ensure that the results of previous computations are transferred to future pets. Doing so is straightforward using vanilla Persistent Volumes (which Pet Set already gives you), unless the volume mount point itself needs to be initialized for the Pet to start. This is exactly the case with "virtual machine" docker images, like those based on ubuntu or fedora. Such images embed the entire rootfs of the distro, including package managers like `apt-get` that assume a certain layout of the filesystem. Meaning:
* If you mount an empty volume under `/usr`, you won't be able to `apt-get`
* If you mount an empty volume under `/lib`, all your `apt-gets` will fail because there are no system libraries
@@ -166,7 +166,7 @@ vm-1.ub.default.svc.cluster.local
### Nginx master/slave cluster
-Lets create a Pet Set that writes out its own config based on a list of peers at initalization time, as described above.
+Let's create a Pet Set that writes out its own config based on a list of peers at initialization time, as described above.
Download and create [this](petset_peers.yaml) petset. It will set up 2 nginx webservers, but the second one will proxy all requests to the first:
@@ -192,7 +192,7 @@ web-0 1/1 Running 0 1m
web-1 1/1 Running 0 47s
```
-web-1 will redirect all requests to it's "master":
+web-1 will redirect all requests to its "master":
```shell
$ kubectl exec -it web-1 -- curl localhost
diff --git a/docs/user-guide/secrets/index.md b/docs/user-guide/secrets/index.md
index 9c348a07fbb09..fff246c93754e 100644
--- a/docs/user-guide/secrets/index.md
+++ b/docs/user-guide/secrets/index.md
@@ -177,7 +177,7 @@ To consume a Secret in a volume in a Pod:
1. Create a secret or use an existing one. Multiple pods can reference the same secret.
1. Modify your Pod definition to add a volume under `spec.volumes[]`. Name the volume anything, and have a `spec.volumes[].secret.secretName` field equal to the name of the secret object.
1. Add a `spec.containers[].volumeMounts[]` to each container that needs the secret. Specify `spec.containers[].volumeMounts[].readOnly = true` and `spec.containers[].volumeMounts[].mountPath` to an unused directory name where you would like the secrets to appear.
-1. Modify your image and/or command line so that the the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`.
+1. Modify your image and/or command line so that the program looks for files in that directory. Each key in the secret `data` map becomes the filename under `mountPath`.
This is an example of a pod that mounts a secret in a volume:
@@ -293,7 +293,7 @@ To use a secret in an environment variable in a pod:
1. Create a secret or use an existing one. Multiple pods can reference the same secret.
1. Modify your Pod definition in each container that you wish to consume the value of a secret key to add an environment variable for each secret key you wish to consume. The environment variable that consumes the secret key should populate the secret's name and key in `env[x].valueFrom.secretKeyRef`.
-1. Modify your image and/or command line so that the the program looks for values in the specified environment variables
+1. Modify your image and/or command line so that the program looks for values in the specified environment variables
This is an example of a pod that uses secrets from environment variables:
diff --git a/docs/user-guide/security-context.md b/docs/user-guide/security-context.md
index 78c4ae46f2b2d..2d5807a449c58 100644
--- a/docs/user-guide/security-context.md
+++ b/docs/user-guide/security-context.md
@@ -48,7 +48,7 @@ for more details.
#### `selinuxOptions`
-Volumes which support SELinux labeling are relabled to be accessable
+Volumes which support SELinux labeling are relabeled to be accessible
by the label specified under `seLinuxOptions`. Usually you will only
need to set the `level` section. This sets the SELinux MCS label given
to all containers within the pod as well as the volume.
diff --git a/docs/user-guide/update-demo/index.md b/docs/user-guide/update-demo/index.md
index 63262e69d2a9a..aa10b2a362010 100644
--- a/docs/user-guide/update-demo/index.md
+++ b/docs/user-guide/update-demo/index.md
@@ -32,7 +32,7 @@ $ kubectl proxy --www=docs/user-guide/update-demo/local/ &
I0218 15:18:31.623279 67480 proxy.go:36] Starting to serve on localhost:8001
```
-Now visit the the [demo website](http://localhost:8001/static). You won't see anything much quite yet.
+Now visit the [demo website](http://localhost:8001/static). You won't see anything much quite yet.
### Step Two: Run the replication controller
diff --git a/docs/user-guide/volumes.md b/docs/user-guide/volumes.md
index d118e4e9505f5..4d753ca3081fd 100644
--- a/docs/user-guide/volumes.md
+++ b/docs/user-guide/volumes.md
@@ -125,7 +125,7 @@ Watch out when using this type of volume, because:
* when Kubernetes adds resource-aware scheduling, as is planned, it will not be
able to account for resources used by a `hostPath`
* the directories created on the underlying hosts are only writable by root, you either need
- to run your process as root in a priveleged container or modify the file permissions on
+ to run your process as root in a privileged container or modify the file permissions on
the host to be able to write to a `hostPath` volume
#### Example pod
@@ -244,7 +244,7 @@ There are some restrictions when using an awsElasticBlockStore volume:
#### Creating an EBS volume
-Before you can use a EBS volume with a pod, you need to create it.
+Before you can use an EBS volume with a pod, you need to create it.
```shell
aws ec2 create-volume --availability-zone eu-west-1a --size 10 --volume-type gp2
@@ -379,7 +379,7 @@ mounts an empty directory and clones a git repository into it for your pod to
use. In the future, such volumes may be moved to an even more decoupled model,
rather than extending the Kubernetes API for every such use case.
-Here is a example for gitRepo volume:
+Here is an example for gitRepo volume:
```yaml
apiVersion: v1
diff --git a/js/script.js b/js/script.js
index f714cdec13344..aed501d70108e 100755
--- a/js/script.js
+++ b/js/script.js
@@ -92,14 +92,13 @@ function px(n){
var kub = (function () {
var HEADER_HEIGHT;
- var html, header, mainNav, quickstartButton, hero, encyclopedia, footer, wishField, headlineWrapper;
+ var html, header, mainNav, quickstartButton, hero, encyclopedia, footer, headlineWrapper;
$(document).ready(function () {
html = $('html');
body = $('body');
header = $('header');
mainNav = $('#mainNav');
- wishField = $('#wishField');
quickstartButton = $('#quickstartButton');
hero = $('#hero');
encyclopedia = $('#encyclopedia');
@@ -112,13 +111,11 @@ var kub = (function () {
window.addEventListener('resize', resetTheView);
window.addEventListener('scroll', resetTheView);
window.addEventListener('keydown', handleKeystrokes);
- wishField[0].addEventListener('keydown', handleKeystrokes);
document.onunload = function(){
window.removeEventListener('resize', resetTheView);
window.removeEventListener('scroll', resetTheView);
window.removeEventListener('keydown', handleKeystrokes);
- wishField[0].removeEventListener('keydown', handleKeystrokes);
};
setInterval(setFooterType, 10);
@@ -189,24 +186,8 @@ var kub = (function () {
}
}
- function submitWish(textfield) {
- window.location.replace("https://github.com/kubernetes/kubernetes.github.io/issues/new?title=I%20wish%20" +
- window.location.pathname + "%20" + textfield.value + "&body=I%20wish%20" +
- window.location.pathname + "%20" + textfield.value);
-
- textfield.value = '';
- textfield.blur();
- }
-
function handleKeystrokes(e) {
switch (e.which) {
- case 13: {
- if (e.currentTarget === wishField[0]) {
- submitWish(wishField[0]);
- }
- break;
- }
-
case 27: {
if (html.hasClass('open-nav')) {
toggleMenu();