diff --git a/Makefile b/Makefile index c791e7ad28884..713af5f0a909a 100644 --- a/Makefile +++ b/Makefile @@ -42,7 +42,7 @@ export KUBE_GOLDFLAGS # make all # make all WHAT=cmd/kubelet GOFLAGS=-v all: - hack/build-go.sh $(WHAT) + @hack/make-rules/build.sh $(WHAT) .PHONY: all # Build ginkgo @@ -50,7 +50,7 @@ all: # Example: # make ginkgo ginkgo: - hack/build-go.sh vendor/github.com/onsi/ginkgo/ginkgo + hack/make-rules/build.sh vendor/github.com/onsi/ginkgo/ginkgo .PHONY: ginkgo # Runs all the presubmission verifications. @@ -62,7 +62,7 @@ ginkgo: # make verify # make verify BRANCH=branch_x verify: - KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/verify-all.sh -v + @KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh -v .PHONY: verify # Build and run tests. @@ -79,24 +79,24 @@ verify: # make test # make check WHAT=pkg/kubelet GOFLAGS=-v check test: - hack/test-go.sh $(WHAT) $(TESTS) + @hack/make-rules/test.sh $(WHAT) $(TESTS) .PHONY: check test # Build and run integration tests. # # Example: -# make test_integration -test_integration: - hack/test-integration.sh -.PHONY: test_integration test_integ +# make test-integration +test-integration: + @hack/make-rules/test-integration.sh +.PHONY: test-integration # Build and run end-to-end tests. # # Example: -# make test_e2e -test_e2e: - go run hack/e2e.go -v --build --up --test --down -.PHONY: test_e2e +# make test-e2e +test-e2e: ginkgo + @go run hack/e2e.go -v --build --up --test --down +.PHONY: test-e2e # Build and run node end-to-end tests. # @@ -116,12 +116,20 @@ test_e2e: # INSTANCE_PREFIX: for REMOTE=true only. Instances created from images will have the name "${INSTANCE_PREFIX}-${IMAGE_NAME}". Defaults to "test". # # Example: -# make test_e2e_node FOCUS=kubelet SKIP=container -# make test_e2e_node REMOTE=true DELETE_INSTANCES=true +# make test-e2e-node FOCUS=kubelet SKIP=container +# make test-e2e-node REMOTE=true DELETE_INSTANCES=true # Build and run tests. -test_e2e_node: ginkgo - hack/e2e-node-test.sh -.PHONY: test_e2e_node +test-e2e-node: ginkgo + @hack/make-rules/test-e2e-node.sh +.PHONY: test-e2e-node + +# Build and run cmdline tests. +# +# Example: +# make test-cmd +test-cmd: + @hack/make-rules/test-cmd.sh +.PHONY: test-cmd # Remove all build artifacts. # @@ -139,15 +147,12 @@ clean: # WHAT: Directory names to vet. All *.go files under these # directories will be vetted. If not specified, "everything" will be # vetted. -# TESTS: Same as WHAT. -# GOFLAGS: Extra flags to pass to 'go' when building. -# GOLDFLAGS: Extra linking flags to pass to 'go' when building. # # Example: # make vet # make vet WHAT=pkg/kubelet vet: - hack/verify-govet.sh $(WHAT) $(TESTS) + @hack/make-rules/vet.sh $(WHAT) .PHONY: vet # Build a release @@ -155,7 +160,7 @@ vet: # Example: # make release release: - build/release.sh + @build/release.sh .PHONY: release # Build a release, but skip tests @@ -163,6 +168,5 @@ release: # Example: # make release-skip-tests release-skip-tests quick-release: - KUBE_RELEASE_RUN_TESTS=n KUBE_FASTBUILD=true build/release.sh + @KUBE_RELEASE_RUN_TESTS=n KUBE_FASTBUILD=true build/release.sh .PHONY: release-skip-tests quick-release - diff --git a/build/README.md b/build/README.md index 4040c15792334..2c9a964295cca 100644 --- a/build/README.md +++ b/build/README.md @@ -24,10 +24,11 @@ There is also early support for building Docker "run" containers The following scripts are found in the `build/` directory: * `run.sh`: Run a command in a build docker container.
Common invocations: - * `run.sh hack/build-go.sh`: Build just linux binaries in the container. Pass options and packages as necessary. + * `run.sh make`: Build just linux binaries in the container. Pass options and packages as necessary. * `run.sh hack/build-cross.sh`: Build all binaries for all platforms - * `run.sh hack/test-go.sh`: Run all unit tests - * `run.sh hack/test-integration.sh`: Run integration test + * `run.sh make test`: Run all unit tests + * `run.sh make test-integration`: Run integration test + * `run.sh make test-cmd`: Run CLI tests * `copy-output.sh`: This will copy the contents of `_output/dockerized/bin` from any remote Docker container to the local `_output/dockerized/bin`. Right now this is only necessary on Mac OS X with `boot2docker` when your git repo isn't under `/Users`. * `make-clean.sh`: Clean out the contents of `_output/dockerized` and remove any local built container images. * `shell.sh`: Drop into a `bash` shell in a build container with a snapshot of the current repo code. diff --git a/build/release.sh b/build/release.sh index 2b2704b39dc80..1485629869829 100755 --- a/build/release.sh +++ b/build/release.sh @@ -33,8 +33,8 @@ kube::build::build_image kube::build::run_build_command hack/build-cross.sh if [[ $KUBE_RELEASE_RUN_TESTS =~ ^[yY]$ ]]; then - kube::build::run_build_command hack/test-go.sh - kube::build::run_build_command hack/test-integration.sh + kube::build::run_build_command make test + kube::build::run_build_command make test-integration fi if [[ "${FEDERATION:-}" == "true" ]];then diff --git a/cluster/azure-legacy/util.sh b/cluster/azure-legacy/util.sh index 010dde4d4edac..b2c7c434e1b92 100644 --- a/cluster/azure-legacy/util.sh +++ b/cluster/azure-legacy/util.sh @@ -514,7 +514,7 @@ function kube-down { #} # ----------------------------------------------------------------------------- -# Cluster specific test helpers used from hack/e2e-test.sh +# Cluster specific test helpers # Execute prior to running tests to build a release if required for env. # diff --git a/contrib/mesos/ci/test-integration.sh b/contrib/mesos/ci/test-integration.sh index f356d30c7251d..d44865bf40527 100755 --- a/contrib/mesos/ci/test-integration.sh +++ b/contrib/mesos/ci/test-integration.sh @@ -31,4 +31,4 @@ TEST_ARGS="$@" KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE}")/../../.." && pwd) -"${KUBE_ROOT}/contrib/mesos/ci/run.sh" make clean test_integration ${TEST_ARGS} +"${KUBE_ROOT}/contrib/mesos/ci/run.sh" make clean test-integration ${TEST_ARGS} diff --git a/contrib/mesos/docs/ha.md b/contrib/mesos/docs/ha.md index 491512ede090b..9ddbce3a8da29 100644 --- a/contrib/mesos/docs/ha.md +++ b/contrib/mesos/docs/ha.md @@ -155,7 +155,7 @@ K8SM_IMAGE_NAME=haih/k8sm git clone https://github.com/mesosphere/kubernetes cd kubernetes git checkout release-v0.7-v1.1 -KUBERNETES_CONTRIB=mesos build/run.sh hack/build-go.sh +KUBERNETES_CONTRIB=mesos build/run.sh make cd .. sudo docker build -t $K8SM_IMAGE_NAME --no-cache . EOF diff --git a/docs/devel/adding-an-APIGroup.md b/docs/devel/adding-an-APIGroup.md index cefa85641442d..f05009ddbaf57 100644 --- a/docs/devel/adding-an-APIGroup.md +++ b/docs/devel/adding-an-APIGroup.md @@ -119,8 +119,8 @@ pkg/kubectl/cmd/util/factory.go. 1. Add your group in pkg/api/testapi/testapi.go, then you can access the group in tests through testapi.``; -2. Add your "group/version" to `KUBE_API_VERSIONS` and `KUBE_TEST_API_VERSIONS` -in hack/test-go.sh. +2. 
Add your "group/version" to `KUBE_TEST_API_VERSIONS` in + hack/make-rules/test.sh and hack/make-rules/test-integration.sh TODO: Add a troubleshooting section. diff --git a/docs/devel/development.md b/docs/devel/development.md index 82014e7cad66a..4c00072e57b2f 100644 --- a/docs/devel/development.md +++ b/docs/devel/development.md @@ -71,11 +71,16 @@ up a GOPATH. To build Kubernetes using your local Go development environment (generate linux binaries): - hack/build-go.sh +```sh + make +``` + You may pass build options and packages to the script as necessary. To build binaries for all platforms: +```sh hack/build-cross.sh +``` ## Workflow @@ -314,8 +319,8 @@ Three basic commands let you run unit, integration and/or e2e tests: ```sh cd kubernetes -hack/test-go.sh # Run unit tests -hack/test-integration.sh # Run integration tests, requires etcd +make test # Run unit tests +make test-integration # Run integration tests, requires etcd go run hack/e2e.go -v --build --up --test --down # Run e2e tests ``` diff --git a/docs/devel/e2e-node-tests.md b/docs/devel/e2e-node-tests.md index 03ee4811f7c1c..f4713855235e3 100644 --- a/docs/devel/e2e-node-tests.md +++ b/docs/devel/e2e-node-tests.md @@ -57,7 +57,7 @@ Prerequisites: From the Kubernetes base directory, run: ```sh -make test_e2e_node +make test-e2e-node ``` This will: run the *ginkgo* binary against the subdirectory *test/e2e_node*, which will in turn: @@ -87,7 +87,7 @@ Prerequisites: Run: ```sh -make test_e2e_node REMOTE=true +make test-e2e-node REMOTE=true ``` This will: @@ -124,7 +124,7 @@ provided by the default image. List the available test images using gcloud. ```sh -make test_e2e_node LIST_IMAGES=true +make test-e2e-node LIST_IMAGES=true ``` This will output a list of the available images for the default image project. @@ -132,7 +132,7 @@ This will output a list of the available images for the default image project. Then run: ```sh -make test_e2e_node REMOTE=true IMAGES="" +make test-e2e-node REMOTE=true IMAGES="" ``` ## Run tests against a running GCE instance (not an image) @@ -140,7 +140,7 @@ make test_e2e_node REMOTE=true IMAGES="" This is useful if you have a host instance running already and want to run the tests there instead of on a new instance. ```sh -make test_e2e_node REMOTE=true HOSTS="" +make test-e2e-node REMOTE=true HOSTS="" ``` ## Delete instance after tests run @@ -148,7 +148,7 @@ make test_e2e_node REMOTE=true HOSTS="" This is useful if you want to recreate the instance for each test run to trigger flakes related to starting the instance. ```sh -make test_e2e_node REMOTE=true DELETE_INSTANCES=true +make test-e2e-node REMOTE=true DELETE_INSTANCES=true ``` ## Keep instance, test binaries, and *processes* around after tests run @@ -156,7 +156,7 @@ make test_e2e_node REMOTE=true DELETE_INSTANCES=true This is useful if you want to manually inspect or debug the kubelet process run as part of the tests. ```sh -make test_e2e_node REMOTE=true CLEANUP=false +make test-e2e-node REMOTE=true CLEANUP=false ``` ## Run tests using an image in another project @@ -164,7 +164,7 @@ make test_e2e_node REMOTE=true CLEANUP=false This is useful if you want to create your own host image in another project and use it for testing. ```sh -make test_e2e_node REMOTE=true IMAGE_PROJECT="" IMAGES="" +make test-e2e-node REMOTE=true IMAGE_PROJECT="" IMAGES="" ``` Setting up your own host image may require additional steps such as installing etcd or docker.
See @@ -176,7 +176,7 @@ This is useful if you want to create instances using a different name so that yo test in parallel against different instances of the same image. ```sh -make test_e2e_node REMOTE=true INSTANCE_PREFIX="my-prefix" +make test-e2e-node REMOTE=true INSTANCE_PREFIX="my-prefix" ``` # Additional Test Options for both Remote and Local execution @@ -186,13 +186,13 @@ make test_e2e_node REMOTE=true INSTANCE_PREFIX="my-prefix" To run tests matching a regex: ```sh -make test_e2e_node REMOTE=true FOCUS="" +make test-e2e-node REMOTE=true FOCUS="" ``` To run tests NOT matching a regex: ```sh -make test_e2e_node REMOTE=true SKIP="" +make test-e2e-node REMOTE=true SKIP="" ``` ## Run tests continually until they fail @@ -202,7 +202,7 @@ run the tests until they fail. **Note: this will only perform test setup once ( less useful for catching flakes related to creating the instance from an image.** ```sh -make test_e2e_node REMOTE=true RUN_UNTIL_FAILURE=true +make test-e2e-node REMOTE=true RUN_UNTIL_FAILURE=true ``` # Notes on tests run by the Kubernetes project during pre-, post- submit. diff --git a/docs/devel/pull-requests.md b/docs/devel/pull-requests.md index 2037b410e09a7..40705971f53f3 100644 --- a/docs/devel/pull-requests.md +++ b/docs/devel/pull-requests.md @@ -69,9 +69,9 @@ Additionally, for infrequent or new contributors, we require the on call to appl The following will save time for both you and your reviewer: * Enable [pre-commit hooks](development.md#committing-changes-to-your-fork) and verify they pass. -* Verify `hack/verify-all.sh` passes. -* Verify `hack/test-go.sh` passes. -* Verify `hack/test-integration.sh` passes. +* Verify `make verify` passes. +* Verify `make test` passes. +* Verify `make test-integration` passes. ## Release Notes diff --git a/docs/devel/releasing.md b/docs/devel/releasing.md index 2c8b5d00b0b02..eb48f469721b7 100644 --- a/docs/devel/releasing.md +++ b/docs/devel/releasing.md @@ -257,9 +257,11 @@ been automated that need to happen after the branch has been cut: *Please note that this information may be out of date. The scripts are the authoritative source on how version injection works.* -Kubernetes may be built from either a git tree (using `hack/build-go.sh`) or -from a tarball (using either `hack/build-go.sh` or `go install`) or directly by -the Go native build system (using `go get`). +Kubernetes may be built from either a git tree or from a tarball. We use +`make` to encapsulate a number of build steps into a single command. This +includes generating code, which means that tools like `go build` might work +(once files are generated) but might be using stale generated code. `make` is +the supported way to build. When building from git, we want to be able to insert specific information about the build tree at build time. In particular, we want to use the output of `git @@ -294,7 +296,7 @@ yield binaries that will identify themselves as `v0.4-dev` and will not be able to provide you with a SHA1. To add the extra versioning information when building from git, the -`hack/build-go.sh` script will gather that information (using `git describe` and +`make` build will gather that information (using `git describe` and `git rev-parse`) and then create a `-ldflags` string to pass to `go install` and tell the Go linker to override the contents of those variables at build time.
It can, for instance, tell it to override `gitVersion` and set it to diff --git a/docs/devel/running-locally.md b/docs/devel/running-locally.md index 517b12c8c701e..0e56456ef85c6 100644 --- a/docs/devel/running-locally.md +++ b/docs/devel/running-locally.md @@ -170,7 +170,7 @@ You are running a single node setup. This has the limitation of only supporting ```sh cd kubernetes -hack/build-go.sh +make hack/local-up-cluster.sh ``` diff --git a/docs/devel/testing.md b/docs/devel/testing.md index dba01c10e9f0e..3d7fb452b3fcb 100644 --- a/docs/devel/testing.md +++ b/docs/devel/testing.md @@ -83,13 +83,13 @@ passing, so it is often a good idea to make sure the e2e tests work as well. ### Run all unit tests -The `hack/test-go.sh` script is the entrypoint for running the unit tests that -ensures that `GOPATH` is set up correctly. If you have `GOPATH` set up -correctly, you can also just use `go test` directly. +`make test` is the entrypoint for running the unit tests that ensures that +`GOPATH` is set up correctly. If you have `GOPATH` set up correctly, you can +also just use `go test` directly. ```sh cd kubernetes -hack/test-go.sh # Run all unit tests. +make test # Run all unit tests. ``` ### Set go flags during unit tests @@ -99,18 +99,23 @@ You can set [go flags](https://golang.org/cmd/go/) by setting the ### Run unit tests from certain packages -The `hack/test-go.sh` script accepts packages as arguments; the -`k8s.io/kubernetes` prefix is added automatically to these: +`make test` accepts packages as arguments; the `k8s.io/kubernetes` prefix is +added automatically to these: ```sh -hack/test-go.sh pkg/api # run tests for pkg/api -hack/test-go.sh pkg/api pkg/kubelet # run tests for pkg/api and pkg/kubelet +make test WHAT=pkg/api # run tests for pkg/api +``` + +To run multiple targets you need quotes: + +```sh +make test WHAT="pkg/api pkg/kubelet" # run tests for pkg/api and pkg/kubelet ``` In a shell, it's often handy to use brace expansion: ```sh -hack/test-go.sh pkg/{api,kubelet} # run tests for pkg/api and pkg/kubelet +make test WHAT=pkg/{api,kubelet} # run tests for pkg/api and pkg/kubelet ``` ### Run specific unit test cases in a package @@ -121,10 +126,10 @@ regular expression for the name of the test that should be run. ```sh # Runs TestValidatePod in pkg/api/validation with the verbose flag set -KUBE_GOFLAGS="-v" KUBE_TEST_ARGS='-run ^TestValidatePod$' hack/test-go.sh pkg/api/validation +make test WHAT=pkg/api/validation KUBE_GOFLAGS="-v" KUBE_TEST_ARGS='-run ^TestValidatePod$' # Runs tests that match the regex ValidatePod|ValidateConfigMap in pkg/api/validation -KUBE_GOFLAGS="-v" KUBE_TEST_ARGS="-run ValidatePod\|ValidateConfigMap$" hack/test-go.sh pkg/api/validation +make test WHAT=pkg/api/validation KUBE_GOFLAGS="-v" KUBE_TEST_ARGS="-run ValidatePod\|ValidateConfigMap$" ``` For other supported test flags, see the [golang @@ -137,7 +142,7 @@ You can do this efficiently. ```sh # Have 2 workers run all tests 5 times each (10 total iterations). -hack/test-go.sh -p 2 -i 5 +make test PARALLEL=2 ITERATION=5 ``` For more advanced ideas please see [flaky-tests.md](flaky-tests.md). @@ -149,7 +154,7 @@ Currently, collecting coverage is only supported for the Go unit tests. 
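Under the hood, `KUBE_COVER=y` drives the standard Go coverage tooling. As a rough, minimal sketch of the same idea using the Go toolchain directly (the paths and the single package here are illustrative, not the exact commands the scripts run):

```sh
# Collect a coverage profile for one package, then render it as an HTML report.
go test -cover -covermode=atomic -coverprofile=/tmp/kubectl.cover.out k8s.io/kubernetes/pkg/kubectl
go tool cover -html=/tmp/kubectl.cover.out -o /tmp/kubectl-coverage.html
```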
To run all unit tests and generate an HTML coverage report, run the following: ```sh -KUBE_COVER=y hack/test-go.sh +make test KUBE_COVER=y ``` At the end of the run, an HTML report will be generated with the path @@ -159,7 +164,7 @@ To run tests and collect coverage in only one package, pass its relative path under the `kubernetes` directory as an argument, for example: ```sh -KUBE_COVER=y hack/test-go.sh pkg/kubectl +make test WHAT=pkg/kubectl KUBE_COVER=y ``` Multiple arguments can be passed, in which case the coverage results will be @@ -224,14 +229,14 @@ for those internal etcd instances with the `TEST_ETCD_DIR` environment variable. ### Run integration tests -The integration tests are run using the `hack/test-integration.sh` script. +The integration tests are run using `make test-integration`. The Kubernetes integration tests are written using the normal golang testing package but expect to have a running etcd instance to connect to. The `test- -integration.sh` script wraps `hack/test-go.sh` and sets up an etcd instance +integration.sh` script wraps `make test` and sets up an etcd instance for the integration tests to use. ```sh -hack/test-integration.sh # Run all integration tests. +make test-integration # Run all integration tests. ``` This script runs the golang tests in package @@ -244,7 +249,7 @@ You can also use the `KUBE_TEST_ARGS` environment variable with the `hack ```sh # Run integration test TestPodUpdateActiveDeadlineSeconds with the verbose flag set. -KUBE_GOFLAGS="-v" KUBE_TEST_ARGS="-run ^TestPodUpdateActiveDeadlineSeconds$" hack/test-integration.sh +make test-integration KUBE_GOFLAGS="-v" KUBE_TEST_ARGS="-run ^TestPodUpdateActiveDeadlineSeconds$" ``` If you set `KUBE_TEST_ARGS`, the test case will be run with only the `v1` API diff --git a/examples/kubectl-container/Makefile b/examples/kubectl-container/Makefile index a414e4a59818b..3383644f14c21 100644 --- a/examples/kubectl-container/Makefile +++ b/examples/kubectl-container/Makefile @@ -23,7 +23,8 @@ GOARCH?=$(shell go env GOARCH) GOOS?=$(shell go env GOOS) kubectl: - KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/$(GOOS)/$(GOARCH)/kubectl . + make -C ../../ WHAT=cmd/kubectl KUBE_STATIC_OVERRIDES="kubectl"; \ + cp ../../_output/local/bin/$(GOOS)/$(GOARCH)/kubectl . .tag: kubectl ./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag diff --git a/hack/benchmark-go.sh b/hack/benchmark-go.sh index 7976d0b33c3e4..0cb3708fb00f7 100755 --- a/hack/benchmark-go.sh +++ b/hack/benchmark-go.sh @@ -20,4 +20,8 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -KUBE_COVER="" KUBE_RACE=" " "${KUBE_ROOT}/hack/test-go.sh" -- -test.run="^X" -benchtime=1s -bench=. -benchmem $@ +make test \ + WHAT="$*" \ + KUBE_COVER="" \ + KUBE_RACE=" " \ + KUBE_TEST_ARGS="-- -test.run='^X' -benchtime=1s -bench=. -benchmem" diff --git a/hack/benchmark-integration.sh b/hack/benchmark-integration.sh index 2eb12f139bb9f..e08fdd04525ce 100755 --- a/hack/benchmark-integration.sh +++ b/hack/benchmark-integration.sh @@ -40,7 +40,7 @@ runTests() { KUBE_TEST_ETCD_PREFIXES="registry" \ ETCD_CUSTOM_PREFIX="None" \ KUBE_TEST_ARGS="${ARGS}" \ - "${KUBE_ROOT}/hack/test-go.sh" test/integration + make test WHAT=test/integration cleanup } diff --git a/hack/build-go.sh b/hack/build-go.sh index bbc7bd0847a70..3dedf3a84f8bd 100755 --- a/hack/build-go.sh +++ b/hack/build-go.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2014 The Kubernetes Authors. +# Copyright 2016 The Kubernetes Authors.
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,14 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This script sets up a go workspace locally and builds all go components. +# This script is a vestigial redirection. Please do not add "real" logic. set -o errexit set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -source "${KUBE_ROOT}/hack/lib/init.sh" -kube::golang::build_binaries "$@" -kube::golang::place_bins +# For help output +ARGHELP="" +if [[ "$#" -gt 0 ]]; then + ARGHELP="WHAT='$@'" +fi + +echo "NOTE: $0 has been replaced by 'make' or 'make all'" +echo +echo "The equivalent of this invocation is: " +echo " make ${ARGHELP}" +echo +echo +make --no-print-directory -C "${KUBE_ROOT}" all WHAT="$*" diff --git a/hack/dev-push-hyperkube.sh b/hack/dev-push-hyperkube.sh index 0a592b93a596d..2c7991c818d2d 100755 --- a/hack/dev-push-hyperkube.sh +++ b/hack/dev-push-hyperkube.sh @@ -45,7 +45,7 @@ fi kube::build::verify_prereqs kube::build::build_image -kube::build::run_build_command hack/build-go.sh cmd/hyperkube +kube::build::run_build_command make WHAT=cmd/hyperkube REGISTRY="${KUBE_DOCKER_REGISTRY}/${KUBE_DOCKER_OWNER}" \ VERSION="${KUBE_DOCKER_VERSION}" \ diff --git a/hack/e2e-node-test.sh b/hack/e2e-node-test.sh index 64238f5a40b47..66dfdac25f493 100755 --- a/hack/e2e-node-test.sh +++ b/hack/e2e-node-test.sh @@ -14,121 +14,30 @@ # See the License for the specific language governing permissions and # limitations under the License. -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -source "${KUBE_ROOT}/hack/lib/init.sh" +# This script is a vestigial redirection. Please do not add "real" logic. -focus=${FOCUS:-""} -skip=${SKIP:-""} -report=${REPORT:-"/tmp/"} -artifacts=${ARTIFACTS:-"/tmp/_artifacts"} -remote=${REMOTE:-"false"} -images=${IMAGES:-""} -hosts=${HOSTS:-""} -if [[ $hosts == "" && $images == "" ]]; then - images="e2e-node-containervm-v20160321-image" -fi -image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"} -instance_prefix=${INSTANCE_PREFIX:-"test"} -cleanup=${CLEANUP:-"true"} -delete_instances=${DELETE_INSTANCES:-"false"} -run_until_failure=${RUN_UNTIL_FAILURE:-"false"} -list_images=${LIST_IMAGES:-"false"} +set -o errexit +set -o nounset +set -o pipefail -if [[ $list_images == "true" ]]; then - gcloud compute images list --project="${image_project}" | grep "e2e-node" - exit 0 -fi +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -ginkgo=$(kube::util::find-binary "ginkgo") -if [[ -z "${ginkgo}" ]]; then - echo "You do not appear to have ginkgo built. Try 'make WHAT=vendor/github.com/onsi/ginkgo/ginkgo'" - exit 1 +# For help output +ARGHELP="" +if [[ -n "${FOCUS:-}" ]]; then + ARGHELP="FOCUS='${FOCUS}' " fi - -if [ $remote = true ] ; then - # Setup the directory to copy test artifacts (logs, junit.xml, etc) from remote host to local host - if [ ! 
-d "${artifacts}" ]; then - echo "Creating artifacts directory at ${artifacts}" - mkdir -p ${artifacts} - fi - echo "Test artifacts will be written to ${artifacts}" - - # Get the compute zone - zone=$(gcloud info --format='value(config.properties.compute.zone)') - if [[ $zone == "" ]]; then - echo "Could not find gcloud compute/zone when running:\ngcloud info --format='value(config.properties.compute.zone)'" - exit 1 - fi - - # Get the compute project - project=$(gcloud info --format='value(config.project)') - if [[ $project == "" ]]; then - echo "Could not find gcloud project when running:\ngcloud info --format='value(config.project)'" - exit 1 - fi - - # Check if any of the images specified already have running instances. If so reuse those instances - # by moving the IMAGE to a HOST - if [[ $images != "" ]]; then - IFS=',' read -ra IM <<< "$images" - images="" - for i in "${IM[@]}"; do - if [[ $(gcloud compute instances list "${instance_prefix}-$i" | grep $i) ]]; then - if [[ $hosts != "" ]]; then - hosts="$hosts," - fi - echo "Reusing host ${instance_prefix}-$i" - hosts="${hosts}${instance_prefix}-${i}" - else - if [[ $images != "" ]]; then - images="$images," - fi - images="$images$i" - fi - done - fi - - # Parse the flags to pass to ginkgo - ginkgoflags="" - if [[ $focus != "" ]]; then - ginkgoflags="$ginkgoflags -focus=$focus " - fi - - if [[ $skip != "" ]]; then - ginkgoflags="$ginkgoflags -skip=$skip " - fi - - if [[ $run_until_failure != "" ]]; then - ginkgoflags="$ginkgoflags -untilItFails=$run_until_failure " - fi - - # Output the configuration we will try to run - echo "Running tests remotely using" - echo "Project: $project" - echo "Image Project: $image_project" - echo "Compute/Zone: $zone" - echo "Images: $images" - echo "Hosts: $hosts" - echo "Ginkgo Flags: $ginkgoflags" - - # Invoke the runner - go run test/e2e_node/runner/run_e2e.go --logtostderr --vmodule=*=2 --ssh-env="gce" \ - --zone="$zone" --project="$project" \ - --hosts="$hosts" --images="$images" --cleanup="$cleanup" \ - --results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \ - --image-project="$image_project" --instance-name-prefix="$instance_prefix" --setup-node="true" \ - --delete-instances="$delete_instances" - exit $? - -else - # Refresh sudo credentials if not running on GCE. - if ! ping -c 1 -q metadata.google.internal &> /dev/null; then - sudo -v || exit 1 - fi - - # Test using the host the script was run on - # Provided for backwards compatibility - "${ginkgo}" --focus=$focus --skip=$skip "${KUBE_ROOT}/test/e2e_node/" --report-dir=${report} \ - -- --alsologtostderr --v 2 --node-name $(hostname) --disable-kubenet=true --build-services=true --start-services=true --stop-services=true - exit $? +if [[ -n "${SKIP:-}" ]]; then + ARGHELP="${ARGHELP}SKIP='${SKIP}'" fi + +echo "NOTE: $0 has been replaced by 'make test-e2e-node'" +echo +echo "This script supports a number of parameters passed as environment variables." +echo "Please see the Makfile for more details." 
+echo +echo "The equivalent of this invocation is: " +echo " make test-e2e-node ${ARGHELP}" +echo +echo +make --no-print-directory -C "${KUBE_ROOT}" test-e2e-node FOCUS=${FOCUS:-} SKIP=${SKIP:-} diff --git a/hack/generate-docs.sh b/hack/generate-docs.sh index 522146ac232f3..4a5f9a4f7cac6 100755 --- a/hack/generate-docs.sh +++ b/hack/generate-docs.sh @@ -27,12 +27,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env -"${KUBE_ROOT}/hack/build-go.sh" \ - cmd/gendocs \ - cmd/genkubedocs \ - cmd/genman \ - cmd/genyaml \ - federation/cmd/genfeddocs +BINS=( + cmd/gendocs + cmd/genkubedocs + cmd/genman + cmd/genyaml + federation/cmd/genfeddocs +) +make -C "${KUBE_ROOT}" WHAT="${BINS[*]}" kube::util::ensure-temp-dir diff --git a/hack/jenkins/gotest.sh b/hack/jenkins/gotest.sh index fb9f40fcbcd8f..df9e6696d6f13 100755 --- a/hack/jenkins/gotest.sh +++ b/hack/jenkins/gotest.sh @@ -41,5 +41,5 @@ export KUBE_JUNIT_REPORT_DIR=${WORKSPACE}/_artifacts # Save the verbose stdout as well. export KUBE_KEEP_VERBOSE_TEST_OUTPUT=y -./hack/test-go.sh -./hack/test-integration.sh +make test +make test-integration diff --git a/hack/jenkins/test-dockerized.sh b/hack/jenkins/test-dockerized.sh index de23fbc85e7ef..85317b84031b9 100755 --- a/hack/jenkins/test-dockerized.sh +++ b/hack/jenkins/test-dockerized.sh @@ -55,7 +55,7 @@ rm -rf Godeps/_workspace # Temporary until _workspace is fully obliterated go install ./cmd/... ./hack/install-etcd.sh -./hack/test-go.sh -./hack/test-cmd.sh -./hack/test-integration.sh +make test +make test-cmd +make test-integration ./hack/test-update-storage-objects.sh diff --git a/hack/jenkins/verify-dockerized.sh b/hack/jenkins/verify-dockerized.sh index f417b34ab7b69..7532436fd7d8a 100755 --- a/hack/jenkins/verify-dockerized.sh +++ b/hack/jenkins/verify-dockerized.sh @@ -38,4 +38,4 @@ export LOG_LEVEL=4 cd /go/src/k8s.io/kubernetes ./hack/install-etcd.sh -./hack/verify-all.sh -v +make verify VERBOSE=1 diff --git a/hack/jenkins/verify.sh b/hack/jenkins/verify.sh index b9ccde02fd7e2..6eb931d80eaa6 100755 --- a/hack/jenkins/verify.sh +++ b/hack/jenkins/verify.sh @@ -34,4 +34,4 @@ export PATH=${GOPATH}/bin:${HOME}/third_party/etcd:/usr/local/go/bin:$PATH command -v etcd &>/dev/null || ./hack/install-etcd.sh go get -u github.com/tools/godep -./hack/verify-all.sh -v +make verify VERBOSE=1 diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 213c8d6bb4c3e..aa99c6aaffce4 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -78,9 +78,7 @@ do done if [ "x$GO_OUT" == "x" ]; then - "${KUBE_ROOT}/hack/build-go.sh" \ - cmd/kubectl \ - cmd/hyperkube + make -C "${KUBE_ROOT}" WHAT="cmd/kubectl cmd/hyperkube" else echo "skipped the build." fi diff --git a/hack/make-rules/build.sh b/hack/make-rules/build.sh new file mode 100755 index 0000000000000..85f5cbe69e717 --- /dev/null +++ b/hack/make-rules/build.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This script sets up a go workspace locally and builds all go components. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +kube::golang::build_binaries "$@" +kube::golang::place_bins diff --git a/hack/make-rules/test-cmd.sh b/hack/make-rules/test-cmd.sh new file mode 100755 index 0000000000000..b2c0334efe853 --- /dev/null +++ b/hack/make-rules/test-cmd.sh @@ -0,0 +1,2374 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors. + +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This command checks that the built commands can function together for +# simple scenarios. It does not require Docker. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/hack/lib/init.sh" +source "${KUBE_ROOT}/hack/lib/test.sh" + +# Stops the running kubectl proxy, if there is one. +function stop-proxy() +{ + [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}" + [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null + [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE} + PROXY_PID= + PROXY_PORT= + PROXY_PORT_FILE= +} + +# Starts "kubectl proxy" to test the client proxy. $1: api_prefix +function start-proxy() +{ + stop-proxy + + PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX) + kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}" + + + if [ $# -eq 0 ]; then + kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 & + else + kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 & + fi + PROXY_PID=$! + PROXY_PORT= + + local attempts=0 + while [[ -z ${PROXY_PORT} ]]; do + if (( ${attempts} > 9 )); then + kill "${PROXY_PID}" + kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})" + fi + sleep .5 + kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..." + PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE}) + attempts=$((attempts+1)) + done + + kube::log::status "kubectl proxy running on port ${PROXY_PORT}" + + # We try checking kubectl proxy 30 times with 1s delays to avoid occasional + # failures. + if [ $# -eq 0 ]; then + kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy" + else + kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1" + fi +} + +function cleanup() +{ + [[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null + [[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null + [[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null + stop-proxy + + kube::etcd::cleanup + rm -rf "${KUBE_TEMP}" + + kube::log::status "Clean up complete" +} + +# Executes curl against the proxy. $1 is the path to use, $2 is the desired +# return code. Prints a helpful message on failure.
+function check-curl-proxy-code() +{ + local status + local -r address=$1 + local -r desired=$2 + local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}" + status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}") + if [ "${status}" == "${desired}" ]; then + return 0 + fi + echo "For address ${full_address}, got ${status} but wanted ${desired}" + return 1 +} + +# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333. +function kubectl-with-retry() +{ + ERROR_FILE="${KUBE_TEMP}/kubectl-error" + preserve_err_file=${PRESERVE_ERR_FILE-false} + for count in $(seq 0 3); do + kubectl "$@" 2> ${ERROR_FILE} || true + if grep -q "the object has been modified" "${ERROR_FILE}"; then + kube::log::status "retry $1, error: $(cat ${ERROR_FILE})" + rm "${ERROR_FILE}" + sleep $((2**count)) + else + if [ "$preserve_err_file" != true ] ; then + rm "${ERROR_FILE}" + fi + break + fi + done +} + +kube::util::trap_add cleanup EXIT SIGINT +kube::util::ensure-temp-dir + +BINS=( + cmd/kubectl + cmd/kube-apiserver + cmd/kube-controller-manager +) +make -C "${KUBE_ROOT}" WHAT="${BINS[*]}" + +kube::etcd::start + +ETCD_HOST=${ETCD_HOST:-127.0.0.1} +ETCD_PORT=${ETCD_PORT:-4001} +API_PORT=${API_PORT:-8080} +API_HOST=${API_HOST:-127.0.0.1} +KUBELET_PORT=${KUBELET_PORT:-10250} +KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248} +CTLRMGR_PORT=${CTLRMGR_PORT:-10252} +PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. + +IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9" +IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml +IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml +IMAGE_PERL="gcr.io/google-containers/perl" + +# ensure ~/.kube/config isn't loaded by tests +HOME="${KUBE_TEMP}" + +# Find a standard sed instance for use with edit scripts +SED=sed +if which gsed &>/dev/null; then + SED=gsed +fi +if ! ($SED --version 2>&1 | grep -q GNU); then + echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'." + exit 1 +fi + +# Check kubectl +kube::log::status "Running kubectl with no options" +"${KUBE_OUTPUT_HOSTBIN}/kubectl" + +# Only run kubelet on platforms it supports +if [[ "$(go env GOHOSTOS)" == "linux" ]]; then + +BINS=( + cmd/kubelet +) +make -C "${KUBE_ROOT}" WHAT="${BINS[*]}" + +kube::log::status "Starting kubelet in masterless mode" +"${KUBE_OUTPUT_HOSTBIN}/kubelet" \ + --really-crash-for-testing=true \ + --root-dir=/tmp/kubelet.$$ \ + --cert-dir="${TMPDIR:-/tmp/}" \ + --docker-endpoint="fake://" \ + --hostname-override="127.0.0.1" \ + --address="127.0.0.1" \ + --port="$KUBELET_PORT" \ + --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 & +KUBELET_PID=$! +kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet(masterless)" +kill ${KUBELET_PID} 1>&2 2>/dev/null + +kube::log::status "Starting kubelet in masterful mode" +"${KUBE_OUTPUT_HOSTBIN}/kubelet" \ + --really-crash-for-testing=true \ + --root-dir=/tmp/kubelet.$$ \ + --cert-dir="${TMPDIR:-/tmp/}" \ + --docker-endpoint="fake://" \ + --hostname-override="127.0.0.1" \ + --address="127.0.0.1" \ + --api-servers="${API_HOST}:${API_PORT}" \ + --port="$KUBELET_PORT" \ + --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 & +KUBELET_PID=$! 
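+# Note (descriptive comment, assuming the helper's usual behavior):
+# kube::util::wait_for_url, from hack/lib/util.sh, polls the given URL until
+# it responds or a retry limit is hit; it is how this script waits for each
+# background daemon to become healthy before continuing.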
+ +kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet" + +fi + +# Start kube-apiserver +kube::log::status "Starting kube-apiserver" + +# Admission Controllers to invoke prior to persisting objects in cluster +ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota" + +"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \ + --address="127.0.0.1" \ + --public-address-override="127.0.0.1" \ + --port="${API_PORT}" \ + --admission-control="${ADMISSION_CONTROL}" \ + --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \ + --kubelet-port=${KUBELET_PORT} \ + --runtime-config=api/v1 \ + --storage-media-type="${KUBE_TEST_API_STORAGE_TYPE-}" \ + --cert-dir="${TMPDIR:-/tmp/}" \ + --service-cluster-ip-range="10.0.0.0/24" 1>&2 & +APISERVER_PID=$! + +kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver" + +# Start controller manager +kube::log::status "Starting controller-manager" +"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \ + --port="${CTLRMGR_PORT}" \ + --kube-api-content-type="${KUBE_TEST_API_TYPE-}" \ + --master="127.0.0.1:${API_PORT}" 1>&2 & +CTLRMGR_PID=$! + +kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager" + +if [[ "$(go env GOHOSTOS)" == "linux" ]]; then + kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/api/v1/nodes/127.0.0.1" "apiserver(nodes)" +else + # create a fake node + kubectl create -f - -s "http://127.0.0.1:${API_PORT}" << __EOF__ +{ + "kind": "Node", + "apiVersion": "v1", + "metadata": { + "name": "127.0.0.1" + }, + "status": { + "capacity": { + "memory": "1Gi" + } + } +} +__EOF__ +fi + +# Expose kubectl directly for readability +PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH + +kube::log::status "Checking kubectl version" +kubectl version + +# TODO: we need to note down the current default namespace and set back to this +# namespace after the tests are done.
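+# (One possible sketch for the TODO above, illustrative only -- capture the
+# current context's namespace first and restore it on exit, e.g.:
+#   original_ns=$(kubectl config view -o jsonpath='{.contexts[0].context.namespace}')
+#   kube::util::trap_add 'kubectl config set-context "${CONTEXT}" --namespace="${original_ns:-default}"' EXIT
+# where original_ns is a hypothetical variable name.)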
+kubectl config view +CONTEXT="test" +kubectl config set-context "${CONTEXT}" +kubectl config use-context "${CONTEXT}" + +i=0 +create_and_use_new_namespace() { + i=$(($i+1)) + kubectl create namespace "namespace${i}" + kubectl config set-context "${CONTEXT}" --namespace="namespace${i}" +} + +runTests() { + version="$1" + echo "Testing api version: $1" + if [[ -z "${version}" ]]; then + kube_flags=( + -s "http://127.0.0.1:${API_PORT}" + --match-server-version + ) + [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ] + else + kube_flags=( + -s "http://127.0.0.1:${API_PORT}" + --match-server-version + ) + [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ] + fi + id_field=".metadata.name" + labels_field=".metadata.labels" + annotations_field=".metadata.annotations" + service_selector_field=".spec.selector" + rc_replicas_field=".spec.replicas" + rc_status_replicas_field=".status.replicas" + rc_container_image_field=".spec.template.spec.containers" + rs_replicas_field=".spec.replicas" + port_field="(index .spec.ports 0).port" + port_name="(index .spec.ports 0).name" + second_port_field="(index .spec.ports 1).port" + second_port_name="(index .spec.ports 1).name" + image_field="(index .spec.containers 0).image" + hpa_min_field=".spec.minReplicas" + hpa_max_field=".spec.maxReplicas" + hpa_cpu_field=".spec.targetCPUUtilizationPercentage" + job_parallelism_field=".spec.parallelism" + deployment_replicas=".spec.replicas" + secret_data=".data" + secret_type=".type" + deployment_image_field="(index .spec.template.spec.containers 0).image" + deployment_second_image_field="(index .spec.template.spec.containers 1).image" + change_cause_annotation='.*kubernetes.io/change-cause.*' + + # Passing no arguments to create is an error + ! kubectl create + + ####################### + # kubectl config set # + ####################### + + kube::log::status "Testing kubectl(${version}:config set)" + + kubectl config set-cluster test-cluster --server="https://does-not-work" + + # Get the api cert and add a comment to avoid flag parsing problems + cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt") + + kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes + r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}') + + encoded=$(echo -n "$cert_data" | base64) + kubectl config set clusters.test-cluster.certificate-authority-data "$encoded" + e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}') + + test "$e_written" == "$r_written" + + ####################### + # kubectl local proxy # + ####################### + + # Make sure the UI can be proxied + start-proxy + check-curl-proxy-code /ui 301 + check-curl-proxy-code /metrics 200 + check-curl-proxy-code /api/ui 404 + if [[ -n "${version}" ]]; then + check-curl-proxy-code /api/${version}/namespaces 200 + fi + check-curl-proxy-code /static/ 200 + stop-proxy + + # Make sure the in-development api is accessible by default + start-proxy + check-curl-proxy-code /apis 200 + check-curl-proxy-code /apis/extensions/ 200 + stop-proxy + + # Custom paths let you see everything.
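+  # (start-proxy passes its argument through as --api-prefix, so the endpoints
+  # checked above are served again under the /custom prefix here.)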
+ start-proxy /custom + check-curl-proxy-code /custom/ui 301 + check-curl-proxy-code /custom/metrics 200 + if [[ -n "${version}" ]]; then + check-curl-proxy-code /custom/api/${version}/namespaces 200 + fi + stop-proxy + + ######################### + # RESTMapper evaluation # + ######################### + + kube::log::status "Testing RESTMapper" + + RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error" + + ### Non-existent resource type should give a recognizable error + # Pre-condition: None + # Command + kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true + if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then + kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})" + else + kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})" + exit 1 + fi + rm "${RESTMAPPER_ERROR_FILE}" + # Post-condition: None + + ########################### + # POD creation / deletion # + ########################### + + kube::log::status "Testing kubectl(${version}:pods)" + + ### Create POD valid-pod from JSON + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml + # Post-condition: valid-pod POD is created + kubectl get "${kube_flags[@]}" pods -o json + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod' + kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod' + kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod' + # Repeat above test using jsonpath template + kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod' + kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod' + kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod' + kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod' + # Describe command should print detailed information + kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers" + # Describe command should print events information by default + kube::test::describe_object_events_assert pods 'valid-pod' + # Describe command should not print events information when show-events=false + kube::test::describe_object_events_assert pods 'valid-pod' false + # Describe command should print events information when show-events=true + kube::test::describe_object_events_assert pods 'valid-pod' true + # Describe command (resource only) should print detailed information + kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers" + + # Describe command should print events information by default + kube::test::describe_resource_events_assert pods + # Describe command should not print events information when show-events=false + kube::test::describe_resource_events_assert pods false + # Describe command should print events information when show-events=true + kube::test::describe_resource_events_assert pods true + ### Validate Export ### + kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" ' valid-pod' "--export=true" + + ### Dump current valid-pod POD +
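+  # (The YAML captured below is replayed later via "kubectl create -f -", with
+  # the namespace line stripped, to check that "kubectl get -o yaml" output
+  # round-trips.)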
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}") + + ### Delete POD valid-pod by id + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 + # Post-condition: valid-pod POD doesn't exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Delete POD valid-pod by id with --now + # Pre-condition: valid-pod POD exists + kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete pod valid-pod "${kube_flags[@]}" --now + # Post-condition: valid-pod POD doesn't exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create POD valid-pod from dumped YAML + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}" + # Post-condition: valid-pod POD is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Delete POD valid-pod from JSON + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 + # Post-condition: valid-pod POD doesn't exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create POD valid-pod from JSON + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" + # Post-condition: valid-pod POD is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Delete POD valid-pod with label + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:' + # Command + kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 + # Post-condition: valid-pod POD doesn't exist + kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' '' + + ### Create POD valid-pod from YAML + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" + # Post-condition: valid-pod POD is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Delete PODs with no parameter mustn't kill everything + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + ! 
kubectl delete pods "${kube_flags[@]}" + # Post-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Delete PODs with --all and a label selector is not permitted + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" + # Post-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Delete all PODs + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all remove all the pods + # Post-condition: no POD exists + kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' '' + + # Detailed tests for describe pod output + ### Create a new namespace + # Pre-condition: the test-kubectl-describe-pod namespace does not exist + kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':' + # Command + kubectl create namespace test-kubectl-describe-pod + # Post-condition: namespace 'test-kubectl-describe-pod' is created. + kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod' + + ### Create a generic secret + # Pre-condition: no SECRET exists + kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod + # Post-condition: secret exists and has expected values + kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret' + kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type' + + ### Create a generic configmap + # Pre-condition: no CONFIGMAP exists + kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod + # Post-condition: configmap exists and has expected values + kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap' + + # Create a pod that consumes secret, configmap, and downward API keys as envs + kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' + kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod + + kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "" "TEST_CMD_2" "" "TEST_CMD_3" "env-test-pod (v1:metadata.name)" + # Describe command (resource only) should print detailed information about environment variables + kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "" "TEST_CMD_2" "" "TEST_CMD_3" "env-test-pod (v1:metadata.name)" + + # Clean-up + kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod + kubectl delete secret test-secret --namespace=test-kubectl-describe-pod + kubectl delete configmap
test-configmap --namespace=test-kubectl-describe-pod + kubectl delete namespace test-kubectl-describe-pod + + ### Create two PODs + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" + kubectl create -f examples/storage/redis/redis-proxy.yaml "${kube_flags[@]}" + # Post-condition: valid-pod and redis-proxy PODs are created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:' + + ### Delete multiple PODs at once + # Pre-condition: valid-pod and redis-proxy PODs exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:' + # Command + kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once + # Post-condition: no POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create valid-pod POD + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" + # Post-condition: valid-pod POD is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Label the valid-pod POD + # Pre-condition: valid-pod is not labelled + kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:' + # Command + kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}" + # Post-condition: valid-pod is labelled + kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:' + + ### Delete POD by label + # Pre-condition: valid-pod POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}" + # Post-condition: valid-pod POD doesn't exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create pod-with-precision POD + # Pre-condition: no POD is running + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}" + # Post-condition: valid-pod POD is running + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:' + + ## Patch preserves precision + # Command + kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}' + # Post-condition: pod-with-precision POD has patched annotation + kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue' + # Command + kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}" + # Post-condition: pod-with-precision POD has label + kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue' + # Command + kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}" + # Post-condition: pod-with-precision POD has annotation + kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue' + # Cleanup + kubectl delete pod pod-with-precision "${kube_flags[@]}" + + ### Create 
valid-pod POD + # Pre-condition: no POD exists + create_and_use_new_namespace + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" + # Post-condition: valid-pod POD is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ## Patch can modify a local object + kubectl patch --local -f pkg/api/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never" + + ## Patch pod can change image + # Command + kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}' + # Post-condition: valid-pod POD has image nginx + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' + # Post-condition: valid-pod has the record annotation + kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}" + # prove that patch can use different types + kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]' + # Post-condition: valid-pod POD has image nginx2 + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:' + # prove that patch can use different types + kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]' + # Post-condition: valid-pod POD has image nginx + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' + # prove that yaml input works too + YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n' + kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}" + # Post-condition: valid-pod POD has image changed-with-yaml + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:' + ## Patch pod from JSON can change image + # Command + kubectl patch "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}' + # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0 + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:' + + ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from the one stored in the server, the Patch should be rejected + ERROR_FILE="${KUBE_TEMP}/conflict-error" + ## If the resourceVersion is the same as the one stored in the server, the patch will be applied. + # Command + # Needs to retry because other party may change the resource.
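+  # (resourceVersion acts as an optimistic-concurrency precondition; the
+  # sleep $((2**count)) below backs off exponentially -- 1s, 2s, 4s, 8s --
+  # between attempts.)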
+  ## If resourceVersion is specified in the patch, it is treated as a precondition: if it differs
+  ## from the resourceVersion stored in the server, the patch is rejected.
+  ERROR_FILE="${KUBE_TEMP}/conflict-error"
+  ## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
+  # Command
+  # Needs to retry because another party may change the resource.
+  for count in $(seq 0 3); do
+    resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
+    kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
+    if grep -q "the object has been modified" "${ERROR_FILE}"; then
+      kube::log::status "retry $count, error: $(cat ${ERROR_FILE})"
+      rm "${ERROR_FILE}"
+      sleep $((2**count))
+    else
+      rm "${ERROR_FILE}"
+      kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
+      break
+    fi
+  done
+
+  ## If the resourceVersion is different from the one stored in the server, the patch will be rejected.
+  resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
+  ((resourceVersion+=100))
+  # Command
+  kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
+  # Post-condition: should get an error reporting the conflict
+  if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
+    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
+  else
+    kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
+    exit 1
+  fi
+  rm "${ERROR_FILE}"
+
+  ## --force replace can change other fields, e.g., spec.containers[0].name
+  # Command
+  kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
+  kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
+  # Post-condition: spec.containers[0].name = "replaced-k8s-serve-hostname"
+  kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
+  # Clean up
+  rm /tmp/tmp-valid-pod.json
+
+  ## replace of a cluster-scoped resource can succeed
+  # Pre-condition: a node exists
+  kubectl create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Node",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "node-${version}-test"
+  }
+}
+__EOF__
+  kubectl replace -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Node",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "node-${version}-test",
+    "annotations": {"a":"b"}
+  }
+}
+__EOF__
+  # Post-condition: the replace succeeds and the annotation is present
+  kube::test::get_object_assert "node node-${version}-test" "{{.metadata.annotations.a}}" 'b'
+  kubectl delete node node-${version}-test "${kube_flags[@]}"
+
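The retry loop above is a hand-rolled instance of optimistic concurrency: read resourceVersion, send it back as a precondition, and back off and retry on conflict. A generic, hypothetical wrapper for the same pattern:

retry_on_conflict() {
  # Retry "$@" with exponential backoff while the server reports a write conflict.
  local tries=$1; shift
  local i
  for i in $(seq 0 $((tries - 1))); do
    if "$@" 2> "${KUBE_TEMP}/retry-err"; then
      rm -f "${KUBE_TEMP}/retry-err"
      return 0
    fi
    grep -q "the object has been modified" "${KUBE_TEMP}/retry-err" || break
    sleep $((2**i))   # exponential backoff between attempts
  done
  cat "${KUBE_TEMP}/retry-err" >&2
  rm -f "${KUBE_TEMP}/retry-err"
  return 1
}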
+  ## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
+  echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
+  chmod +x /tmp/tmp-editor.sh
+  # Pre-condition: valid-pod POD has image nginx
+  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
+  EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod
+  # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
+  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
+  # Clean up
+  rm /tmp/tmp-editor.sh
+
+  ## kubectl edit should work on Windows
+  [ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
+  [ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
+  [ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
+  [ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
+
+  ### Overwriting an existing label is not permitted
+  # Pre-condition: name is valid-pod
+  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
+  # Command
+  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
+  # Post-condition: name is still valid-pod
+  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
+
+  ### --overwrite must be used to overwrite an existing label; it can be applied to all resources
+  # Pre-condition: name is valid-pod
+  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
+  # Command
+  kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
+  # Post-condition: name is valid-pod-super-sayan
+  kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
+
+  ### Delete POD by label
+  # Pre-condition: valid-pod POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+  # Command
+  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
+  # Post-condition: valid-pod POD doesn't exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  ### Create two PODs from one YAML file
+  # Pre-condition: no POD exists
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
+  # Post-condition: redis-master and redis-proxy PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
+
+  ### Delete two PODs from one YAML file
+  # Pre-condition: redis-master and redis-proxy PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:'
+  # Command
+  kubectl delete -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}"
+  # Post-condition: no PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+
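The two delete-by-label tests above use the two selector styles kubectl supports for -l/--selector; both are shown here for contrast:

kubectl get pods -l new-name=new-valid-pod             # equality-based selector
kubectl get pods -l 'name in (valid-pod-super-sayan)'  # set-based selector
kubectl get pods -l 'name notin (foo,bar)'             # set-based selector, negated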
+  ## kubectl apply should update configuration annotations only if apply has already been called
+  ## 1. kubectl create doesn't set the annotation
+  # Pre-Condition: no POD exists
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command: create a pod "test-pod"
+  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" is created
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  # Post-Condition: pod "test-pod" doesn't have configuration annotation
+  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ## 2. kubectl replace doesn't set the annotation
+  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
+  # Command: replace the pod "test-pod"
+  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" is replaced
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
+  # Post-Condition: pod "test-pod" doesn't have configuration annotation
+  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ## 3. kubectl apply does set the annotation
+  # Command: apply the pod "test-pod"
+  kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" is applied
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
+  # Post-Condition: pod "test-pod" has configuration annotation
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
+  ## 4. kubectl replace updates an existing annotation
+  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
+  # Command: replace the pod "test-pod"
+  kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" is replaced
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
+  # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it was applied)
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
+  ! diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null
+  # Clean up
+  rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
+  kubectl delete pods test-pod "${kube_flags[@]}"
+
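The rule being tested above reduces to one predicate; a small hypothetical helper makes the expected matrix explicit:

has_last_applied() {
  # Succeeds iff the pod carries the kubectl.kubernetes.io/last-applied-configuration annotation.
  kubectl get pods "$1" -o yaml | grep -q kubectl.kubernetes.io/last-applied-configuration
}
# Expected: fails after create or replace, succeeds after apply,
# and replace only *updates* the annotation once apply has set it.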
+  ## Configuration annotations should be set when --save-config is enabled
+  ## 1. kubectl create --save-config should generate configuration annotation
+  # Pre-Condition: no POD exists
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command: create a pod "test-pod"
+  kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" has configuration annotation
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Clean up
+  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  ## 2. kubectl edit --save-config should generate configuration annotation
+  # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Command: edit the pod "test-pod"
+  temp_editor="${KUBE_TEMP}/tmp-editor.sh"
+  echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
+  chmod +x "${temp_editor}"
+  EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" has configuration annotation
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Clean up
+  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  ## 3. kubectl replace --save-config should generate configuration annotation
+  # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Command: replace the pod "test-pod"
+  kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" has configuration annotation
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Clean up
+  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  ## 4. kubectl run --save-config should generate configuration annotation
+  # Pre-Condition: no RC exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command: create the rc "nginx" with image nginx
+  kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
+  # Post-Condition: rc "nginx" has configuration annotation
+  [[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ## 5. kubectl expose --save-config should generate configuration annotation
+  # Pre-Condition: no service exists
+  kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command: expose the rc "nginx"
+  kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
+  # Post-Condition: service "nginx" has configuration annotation
+  [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Clean up
+  kubectl delete rc,svc nginx
+  ## 6. kubectl autoscale --save-config should generate configuration annotation
+  # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Command: autoscale rc "frontend"
+  kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
+  # Post-Condition: hpa "frontend" has configuration annotation
+  [[ "$(kubectl get hpa.v1beta1.extensions frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Ensure we can interact with HPA objects in lists through both the extensions/v1beta1 and autoscaling/v1 APIs
+  output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" 'autoscaling/v1'
+  output_message=$(kubectl get hpa.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
+  output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" 'autoscaling/v1'
+  # Clean up
+  # Note that we should delete hpa first, otherwise it may fight with the rc reaper.
+  kubectl delete hpa frontend "${kube_flags[@]}"
+  kubectl delete rc frontend "${kube_flags[@]}"
+
+  ## kubectl create should not panic on empty string lists in a template
+  ERROR_FILE="${KUBE_TEMP}/validation-error"
+  kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
+  # Post-condition: should get an error reporting the empty string
+  if grep -q "unexpected nil value for field" "${ERROR_FILE}"; then
+    kube::log::status "\"kubectl create\" with an empty string list returns error as expected: $(cat ${ERROR_FILE})"
+  else
+    kube::log::status "\"kubectl create\" with an empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})"
+    exit 1
+  fi
+  rm "${ERROR_FILE}"
+
+  ## kubectl apply should create the resource that doesn't exist yet
+  # Pre-Condition: no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command: apply a pod "test-pod" (doesn't exist) should create this pod
+  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  # Post-Condition: pod "test-pod" is created
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  # Post-Condition: pod "test-pod" has configuration annotation
+  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  # Clean up
+  kubectl delete pods test-pod "${kube_flags[@]}"
+
+  ## kubectl run should create deployments or jobs
+  # Pre-Condition: no Job exists
+  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl run pi --generator=job/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
+  # Post-Condition: Job "pi" is created
+  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
+  # Clean up
+  kubectl delete jobs pi "${kube_flags[@]}"
+  # Command
+  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
+  # Post-Condition: Job "pi" is created
+  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
+  # Clean up
+  kubectl delete jobs pi "${kube_flags[@]}"
+  # Post-condition: no pods exist.
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Pre-Condition: no Deployment exists
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl run nginx "--image=$IMAGE_NGINX" --generator=deployment/v1beta1 "${kube_flags[@]}"
+  # Post-Condition: Deployment "nginx" is created
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
+  # Clean up
+  kubectl delete deployment nginx "${kube_flags[@]}"
+
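Which kind `kubectl run` creates is selected by --generator, as the cases above exercise. A condensed summary for this vintage of kubectl (treat the mapping as illustrative of the tests above, not exhaustive):

# --generator=run/v1                -> ReplicationController
# --generator=deployment/v1beta1    -> Deployment (extensions/v1beta1)
# --generator=job/v1beta1 or job/v1 -> Job
kubectl run nginx --image=nginx --generator=deployment/v1beta1   # same intent as the Deployment case above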
+  ###############
+  # Kubectl get #
+  ###############
+
+  ### Test retrieval of non-existing pods
+  # Pre-condition: no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
+  # Post-condition: POD abc should error since it doesn't exist
+  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
+
+  ### Test retrieval of non-existing POD with output flag specified
+  # Pre-condition: no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
+  # Post-condition: POD abc should error since it doesn't exist
+  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
+
+  ### Test retrieval of non-existing POD with json output flag specified
+  # Pre-condition: no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
+  # Post-condition: POD abc should error since it doesn't exist
+  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
+  # Post-condition: make sure we don't display an empty List
+  if kube::test::if_has_string "${output_message}" 'List'; then
+    echo 'Unexpected List output'
+    echo "${LINENO} $(basename $0)"
+    exit 1
+  fi
+
+  #####################################
+  # Third Party Resources             #
+  #####################################
+  create_and_use_new_namespace
+  kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "ThirdPartyResource",
+  "apiVersion": "extensions/v1beta1",
+  "metadata": {
+    "name": "foo.company.com"
+  },
+  "versions": [
+    {
+      "name": "v1"
+    }
+  ]
+}
+__EOF__
+
+  # Post-Condition: the object exists
+  kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'foo.company.com:'
+
+  kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1" "third party api"
+
+  # Test that we can list this new third party resource
+  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  # Test that we can create a new resource of type Foo
+  kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Foo",
+  "apiVersion": "company.com/v1",
+  "metadata": {
+    "name": "test"
+  },
+  "some-field": "field1",
+  "other-field": "field2"
+}
+__EOF__
+
+  # Test that we can list this new third party resource
+  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
+
+  # Delete the resource
+  kubectl "${kube_flags[@]}" delete foos test
+
+  # Make sure it's gone
+  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  # teardown
+  kubectl delete thirdpartyresources foo.company.com "${kube_flags[@]}"
+
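kube::util::wait_for_url above (defined outside this hunk, in hack/lib) blocks until the new API group is actually served before the foos tests run. The underlying idea, as a minimal hypothetical polling loop:

wait_for_url() {
  # Poll the URL once per second until it responds or we time out.
  local url=$1 times=${2:-30}
  local i
  for i in $(seq 1 "${times}"); do
    curl -fs "${url}" > /dev/null && return 0
    sleep 1
  done
  echo "timed out waiting for ${url}" >&2
  return 1
}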
+  #####################################
+  # Recursive Resources via directory #
+  #####################################
+
+  ### Create multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: no POD exists
+  create_and_use_new_namespace
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
+
+  ## Edit multiple busybox PODs by updating the image field recursively from a directory. tmp-editor.sh is a fake editor
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
+  chmod +x /tmp/tmp-editor.sh
+  output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are edited, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  # Clean up
+  rm /tmp/tmp-editor.sh
+
+  ## Replace multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
+  kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
+
+  ## Describe multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
+  kube::test::if_has_string "${output_message}" "app=busybox0"
+  kube::test::if_has_string "${output_message}" "app=busybox1"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ## Annotate multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ## Apply multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
+  kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
+
+  ## Convert multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ## Get multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
+  # Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
+  kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ## Label multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
+  echo $output_message
+  # Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
+  kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ## Patch multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
+  echo $output_message
+  # Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
+  kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ### Delete multiple busybox PODs recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 PODs exist
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
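Every "Object 'Kind' is missing" assertion above comes from a deliberately malformed manifest in the recursive test-data directory. A hypothetical reproduction of that failure mode:

cat > /tmp/kind-less.yaml << 'EOF'
# no "kind" field, so the decoder cannot determine an object type
apiVersion: v1
metadata:
  name: busybox2
EOF
kubectl create -f /tmp/kind-less.yaml || true   # expected to fail: the Kind cannot be determined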
+  ### Create replication controller recursively from directory of YAML files
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+  # Post-condition: busybox0 & busybox1 replication controllers are created
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+
+  ### Autoscale multiple replication controllers recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
+  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
+  # Command
+  output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 replication controllers are autoscaled with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 '
+  kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 '
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  kubectl delete hpa busybox0 "${kube_flags[@]}"
+  kubectl delete hpa busybox1 "${kube_flags[@]}"
+
+  ### Expose multiple replication controllers as service recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
+  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
+  # Command
+  output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
+  # Post-condition: service exists and the port is unnamed
+  kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" ' 80'
+  kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" ' 80'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ### Scale multiple replication controllers recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 replica each
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
+  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
+  # Command
+  output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
+  kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ### Delete multiple busybox replication controllers recursively from directory of YAML files
+  # Pre-condition: busybox0 & busybox1 replication controllers exist
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
+  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+
+  ### Rollout on multiple deployments recursively
+  # Pre-condition: no deployments exist
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  # Create deployments (revision 1) recursively from directory of YAML files
+  ! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
+  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
+  ## Rollback the deployments to revision 1 recursively
+  output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
+  # Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
+  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Pause the deployments recursively
+  PRESERVE_ERR_FILE=true
+  kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
+  output_message=$(cat ${ERROR_FILE})
+  # Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
+  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Resume the deployments recursively
+  kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
+  output_message=$(cat ${ERROR_FILE})
+  # Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
+  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "::"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Retrieve the rollout history of the deployments recursively
+  output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
+  kube::test::if_has_string "${output_message}" "nginx0-deployment"
+  kube::test::if_has_string "${output_message}" "nginx1-deployment"
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  # Clean up
+  unset PRESERVE_ERR_FILE
+  rm "${ERROR_FILE}"
+  ! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0
+  sleep 1
+
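kubectl-with-retry above is a helper defined earlier in this script, outside this hunk; the tests rely on two parts of its contract: it retries transient failures, and it leaves the last stderr in ${ERROR_FILE} when PRESERVE_ERR_FILE=true. A hypothetical sketch of that contract, not the real implementation:

kubectl-with-retry() {
  # Retry the kubectl invocation a few times, capturing stderr each attempt.
  ERROR_FILE="${KUBE_TEMP}/kubectl-error"
  local count
  for count in {0..3}; do
    kubectl "$@" 2> "${ERROR_FILE}" && break
    sleep $((2**count))
  done
  # Keep the error file around only when the caller asked to inspect it.
  [[ "${PRESERVE_ERR_FILE:-}" == "true" ]] || rm -f "${ERROR_FILE}"
}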
+  ### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  # Create replication controllers recursively from directory of YAML files
+  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
+  # Command
+  ## Attempt to rollback the replication controllers to revision 1 recursively
+  output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
+  kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Attempt to pause the replication controllers recursively
+  output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
+  kube::test::if_has_string "${output_message}" 'error when pausing "hack/testdata/recursive/rc/busybox.yaml'
+  kube::test::if_has_string "${output_message}" 'error when pausing "hack/testdata/recursive/rc/rc/busybox.yaml'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  ## Attempt to resume the replication controllers recursively
+  output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
+  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
+  kube::test::if_has_string "${output_message}" 'error when resuming "hack/testdata/recursive/rc/busybox.yaml'
+  kube::test::if_has_string "${output_message}" 'error when resuming "hack/testdata/recursive/rc/rc/busybox.yaml'
+  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
+  # Clean up
+  ! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0
+  sleep 1
+
+  ##############
+  # Namespaces #
+  ##############
+
+  ### Create a new namespace
+  # Pre-condition: only the "default" namespace exists
+  # This pre-condition no longer holds, since earlier tests create and switch namespaces to create pods with the same name.
+  # kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
+  # Command
+  kubectl create namespace my-namespace
+  # Post-condition: namespace 'my-namespace' is created.
+  kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
+  # Clean up
+  kubectl delete namespace my-namespace
+
+  ######################
+  # Pods in Namespaces #
+  ######################
+
+  ### Create a new namespace
+  # Pre-condition: the other namespace does not exist
+  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
+  # Command
+  kubectl create namespace other
+  # Post-condition: namespace 'other' is created.
+  kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
+
+  ### Create POD valid-pod in specific namespace
+  # Pre-condition: no POD exists
+  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create "${kube_flags[@]}" --namespace=other -f docs/admin/limitrange/valid-pod.yaml
+  # Post-condition: valid-pod POD is created
+  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+
+  ### Delete POD valid-pod in specific namespace
+  # Pre-condition: valid-pod POD exists
+  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+  # Command
+  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
+  # Post-condition: valid-pod POD doesn't exist
+  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Clean up
+  kubectl delete namespace other
+
+  ###########
+  # Secrets #
+  ###########
+
+  ### Create a new namespace
+  # Pre-condition: the test-secrets namespace does not exist
+  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
+  # Command
+  kubectl create namespace test-secrets
+  # Post-condition: namespace 'test-secrets' is created.
+  kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
+
+  ### Create a generic secret in a specific namespace
+  # Pre-condition: no SECRET exists
+  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
+  # Post-condition: secret exists and has expected values
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
+  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
+  # Clean-up
+  kubectl delete secret test-secret --namespace=test-secrets
+
+  ### Create a docker-registry secret in a specific namespace
+  # Pre-condition: no SECRET exists
+  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
+  # Post-condition: secret exists and has expected values
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
+  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
+  # Clean-up
+  kubectl delete secret test-secret --namespace=test-secrets
+
+  ### Create a tls secret
+  # Pre-condition: no SECRET exists
+  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
+  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
+  # Clean-up
+  kubectl delete secret test-secret --namespace=test-secrets
+
+  # Create a secret using stringData
+  kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Secret",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "secret-string-data"
+  },
+  "data": {
+    "k1":"djE=",
+    "k2":""
+  },
+  "stringData": {
+    "k2":"v2"
+  }
+}
+__EOF__
+  # Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
+  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
+  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
+  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' ''
+  # Clean up
+  kubectl delete secret secret-string-data --namespace=test-secrets
+
+  ### Create a secret using output flags
+  # Pre-condition: no secret exists
+  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  [[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
+  ## Clean-up
+  kubectl delete secret test-secret --namespace=test-secrets
+  # Clean up
+  kubectl delete namespace test-secrets
+
+  #############
+  # ConfigMap #
+  #############
+
+  kubectl create -f docs/user-guide/configmap/configmap.yaml
+  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
+  kubectl delete configmap test-configmap "${kube_flags[@]}"
+
+  ### Create a new namespace
+  # Pre-condition: the test-configmaps namespace does not exist
+  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
+  # Command
+  kubectl create namespace test-configmaps
+  # Post-condition: namespace 'test-configmaps' is created.
+  kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
+
+  ### Create a generic configmap in a specific namespace
+  # Pre-condition: no configmap exists in the test-configmaps namespace
+  kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
+  # Post-condition: configmap exists and has expected values
+  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
+  [[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
+  # Clean-up
+  kubectl delete configmap test-configmap --namespace=test-configmaps
+  kubectl delete namespace test-configmaps
+
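The literal strings asserted in the Secrets tests above are just base64: Secret "data" values are base64-encoded, while "stringData" is plain text that overrides "data" on write. The encodings can be verified directly:

echo -n value1 | base64   # dmFsdWUx -- matches the key1 assert in the generic-secret test
echo -n v2 | base64       # djI=     -- stringData "k2":"v2" overrides the empty data "k2"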
+  ####################
+  # Service Accounts #
+  ####################
+
+  ### Create a new namespace
+  # Pre-condition: the test-service-accounts namespace does not exist
+  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
+  # Command
+  kubectl create namespace test-service-accounts
+  # Post-condition: namespace 'test-service-accounts' is created.
+  kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
+
+  ### Create a service account in a specific namespace
+  # Command
+  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
+  # Post-condition: the service account exists and has the expected name
+  kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
+  # Clean-up
+  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
+  # Clean up
+  kubectl delete namespace test-service-accounts
+
+  #################
+  # Pod templates #
+  #################
+
+  ### Create PODTEMPLATE
+  # Pre-condition: no PODTEMPLATE
+  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
+  # Command
+  kubectl create -f docs/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
+  # Post-condition: nginx PODTEMPLATE is available
+  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
+
+  ### Printing pod templates works
+  kubectl get podtemplates "${kube_flags[@]}"
+  [[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
+
+  ### Delete nginx pod template by name
+  # Pre-condition: nginx pod template is available
+  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
+  # Command
+  kubectl delete podtemplate nginx "${kube_flags[@]}"
+  # Post-condition: No templates exist
+  kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
+
+
+  ############
+  # Services #
+  ############
+  # switch back to the default namespace
+  kubectl config set-context "${CONTEXT}" --namespace=""
+  kube::log::status "Testing kubectl(${version}:services)"
+
+  ### Create redis-master service
+  # Pre-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+  # Command
+  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
+  # Post-condition: redis-master service exists
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
+  # Describe command should print detailed information
+  kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
+  # Describe command should print events information by default
+  kube::test::describe_object_events_assert services 'redis-master'
+  # Describe command should not print events information when show-events=false
+  kube::test::describe_object_events_assert services 'redis-master' false
+  # Describe command should print events information when show-events=true
+  kube::test::describe_object_events_assert services 'redis-master' true
+  # Describe command (resource only) should print detailed information
+  kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
+  # Describe command should print events information by default
+  kube::test::describe_resource_events_assert services
+  # Describe command should not print events information when show-events=false
+  kube::test::describe_resource_events_assert services false
+  # Describe command should print events information when show-events=true
+  kube::test::describe_resource_events_assert services true
+
+  ### Dump current redis-master service
+  output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
+
+  ### Delete redis-master-service by id
+  # Pre-condition: redis-master service exists
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
+  # Command
+  kubectl delete service redis-master "${kube_flags[@]}"
+  # Post-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+
+  ### Create redis-master-service from dumped JSON
+  # Pre-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+  # Command
+  echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
+  # Post-condition: redis-master service is created
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
+
+  ### Create service-${version}-test service
+  # Pre-condition: redis-master service exists
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
+  # Command
+  kubectl create -f - "${kube_flags[@]}" << __EOF__
+{
+  "kind": "Service",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "service-${version}-test"
+  },
+  "spec": {
+    "ports": [
+      {
+        "protocol": "TCP",
+        "port": 80,
+        "targetPort": 80
+      }
+    ]
+  }
+}
+__EOF__
+  # Post-condition: service-${version}-test service is created
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
+
+  ### Identity
+  kubectl get service "${kube_flags[@]}" service-${version}-test -o json | kubectl replace "${kube_flags[@]}" -f -
+
+  ### Delete services by id
+  # Pre-condition: service-${version}-test exists
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
+  # Command
+  kubectl delete service redis-master "${kube_flags[@]}"
+  kubectl delete service "service-${version}-test" "${kube_flags[@]}"
+  # Post-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+
+  ### Create two services
+  # Pre-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+  # Command
+  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
+  kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
+  # Post-condition: redis-master and redis-slave services are created
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
+
+  ### Custom columns can be specified
+  # Command: generate output using custom columns
+  output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
+  # Post-condition: should contain name column
+  kube::test::if_has_string "${output_message}" 'redis-master'
+
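-o custom-columns, used above, is one of several client-side output formats; the same field can be pulled with jsonpath or a go-template. Equivalent reads, for comparison:

kubectl get services -o custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion
kubectl get services -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'
kubectl get services -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'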
+  ### Delete multiple services at once
+  # Pre-condition: redis-master and redis-slave services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
+  # Command
+  kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
+  # Post-condition: Only the default kubernetes services exist
+  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
+
+
+  ###########################
+  # Replication controllers #
+  ###########################
+
+  kube::log::status "Testing kubectl(${version}:replicationcontrollers)"
+
+  ### Create and stop controller, make sure it doesn't leak pods
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl delete rc frontend "${kube_flags[@]}"
+  # Post-condition: no pods from frontend controller
+  kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  ### Create replication controller frontend from YAML
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  # Post-condition: frontend replication controller is created
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
+  # Describe command should print detailed information
+  kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
+  # Describe command should print events information by default
+  kube::test::describe_object_events_assert rc 'frontend'
+  # Describe command should not print events information when show-events=false
+  kube::test::describe_object_events_assert rc 'frontend' false
+  # Describe command should print events information when show-events=true
+  kube::test::describe_object_events_assert rc 'frontend' true
+  # Describe command (resource only) should print detailed information
+  kube::test::describe_resource_assert rc "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
+  # Describe command should print events information by default
+  kube::test::describe_resource_events_assert rc
+  # Describe command should not print events information when show-events=false
+  kube::test::describe_resource_events_assert rc false
+  # Describe command should print events information when show-events=true
+  kube::test::describe_resource_events_assert rc true
+
+  ### Scale replication controller frontend with current-replicas and replicas
+  # Pre-condition: 3 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
+  # Command
+  kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
+  # Post-condition: 2 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
+
+  ### Scale replication controller frontend with (wrong) current-replicas and replicas
+  # Pre-condition: 2 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
+  # Command
+  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
+  # Post-condition: nothing changed
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
+
+  ### Scale replication controller frontend with replicas only
+  # Pre-condition: 2 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
+  # Command
+  kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
+  # Post-condition: 3 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
+
+  ### Scale replication controller from YAML with replicas only
+  # Pre-condition: 3 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
+  # Command
+  kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  # Post-condition: 2 replicas
+  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
+  # Clean-up
+  kubectl delete rc frontend "${kube_flags[@]}"
+
+  ### Scale multiple replication controllers
+  kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
+  kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
+  # Command
+  kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
+  # Post-condition: 4 replicas each
+  kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
+  kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
+  # Clean-up
+  kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
+
+  ### Scale a job
+  kubectl create -f docs/user-guide/job.yaml "${kube_flags[@]}"
+  # Command
+  kubectl scale --replicas=2 job/pi
+  # Post-condition: 2 replicas for pi
+  kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
+  # Clean-up
+  kubectl delete job/pi "${kube_flags[@]}"
+
+  ### Scale a deployment
+  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
+  # Command
+  kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
+  # Post-condition: 1 replica for nginx-deployment
+  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
+  # Clean-up
+  kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
+
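The scale tests above hinge on --current-replicas acting as a compare-and-swap precondition: the new size is applied only if the observed replica count matches. Restated as a minimal pair (assuming rc frontend currently has 3 replicas):

kubectl scale --current-replicas=3 --replicas=2 rc frontend          # applied: observed size matches
kubectl scale --current-replicas=9 --replicas=2 rc frontend || true  # rejected: precondition fails, size unchanged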
+  # Command
+  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
+  kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
+  # Post-condition: service exists and the port is unnamed
+  kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
+  # Create a service using service/v1 generator
+  kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
+  # Post-condition: service exists and the port is named default.
+  kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
+  # Verify that expose service works without specifying a port.
+  kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
+  # Post-condition: service exists with the same port as the original service.
+  kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
+  # Cleanup services
+  kubectl delete pod valid-pod "${kube_flags[@]}"
+  kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
+
+  ### Negative test: exposing an invalid resource type should fail
+  # Pre-condition: none required
+  # Command
+  output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
+  # Post-condition: the error message has "cannot expose" string
+  kube::test::if_has_string "${output_message}" 'cannot expose'
+
+  ### Try to generate a service with an invalid name (exceeding maximum valid size)
+  # Case 1: the over-long name is given explicitly via --name; should fail
+  output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name --port=8081 2>&1 "${kube_flags[@]}")
+  # Post-condition: should fail due to invalid name
+  kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
+  # Case 2: run without --name; should succeed by truncating the name inherited from the pod
+  output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
+  # Post-condition: inherited name from pod has been truncated
+  kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostnam\" exposed'
+  # Clean-up
+  kubectl delete svc kubernetes-serve-hostnam "${kube_flags[@]}"
+
+  ### Expose multiport object as a new service
+  # Command: omit --port so that every container port is picked up
+  output_message=$(kubectl expose -f docs/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
+  # Post-condition: expose succeeded
+  kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
+  # Post-condition: generated service has both ports from the exposed pod
+  kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
+  kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 4001'
+  # Clean-up
+  kubectl delete svc etcd-server "${kube_flags[@]}"
+
+  ### Delete replication controller with id
+  # Pre-condition: frontend replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
+  # Command
+  kubectl delete rc frontend "${kube_flags[@]}"
+  # Post-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  ### Create two replication controllers
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  kubectl create -f
examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
+  # Post-condition: frontend and redis-slave
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
+
+  ### Delete multiple controllers at once
+  # Pre-condition: frontend and redis-slave
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
+  # Command
+  kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
+  # Post-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+
+  ### Auto scale replication controller
+  # Pre-condition: no replication controller exists
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
+  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
+  # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
+  kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
+  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
+  kubectl delete hpa frontend "${kube_flags[@]}"
+  # autoscale 1~2 pods, CPU utilization 70%, rc specified by file, using old generator
+  kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70 --generator=horizontalpodautoscaler/v1beta1
+  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
+  kubectl delete hpa frontend "${kube_flags[@]}"
+  # autoscale 2~3 pods, no CPU utilization specified, rc specified by name
+  kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
+  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 <no value>'
+  kubectl delete hpa frontend "${kube_flags[@]}"
+  # autoscale 2~3 pods, no CPU utilization specified, rc specified by name, using old generator
+  kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3 --generator=horizontalpodautoscaler/v1beta1
+  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 <no value>'
+  kubectl delete hpa frontend "${kube_flags[@]}"
+  # autoscale without specifying --max should fail
+  ! kubectl autoscale rc frontend "${kube_flags[@]}"
+  # Clean up
+  kubectl delete rc frontend "${kube_flags[@]}"
+
+
+  ######################
+  # Deployments       #
+  ######################
+
+  ### Auto scale deployment
+  # Pre-condition: no deployment exists
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  # Command
+  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
+  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
+  # autoscale 2~3 pods, no CPU utilization specified
+  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
+  kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 <no value>'
+  # Clean up
+  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
+ kubectl delete hpa nginx-deployment "${kube_flags[@]}" + kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}" + + ### Rollback a deployment + # Pre-condition: no deployment exists + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + # Create a deployment (revision 1) + kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:' + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + # Rollback to revision 1 - should be no-op + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + # Update the deployment (revision 2) + kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}" + kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + # Rollback to revision 1 + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" + sleep 1 + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + # Rollback to revision 1000000 - should be no-op + kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + # Rollback to last revision + kubectl rollout undo deployment nginx "${kube_flags[@]}" + sleep 1 + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + # Pause the deployment + kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}" + # A paused deployment cannot be rolled back + ! kubectl rollout undo deployment nginx "${kube_flags[@]}" + # Resume the deployment + kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}" + # The resumed deployment can now be rolled back + kubectl rollout undo deployment nginx "${kube_flags[@]}" + # Clean up + kubectl delete deployment nginx "${kube_flags[@]}" + + ### Set image of a deployment + # Pre-condition: no deployment exists + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + # Create a deployment + kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:" + # Set the deployment's image + kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:" + # Set non-existing container should fail + ! 
kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}" + # Set image of deployments without specifying name + kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:" + # Set image of a deployment specified by file + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:" + # Set image of a local file without talking to the server + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:" + # Set image of all containers of the deployment + kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + # Clean up + kubectl delete deployment nginx-deployment "${kube_flags[@]}" + + + ###################### + # Replica Sets # + ###################### + + kube::log::status "Testing kubectl(${version}:replicasets)" + + ### Create and stop a replica set, make sure it doesn't leak pods + # Pre-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]}" + # Post-condition: no pods from frontend replica set + kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create replica set frontend from YAML + # Pre-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + # Post-condition: frontend replica set is created + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + # Describe command should print detailed information + kube::test::describe_object_assert rs 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:" + # Describe command should print events information by default + kube::test::describe_object_events_assert rs 'frontend' + # Describe command should not print events information when show-events=false + kube::test::describe_object_events_assert rs 'frontend' false + # Describe command should print events information when show-events=true + kube::test::describe_object_events_assert rs 'frontend' true + # Describe command (resource only) should print detailed information + kube::test::describe_resource_assert rs "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods 
Status:" + # Describe command should print events information by default + kube::test::describe_resource_events_assert rs + # Describe command should not print events information when show-events=false + kube::test::describe_resource_events_assert rs false + # Describe command should print events information when show-events=true + kube::test::describe_resource_events_assert rs true + + ### Scale replica set frontend with current-replicas and replicas + # Pre-condition: 3 replicas + kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3' + # Command + kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}" + # Post-condition: 2 replicas + kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2' + # Clean-up + kubectl delete rs frontend "${kube_flags[@]}" + + ### Expose replica set as service + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + # Pre-condition: 3 replicas + kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3' + # Command + kubectl expose rs frontend --port=80 "${kube_flags[@]}" + # Post-condition: service exists and the port is unnamed + kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" ' 80' + # Create a service using service/v1 generator + kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}" + # Post-condition: service exists and the port is named default. + kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80' + # Cleanup services + kubectl delete service frontend{,-2} "${kube_flags[@]}" + + ### Delete replica set with id + # Pre-condition: frontend replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + # Command + kubectl delete rs frontend "${kube_flags[@]}" + # Post-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Create two replica sets + # Pre-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}" + # Post-condition: frontend and redis-slave + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:' + + ### Delete multiple replica sets at once + # Pre-condition: frontend and redis-slave + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:' + # Command + kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once + # Post-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + + ### Auto scale replica set + # Pre-condition: no replica set exists + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" + kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file + kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70 + kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70' + kubectl delete hpa frontend 
"${kube_flags[@]}" + # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name + kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3 + kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 ' + kubectl delete hpa frontend "${kube_flags[@]}" + # autoscale without specifying --max should fail + ! kubectl autoscale rs frontend "${kube_flags[@]}" + # Clean up + kubectl delete rs frontend "${kube_flags[@]}" + + + ###################### + # Lists # + ###################### + + kube::log::status "Testing kubectl(${version}:lists)" + + ### Create a List with objects from multiple versions + # Command + kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}" + + ### Delete the List with objects from multiple versions + # Command + kubectl delete service/list-service-test deployment/list-deployment-test + + + ###################### + # Multiple Resources # + ###################### + + kube::log::status "Testing kubectl(${version}:multiple resources)" + + FILES="hack/testdata/multi-resource-yaml + hack/testdata/multi-resource-list + hack/testdata/multi-resource-json + hack/testdata/multi-resource-rclist + hack/testdata/multi-resource-svclist" + YAML=".yaml" + JSON=".json" + for file in $FILES; do + if [ -f $file$YAML ] + then + file=$file$YAML + replace_file="${file%.yaml}-modify.yaml" + else + file=$file$JSON + replace_file="${file%.json}-modify.json" + fi + + has_svc=true + has_rc=true + two_rcs=false + two_svcs=false + if [[ "${file}" == *rclist* ]]; then + has_svc=false + two_rcs=true + fi + if [[ "${file}" == *svclist* ]]; then + has_rc=false + two_svcs=true + fi + + ### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files: + ### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation + ### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type + echo "Testing with file ${file} and replace with file ${replace_file}" + # Pre-condition: no service (other than default kubernetes services) or replication controller exists + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f "${file}" "${kube_flags[@]}" + # Post-condition: mock service (and mock2) exists + if [ "$has_svc" = true ]; then + if [ "$two_svcs" = true ]; then + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:mock2:' + else + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:' + fi + fi + # Post-condition: mock rc (and mock2) exists + if [ "$has_rc" = true ]; then + if [ "$two_rcs" = true ]; then + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:' + else + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:' + fi + fi + # Command + kubectl get -f "${file}" "${kube_flags[@]}" + # Command: watching multiple resources should return "not supported" error + WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error" + kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true + if ! 
grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then + kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1" + fi + kubectl describe -f "${file}" "${kube_flags[@]}" + # Command + kubectl replace -f $replace_file --force --cascade "${kube_flags[@]}" + # Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced + if [ "$has_svc" = true ]; then + kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced' + if [ "$two_svcs" = true ]; then + kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced' + fi + fi + if [ "$has_rc" = true ]; then + kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced' + if [ "$two_rcs" = true ]; then + kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced' + fi + fi + # Command: kubectl edit multiple resources + temp_editor="${KUBE_TEMP}/tmp-editor.sh" + echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}" + chmod +x "${temp_editor}" + EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}" + # Post-condition: mock service (and mock2) and mock rc (and mock2) are edited + if [ "$has_svc" = true ]; then + kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited' + if [ "$two_svcs" = true ]; then + kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited' + fi + fi + if [ "$has_rc" = true ]; then + kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited' + if [ "$two_rcs" = true ]; then + kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited' + fi + fi + # cleaning + rm "${temp_editor}" + # Command + # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label" + # fails on some, but not all, of the resources, retries will fail because it tries to modify + # existing labels. + kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}" + # Post-condition: mock service and mock rc (and mock2) are labeled + if [ "$has_svc" = true ]; then + kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true' + if [ "$two_svcs" = true ]; then + kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true' + fi + fi + if [ "$has_rc" = true ]; then + kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true' + if [ "$two_rcs" = true ]; then + kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true' + fi + fi + # Command + # Command + # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate" + # fails on some, but not all, of the resources, retries will fail because it tries to modify + # existing annotations. 
+ kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}" + # Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated + if [ "$has_svc" = true ]; then + kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true' + if [ "$two_svcs" = true ]; then + kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true' + fi + fi + if [ "$has_rc" = true ]; then + kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true' + if [ "$two_rcs" = true ]; then + kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true' + fi + fi + # Cleanup resources created + kubectl delete -f "${file}" "${kube_flags[@]}" + done + + ############################# + # Multiple Resources via URL# + ############################# + + # Pre-condition: no service (other than default kubernetes services) or replication controller exists + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' + + # Command + kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}" + + # Post-condition: service(mock) and rc(mock) exist + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:' + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:' + + # Clean up + kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}" + + # Post-condition: no service (other than default kubernetes services) or replication controller exists + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' + kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' + + + ###################### + # Persistent Volumes # + ###################### + + ### Create and delete persistent volume examples + # Pre-condition: no persistent volumes currently exist + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}" + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:' + kubectl delete pv pv0001 "${kube_flags[@]}" + kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}" + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:' + kubectl delete pv pv0002 "${kube_flags[@]}" + kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}" + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:' + kubectl delete pv pv0003 "${kube_flags[@]}" + # Post-condition: no PVs + kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" '' + + ############################ + # Persistent Volume Claims # + ############################ + + ### Create and delete persistent volume claim examples + # Pre-condition: no persistent volume claims currently exist + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}" + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:' + kubectl delete pvc myclaim-1 
"${kube_flags[@]}" + + kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}" + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:' + kubectl delete pvc myclaim-2 "${kube_flags[@]}" + + kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}" + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:' + kubectl delete pvc myclaim-3 "${kube_flags[@]}" + # Post-condition: no PVCs + kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' + + + + ######### + # Nodes # + ######### + + kube::log::status "Testing kubectl(${version}:nodes)" + + kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:' + + kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:" + # Describe command should print events information by default + kube::test::describe_object_events_assert nodes "127.0.0.1" + # Describe command should not print events information when show-events=false + kube::test::describe_object_events_assert nodes "127.0.0.1" false + # Describe command should print events information when show-events=true + kube::test::describe_object_events_assert nodes "127.0.0.1" true + # Describe command (resource only) should print detailed information + kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:" + # Describe command should print events information by default + kube::test::describe_resource_events_assert nodes + # Describe command should not print events information when show-events=false + kube::test::describe_resource_events_assert nodes false + # Describe command should print events information when show-events=true + kube::test::describe_resource_events_assert nodes true + + ### kubectl patch update can mark node unschedulable + # Pre-condition: node is schedulable + kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' + kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}' + # Post-condition: node is unschedulable + kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true' + kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}' + # Post-condition: node is schedulable + kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' + + + ##################### + # Retrieve multiple # + ##################### + + kube::log::status "Testing kubectl(${version}:multiget)" + kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:' + + + ##################### + # Resource aliasing # + ##################### + + kube::log::status "Testing resource aliasing" + kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}" + kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}" + + object="all -l'app=cassandra'" + request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}" + + # all 4 cassandra's might not be in the request immediately... 
+ kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \ + kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \ + kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:' + + kubectl delete all -l app=cassandra "${kube_flags[@]}" + + ########### + # Explain # + ########### + + kube::log::status "Testing kubectl(${version}:explain)" + kubectl explain pods + # shortcuts work + kubectl explain po + kubectl explain po.status.message + + + ########### + # Swagger # + ########### + + if [[ -n "${version}" ]]; then + # Verify schema + file="${KUBE_TEMP}/schema-${version}.json" + curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/${version}" > "${file}" + [[ "$(grep "list of returned" "${file}")" ]] + [[ "$(grep "List of pods" "${file}")" ]] + [[ "$(grep "Watch for changes to the described resources" "${file}")" ]] + fi + + ##################### + # Kubectl --sort-by # + ##################### + + ### sort-by should not panic if no pod exists + # Pre-condition: no POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl get pods --sort-by="{metadata.name}" + kubectl get pods --sort-by="{metadata.creationTimestamp}" + + ############################ + # Kubectl --all-namespaces # + ############################ + + # Pre-condition: the "default" namespace exists + kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:' + + ### Create POD + # Pre-condition: no POD exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Command + kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml + # Post-condition: valid-pod is created + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + + ### Verify a specific namespace is ignored when all-namespaces is provided + # Command + kubectl get pods --all-namespaces --namespace=default + + ### Clean up + # Pre-condition: valid-pod exists + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + # Command + kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 + # Post-condition: valid-pod doesn't exist + kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + + kube::test::clear_all +} + +runTests "v1" + +kube::log::status "TEST PASSED" diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh new file mode 100755 index 0000000000000..147e1ce71862e --- /dev/null +++ b/hack/make-rules/test-e2e-node.sh @@ -0,0 +1,134 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
+source "${KUBE_ROOT}/hack/lib/init.sh" + +focus=${FOCUS:-""} +skip=${SKIP:-""} +report=${REPORT:-"/tmp/"} +artifacts=${ARTIFACTS:-"/tmp/_artifacts"} +remote=${REMOTE:-"false"} +images=${IMAGES:-""} +hosts=${HOSTS:-""} +if [[ $hosts == "" && $images == "" ]]; then + images="e2e-node-containervm-v20160321-image" +fi +image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"} +instance_prefix=${INSTANCE_PREFIX:-"test"} +cleanup=${CLEANUP:-"true"} +delete_instances=${DELETE_INSTANCES:-"false"} +run_until_failure=${RUN_UNTIL_FAILURE:-"false"} +list_images=${LIST_IMAGES:-"false"} + +if [[ $list_images == "true" ]]; then + gcloud compute images list --project="${image_project}" | grep "e2e-node" + exit 0 +fi + +ginkgo=$(kube::util::find-binary "ginkgo") +if [[ -z "${ginkgo}" ]]; then + echo "You do not appear to have ginkgo built. Try 'make WHAT=vendor/github.com/onsi/ginkgo/ginkgo'" + exit 1 +fi + +if [ $remote = true ] ; then + # Setup the directory to copy test artifacts (logs, junit.xml, etc) from remote host to local host + if [ ! -d "${artifacts}" ]; then + echo "Creating artifacts directory at ${artifacts}" + mkdir -p ${artifacts} + fi + echo "Test artifacts will be written to ${artifacts}" + + # Get the compute zone + zone=$(gcloud info --format='value(config.properties.compute.zone)') + if [[ $zone == "" ]]; then + echo "Could not find gcloud compute/zone when running:\ngcloud info --format='value(config.properties.compute.zone)'" + exit 1 + fi + + # Get the compute project + project=$(gcloud info --format='value(config.project)') + if [[ $project == "" ]]; then + echo "Could not find gcloud project when running:\ngcloud info --format='value(config.project)'" + exit 1 + fi + + # Check if any of the images specified already have running instances. If so reuse those instances + # by moving the IMAGE to a HOST + if [[ $images != "" ]]; then + IFS=',' read -ra IM <<< "$images" + images="" + for i in "${IM[@]}"; do + if [[ $(gcloud compute instances list "${instance_prefix}-$i" | grep $i) ]]; then + if [[ $hosts != "" ]]; then + hosts="$hosts," + fi + echo "Reusing host ${instance_prefix}-$i" + hosts="${hosts}${instance_prefix}-${i}" + else + if [[ $images != "" ]]; then + images="$images," + fi + images="$images$i" + fi + done + fi + + # Parse the flags to pass to ginkgo + ginkgoflags="" + if [[ $focus != "" ]]; then + ginkgoflags="$ginkgoflags -focus=$focus " + fi + + if [[ $skip != "" ]]; then + ginkgoflags="$ginkgoflags -skip=$skip " + fi + + if [[ $run_until_failure != "" ]]; then + ginkgoflags="$ginkgoflags -untilItFails=$run_until_failure " + fi + + # Output the configuration we will try to run + echo "Running tests remotely using" + echo "Project: $project" + echo "Image Project: $image_project" + echo "Compute/Zone: $zone" + echo "Images: $images" + echo "Hosts: $hosts" + echo "Ginkgo Flags: $ginkgoflags" + + # Invoke the runner + go run test/e2e_node/runner/run_e2e.go --logtostderr --vmodule=*=2 --ssh-env="gce" \ + --zone="$zone" --project="$project" \ + --hosts="$hosts" --images="$images" --cleanup="$cleanup" \ + --results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \ + --image-project="$image_project" --instance-name-prefix="$instance_prefix" --setup-node="true" \ + --delete-instances="$delete_instances" + exit $? + +else + # Refresh sudo credentials if not running on GCE. + if ! 
+
+  # Check if any of the images specified already have running instances. If so, reuse those instances
+  # by moving the IMAGE to a HOST
+  if [[ $images != "" ]]; then
+    IFS=',' read -ra IM <<< "$images"
+    images=""
+    for i in "${IM[@]}"; do
+      if [[ $(gcloud compute instances list "${instance_prefix}-$i" | grep $i) ]]; then
+        if [[ $hosts != "" ]]; then
+          hosts="$hosts,"
+        fi
+        echo "Reusing host ${instance_prefix}-$i"
+        hosts="${hosts}${instance_prefix}-${i}"
+      else
+        if [[ $images != "" ]]; then
+          images="$images,"
+        fi
+        images="$images$i"
+      fi
+    done
+  fi
+
+  # Parse the flags to pass to ginkgo
+  ginkgoflags=""
+  if [[ $focus != "" ]]; then
+    ginkgoflags="$ginkgoflags -focus=$focus "
+  fi
+
+  if [[ $skip != "" ]]; then
+    ginkgoflags="$ginkgoflags -skip=$skip "
+  fi
+
+  if [[ $run_until_failure != "" ]]; then
+    ginkgoflags="$ginkgoflags -untilItFails=$run_until_failure "
+  fi
+
+  # Output the configuration we will try to run
+  echo "Running tests remotely using"
+  echo "Project: $project"
+  echo "Image Project: $image_project"
+  echo "Compute/Zone: $zone"
+  echo "Images: $images"
+  echo "Hosts: $hosts"
+  echo "Ginkgo Flags: $ginkgoflags"
+
+  # Invoke the runner
+  go run test/e2e_node/runner/run_e2e.go --logtostderr --vmodule=*=2 --ssh-env="gce" \
+    --zone="$zone" --project="$project" \
+    --hosts="$hosts" --images="$images" --cleanup="$cleanup" \
+    --results-dir="$artifacts" --ginkgo-flags="$ginkgoflags" \
+    --image-project="$image_project" --instance-name-prefix="$instance_prefix" --setup-node="true" \
+    --delete-instances="$delete_instances"
+  exit $?
+
+else
+  # Refresh sudo credentials if not running on GCE.
+  if ! ping -c 1 -q metadata.google.internal &> /dev/null; then
+    sudo -v || exit 1
+  fi
+
+  # Test using the host the script was run on
+  # Provided for backwards compatibility
+  "${ginkgo}" --focus=$focus --skip=$skip "${KUBE_ROOT}/test/e2e_node/" --report-dir=${report} \
+    -- --alsologtostderr --v 2 --node-name $(hostname) --disable-kubenet=true --build-services=true --start-services=true --stop-services=true
+  exit $?
+fi diff --git a/hack/make-rules/test-integration.sh b/hack/make-rules/test-integration.sh new file mode 100755 index 0000000000000..f594ef99afc30 --- /dev/null +++ b/hack/make-rules/test-integration.sh @@ -0,0 +1,94 @@ +#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+source "${KUBE_ROOT}/hack/lib/init.sh"
+# Lists of API versions for each group that should be tested; groups are
+# separated by commas, lists by semicolons. e.g.,
+# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
+# TODO: It's going to be:
+# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
+# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
+# ONLY the last version is tested in each group.
+KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,autoscaling/v1,batch/v1,apps/v1alpha1,policy/v1alpha1,extensions/v1beta1,rbac.authorization.k8s.io/v1alpha1,certificates/v1alpha1"}
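+# For example (illustrative), two passes covering different group versions:
+#   KUBE_TEST_API_VERSIONS="v1,extensions/v1beta1;v1,batch/v1" make test-integration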
+
+# Give integration tests longer to run
+# TODO: allow a larger value to be passed in
+#KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s}
+KUBE_TIMEOUT="-timeout 600s"
+KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
+LOG_LEVEL=${LOG_LEVEL:-2}
+KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-}
+
+kube::test::find_integration_test_dirs() {
+  (
+    cd ${KUBE_ROOT}
+    find test/integration -name '*_test.go' -print0 \
+      | xargs -0n1 dirname \
+      | sort -u
+  )
+}
+
+cleanup() {
+  kube::log::status "Cleaning up etcd"
+  kube::etcd::cleanup
+  kube::log::status "Integration test cleanup complete"
+}
+
+runTests() {
+  kube::log::status "Starting etcd instance"
+  kube::etcd::start
+  kube::log::status "Running integration test cases"
+
+  # TODO: Re-enable race detection when we switch to a thread-safe etcd client
+  # KUBE_RACE="-race"
+  make -C "${KUBE_ROOT}" test \
+    WHAT="$(kube::test::find_integration_test_dirs | paste -sd' ')" \
+    KUBE_GOFLAGS="${KUBE_GOFLAGS:-} -tags 'integration no-docker'" \
+    KUBE_RACE="" \
+    KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
+    KUBE_TEST_API_VERSIONS="$1"
+
+  cleanup
}
+
+checkEtcdOnPath() {
+  kube::log::status "Checking etcd is on PATH"
+  which etcd && return
+  kube::log::status "Cannot find etcd, cannot run integration tests."
+  kube::log::status "Please see docs/devel/testing.md for instructions."
+  return 1
+}
+
+checkEtcdOnPath
+
+# Run cleanup to stop etcd on interrupt or other kill signal.
+trap cleanup EXIT
+
+# If a test case is specified, just run once with v1 API version and exit
+if [[ -n "${KUBE_TEST_ARGS}" ]]; then
+  runTests v1
+  exit 0
+fi
+
+# Convert the CSV to an array of API versions to test
+IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
+for apiVersion in "${apiVersions[@]}"; do
+  runTests "${apiVersion}"
+done diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh new file mode 100755 index 0000000000000..28a412ddfee61 --- /dev/null +++ b/hack/make-rules/test.sh @@ -0,0 +1,282 @@ +#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+source "${KUBE_ROOT}/hack/lib/init.sh"
+
+kube::golang::setup_env
+
+kube::test::find_dirs() {
+  (
+    cd ${KUBE_ROOT}
+    find . -not \( \
+        \( \
+          -path './_artifacts/*' \
+          -o -path './_output/*' \
+          -o -path './_gopath/*' \
+          -o -path './contrib/podex/*' \
+          -o -path './output/*' \
+          -o -path './release/*' \
+          -o -path './target/*' \
+          -o -path './test/e2e/*' \
+          -o -path './test/e2e_node/*' \
+          -o -path './test/integration/*' \
+          -o -path './test/component/scheduler/perf/*' \
+          -o -path './third_party/*'\
+          -o -path './vendor/*'\
+        \) -prune \
+      \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./||' | sort -u
+  )
+}
+
+KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
+KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
+KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
+# How many 'go test' instances to run simultaneously when running tests in
+# coverage mode.
+KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
+KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
+# Set to the goveralls binary path to report coverage results to Coveralls.io.
+KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
+# Lists of API versions for each group that should be tested; groups are
+# separated by commas, lists by semicolons. e.g.,
+# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
+# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
+# ONLY the last version is tested in each group.
+KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,autoscaling/v1,batch/v1,batch/v2alpha1,extensions/v1beta1,apps/v1alpha1,federation/v1beta1,policy/v1alpha1,rbac.authorization.k8s.io/v1alpha1,certificates/v1alpha1"}
+# TODO: revisit once we have multiple group support.
+# Create a junit-style XML test report in this directory if set.
+KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
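+# Example combining these knobs (report path and package are illustrative):
+#   KUBE_RACE="-race" KUBE_JUNIT_REPORT_DIR=/tmp/junit make test WHAT=pkg/kubectl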
+# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
+# set.
+KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
+
+kube::test::usage() {
+  kube::log::usage_from_stdin <<EOF
+usage: $0 [OPTIONS] [TARGETS]
+
+OPTIONS:
+  -p <number>   : number of parallel workers, must be >= 1
+EOF
+}
+
+isnum() {
+  [[ "$1" =~ ^[0-9]+$ ]]
+}
+
+PARALLEL="${PARALLEL:-1}"
+while getopts "hp:i:" opt ; do
+  case $opt in
+    h)
+      kube::test::usage
+      exit 0
+      ;;
+    p)
+      PARALLEL="$OPTARG"
+      if ! isnum "${PARALLEL}" || [[ "${PARALLEL}" -le 0 ]]; then
+        kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
+        kube::test::usage
+        exit 1
+      fi
+      ;;
+    i)
+      kube::log::usage "'$0': use GOFLAGS='-count <num-iterations>'"
+      kube::test::usage
+      exit 1
+      ;;
+    ?)
+      kube::test::usage
+      exit 1
+      ;;
+    :)
+      kube::log::usage "Option -$OPTARG <value>"
+      kube::test::usage
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND - 1))
+
+# Use eval to preserve embedded quoted strings.
+eval "goflags=(${KUBE_GOFLAGS:-})"
+eval "testargs=(${KUBE_TEST_ARGS:-})"
+
+# Used to filter verbose test output.
+go_test_grep_pattern=".*"
+
+# The go-junit-report tool needs full test case information to produce a
+# meaningful report.
+if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
+  goflags+=(-v)
+  # Show only summary lines by matching lines like "status package/test"
+  go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
+fi
+
+# Filter out arguments that start with "-" and move them to goflags.
+testcases=()
+for arg; do
+  if [[ "${arg}" == -* ]]; then
+    goflags+=("${arg}")
+  else
+    testcases+=("${arg}")
+  fi
+done
+if [[ ${#testcases[@]} -eq 0 ]]; then
+  testcases=($(kube::test::find_dirs))
+fi
+set -- "${testcases[@]+${testcases[@]}}"
+
+junitFilenamePrefix() {
+  if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
+    echo ""
+    return
+  fi
+  mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
+  local KUBE_TEST_API_NO_SLASH="${KUBE_TEST_API//\//-}"
+  echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_NO_SLASH}_$(kube::util::sortable_date)"
+}
+
+produceJUnitXMLReport() {
+  local -r junit_filename_prefix=$1
+  if [[ -z "${junit_filename_prefix}" ]]; then
+    return
+  fi
+
+  local test_stdout_filenames
+  local junit_xml_filename
+  test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
+  junit_xml_filename="${junit_filename_prefix}.xml"
+  if ! command -v go-junit-report >/dev/null 2>&1; then
+    kube::log::error "go-junit-report not found; please install with " \
+      "go get -u github.com/jstemmer/go-junit-report"
+    return
+  fi
+  cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
+  if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
+    rm ${test_stdout_filenames}
+  fi
+  kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
+}
+
+runTests() {
+  local junit_filename_prefix
+  junit_filename_prefix=$(junitFilenamePrefix)
+
+  # If we're not collecting coverage, run all requested tests with one 'go test'
+  # command, which is much faster.
+  if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
+    kube::log::status "Running tests without code coverage"
+    go test "${goflags[@]:+${goflags[@]}}" \
+      ${KUBE_RACE} ${KUBE_TIMEOUT} "${@+${@/#/${KUBE_GO_PACKAGE}/}}" \
+      "${testargs[@]:+${testargs[@]}}" \
+      | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
+      | grep "${go_test_grep_pattern}" && rc=$? || rc=$?
+    produceJUnitXMLReport "${junit_filename_prefix}"
+    return ${rc}
+  fi
+
+  # Create coverage report directories.
+ cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API}/$(kube::util::sortable_date)" + cover_profile="coverage.out" # Name for each individual coverage profile + kube::log::status "Saving coverage output in '${cover_report_dir}'" + mkdir -p "${@+${@/#/${cover_report_dir}/}}" + + # Run all specified tests, collecting coverage results. Go currently doesn't + # support collecting coverage across multiple packages at once, so we must issue + # separate 'go test' commands for each package and then combine at the end. + # To speed things up considerably, we can at least use xargs -P to run multiple + # 'go test' commands at once. + # To properly parse the test results if generating a JUnit test report, we + # must make sure the output from PARALLEL runs is not mixed. To achieve this, + # we spawn a subshell for each PARALLEL process, redirecting the output to + # separate files. + # cmd/libs/go2idl/generator is fragile when run under coverage, so ignore it for now. + # see: https://github.com/kubernetes/kubernetes/issues/24967 + printf "%s\n" "${@}" | grep -v "cmd/libs/go2idl/generator"| xargs -I{} -n1 -P${KUBE_COVERPROCS} \ + bash -c "set -o pipefail; _pkg=\"{}\"; _pkg_out=\${_pkg//\//_}; \ + go test ${goflags[@]:+${goflags[@]}} \ + ${KUBE_RACE} \ + ${KUBE_TIMEOUT} \ + -cover -covermode=\"${KUBE_COVERMODE}\" \ + -coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \ + \"${KUBE_GO_PACKAGE}/\${_pkg}\" \ + ${testargs[@]:+${testargs[@]}} \ + | tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \ + | grep \"${go_test_grep_pattern}\"" \ + && test_result=$? || test_result=$? + + produceJUnitXMLReport "${junit_filename_prefix}" + + COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out" + { + # The combined coverage profile needs to start with a line indicating which + # coverage mode was used (set, count, or atomic). This line is included in + # each of the coverage profiles generated when running 'go test -cover', but + # we strip these lines out when combining so that there's only one. + echo "mode: ${KUBE_COVERMODE}" + + # Include all coverage reach data in the combined profile, but exclude the + # 'mode' lines, as there should be only one. + for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do + cat $x | grep -h -v "^mode:" || true + done + } >"${COMBINED_COVER_PROFILE}" + + coverage_html_file="${cover_report_dir}/combined-coverage.html" + go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}" + kube::log::status "Combined coverage report: ${coverage_html_file}" + + return ${test_result} +} + +reportCoverageToCoveralls() { + if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then + kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}" + ${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \ + ${CI_NAME:+"-service=${CI_NAME}"} \ + ${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \ + || true + fi +} + +checkFDs() { + # several unittests panic when httptest cannot open more sockets + # due to the low default files limit on OS X. Warn about low limit. + local fileslimit="$(ulimit -n)" + if [[ $fileslimit -lt 1000 ]]; then + echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure"; + fi +} + +checkFDs + +# Convert the CSVs to arrays. 
+IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
+apiVersionsCount=${#apiVersions[@]}
+for (( i=0; i<${apiVersionsCount}; i++ )); do
+  apiVersion=${apiVersions[i]}
+  echo "Running tests for APIVersion: $apiVersion"
+  # KUBE_TEST_API sets the version of each group to be tested.
+  KUBE_TEST_API="${apiVersion}" runTests "$@"
+done
+
+# We might run the tests for multiple versions, but we want to report only
+# one of them to coveralls. Here we report coverage from the last run.
+reportCoverageToCoveralls diff --git a/hack/make-rules/verify.sh b/hack/make-rules/verify.sh new file mode 100755 index 0000000000000..a1b138128e9e8 --- /dev/null +++ b/hack/make-rules/verify.sh @@ -0,0 +1,99 @@ +#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+source "${KUBE_ROOT}/cluster/lib/util.sh"
+
+if [ -n "${VERBOSE:-}" ]; then
+  SILENT=false
+else
+  SILENT=true
+fi
+
+# Excluded checks are always skipped.
+EXCLUDED_CHECKS=(
+  "verify-linkcheck.sh"  # runs in separate Jenkins job once per day due to high network usage
+  )
+
+function is-excluded {
+  if [[ $1 -ef "$KUBE_ROOT/hack/verify-all.sh" ]]; then
+    return
+  fi
+  for e in ${EXCLUDED_CHECKS[@]}; do
+    if [[ $1 -ef "$KUBE_ROOT/hack/$e" ]]; then
+      return
+    fi
+  done
+  return 1
+}
+
+function run-cmd {
+  if ${SILENT}; then
+    "$@" &> /dev/null
+  else
+    "$@"
+  fi
+}
+
+function run-checks {
+  local -r pattern=$1
+  local -r runner=$2
+
+  for t in $(ls ${pattern})
+  do
+    if is-excluded "${t}" ; then
+      echo "Skipping ${t}"
+      continue
+    fi
+    echo -e "Verifying ${t}"
+    local start=$(date +%s)
+    run-cmd "${runner}" "${t}" && tr=$? || tr=$?
+    local elapsed=$(($(date +%s) - ${start}))
+    if [[ ${tr} -eq 0 ]]; then
+      echo -e "${color_green}SUCCESS${color_norm}  ${t}\t${elapsed}s"
+    else
+      echo -e "${color_red}FAILED${color_norm}   ${t}\t${elapsed}s"
+      ret=1
+    fi
+  done
+}
+
+while getopts ":v" opt; do
+  case ${opt} in
+    v)
+      SILENT=false
+      ;;
+    \?)
+      echo "Invalid flag: -${OPTARG}" >&2
+      exit 1
+      ;;
+  esac
+done
+
+if ${SILENT} ; then
+  echo "Running in silent mode, run with -v if you want to see script logs."
+fi
+
+ret=0
+run-checks "${KUBE_ROOT}/hack/verify-*.sh" bash
+run-checks "${KUBE_ROOT}/hack/verify-*.py" python
+exit ${ret}
+
+# ex: ts=2 sw=2 et filetype=sh diff --git a/hack/make-rules/vet.sh b/hack/make-rules/vet.sh new file mode 100755 index 0000000000000..2c0c329fd8ec3 --- /dev/null +++ b/hack/make-rules/vet.sh @@ -0,0 +1,48 @@ +#!/bin/bash
+
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +source "${KUBE_ROOT}/hack/lib/init.sh" + +cd "${KUBE_ROOT}" + +# This is required before we run govet for the results to be correct. +# See https://github.com/golang/go/issues/16086 for details. +go install ./cmd/... + +# Use eval to preserve embedded quoted strings. +eval "goflags=(${KUBE_GOFLAGS:-})" + +# Filter out arguments that start with "-" and move them to goflags. +targets=() +for arg; do + if [[ "${arg}" == -* ]]; then + goflags+=("${arg}") + else + targets+=("${arg}") + fi +done + +if [[ ${#targets[@]} -eq 0 ]]; then + # Do not run on third_party directories. + targets=$(go list ./... | egrep -v "/(third_party|vendor)/") +fi + +go vet "${goflags[@]:+${goflags[@]}}" ${targets[@]} diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index b51705eaa7d7e..f4e5abeb0af4f 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2014 The Kubernetes Authors. +# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,2357 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This command checks that the built commands can function together for -# simple scenarios. It does not require Docker. +# This script is a vestigial redirection. Please do not add "real" logic. set -o errexit set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -source "${KUBE_ROOT}/hack/lib/init.sh" -source "${KUBE_ROOT}/hack/lib/test.sh" -# Stops the running kubectl proxy, if there is one. -function stop-proxy() -{ - [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}" - [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null - [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE} - PROXY_PID= - PROXY_PORT= - PROXY_PORT_FILE= -} - -# Starts "kubect proxy" to test the client proxy. $1: api_prefix -function start-proxy() -{ - stop-proxy - - PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX) - kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}" - - - if [ $# -eq 0 ]; then - kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 & - else - kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 & - fi - PROXY_PID=$! - PROXY_PORT= - - local attempts=0 - while [[ -z ${PROXY_PORT} ]]; do - if (( ${attempts} > 9 )); then - kill "${PROXY_PID}" - kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})" - fi - sleep .5 - kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..." - PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE}) - attempts=$((attempts+1)) - done - - kube::log::status "kubectl proxy running on port ${PROXY_PORT}" - - # We try checking kubectl proxy 30 times with 1s delays to avoid occasional - # failures. 
- if [ $# -eq 0 ]; then - kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy" - else - kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1" - fi -} - -function cleanup() -{ - [[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null - [[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null - [[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null - stop-proxy - - kube::etcd::cleanup - rm -rf "${KUBE_TEMP}" - - kube::log::status "Clean up complete" -} - -# Executes curl against the proxy. $1 is the path to use, $2 is the desired -# return code. Prints a helpful message on failure. -function check-curl-proxy-code() -{ - local status - local -r address=$1 - local -r desired=$2 - local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}" - status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}") - if [ "${status}" == "${desired}" ]; then - return 0 - fi - echo "For address ${full_address}, got ${status} but wanted ${desired}" - return 1 -} - -# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333. -function kubectl-with-retry() -{ - ERROR_FILE="${KUBE_TEMP}/kubectl-error" - preserve_err_file=${PRESERVE_ERR_FILE-false} - for count in $(seq 0 3); do - kubectl "$@" 2> ${ERROR_FILE} || true - if grep -q "the object has been modified" "${ERROR_FILE}"; then - kube::log::status "retry $1, error: $(cat ${ERROR_FILE})" - rm "${ERROR_FILE}" - sleep $((2**count)) - else - if [ "$preserve_err_file" != true ] ; then - rm "${ERROR_FILE}" - fi - break - fi - done -} - -kube::util::trap_add cleanup EXIT SIGINT -kube::util::ensure-temp-dir - -"${KUBE_ROOT}/hack/build-go.sh" \ - cmd/kubectl \ - cmd/kube-apiserver \ - cmd/kube-controller-manager - -kube::etcd::start - -ETCD_HOST=${ETCD_HOST:-127.0.0.1} -ETCD_PORT=${ETCD_PORT:-4001} -API_PORT=${API_PORT:-8080} -API_HOST=${API_HOST:-127.0.0.1} -KUBELET_PORT=${KUBELET_PORT:-10250} -KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248} -CTLRMGR_PORT=${CTLRMGR_PORT:-10252} -PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. - -IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9" -IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml -IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml -IMAGE_PERL="gcr.io/google-containers/perl" - -# ensure ~/.kube/config isn't loaded by tests -HOME="${KUBE_TEMP}" - -# Find a standard sed instance for use with edit scripts -SED=sed -if which gsed &>/dev/null; then - SED=gsed -fi -if ! ($SED --version 2>&1 | grep -q GNU); then - echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'." - exit 1 -fi - -# Check kubectl -kube::log::status "Running kubectl with no options" -"${KUBE_OUTPUT_HOSTBIN}/kubectl" - -# Only run kubelet on platforms it supports -if [[ "$(go env GOHOSTOS)" == "linux" ]]; then - -"${KUBE_ROOT}/hack/build-go.sh" \ - cmd/kubelet - -kube::log::status "Starting kubelet in masterless mode" -"${KUBE_OUTPUT_HOSTBIN}/kubelet" \ - --really-crash-for-testing=true \ - --root-dir=/tmp/kubelet.$$ \ - --cert-dir="${TMPDIR:-/tmp/}" \ - --docker-endpoint="fake://" \ - --hostname-override="127.0.0.1" \ - --address="127.0.0.1" \ - --port="$KUBELET_PORT" \ - --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 & -KUBELET_PID=$! 
-kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet(masterless)" -kill ${KUBELET_PID} 1>&2 2>/dev/null - -kube::log::status "Starting kubelet in masterful mode" -"${KUBE_OUTPUT_HOSTBIN}/kubelet" \ - --really-crash-for-testing=true \ - --root-dir=/tmp/kubelet.$$ \ - --cert-dir="${TMPDIR:-/tmp/}" \ - --docker-endpoint="fake://" \ - --hostname-override="127.0.0.1" \ - --address="127.0.0.1" \ - --api-servers="${API_HOST}:${API_PORT}" \ - --port="$KUBELET_PORT" \ - --healthz-port="${KUBELET_HEALTHZ_PORT}" 1>&2 & -KUBELET_PID=$! - -kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kubelet" - -fi - -# Start kube-apiserver -kube::log::status "Starting kube-apiserver" - -# Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL="NamespaceLifecycle,LimitRanger,ResourceQuota" - -"${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \ - --address="127.0.0.1" \ - --public-address-override="127.0.0.1" \ - --port="${API_PORT}" \ - --admission-control="${ADMISSION_CONTROL}" \ - --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \ - --public-address-override="127.0.0.1" \ - --kubelet-port=${KUBELET_PORT} \ - --runtime-config=api/v1 \ - --storage-media-type="${KUBE_TEST_API_STORAGE_TYPE-}" \ - --cert-dir="${TMPDIR:-/tmp/}" \ - --service-cluster-ip-range="10.0.0.0/24" 1>&2 & -APISERVER_PID=$! - -kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver" - -# Start controller manager -kube::log::status "Starting controller-manager" -"${KUBE_OUTPUT_HOSTBIN}/kube-controller-manager" \ - --port="${CTLRMGR_PORT}" \ - --kube-api-content-type="${KUBE_TEST_API_TYPE-}" \ - --master="127.0.0.1:${API_PORT}" 1>&2 & -CTLRMGR_PID=$! - -kube::util::wait_for_url "http://127.0.0.1:${CTLRMGR_PORT}/healthz" "controller-manager" - -if [[ "$(go env GOHOSTOS)" == "linux" ]]; then - kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/api/v1/nodes/127.0.0.1" "apiserver(nodes)" -else - # create a fake node - kubectl create -f - -s "http://127.0.0.1:${API_PORT}" << __EOF__ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "127.0.0.1" - }, - "status": { - "capacity": { - "memory": "1Gi" - } - } -} -__EOF__ +# For help output +ARGHELP="" +if [[ "$#" -gt 0 ]]; then + ARGHELP="" fi -# Expose kubectl directly for readability -PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH - -kube::log::status "Checking kubectl version" -kubectl version - -# TODO: we need to note down the current default namespace and set back to this -# namespace after the tests are done. 
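One way to discharge the TODO above is to record the namespace of the test context before the tests start switching namespaces, and restore it in an exit trap. A sketch, assuming the context is named `test` as it is configured below (`restore-namespace` and the jsonpath expression are illustrative, not part of the script):

    # Remember the namespace of the "test" context, then put it back on exit.
    orig_ns=$(kubectl config view -o jsonpath='{.contexts[?(@.name=="test")].context.namespace}')
    restore-namespace() {
      kubectl config set-context "test" --namespace="${orig_ns:-default}"
    }
    kube::util::trap_add restore-namespace EXIT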
-kubectl config view -CONTEXT="test" -kubectl config set-context "${CONTEXT}" -kubectl config use-context "${CONTEXT}" - -i=0 -create_and_use_new_namespace() { - i=$(($i+1)) - kubectl create namespace "namespace${i}" - kubectl config set-context "${CONTEXT}" --namespace="namespace${i}" -} - -runTests() { - version="$1" - echo "Testing api version: $1" - if [[ -z "${version}" ]]; then - kube_flags=( - -s "http://127.0.0.1:${API_PORT}" - --match-server-version - ) - [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ] - else - kube_flags=( - -s "http://127.0.0.1:${API_PORT}" - --match-server-version - ) - [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ] - fi - id_field=".metadata.name" - labels_field=".metadata.labels" - annotations_field=".metadata.annotations" - service_selector_field=".spec.selector" - rc_replicas_field=".spec.replicas" - rc_status_replicas_field=".status.replicas" - rc_container_image_field=".spec.template.spec.containers" - rs_replicas_field=".spec.replicas" - port_field="(index .spec.ports 0).port" - port_name="(index .spec.ports 0).name" - second_port_field="(index .spec.ports 1).port" - second_port_name="(index .spec.ports 1).name" - image_field="(index .spec.containers 0).image" - hpa_min_field=".spec.minReplicas" - hpa_max_field=".spec.maxReplicas" - hpa_cpu_field=".spec.targetCPUUtilizationPercentage" - job_parallelism_field=".spec.parallelism" - deployment_replicas=".spec.replicas" - secret_data=".data" - secret_type=".type" - deployment_image_field="(index .spec.template.spec.containers 0).image" - deployment_second_image_field="(index .spec.template.spec.containers 1).image" - change_cause_annotation='.*kubernetes.io/change-cause.*' - - # Passing no arguments to create is an error - ! kubectl create - - ####################### - # kubectl config set # - ####################### - - kube::log::status "Testing kubectl(${version}:config set)" - - kubectl config set-cluster test-cluster --server="https://does-not-work" - - # Get the api cert and add a comment to avoid flag parsing problems - cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt") - - kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes - r_writen=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}') - - encoded=$(echo -n "$cert_data" | base64) - kubectl config set clusters.test-cluster.certificate-authority-data "$encoded" - e_writen=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}') - - test "$e_writen" == "$r_writen" - - ####################### - # kubectl local proxy # - ####################### - - # Make sure the UI can be proxied - start-proxy - check-curl-proxy-code /ui 301 - check-curl-proxy-code /metrics 200 - check-curl-proxy-code /api/ui 404 - if [[ -n "${version}" ]]; then - check-curl-proxy-code /api/${version}/namespaces 200 - fi - check-curl-proxy-code /static/ 200 - stop-proxy - - # Make sure the in-development api is accessible by default - start-proxy - check-curl-proxy-code /apis 200 - check-curl-proxy-code /apis/extensions/ 200 - stop-proxy - - # Custom paths let you see everything. 
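The `--api-prefix` flag exercised by the `start-proxy /custom` call that follows re-roots every path the proxy serves under the given prefix. Roughly, with an illustrative port:

    # Everything normally served at /... appears under /custom/... instead.
    kubectl proxy --port=8001 --api-prefix=/custom &
    sleep 1  # give the proxy a moment to come up
    curl http://127.0.0.1:8001/custom/api/v1/namespaces  # same payload as /api/v1/namespaces on an unprefixed proxy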
- start-proxy /custom - check-curl-proxy-code /custom/ui 301 - check-curl-proxy-code /custom/metrics 200 - if [[ -n "${version}" ]]; then - check-curl-proxy-code /custom/api/${version}/namespaces 200 - fi - stop-proxy - - ######################### - # RESTMapper evaluation # - ######################### - - kube::log::status "Testing RESTMapper" - - RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error" - - ### Non-existent resource type should give a recognizeable error - # Pre-condition: None - # Command - kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true - if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then - kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})" - else - kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})" - exit 1 - fi - rm "${RESTMAPPER_ERROR_FILE}" - # Post-condition: None - - ########################### - # POD creation / deletion # - ########################### - - kube::log::status "Testing kubectl(${version}:pods)" - - ### Create POD valid-pod from JSON - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml - # Post-condition: valid-pod POD is created - kubectl get "${kube_flags[@]}" pods -o json - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod' - kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod' - kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod' - # Repeat above test using jsonpath template - kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod' - kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod' - kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod' - kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod' - # Describe command should print detailed information - kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers" - # Describe command should print events information by default - kube::test::describe_object_events_assert pods 'valid-pod' - # Describe command should not print events information when show-events=false - kube::test::describe_object_events_assert pods 'valid-pod' false - # Describe command should print events information when show-events=true - kube::test::describe_object_events_assert pods 'valid-pod' true - # Describe command (resource only) should print detailed information - kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controllers" - - # Describe command should print events information by default - kube::test::describe_resource_events_assert pods - # Describe command should not print events information when show-events=false - kube::test::describe_resource_events_assert pods false - # Describe command should print events information when show-events=true - kube::test::describe_resource_events_assert pods true - ### Validate Export ### - kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" ' valid-pod' "--export=true" - - ### Dump current valid-pod POD - 
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}") - - ### Delete POD valid-pod by id - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Delete POD valid-pod by id with --now - # Pre-condition: valid-pod POD exists - kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete pod valid-pod "${kube_flags[@]}" --now - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Create POD valid-pod from dumped YAML - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}" - # Post-condition: valid-pod POD is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Delete POD valid-pod from JSON - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Create POD valid-pod from JSON - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" - # Post-condition: valid-pod POD is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Delete POD valid-pod with label - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:' - # Command - kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' '' - - ### Create POD valid-pod from YAML - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" - # Post-condition: valid-pod POD is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Delete PODs with no parameter mustn't kill everything - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - ! 
kubectl delete pods "${kube_flags[@]}" - # Post-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Delete PODs with --all and a label selector is not permitted - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" - # Post-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Delete all PODs - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all remove all the pods - # Post-condition: no POD exists - kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' '' - - # Detailed tests for describe pod output - ### Create a new namespace - # Pre-condition: the test-secrets namespace does not exist - kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':' - # Command - kubectl create namespace test-kubectl-describe-pod - # Post-condition: namespace 'test-secrets' is created. - kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod' - - ### Create a generic secret - # Pre-condition: no SECRET exists - kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod - # Post-condition: secret exists and has expected values - kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret' - kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type' - - ### Create a generic configmap - # Pre-condition: no CONFIGMAP exists - kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod - # Post-condition: configmap exists and has expected values - kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap' - - # Create a pod that consumes secret, configmap, and downward API keys as envs - kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' - kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod - - kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "" "TEST_CMD_2" "" "TEST_CMD_3" "env-test-pod (v1:metadata.name)" - # Describe command (resource only) should print detailed information about environment variables - kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "" "TEST_CMD_2" "" "TEST_CMD_3" "env-test-pod (v1:metadata.name)" - - # Clean-up - kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod - kubectl delete secret test-secret --namespace=test-kubectl-describe-pod - kubectl delete configmap 
test-configmap --namespace=test-kubectl-describe-pod - kubectl delete namespace test-kubectl-describe-pod - - ### Create two PODs - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" - kubectl create -f examples/storage/redis/redis-proxy.yaml "${kube_flags[@]}" - # Post-condition: valid-pod and redis-proxy PODs are created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:' - - ### Delete multiple PODs at once - # Pre-condition: valid-pod and redis-proxy PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:' - # Command - kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once - # Post-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Create valid-pod POD - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" - # Post-condition: valid-pod POD is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Label the valid-pod POD - # Pre-condition: valid-pod is not labelled - kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:' - # Command - kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}" - # Post-condition: valid-pod is labelled - kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:' - - ### Delete POD by label - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}" - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Create pod-with-precision POD - # Pre-condition: no POD is running - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}" - # Post-condition: valid-pod POD is running - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:' - - ## Patch preserves precision - # Command - kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}' - # Post-condition: pod-with-precision POD has patched annotation - kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue' - # Command - kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}" - # Post-condition: pod-with-precision POD has label - kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue' - # Command - kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}" - # Post-condition: pod-with-precision POD has annotation - kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue' - # Cleanup - kubectl delete pod pod-with-precision "${kube_flags[@]}" - - ### Create 
valid-pod POD - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" - # Post-condition: valid-pod POD is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ## Patch can modify a local object - kubectl patch --local -f pkg/api/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never" - - ## Patch pod can change image - # Command - kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}' - # Post-condition: valid-pod POD has image nginx - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' - # Post-condition: valid-pod has the record annotation - kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}" - # prove that patch can use different types - kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]' - # Post-condition: valid-pod POD has image nginx - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:' - # prove that patch can use different types - kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]' - # Post-condition: valid-pod POD has image nginx - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' - # prove that yaml input works too - YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n' - kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}" - # Post-condition: valid-pod POD has image nginx - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:' - ## Patch pod from JSON can change image - # Command - kubectl patch "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}' - # Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0 - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:' - - ## If resourceVersion is specified in the patch, it will be treated as a precondition, i.e., if the resourceVersion is different from that is stored in the server, the Patch should be rejected - ERROR_FILE="${KUBE_TEMP}/conflict-error" - ## If the resourceVersion is the same as the one stored in the server, the patch will be applied. - # Command - # Needs to retry because other party may change the resource. 
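Embedding `metadata.resourceVersion` in a patch turns it into a compare-and-swap: the server applies the patch only if the stored object still carries that version, and returns a conflict otherwise. The retry loop that follows re-reads the current version before each attempt; the payload it sends has this shape (the version value here is illustrative):

    kubectl patch pod valid-pod -p '{
      "metadata": {"resourceVersion": "1234"},
      "spec": {"containers": [{"name": "kubernetes-serve-hostname", "image": "nginx"}]}
    }'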
- for count in $(seq 0 3); do - resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}') - kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true - if grep -q "the object has been modified" "${ERROR_FILE}"; then - kube::log::status "retry $1, error: $(cat ${ERROR_FILE})" - rm "${ERROR_FILE}" - sleep $((2**count)) - else - rm "${ERROR_FILE}" - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' - break - fi - done - - ## If the resourceVersion is the different from the one stored in the server, the patch will be rejected. - resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}') - ((resourceVersion+=100)) - # Command - kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true - # Post-condition: should get an error reporting the conflict - if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then - kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})" - else - kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})" - exit 1 - fi - rm "${ERROR_FILE}" - - ## --force replace pod can change other field, e.g., spec.container.name - # Command - kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json - kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json - # Post-condition: spec.container.name = "replaced-k8s-serve-hostname" - kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname' - #cleaning - rm /tmp/tmp-valid-pod.json - - ## replace of a cluster scoped resource can succeed - # Pre-condition: a node exists - kubectl create -f - "${kube_flags[@]}" << __EOF__ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "node-${version}-test" - } -} -__EOF__ - kubectl replace -f - "${kube_flags[@]}" << __EOF__ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "node-${version}-test", - "annotations": {"a":"b"} - } -} -__EOF__ - # Post-condition: the node command succeeds - kube::test::get_object_assert "node node-${version}-test" "{{.metadata.annotations.a}}" 'b' - kubectl delete node node-${version}-test "${kube_flags[@]}" - - ## kubectl edit can update the image field of a POD. 
tmp-editor.sh is a fake editor - echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh - chmod +x /tmp/tmp-editor.sh - # Pre-condition: valid-pod POD has image nginx - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' - EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod - # Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:' - # cleaning - rm /tmp/tmp-editor.sh - - ## kubectl edit should work on Windows - [ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ] - [ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ] - [ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ] - [ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ] - - ### Overwriting an existing label is not permitted - # Pre-condition: name is valid-pod - kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod' - # Command - ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" - # Post-condition: name is still valid-pod - kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod' - - ### --overwrite must be used to overwrite existing label, can be applied to all resources - # Pre-condition: name is valid-pod - kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod' - # Command - kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}" - # Post-condition: name is valid-pod-super-sayan - kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan' - - ### Delete POD by label - # Pre-condition: valid-pod POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}" - # Post-condition: valid-pod POD doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ### Create two PODs from 1 yaml file - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}" - # Post-condition: valid-pod and redis-proxy PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:' - - ### Delete two PODs from 1 yaml file - # Pre-condition: redis-master and redis-proxy PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:redis-proxy:' - # Command - kubectl delete -f docs/user-guide/multi-pod.yaml "${kube_flags[@]}" - # Post-condition: no PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - ## kubectl apply should update configuration annotations only if apply is already called - ## 1. 
kubectl create doesn't set the annotation - # Pre-Condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: create a pod "test-pod" - kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}" - # Post-Condition: pod "test-pod" is created - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' - # Post-Condition: pod "test-pod" doesn't have configuration annotation - ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - ## 2. kubectl replace doesn't set the annotation - kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml - # Command: replace the pod "test-pod" - kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}" - # Post-Condition: pod "test-pod" is replaced - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced' - # Post-Condition: pod "test-pod" doesn't have configuration annotation - ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - ## 3. kubectl apply does set the annotation - # Command: apply the pod "test-pod" - kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}" - # Post-Condition: pod "test-pod" is applied - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied' - # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration - ## 4. kubectl replace updates an existing annotation - kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml - # Command: replace the pod "test-pod" - kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}" - # Post-Condition: pod "test-pod" is replaced - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced' - # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied) - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced - ! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]] - # Clean up - rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced - kubectl delete pods test-pod "${kube_flags[@]}" - - ## Configuration annotations should be set when --save-config is enabled - ## 1. 
kubectl create --save-config should generate configuration annotation - # Pre-Condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: create a pod "test-pod" - kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}" - # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Clean up - kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" - ## 2. kubectl edit --save-config should generate configuration annotation - # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}" - ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Command: edit the pod "test-pod" - temp_editor="${KUBE_TEMP}/tmp-editor.sh" - echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}" - chmod +x "${temp_editor}" - EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}" - # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Clean up - kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" - ## 3. kubectl replace --save-config should generate configuration annotation - # Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}" - ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Command: replace the pod "test-pod" - kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}" - # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Clean up - kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" - ## 4. kubectl run --save-config should generate configuration annotation - # Pre-Condition: no RC exists - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: create the rc "nginx" with image nginx - kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}" - # Post-Condition: rc "nginx" has configuration annotation - [[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - ## 5. kubectl expose --save-config should generate configuration annotation - # Pre-Condition: no service exists - kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: expose the rc "nginx" - kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}" - # Post-Condition: service "nginx" has configuration annotation - [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Clean up - kubectl delete rc,svc nginx - ## 6. 
kubectl autoscale --save-config should generate configuration annotation - # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' - kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" - ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Command: autoscale rc "frontend" - kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2 - # Post-Condition: hpa "frontend" has configuration annotation - [[ "$(kubectl get hpa.v1beta1.extensions frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Ensure we can interact with HPA objects in lists through both the extensions/v1beta1 and autoscaling/v1 APIs - output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") - kube::test::if_has_string "${output_message}" 'autoscaling/v1' - output_message=$(kubectl get hpa.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") - kube::test::if_has_string "${output_message}" 'extensions/v1beta1' - output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") - kube::test::if_has_string "${output_message}" 'autoscaling/v1' - # Clean up - # Note that we should delete hpa first, otherwise it may fight with the rc reaper. - kubectl delete hpa frontend "${kube_flags[@]}" - kubectl delete rc frontend "${kube_flags[@]}" - - ## kubectl create should not panic on empty string lists in a template - ERROR_FILE="${KUBE_TEMP}/validation-error" - kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true - # Post-condition: should get an error reporting the empty string - if grep -q "unexpected nil value for field" "${ERROR_FILE}"; then - kube::log::status "\"kubectl create with empty string list returns error as expected: $(cat ${ERROR_FILE})" - else - kube::log::status "\"kubectl create with empty string list returns unexpected error or non-error: $(cat ${ERROR_FILE})" - exit 1 - fi - rm "${ERROR_FILE}" - - ## kubectl apply should create the resource that doesn't exist yet - # Pre-Condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command: apply a pod "test-pod" (doesn't exist) should create this pod - kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}" - # Post-Condition: pod "test-pod" is created - kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label' - # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] - # Clean up - kubectl delete pods test-pod "${kube_flags[@]}" - - ## kubectl run should create deployments or jobs - # Pre-Condition: no Job exists - kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl run pi --generator=job/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}" - # Post-Condition: Job "pi" is created - kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:' - # Clean up - kubectl delete jobs pi "${kube_flags[@]}" - # Command - kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" 
--restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}" - # Post-Condition: Job "pi" is created - kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:' - # Clean up - kubectl delete jobs pi "${kube_flags[@]}" - # Post-condition: no pods exist. - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Pre-Condition: no Deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl run nginx "--image=$IMAGE_NGINX" --generator=deployment/v1beta1 "${kube_flags[@]}" - # Post-Condition: Deployment "nginx" is created - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:' - # Clean up - kubectl delete deployment nginx "${kube_flags[@]}" - - ############### - # Kubectl get # - ############### - - ### Test retrieval of non-existing pods - # Pre-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}") - # Post-condition: POD abc should error since it doesn't exist - kube::test::if_has_string "${output_message}" 'pods "abc" not found' - - ### Test retrieval of non-existing POD with output flag specified - # Pre-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name) - # Post-condition: POD abc should error since it doesn't exist - kube::test::if_has_string "${output_message}" 'pods "abc" not found' - - ### Test retrieval of non-existing POD with json output flag specified - # Pre-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - output_message=$(! 
kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json) - # Post-condition: POD abc should error since it doesn't exist - kube::test::if_has_string "${output_message}" 'pods "abc" not found' - # Post-condition: make sure we don't display an empty List - if kube::test::if_has_string "${output_message}" 'List'; then - echo 'Unexpected List output' - echo "${LINENO} $(basename $0)" - exit 1 - fi - - ##################################### - # Third Party Resources # - ##################################### - create_and_use_new_namespace - kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__ -{ - "kind": "ThirdPartyResource", - "apiVersion": "extensions/v1beta1", - "metadata": { - "name": "foo.company.com" - }, - "versions": [ - { - "name": "v1" - } - ] -} -__EOF__ - - # Post-Condition: assertion object exist - kube::test::get_object_assert thirdpartyresources "{{range.items}}{{$id_field}}:{{end}}" 'foo.company.com:' - - kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/apis/company.com/v1" "third party api" - - # Test that we can list this new third party resource - kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" '' - - # Test that we can create a new resource of type Foo - kubectl "${kube_flags[@]}" create -f - "${kube_flags[@]}" << __EOF__ - { - "kind": "Foo", - "apiVersion": "company.com/v1", - "metadata": { - "name": "test" - }, - "some-field": "field1", - "other-field": "field2" -} -__EOF__ - - # Test that we can list this new third party resource - kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:' - - # Delete the resource - kubectl "${kube_flags[@]}" delete foos test - - # Make sure it's gone - kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" '' - - # teardown - kubectl delete thirdpartyresources foo.company.com "${kube_flags[@]}" - - ##################################### - # Recursive Resources via directory # - ##################################### - - ### Create multiple busybox PODs recursively from directory of YAML files - # Pre-condition: no POD exists - create_and_use_new_namespace - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - kube::test::if_has_string "${output_message}" 'error validating data: kind not set' - - ## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh - chmod +x /tmp/tmp-editor.sh - output_message=$(! 
EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are edited, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - # cleaning - rm /tmp/tmp-editor.sh - - ## Replace multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:' - kube::test::if_has_string "${output_message}" 'error validating data: kind not set' - - ## Describe multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error - kube::test::if_has_string "${output_message}" "app=busybox0" - kube::test::if_has_string "${output_message}" "app=busybox1" - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ## Annotate multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ## Apply multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:' - kube::test::if_has_string "${output_message}" 'error validating data: kind not set' - - ## Convert multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! 
kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ## Get multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}") - # Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up - kube::test::if_has_string "${output_message}" "busybox0:busybox1:" - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ## Label multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}") - echo $output_message - # Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up - kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ## Patch multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}") - echo $output_message - # Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up - kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ### Delete multiple busybox PODs recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ### Create replication controller recursively from directory of YAML files - # Pre-condition: no replication controller exists - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - ! 
kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" - # Post-condition: frontend replication controller is created - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - - ### Autoscale multiple replication controllers recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 - # replica each - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1' - kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1' - # Command - output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox replication controllers are autoscaled - # with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error - kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 ' - kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 ' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - kubectl delete hpa busybox0 "${kube_flags[@]}" - kubectl delete hpa busybox1 "${kube_flags[@]}" - - ### Expose multiple replication controllers as service recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 - # replica each - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1' - kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1' - # Command - output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}") - # Post-condition: service exists and the port is unnamed - kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" ' 80' - kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" ' 80' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ### Scale multiple replication controllers recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 replication controllers exist & 1 - # replica each - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1' - kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1' - # Command - output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}") - # Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error - kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2' - kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2' - kube::test::if_has_string "${output_message}" "Object 'Kind' is missing" - - ### Delete multiple busybox replication controllers recursively from directory of YAML files - # Pre-condition: busybox0 & busybox1 PODs exist - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:' - # Command - output_message=$(! 
-  output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 2>&1 "${kube_flags[@]}")
-  # Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-
-  ### Rollout on multiple deployments recursively
-  # Pre-condition: no deployments exist
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  # Create deployments (revision 1) recursively from directory of YAML files
-  ! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
-  ## Rollback the deployments to revision 1 recursively
-  output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
-  # Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  ## Pause the deployments recursively
-  PRESERVE_ERR_FILE=true
-  kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
-  output_message=$(cat ${ERROR_FILE})
-  # Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
-  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  ## Resume the deployments recursively
-  kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
-  output_message=$(cat ${ERROR_FILE})
-  # Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
-  kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "::"
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  ## Retrieve the rollout history of the deployments recursively
-  output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
-  # Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
-  kube::test::if_has_string "${output_message}" "nginx0-deployment"
-  kube::test::if_has_string "${output_message}" "nginx1-deployment"
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  # Clean up
-  unset PRESERVE_ERR_FILE
-  rm "${ERROR_FILE}"
-  ! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0
-  sleep 1
-
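`kubectl-with-retry` together with `PRESERVE_ERR_FILE`/`ERROR_FILE` is a harness convenience: pause/resume can race with the deployment controller, so the command is retried, and its stderr is kept on disk so the caller can still grep it for the nginx2 parse error after a retry succeeds. A minimal sketch of that pattern (hypothetical; the real wrapper's retry count and backoff may differ):

```bash
# Hypothetical sketch of the retry-with-captured-stderr pattern used above.
ERROR_FILE="${KUBE_TEMP:-/tmp}/kubectl-retry-error.txt"
kubectl-with-retry() {
  local attempt
  for attempt in 1 2 3; do
    # Keep stderr from the latest attempt so callers can inspect it later.
    if kubectl "$@" 2> "${ERROR_FILE}"; then
      return 0
    fi
    sleep "${attempt}"
  done
  return 1
}
```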
-  ### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
-  # Pre-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  # Create replication controllers recursively from directory of YAML files
-  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
-  # Command
-  ## Attempt to rollback the replication controllers to revision 1 recursively
-  output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
-  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
-  kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  ## Attempt to pause the replication controllers recursively
-  output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
-  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
-  kube::test::if_has_string "${output_message}" 'error when pausing "hack/testdata/recursive/rc/busybox.yaml'
-  kube::test::if_has_string "${output_message}" 'error when pausing "hack/testdata/recursive/rc/rc/busybox.yaml'
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  ## Attempt to resume the replication controllers recursively
-  output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
-  # Post-condition: busybox0 & busybox1 should error as they are RCs, and since busybox2 is malformed, it should error
-  kube::test::if_has_string "${output_message}" 'error when resuming "hack/testdata/recursive/rc/busybox.yaml'
-  kube::test::if_has_string "${output_message}" 'error when resuming "hack/testdata/recursive/rc/rc/busybox.yaml'
-  kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
-  # Clean up
-  ! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0
-  sleep 1
-
-  ##############
-  # Namespaces #
-  ##############
-
-  ### Create a new namespace
-  # Pre-condition: only the "default" namespace exists
-  # The pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with the same name in the test.
-  # kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
-  # Command
-  kubectl create namespace my-namespace
-  # Post-condition: namespace 'my-namespace' is created.
-  kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
-  # Clean up
-  kubectl delete namespace my-namespace
-
-  ######################
-  # Pods in Namespaces #
-  ######################
-
-  ### Create a new namespace
-  # Pre-condition: the 'other' namespace does not exist
-  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
-  # Command
-  kubectl create namespace other
-  # Post-condition: namespace 'other' is created.
-  kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
-
-  ### Create POD valid-pod in specific namespace
-  # Pre-condition: no POD exists
-  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create "${kube_flags[@]}" --namespace=other -f docs/admin/limitrange/valid-pod.yaml
-  # Post-condition: valid-pod POD is created
-  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
-
-  ### Delete POD valid-pod in specific namespace
-  # Pre-condition: valid-pod POD exists
-  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
-  # Command
-  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
-  # Post-condition: valid-pod POD doesn't exist
-  kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Clean up
-  kubectl delete namespace other
-
-  ###########
-  # Secrets #
-  ###########
-
-  ### Create a new namespace
-  # Pre-condition: the test-secrets namespace does not exist
-  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
-  # Command
-  kubectl create namespace test-secrets
-  # Post-condition: namespace 'test-secrets' is created.
-  kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
-
-  ### Create a generic secret in a specific namespace
-  # Pre-condition: no SECRET exists
-  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
-  # Post-condition: secret exists and has expected values
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
-  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
-  # Clean-up
-  kubectl delete secret test-secret --namespace=test-secrets
-
-  ### Create a docker-registry secret in a specific namespace
-  # Pre-condition: no SECRET exists
-  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='test-user@test.com' --namespace=test-secrets
-  # Post-condition: secret exists and has expected values
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
-  [[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
-  # Clean-up
-  kubectl delete secret test-secret --namespace=test-secrets
-
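The grep assertions above work because secret values are stored base64-encoded in the API object: `--from-literal=key1=value1` surfaces in the YAML as `key1: dmFsdWUx`. The encoding is easy to verify by hand:

```bash
echo -n 'value1' | base64            # prints: dmFsdWUx
echo 'dmFsdWUx' | base64 --decode    # prints: value1
```

The same rule explains the stringData assertions a little further down: the plaintext `"k2": "v2"` is merged into `.data` as `djI=` (`echo -n v2 | base64`) on write, and the write-only `stringData` field comes back empty.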
-  ### Create a tls secret
-  # Pre-condition: no SECRET exists
-  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
-  kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
-  # Clean-up
-  kubectl delete secret test-secret --namespace=test-secrets
-
-  # Create a secret using stringData
-  kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
-{
-  "kind": "Secret",
-  "apiVersion": "v1",
-  "metadata": {
-    "name": "secret-string-data"
-  },
-  "data": {
-    "k1":"djE=",
-    "k2":""
-  },
-  "stringData": {
-    "k2":"v2"
-  }
-}
-__EOF__
-  # Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
-  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
-  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
-  kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' ''
-  # Clean up
-  kubectl delete secret secret-string-data --namespace=test-secrets
-
-  ### Create a secret using output flags
-  # Pre-condition: no secret exists
-  kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  [[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
-  ## Clean-up
-  kubectl delete secret test-secret --namespace=test-secrets
-  # Clean up
-  kubectl delete namespace test-secrets
-
-  #############
-  # ConfigMap #
-  #############
-
-  kubectl create -f docs/user-guide/configmap/configmap.yaml
-  kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
-  kubectl delete configmap test-configmap "${kube_flags[@]}"
-
-  ### Create a new namespace
-  # Pre-condition: the test-configmaps namespace does not exist
-  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
-  # Command
-  kubectl create namespace test-configmaps
-  # Post-condition: namespace 'test-configmaps' is created.
-  kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
-
-  ### Create a generic configmap in a specific namespace
-  # Pre-condition: no configmap exists in the test-configmaps namespace
-  kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
-  # Post-condition: configmap exists and has expected values
-  kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
-  [[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
-  # Clean-up
-  kubectl delete configmap test-configmap --namespace=test-configmaps
-  kubectl delete namespace test-configmaps
-
-  ####################
-  # Service Accounts #
-  ####################
-
-  ### Create a new namespace
-  # Pre-condition: the test-service-accounts namespace does not exist
-  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
-  # Command
-  kubectl create namespace test-service-accounts
-  # Post-condition: namespace 'test-service-accounts' is created.
-  kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'
-
-  ### Create a service account in a specific namespace
-  # Command
-  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
-  # Post-condition: the service account exists and has the expected name
-  kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'
-  # Clean-up
-  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
-  # Clean up
-  kubectl delete namespace test-service-accounts
-
-  #################
-  # Pod templates #
-  #################
-
-  ### Create PODTEMPLATE
-  # Pre-condition: no PODTEMPLATE exists
-  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
-  # Command
-  kubectl create -f docs/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
-  # Post-condition: nginx PODTEMPLATE is available
-  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
-
-  ### Printing pod templates works
-  kubectl get podtemplates "${kube_flags[@]}"
-  [[ "$(kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep nginx)" ]]
-
-  ### Delete nginx pod template by name
-  # Pre-condition: nginx pod template is available
-  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
-  # Command
-  kubectl delete podtemplate nginx "${kube_flags[@]}"
-  # Post-condition: no pod templates exist
-  kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''
-
-  ############
-  # Services #
-  ############
-
-  # Switch back to the default namespace
-  kubectl config set-context "${CONTEXT}" --namespace=""
-  kube::log::status "Testing kubectl(${version}:services)"
-
-  ### Create redis-master service from JSON
-  # Pre-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-  # Command
-  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
-  # Post-condition: redis-master service exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
-  # Describe command should print detailed information
-  kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
-  # Describe command should print events information by default
-  kube::test::describe_object_events_assert services 'redis-master'
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_object_events_assert services 'redis-master' false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_object_events_assert services 'redis-master' true
-  # Describe command (resource only) should print detailed information
-  kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
-  # Describe command should print events information by default
-  kube::test::describe_resource_events_assert services
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_resource_events_assert services false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_resource_events_assert services true
-
-  ### Dump current redis-master service
-  output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
-
-  ### Delete redis-master-service by id
-  # Pre-condition: redis-master service exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
-  # Command
-  kubectl delete service redis-master "${kube_flags[@]}"
-  # Post-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-
-  ### Create redis-master-service from dumped JSON
-  # Pre-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-  # Command
-  echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
-  # Post-condition: redis-master service is created
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
-
-  ### Create service-${version}-test service
-  # Pre-condition: redis-master service exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
-  # Command
-  kubectl create -f - "${kube_flags[@]}" << __EOF__
-{
-  "kind": "Service",
-  "apiVersion": "v1",
-  "metadata": {
-    "name": "service-${version}-test"
-  },
-  "spec": {
-    "ports": [
-      {
-        "protocol": "TCP",
-        "port": 80,
-        "targetPort": 80
-      }
-    ]
-  }
-}
-__EOF__
-  # Post-condition: service-${version}-test service is created
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
-
-  ### Identity: dumping and re-applying a service should be a no-op
-  kubectl get service "${kube_flags[@]}" service-${version}-test -o json | kubectl replace "${kube_flags[@]}" -f -
-
-  ### Delete services by id
-  # Pre-condition: service-${version}-test exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
-  # Command
-  kubectl delete service redis-master "${kube_flags[@]}"
-  kubectl delete service "service-${version}-test" "${kube_flags[@]}"
-  # Post-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-
-  ### Create two services
-  # Pre-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-  # Command
-  kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
-  kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
-  # Post-condition: redis-master and redis-slave services are created
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
-
-  ### Custom columns can be specified
-  # Pre-condition: generate output using custom columns
-  output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
-  # Post-condition: output should contain the name column values
-  kube::test::if_has_string "${output_message}" 'redis-master'
-
-  ### Delete multiple services at once
-  # Pre-condition: redis-master and redis-slave services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
-  # Command
-  kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
-  # Post-condition: Only the default kubernetes services exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-
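For reference, the custom-columns test earlier in this block produces tabular output with one header row and one row per object, so matching on `redis-master` is enough to prove the NAME column rendered. Illustrative output (resource versions will differ from run to run):

```bash
kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion
# NAME           RSRC
# kubernetes     8
# redis-master   283
# redis-slave    284
```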
-  ###########################
-  # Replication controllers #
-  ###########################
-
-  kube::log::status "Testing kubectl(${version}:replicationcontrollers)"
-
-  ### Create and stop controller, make sure it doesn't leak pods
-  # Pre-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  kubectl delete rc frontend "${kube_flags[@]}"
-  # Post-condition: no pods from frontend controller
-  kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Create replication controller frontend from JSON
-  # Pre-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  # Post-condition: frontend replication controller is created
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # Describe command should print detailed information
-  kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
-  # Describe command should print events information by default
-  kube::test::describe_object_events_assert rc 'frontend'
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_object_events_assert rc 'frontend' false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_object_events_assert rc 'frontend' true
-  # Describe command (resource only) should print detailed information
-  kube::test::describe_resource_assert rc "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
-  # Describe command should print events information by default
-  kube::test::describe_resource_events_assert rc
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_resource_events_assert rc false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_resource_events_assert rc true
-
-  ### Scale replication controller frontend with current-replicas and replicas
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
-  # Command
-  kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
-  # Post-condition: 2 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
-
-  ### Scale replication controller frontend with (wrong) current-replicas and replicas
-  # Pre-condition: 2 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
-  # Command
-  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
-  # Post-condition: nothing changed
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
-
-  ### Scale replication controller frontend with replicas only
-  # Pre-condition: 2 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
-  # Command
-  kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
-  # Post-condition: 3 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
-
-  ### Scale replication controller from JSON with replicas only
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
-  # Command
-  kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  # Post-condition: 2 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
-  # Clean-up
-  kubectl delete rc frontend "${kube_flags[@]}"
-
-  ### Scale multiple replication controllers
-  kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
-  kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
-  # Command
-  kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
-  # Post-condition: 4 replicas each
-  kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
-  kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
-  # Clean-up
-  kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
-
-  ### Scale a job
-  kubectl create -f docs/user-guide/job.yaml "${kube_flags[@]}"
-  # Command
-  kubectl scale --replicas=2 job/pi
-  # Post-condition: parallelism is 2 for the pi job
-  kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
-  # Clean-up
-  kubectl delete job/pi "${kube_flags[@]}"
-
-  ### Scale a deployment
-  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
-  # Command
-  kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
-  # Post-condition: 1 replica for nginx-deployment
-  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
-  # Clean-up
-  kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
-
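`--current-replicas` turns the resize into a conditional update: the server applies the new size only if the observed size matches the stated precondition, which is why the mismatched call at the top of this block fails and the replica count is asserted unchanged. In isolation the behavior looks like this (a sketch; assumes an rc named frontend currently at 2 replicas):

```bash
kubectl scale --current-replicas=2 --replicas=3 rc frontend   # succeeds: precondition holds
kubectl scale --current-replicas=9 --replicas=1 rc frontend \
  || echo "precondition failed; replica count left untouched"
```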
-  ### Expose a deployment as a service
-  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
-  # Command
-  kubectl expose deployment/nginx-deployment
-  # Post-condition: service exists and exposes the deployment port (80)
-  kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
-  # Clean-up
-  kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
-
-  ### Expose replication controller as service
-  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
-  # Command
-  kubectl expose rc frontend --port=80 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is unnamed
-  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" ' 80'
-  # Command
-  kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is unnamed
-  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" ' 443'
-  # Command
-  kubectl create -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
-  kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is unnamed
-  kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" ' 444'
-  # Create a service using the service/v1 generator
-  kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is named default.
-  kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
-  # Verify that expose service works without specifying a port.
-  kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
-  # Post-condition: service exists with the same port as the original service.
-  kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
-  # Clean up services
-  kubectl delete pod valid-pod "${kube_flags[@]}"
-  kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
-
-  ### Expose negative invalid resource test
-  # Pre-condition: none needed
-  # Command
-  output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
-  # Post-condition: the error message contains "cannot expose"
-  kube::test::if_has_string "${output_message}" 'cannot expose'
-
-  ### Try to generate a service with an invalid name (exceeding the maximum valid size)
-  # Pre-condition: use the --name flag
-  output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name --port=8081 2>&1 "${kube_flags[@]}")
-  # Post-condition: should fail due to invalid name
-  kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
-  # Pre-condition: default run without the --name flag; should succeed by truncating the inherited name
-  output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
-  # Post-condition: inherited name from pod has been truncated
-  kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostnam\" exposed'
-  # Clean-up
-  kubectl delete svc kubernetes-serve-hostnam "${kube_flags[@]}"
-
-  ### Expose a multiport object as a new service
-  # Pre-condition: don't use the --port flag
-  output_message=$(kubectl expose -f docs/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
-  # Post-condition: expose succeeded
-  kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
-  # Post-condition: generated service has both ports from the exposed pod
-  kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
-  kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 4001'
-  # Clean-up
-  kubectl delete svc etcd-server "${kube_flags[@]}"
-
-  ### Delete replication controller with id
-  # Pre-condition: frontend replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # Command
-  kubectl delete rc frontend "${kube_flags[@]}"
-  # Post-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Create two replication controllers
-  # Pre-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
-  # Post-condition: frontend and redis-slave replication controllers exist
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
-
-  ### Delete multiple controllers at once
-  # Pre-condition: frontend and redis-slave replication controllers exist
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
-  # Command
-  kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
-  # Post-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Auto scale replication controller
-  # Pre-condition: no replication controller exists
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # autoscale 1~2 pods, CPU utilization 70%, rc specified by file
-  kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
-  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
-  kubectl delete hpa frontend "${kube_flags[@]}"
-  # autoscale 1~2 pods, CPU utilization 70%, rc specified by file, using the old generator
-  kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70 --generator=horizontalpodautoscaler/v1beta1
-  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
-  kubectl delete hpa frontend "${kube_flags[@]}"
-  # autoscale 2~3 pods, no CPU utilization specified, rc specified by name
-  kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
-  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 '
-  kubectl delete hpa frontend "${kube_flags[@]}"
-  # autoscale 2~3 pods, no CPU utilization specified, rc specified by name, using the old generator
-  kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3 --generator=horizontalpodautoscaler/v1beta1
-  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 '
-  kubectl delete hpa frontend "${kube_flags[@]}"
-  # autoscale without specifying --max should fail
-  ! kubectl autoscale rc frontend "${kube_flags[@]}"
-  # Clean up
-  kubectl delete rc frontend "${kube_flags[@]}"
-
-  ###############
-  # Deployments #
-  ###############
-
-  ### Auto scale deployment
-  # Pre-condition: no deployment exists
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f docs/user-guide/deployment.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
-  # autoscale 2~3 pods, no CPU utilization specified
-  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
-  kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 '
-  # Clean up
-  # Note that we should delete the hpa first, otherwise it may fight with the deployment reaper.
-  kubectl delete hpa nginx-deployment "${kube_flags[@]}"
-  kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
-
-  ### Rollback a deployment
-  # Pre-condition: no deployment exists
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  # Create a deployment (revision 1)
-  kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  # Rollback to revision 1 - should be a no-op
-  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  # Update the deployment (revision 2)
-  kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
-  # Rollback to revision 1
-  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
-  sleep 1
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  # Rollback to revision 1000000 - should be a no-op
-  kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  # Rollback to the last revision
-  kubectl rollout undo deployment nginx "${kube_flags[@]}"
-  sleep 1
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
-  # Pause the deployment
-  kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
-  # A paused deployment cannot be rolled back
-  ! kubectl rollout undo deployment nginx "${kube_flags[@]}"
-  # Resume the deployment
-  kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
-  # The resumed deployment can now be rolled back
-  kubectl rollout undo deployment nginx "${kube_flags[@]}"
-  # Clean up
-  kubectl delete deployment nginx "${kube_flags[@]}"
-
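The pause/resume sequence above is worth calling out: a paused deployment rejects `rollout undo` until it is resumed, so the `!` negation asserts the failure. Condensed (illustrative):

```bash
kubectl rollout pause deployment nginx
kubectl rollout undo deployment nginx || echo "undo rejected while paused"
kubectl rollout resume deployment nginx
kubectl rollout undo deployment nginx    # accepted once resumed
```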
-  ### Set image of a deployment
-  # Pre-condition: no deployment exists
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Create a deployment
-  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
-  # Set the deployment's image
-  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
-  # Setting a non-existing container should fail
-  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
-  # Set image of deployments without specifying a name
-  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
-  # Set image of a deployment specified by file
-  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
-  # Set image of a local file without talking to the server
-  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_PERL}:"
-  # Set image of all containers of the deployment
-  kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  kube::test::get_object_assert deployment "{{range.items}}{{$deployment_second_image_field}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
-  # Clean up
-  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
-
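Note the `--local -o yaml` case in the block above: the mutation is computed client-side from the file and printed, without contacting the API server, which is why the subsequent assertions still see the R2 image on the live deployment. Standalone (illustrative; `nginx:edge` is a placeholder image):

```bash
# Prints the modified manifest; the cluster object is left untouched.
kubectl set image -f hack/testdata/deployment-multicontainer.yaml \
  nginx=nginx:edge --local -o yaml
```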
-  ################
-  # Replica Sets #
-  ################
-
-  kube::log::status "Testing kubectl(${version}:replicasets)"
-
-  ### Create and stop a replica set, make sure it doesn't leak pods
-  # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  kubectl delete rs frontend "${kube_flags[@]}"
-  # Post-condition: no pods from frontend replica set
-  kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Create replica set frontend from YAML
-  # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  # Post-condition: frontend replica set is created
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # Describe command should print detailed information
-  kube::test::describe_object_assert rs 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
-  # Describe command should print events information by default
-  kube::test::describe_object_events_assert rs 'frontend'
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_object_events_assert rs 'frontend' false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_object_events_assert rs 'frontend' true
-  # Describe command (resource only) should print detailed information
-  kube::test::describe_resource_assert rs "Name:" "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"
-  # Describe command should print events information by default
-  kube::test::describe_resource_events_assert rs
-  # Describe command should not print events information when show-events=false
-  kube::test::describe_resource_events_assert rs false
-  # Describe command should print events information when show-events=true
-  kube::test::describe_resource_events_assert rs true
-
-  ### Scale replica set frontend with current-replicas and replicas
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
-  # Command
-  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
-  # Post-condition: 2 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
-  # Clean-up
-  kubectl delete rs frontend "${kube_flags[@]}"
-
-  ### Expose replica set as service
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
-  # Command
-  kubectl expose rs frontend --port=80 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is unnamed
-  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" ' 80'
-  # Create a service using the service/v1 generator
-  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
-  # Post-condition: service exists and the port is named default.
-  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
-  # Clean up services
-  kubectl delete service frontend{,-2} "${kube_flags[@]}"
-
-  ### Delete replica set with id
-  # Pre-condition: frontend replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # Command
-  kubectl delete rs frontend "${kube_flags[@]}"
-  # Post-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Create two replica sets
-  # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
-  # Post-condition: frontend and redis-slave replica sets exist
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
-
-  ### Delete multiple replica sets at once
-  # Pre-condition: frontend and redis-slave replica sets exist
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
-  # Command
-  kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
-  # Post-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ### Auto scale replica set
-  # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
-  # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
-  kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
-  kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
-  kubectl delete hpa frontend "${kube_flags[@]}"
"${kube_flags[@]}" - # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name - kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3 - kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 ' - kubectl delete hpa frontend "${kube_flags[@]}" - # autoscale without specifying --max should fail - ! kubectl autoscale rs frontend "${kube_flags[@]}" - # Clean up - kubectl delete rs frontend "${kube_flags[@]}" - - - ###################### - # Lists # - ###################### - - kube::log::status "Testing kubectl(${version}:lists)" - - ### Create a List with objects from multiple versions - # Command - kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}" - - ### Delete the List with objects from multiple versions - # Command - kubectl delete service/list-service-test deployment/list-deployment-test - - - ###################### - # Multiple Resources # - ###################### - - kube::log::status "Testing kubectl(${version}:multiple resources)" - - FILES="hack/testdata/multi-resource-yaml - hack/testdata/multi-resource-list - hack/testdata/multi-resource-json - hack/testdata/multi-resource-rclist - hack/testdata/multi-resource-svclist" - YAML=".yaml" - JSON=".json" - for file in $FILES; do - if [ -f $file$YAML ] - then - file=$file$YAML - replace_file="${file%.yaml}-modify.yaml" - else - file=$file$JSON - replace_file="${file%.json}-modify.json" - fi - - has_svc=true - has_rc=true - two_rcs=false - two_svcs=false - if [[ "${file}" == *rclist* ]]; then - has_svc=false - two_rcs=true - fi - if [[ "${file}" == *svclist* ]]; then - has_rc=false - two_svcs=true - fi - - ### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files: - ### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation - ### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type - echo "Testing with file ${file} and replace with file ${replace_file}" - # Pre-condition: no service (other than default kubernetes services) or replication controller exists - kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create -f "${file}" "${kube_flags[@]}" - # Post-condition: mock service (and mock2) exists - if [ "$has_svc" = true ]; then - if [ "$two_svcs" = true ]; then - kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:mock2:' - else - kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:' - fi - fi - # Post-condition: mock rc (and mock2) exists - if [ "$has_rc" = true ]; then - if [ "$two_rcs" = true ]; then - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:' - else - kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:' - fi - fi - # Command - kubectl get -f "${file}" "${kube_flags[@]}" - # Command: watching multiple resources should return "not supported" error - WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error" - kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> ${WATCH_ERROR_FILE} || true - if ! 
grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then - kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat ${WATCH_ERROR_FILE})" "1" - fi - kubectl describe -f "${file}" "${kube_flags[@]}" - # Command - kubectl replace -f $replace_file --force --cascade "${kube_flags[@]}" - # Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced - if [ "$has_svc" = true ]; then - kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced' - if [ "$two_svcs" = true ]; then - kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced' - fi - fi - if [ "$has_rc" = true ]; then - kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced' - if [ "$two_rcs" = true ]; then - kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced' - fi - fi - # Command: kubectl edit multiple resources - temp_editor="${KUBE_TEMP}/tmp-editor.sh" - echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \$@" > "${temp_editor}" - chmod +x "${temp_editor}" - EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}" - # Post-condition: mock service (and mock2) and mock rc (and mock2) are edited - if [ "$has_svc" = true ]; then - kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited' - if [ "$two_svcs" = true ]; then - kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited' - fi - fi - if [ "$has_rc" = true ]; then - kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited' - if [ "$two_rcs" = true ]; then - kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited' - fi - fi - # cleaning - rm "${temp_editor}" - # Command - # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label" - # fails on some, but not all, of the resources, retries will fail because it tries to modify - # existing labels. - kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}" - # Post-condition: mock service and mock rc (and mock2) are labeled - if [ "$has_svc" = true ]; then - kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true' - if [ "$two_svcs" = true ]; then - kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true' - fi - fi - if [ "$has_rc" = true ]; then - kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true' - if [ "$two_rcs" = true ]; then - kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true' - fi - fi - # Command - # Command - # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate" - # fails on some, but not all, of the resources, retries will fail because it tries to modify - # existing annotations. 
-    # Command
-    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
-    # fails on some, but not all, of the resources, retries will fail because it tries to modify
-    # existing labels.
-    kubectl-with-retry label -f $file labeled=true --overwrite "${kube_flags[@]}"
-    # Post-condition: mock service (and mock2) and mock rc (and mock2) are labeled
-    if [ "$has_svc" = true ]; then
-      kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
-      if [ "$two_svcs" = true ]; then
-        kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
-      fi
-    fi
-    if [ "$has_rc" = true ]; then
-      kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
-      if [ "$two_rcs" = true ]; then
-        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
-      fi
-    fi
-    # Command
-    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
-    # fails on some, but not all, of the resources, retries will fail because it tries to modify
-    # existing annotations.
-    kubectl-with-retry annotate -f $file annotated=true --overwrite "${kube_flags[@]}"
-    # Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
-    if [ "$has_svc" = true ]; then
-      kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
-      if [ "$two_svcs" = true ]; then
-        kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
-      fi
-    fi
-    if [ "$has_rc" = true ]; then
-      kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
-      if [ "$two_rcs" = true ]; then
-        kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
-      fi
-    fi
-    # Clean up the resources created by this iteration
-    kubectl delete -f "${file}" "${kube_flags[@]}"
-  done
-
-  ##############################
-  # Multiple Resources via URL #
-  ##############################
-
-  # Pre-condition: no service (other than the default kubernetes services) or replication controller exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
-  # Post-condition: service(mock) and rc(mock) exist
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:mock:'
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
-  # Clean up
-  kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
-  # Post-condition: no service (other than the default kubernetes services) or replication controller exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
-  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ######################
-  # Persistent Volumes #
-  ######################
-
-  ### Create and delete persistent volume examples
-  # Pre-condition: no persistent volumes currently exist
-  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f docs/user-guide/persistent-volumes/volumes/local-01.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0001:'
-  kubectl delete pv pv0001 "${kube_flags[@]}"
-  kubectl create -f docs/user-guide/persistent-volumes/volumes/local-02.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0002:'
-  kubectl delete pv pv0002 "${kube_flags[@]}"
-  kubectl create -f docs/user-guide/persistent-volumes/volumes/gce.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" 'pv0003:'
-  kubectl delete pv pv0003 "${kube_flags[@]}"
-  # Post-condition: no PVs
-  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
-
-  ############################
-  # Persistent Volume Claims #
-  ############################
-
-  ### Create and delete persistent volume claim examples
-  # Pre-condition: no persistent volume claims currently exist
-  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
-  # Command
-  kubectl create -f docs/user-guide/persistent-volumes/claims/claim-01.yaml "${kube_flags[@]}"
-  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-1:'
"${kube_flags[@]}" - - kubectl create -f docs/user-guide/persistent-volumes/claims/claim-02.yaml "${kube_flags[@]}" - kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-2:' - kubectl delete pvc myclaim-2 "${kube_flags[@]}" - - kubectl create -f docs/user-guide/persistent-volumes/claims/claim-03.json "${kube_flags[@]}" - kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" 'myclaim-3:' - kubectl delete pvc myclaim-3 "${kube_flags[@]}" - # Post-condition: no PVCs - kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" '' - - - - ######### - # Nodes # - ######### - - kube::log::status "Testing kubectl(${version}:nodes)" - - kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:' - - kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:" - # Describe command should print events information by default - kube::test::describe_object_events_assert nodes "127.0.0.1" - # Describe command should not print events information when show-events=false - kube::test::describe_object_events_assert nodes "127.0.0.1" false - # Describe command should print events information when show-events=true - kube::test::describe_object_events_assert nodes "127.0.0.1" true - # Describe command (resource only) should print detailed information - kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:" - # Describe command should print events information by default - kube::test::describe_resource_events_assert nodes - # Describe command should not print events information when show-events=false - kube::test::describe_resource_events_assert nodes false - # Describe command should print events information when show-events=true - kube::test::describe_resource_events_assert nodes true - - ### kubectl patch update can mark node unschedulable - # Pre-condition: node is schedulable - kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' - kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}' - # Post-condition: node is unschedulable - kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true' - kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}' - # Post-condition: node is schedulable - kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '' - - - ##################### - # Retrieve multiple # - ##################### - - kube::log::status "Testing kubectl(${version}:multiget)" - kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:' - - - ##################### - # Resource aliasing # - ##################### - - kube::log::status "Testing resource aliasing" - kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}" - kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}" - - object="all -l'app=cassandra'" - request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}" - - # all 4 cassandra's might not be in the request immediately... 
- kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \ - kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \ - kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:' - - kubectl delete all -l app=cassandra "${kube_flags[@]}" - - ########### - # Explain # - ########### - - kube::log::status "Testing kubectl(${version}:explain)" - kubectl explain pods - # shortcuts work - kubectl explain po - kubectl explain po.status.message - - - ########### - # Swagger # - ########### - - if [[ -n "${version}" ]]; then - # Verify schema - file="${KUBE_TEMP}/schema-${version}.json" - curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/${version}" > "${file}" - [[ "$(grep "list of returned" "${file}")" ]] - [[ "$(grep "List of pods" "${file}")" ]] - [[ "$(grep "Watch for changes to the described resources" "${file}")" ]] - fi - - ##################### - # Kubectl --sort-by # - ##################### - - ### sort-by should not panic if no pod exists - # Pre-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl get pods --sort-by="{metadata.name}" - kubectl get pods --sort-by="{metadata.creationTimestamp}" - - ############################ - # Kubectl --all-namespaces # - ############################ - - # Pre-condition: the "default" namespace exists - kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:' - - ### Create POD - # Pre-condition: no POD exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - # Command - kubectl create "${kube_flags[@]}" -f docs/admin/limitrange/valid-pod.yaml - # Post-condition: valid-pod is created - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - - ### Verify a specific namespace is ignored when all-namespaces is provided - # Command - kubectl get pods --all-namespaces --namespace=default - - ### Clean up - # Pre-condition: valid-pod exists - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' - # Command - kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 - # Post-condition: valid-pod doesn't exist - kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' - - kube::test::clear_all -} - -runTests "v1" - -kube::log::status "TEST PASSED" +echo "NOTE: $0 has been replaced by 'make test-cmd'" +echo +echo "The equivalent of this invocation is: " +echo " make test-cmd ${ARGHELP}" +echo +echo +make --no-print-directory -C "${KUBE_ROOT}" test-cmd diff --git a/hack/test-go.sh b/hack/test-go.sh index 635445b2883da..a1e4e92189a72 100755 --- a/hack/test-go.sh +++ b/hack/test-go.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2014 The Kubernetes Authors. +# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,269 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. +# This script is a vestigial redirection. Please do not add "real" logic. + set -o errexit set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -source "${KUBE_ROOT}/hack/lib/init.sh" - -kube::golang::setup_env - -kube::test::find_dirs() { - ( - cd ${KUBE_ROOT} - find . 
diff --git a/hack/test-go.sh b/hack/test-go.sh
index 635445b2883da..a1e4e92189a72 100755
--- a/hack/test-go.sh
+++ b/hack/test-go.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2014 The Kubernetes Authors.
+# Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,269 +14,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# This script is a vestigial redirection. Please do not add "real" logic.
+
 set -o errexit
 set -o nounset
 set -o pipefail
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
-source "${KUBE_ROOT}/hack/lib/init.sh"
-
-kube::golang::setup_env
-
-kube::test::find_dirs() {
-  (
-    cd ${KUBE_ROOT}
-    find . -not \( \
-        \( \
-          -path './_artifacts/*' \
-          -o -path './_output/*' \
-          -o -path './_gopath/*' \
-          -o -path './contrib/podex/*' \
-          -o -path './output/*' \
-          -o -path './release/*' \
-          -o -path './target/*' \
-          -o -path './test/e2e/*' \
-          -o -path './test/e2e_node/*' \
-          -o -path './test/integration/*' \
-          -o -path './test/component/scheduler/perf/*' \
-          -o -path './third_party/*'\
-          -o -path './vendor/*'\
-        \) -prune \
-      \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./||' | sort -u
-  )
-}
-
-KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
-KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
-KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
-# How many 'go test' instances to run simultaneously when running tests in
-# coverage mode.
-KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
-KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
-# Set to the goveralls binary path to report coverage results to Coveralls.io.
-KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
-# Lists of API Versions of each groups that should be tested, groups are
-# separated by comma, lists are separated by semicolon. e.g.,
-# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
-# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
-# ONLY the last version is tested in each group.
-KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,autoscaling/v1,batch/v1,batch/v2alpha1,extensions/v1beta1,apps/v1alpha1,federation/v1beta1,policy/v1alpha1,rbac.authorization.k8s.io/v1alpha1,certificates/v1alpha1"}
-# once we have multiple group supports
-# Create a junit-style XML test report in this directory if set.
-KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
-# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
-# set.
-KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
-
-kube::test::usage() {
-  kube::log::usage_from_stdin <<EOF
-usage: $0 [OPTIONS] [TARGETS]
-
-OPTIONS:
-  -p <number>   : number of parallel workers, must be >= 1
-EOF
-}
-
-isnum() {
-  [[ "$1" =~ ^[0-9]+$ ]]
-}
-
-parallel=1
-while getopts "hp:i:" opt ; do
-  case $opt in
-    h)
-      kube::test::usage
-      exit 0
-      ;;
-    p)
-      parallel="$OPTARG"
-      if ! isnum "${parallel}" || [[ "${parallel}" -le 0 ]]; then
-        kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
-        kube::test::usage
-        exit 1
-      fi
-      ;;
-    i)
-      kube::log::usage "'$0': use GOFLAGS='-count <n>'"
-      kube::test::usage
-      exit 1
-      ;;
-    ?)
-      kube::test::usage
-      exit 1
-      ;;
-    :)
-      kube::log::usage "Option -$OPTARG <value>"
-      kube::test::usage
-      exit 1
-      ;;
-  esac
-done
-shift $((OPTIND - 1))
-
-# Use eval to preserve embedded quoted strings.
-eval "goflags=(${KUBE_GOFLAGS:-})"
-eval "testargs=(${KUBE_TEST_ARGS:-})"
-
-# Used to filter verbose test output.
-go_test_grep_pattern=".*"
-
-# The go-junit-report tool needs full test case information to produce a
-# meaningful report.
-if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
-  goflags+=(-v)
-  # Show only summary lines by matching lines like "status package/test"
-  go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
+# For help output
+ARGHELP=""
+if [[ "$#" -gt 0 ]]; then
+  ARGHELP="WHAT='$@'"
 fi
-
-# Filter out arguments that start with "-" and move them to goflags.
-testcases=()
-for arg; do
-  if [[ "${arg}" == -* ]]; then
-    goflags+=("${arg}")
-  else
-    testcases+=("${arg}")
-  fi
-done
-if [[ ${#testcases[@]} -eq 0 ]]; then
-  testcases=($(kube::test::find_dirs))
-fi
-set -- "${testcases[@]+${testcases[@]}}"
-
-junitFilenamePrefix() {
-  if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
-    echo ""
-    return
-  fi
-  mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
-  local KUBE_TEST_API_NO_SLASH="${KUBE_TEST_API//\//-}"
-  echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_NO_SLASH}_$(kube::util::sortable_date)"
-}
-
-produceJUnitXMLReport() {
-  local -r junit_filename_prefix=$1
-  if [[ -z "${junit_filename_prefix}" ]]; then
-    return
-  fi
-
-  local test_stdout_filenames
-  local junit_xml_filename
-  test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
-  junit_xml_filename="${junit_filename_prefix}.xml"
-  if ! command -v go-junit-report >/dev/null 2>&1; then
-    kube::log::error "go-junit-report not found; please install with " \
-      "go get -u github.com/jstemmer/go-junit-report"
-    return
-  fi
-  cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
-  if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
-    rm ${test_stdout_filenames}
-  fi
-  kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
-}
-
-runTests() {
-  local junit_filename_prefix
-  junit_filename_prefix=$(junitFilenamePrefix)
-
-  # If we're not collecting coverage, run all requested tests with one 'go test'
-  # command, which is much faster.
-  if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
-    kube::log::status "Running tests without code coverage"
-    go test "${goflags[@]:+${goflags[@]}}" \
-      ${KUBE_RACE} ${KUBE_TIMEOUT} "${@+${@/#/${KUBE_GO_PACKAGE}/}}" \
-      "${testargs[@]:+${testargs[@]}}" \
-      | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
-      | grep "${go_test_grep_pattern}" && rc=$? || rc=$?
-    produceJUnitXMLReport "${junit_filename_prefix}"
-    return ${rc}
-  fi
-
-  # Create coverage report directories.
-  cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API}/$(kube::util::sortable_date)"
-  cover_profile="coverage.out" # Name for each individual coverage profile
-  kube::log::status "Saving coverage output in '${cover_report_dir}'"
-  mkdir -p "${@+${@/#/${cover_report_dir}/}}"
-
-  # Run all specified tests, collecting coverage results. Go currently doesn't
-  # support collecting coverage across multiple packages at once, so we must issue
-  # separate 'go test' commands for each package and then combine at the end.
-  # To speed things up considerably, we can at least use xargs -P to run multiple
-  # 'go test' commands at once.
-  # To properly parse the test results if generating a JUnit test report, we
-  # must make sure the output from parallel runs is not mixed. To achieve this,
-  # we spawn a subshell for each parallel process, redirecting the output to
-  # separate files.
-  # cmd/libs/go2idl/generator is fragile when run under coverage, so ignore it for now.
-  # see: https://github.com/kubernetes/kubernetes/issues/24967
-  printf "%s\n" "${@}" | grep -v "cmd/libs/go2idl/generator"| xargs -I{} -n1 -P${KUBE_COVERPROCS} \
-    bash -c "set -o pipefail; _pkg=\"{}\"; _pkg_out=\${_pkg//\//_}; \
-      go test ${goflags[@]:+${goflags[@]}} \
-        ${KUBE_RACE} \
-        ${KUBE_TIMEOUT} \
-        -cover -covermode=\"${KUBE_COVERMODE}\" \
-        -coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
-        \"${KUBE_GO_PACKAGE}/\${_pkg}\" \
-        ${testargs[@]:+${testargs[@]}} \
-      | tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
-      | grep \"${go_test_grep_pattern}\"" \
-    && test_result=$? || test_result=$?
-
-  produceJUnitXMLReport "${junit_filename_prefix}"
-
-  COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
-  {
-    # The combined coverage profile needs to start with a line indicating which
-    # coverage mode was used (set, count, or atomic). This line is included in
-    # each of the coverage profiles generated when running 'go test -cover', but
-    # we strip these lines out when combining so that there's only one.
-    echo "mode: ${KUBE_COVERMODE}"
-
-    # Include all coverage reach data in the combined profile, but exclude the
-    # 'mode' lines, as there should be only one.
-    for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
-      cat $x | grep -h -v "^mode:" || true
-    done
-  } >"${COMBINED_COVER_PROFILE}"
-
-  coverage_html_file="${cover_report_dir}/combined-coverage.html"
-  go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
-  kube::log::status "Combined coverage report: ${coverage_html_file}"
-
-  return ${test_result}
-}
-
-reportCoverageToCoveralls() {
-  if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
-    kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
-    ${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
-      ${CI_NAME:+"-service=${CI_NAME}"} \
-      ${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
-      || true
-  fi
-}
-
-checkFDs() {
-  # several unittests panic when httptest cannot open more sockets
-  # due to the low default files limit on OS X. Warn about low limit.
-  local fileslimit="$(ulimit -n)"
-  if [[ $fileslimit -lt 1000 ]]; then
-    echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure";
-  fi
-}
-
-checkFDs
-
-# Convert the CSVs to arrays.
-IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
-apiVersionsCount=${#apiVersions[@]}
-for (( i=0; i<${apiVersionsCount}; i++ )); do
-  apiVersion=${apiVersions[i]}
-  echo "Running tests for APIVersion: $apiVersion"
-  # KUBE_TEST_API sets the version of each group to be tested.
-  KUBE_TEST_API="${apiVersion}" runTests "$@"
-done
-
-# We might run the tests for multiple versions, but we want to report only
-# one of them to coveralls. Here we report coverage from the last run.
-reportCoverageToCoveralls
+echo "NOTE: $0 has been replaced by 'make test'"
+echo
+echo "The equivalent of this invocation is: "
+echo "    make test ${ARGHELP}"
+echo
+echo
+make --no-print-directory -C "${KUBE_ROOT}" test WHAT="$*"
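
Because the redirect forwards its positional arguments as WHAT="$*", a package-scoped run keeps working under either spelling. A sketch (the package path is illustrative):

    hack/test-go.sh pkg/kubelet     # prints the notice above, then effectively runs:
    make test WHAT=pkg/kubelet      # scoped unit-test run via the new target
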
diff --git a/hack/test-integration.sh b/hack/test-integration.sh
index eb882a43777e9..766e4a0c81a8f 100755
--- a/hack/test-integration.sh
+++ b/hack/test-integration.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# Copyright 2014 The Kubernetes Authors.
+# Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,80 +14,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# This script is a vestigial redirection. Please do not add "real" logic.
+
 set -o errexit
 set -o nounset
 set -o pipefail
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
-source "${KUBE_ROOT}/hack/lib/init.sh"
-
-# Lists of API Versions of each groups that should be tested, groups are
-# separated by comma, lists are separated by semicolon. e.g.,
-# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
-# TODO: It's going to be:
-# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
-# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
-# ONLY the last version is tested in each group.
-KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,autoscaling/v1,batch/v1,apps/v1alpha1,policy/v1alpha1,extensions/v1beta1,rbac.authorization.k8s.io/v1alpha1,certificates/v1alpha1"}
-
-# Give integration tests longer to run
-# TODO: allow a larger value to be passed in
-#KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s}
-KUBE_TIMEOUT="-timeout 600s"
-KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
-LOG_LEVEL=${LOG_LEVEL:-2}
-KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-}
-
-kube::test::find_integration_test_dirs() {
-  (
-    cd ${KUBE_ROOT}
-    find test/integration -name '*_test.go' -print0 \
-      | xargs -0n1 dirname \
-      | sort -u
-  )
-}
-
-cleanup() {
-  kube::log::status "Cleaning up etcd"
-  kube::etcd::cleanup
-  kube::log::status "Integration test cleanup complete"
-}
-
-runTests() {
-  kube::log::status "Starting etcd instance"
-  kube::etcd::start
-  kube::log::status "Running integration test cases"
-
-  # TODO: Re-enable race detection when we switch to a thread-safe etcd client
-  # KUBE_RACE="-race"
-  KUBE_GOFLAGS="${KUBE_GOFLAGS:-} -tags 'integration no-docker'" \
-    KUBE_RACE="" \
-    KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
-    KUBE_TEST_API_VERSIONS="$1" \
-    "${KUBE_ROOT}/hack/test-go.sh" $(kube::test::find_integration_test_dirs)
-
-  cleanup
-}
-
-checkEtcdOnPath() {
-  kube::log::status "Checking etcd is on PATH"
-  which etcd && return
-  kube::log::status "Cannot find etcd, cannot run integration tests."
-  kube::log::status "Please see docs/devel/testing.md for instructions."
-  return 1
-}
-
-checkEtcdOnPath
-
-# Run cleanup to stop etcd on interrupt or other kill signal.
-trap cleanup EXIT
-
-# If a test case is specified, just run once with v1 API version and exit
-if [[ -n "${KUBE_TEST_ARGS}" ]]; then
-  runTests v1
-fi
+echo "NOTE: $0 has been replaced by 'make test-integration'"
+echo
+echo "The equivalent of this invocation is: "
+echo "    make test-integration"
+echo
+echo
+make --no-print-directory -C "${KUBE_ROOT}" test-integration
-# Convert the CSV to an array of API versions to test
-IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
-for apiVersion in "${apiVersions[@]}"; do
-  runTests "${apiVersion}"
-done
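
The same shape repeats in every converted script. Reduced to its essentials, the vestigial-redirection pattern is (a sketch distilled from the patch, not part of it):

    #!/bin/bash
    # Vestigial redirection: announce the new spelling, then delegate to make.
    set -o errexit
    set -o nounset
    set -o pipefail
    KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
    echo "NOTE: $0 has been replaced by 'make test-integration'"
    make --no-print-directory -C "${KUBE_ROOT}" test-integration

Keeping the old path as a thin shim means existing CI jobs and muscle memory keep working while all real logic lives in one place under hack/make-rules/.
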
diff --git a/hack/test-update-storage-objects.sh b/hack/test-update-storage-objects.sh
index a51d2642918d7..1213a04b7bde3 100755
--- a/hack/test-update-storage-objects.sh
+++ b/hack/test-update-storage-objects.sh
@@ -92,7 +92,7 @@ function cleanup() {
 
 trap cleanup EXIT SIGINT
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/kube-apiserver
+make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver
 
 kube::etcd::start
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index 2eba4de2f2753..9f6a4e1533e4d 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -29,7 +29,7 @@ BUILD_TARGETS=(
   cmd/libs/go2idl/deepcopy-gen
   cmd/libs/go2idl/set-gen
 )
-"${KUBE_ROOT}/hack/build-go.sh" ${BUILD_TARGETS[*]}
+make -C "${KUBE_ROOT}" WHAT="${BUILD_TARGETS[*]}"
 
 clientgen=$(kube::util::find-binary "client-gen")
 conversiongen=$(kube::util::find-binary "conversion-gen")
diff --git a/hack/update-generated-docs.sh b/hack/update-generated-docs.sh
index f42c0f3c6ee33..457b4a4c327b6 100755
--- a/hack/update-generated-docs.sh
+++ b/hack/update-generated-docs.sh
@@ -27,12 +27,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" \
-  cmd/gendocs \
-  cmd/genkubedocs \
-  cmd/genman \
-  cmd/genyaml \
-  federation/cmd/genfeddocs
+BINS=(
+  cmd/gendocs
+  cmd/genkubedocs
+  cmd/genman
+  cmd/genyaml
+  federation/cmd/genfeddocs
+)
+make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
 
 kube::util::ensure-temp-dir
diff --git a/hack/update-generated-protobuf-dockerized.sh b/hack/update-generated-protobuf-dockerized.sh
index 447bf271462f2..5cb379d370b6e 100755
--- a/hack/update-generated-protobuf-dockerized.sh
+++ b/hack/update-generated-protobuf-dockerized.sh
@@ -23,9 +23,11 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-hack/build-go.sh \
-  cmd/libs/go2idl/go-to-protobuf \
-  cmd/libs/go2idl/go-to-protobuf/protoc-gen-gogo
+BINS=(
+  cmd/libs/go2idl/go-to-protobuf
+  cmd/libs/go2idl/go-to-protobuf/protoc-gen-gogo
+)
+make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
 
 if [[ -z "$(which protoc)" || "$(protoc --version)" != "libprotoc 3.0."* ]]; then
   echo "Generating protobuf requires protoc 3.0.0-beta1 or newer. Please download and"
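
WHAT accepts a space-separated list, which is why the generator scripts now collect their targets in a BINS array and expand it with [*]. A minimal sketch (target names are illustrative):

    BINS=(
      cmd/gendocs
      cmd/genman
    )
    # "${BINS[*]}" joins the array elements with spaces, so one make
    # invocation builds every listed target in a single pass.
    make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
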
Please download and" diff --git a/hack/update-munge-docs.sh b/hack/update-munge-docs.sh index 2c1dec561a776..1bc71c690bb54 100755 --- a/hack/update-munge-docs.sh +++ b/hack/update-munge-docs.sh @@ -26,8 +26,7 @@ git_upstream=$(kube::util::git_upstream_remote_name) kube::golang::setup_env -"${KUBE_ROOT}/hack/build-go.sh" \ - cmd/mungedocs +make -C "${KUBE_ROOT}" WHAT=cmd/mungedocs kube::util::ensure-temp-dir diff --git a/hack/update-swagger-spec.sh b/hack/update-swagger-spec.sh index ec4d65eac01ee..ea888bcb54b47 100755 --- a/hack/update-swagger-spec.sh +++ b/hack/update-swagger-spec.sh @@ -33,7 +33,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh" kube::golang::setup_env -"${KUBE_ROOT}/hack/build-go.sh" cmd/kube-apiserver +make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver function cleanup() { diff --git a/hack/verify-all.sh b/hack/verify-all.sh index fc0630c48afce..45a9a571e1353 100755 --- a/hack/verify-all.sh +++ b/hack/verify-all.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2014 The Kubernetes Authors. +# Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,82 +14,24 @@ # See the License for the specific language governing permissions and # limitations under the License. +# This script is a vestigial redirection. Please do not add "real" logic. + set -o errexit set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. -source "${KUBE_ROOT}/cluster/lib/util.sh" - -SILENT=true - -# Excluded checks are always skipped. -EXCLUDED_CHECKS=( - "verify-linkcheck.sh" # runs in separate Jenkins job once per day due to high network usage - ) - -function is-excluded { - if [[ $1 -ef ${BASH_SOURCE} ]]; then - return - fi - for e in ${EXCLUDED_CHECKS[@]}; do - if [[ $1 -ef "$KUBE_ROOT/hack/$e" ]]; then - return - fi - done - return 1 -} - -function run-cmd { - if ${SILENT}; then - "$@" &> /dev/null - else - "$@" - fi -} -function run-checks { - local -r pattern=$1 - local -r runner=$2 - - for t in $(ls ${pattern}) - do - if is-excluded "${t}" ; then - echo "Skipping ${t}" - continue - fi - echo -e "Verifying ${t}" - local start=$(date +%s) - run-cmd "${runner}" "${t}" && tr=$? || tr=$? - local elapsed=$(($(date +%s) - ${start})) - if [[ ${tr} -eq 0 ]]; then - echo -e "${color_green}SUCCESS${color_norm} ${t}\t${elapsed}s" - else - echo -e "${color_red}FAILED${color_norm} ${t}\t${elapsed}s" - ret=1 - fi - done -} - -while getopts ":v" opt; do - case ${opt} in - v) - SILENT=false - ;; - \?) - echo "Invalid flag: -${OPTARG}" >&2 - exit 1 - ;; - esac -done - -if ${SILENT} ; then - echo "Running in the silent mode, run with -v if you want to see script logs." 
diff --git a/hack/verify-description.sh b/hack/verify-description.sh
index 8bcbb0d65f9e0..8fca1b2fc850e 100755
--- a/hack/verify-description.sh
+++ b/hack/verify-description.sh
@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/genswaggertypedocs
+make -C "${KUBE_ROOT}" WHAT=cmd/genswaggertypedocs
 
 # Find binary
 genswaggertypedocs=$(kube::util::find-binary "genswaggertypedocs")
diff --git a/hack/verify-generated-docs.sh b/hack/verify-generated-docs.sh
index b543a5016631e..8ca70334c879f 100755
--- a/hack/verify-generated-docs.sh
+++ b/hack/verify-generated-docs.sh
@@ -23,12 +23,14 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" \
-  cmd/gendocs \
-  cmd/genkubedocs \
-  cmd/genman \
-  cmd/genyaml \
-  federation/cmd/genfeddocs
+BINS=(
+  cmd/gendocs
+  cmd/genkubedocs
+  cmd/genman
+  cmd/genyaml
+  federation/cmd/genfeddocs
+)
+make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
 
 kube::util::ensure-temp-dir
diff --git a/hack/verify-generated-swagger-docs.sh b/hack/verify-generated-swagger-docs.sh
index 8bbd76715955d..0212689e64d60 100755
--- a/hack/verify-generated-swagger-docs.sh
+++ b/hack/verify-generated-swagger-docs.sh
@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/genswaggertypedocs
+make -C "${KUBE_ROOT}" WHAT=cmd/genswaggertypedocs
 
 # Find binary
 genswaggertypedocs=$(kube::util::find-binary "genswaggertypedocs")
@@ -33,7 +33,7 @@ if [[ ! -x "$genswaggertypedocs" ]]; then
     echo "It looks as if you don't have a compiled genswaggertypedocs binary"
     echo
     echo "If you are running from a clone of the git repo, please run"
-    echo "'./hack/build-go.sh cmd/genswaggertypedocs'."
+    echo "'make WHAT=cmd/genswaggertypedocs'."
   } >&2
   exit 1
 fi
diff --git a/hack/verify-govet.sh b/hack/verify-govet.sh
index ab1c14572837e..13e3c4b39db91 100755
--- a/hack/verify-govet.sh
+++ b/hack/verify-govet.sh
@@ -14,36 +14,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# This script is a vestigial redirection. Please do not add "real" logic.
 
 set -o errexit
 set -o nounset
 set -o pipefail
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
-source "${KUBE_ROOT}/hack/lib/init.sh"
-
-cd "${KUBE_ROOT}"
-
-# This is required before we run govet for the results to be correct.
-# See https://github.com/golang/go/issues/16086 for details.
-go install ./cmd/...
-
-# Use eval to preserve embedded quoted strings.
-eval "goflags=(${KUBE_GOFLAGS:-})"
-
-# Filter out arguments that start with "-" and move them to goflags.
-targets=()
-for arg; do
-  if [[ "${arg}" == -* ]]; then
-    goflags+=("${arg}")
-  else
-    targets+=("${arg}")
-  fi
-done
-
-if [[ ${#targets[@]} -eq 0 ]]; then
-  # Do not run on third_party directories.
-  targets=$(go list ./... | egrep -v "/(third_party|vendor)/")
+# For help output
+ARGHELP=""
+if [[ "$#" -gt 0 ]]; then
+  ARGHELP="WHAT='$@'"
 fi
-go vet "${goflags[@]:+${goflags[@]}}" ${targets[@]}
+echo "NOTE: $0 has been replaced by 'make vet'"
+echo
+echo "The equivalent of this invocation is: "
+echo "    make vet ${ARGHELP}"
+echo
+echo
+make --no-print-directory -C "${KUBE_ROOT}" vet WHAT="$@"
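
As with the test redirect, positional package arguments to verify-govet.sh survive the transition as WHAT. A sketch (the package path is illustrative):

    hack/verify-govet.sh pkg/kubelet    # prints the notice, then effectively runs:
    make vet WHAT=pkg/kubelet
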
diff --git a/hack/verify-import-boss.sh b/hack/verify-import-boss.sh
index cb6a3106952c5..635556d85daab 100755
--- a/hack/verify-import-boss.sh
+++ b/hack/verify-import-boss.sh
@@ -23,6 +23,6 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/libs/go2idl/import-boss
+make -C "${KUBE_ROOT}" WHAT=cmd/libs/go2idl/import-boss
 
 $(kube::util::find-binary "import-boss") --verify-only
diff --git a/hack/verify-linkcheck.sh b/hack/verify-linkcheck.sh
index 7ec986617fef8..69427f3ba3f89 100755
--- a/hack/verify-linkcheck.sh
+++ b/hack/verify-linkcheck.sh
@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/linkcheck
+make -C "${KUBE_ROOT}" WHAT=cmd/linkcheck
 
 linkcheck=$(kube::util::find-binary "linkcheck")
diff --git a/hack/verify-munge-docs.sh b/hack/verify-munge-docs.sh
index 69555680e9e3b..b242a8b62748d 100755
--- a/hack/verify-munge-docs.sh
+++ b/hack/verify-munge-docs.sh
@@ -26,8 +26,7 @@ git_upstream=$(kube::util::git_upstream_remote_name)
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" \
-  cmd/mungedocs
+make -C "${KUBE_ROOT}/" WHAT=cmd/mungedocs
 
 # Find binary
 mungedocs=$(kube::util::find-binary "mungedocs")
diff --git a/hack/verify-swagger-spec.sh b/hack/verify-swagger-spec.sh
index 2477882743f28..c4035353388ad 100755
--- a/hack/verify-swagger-spec.sh
+++ b/hack/verify-swagger-spec.sh
@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/kube-apiserver
+make -C "${KUBE_ROOT}" WHAT=cmd/kube-apiserver
 
 apiserver=$(kube::util::find-binary "kube-apiserver")
diff --git a/hack/verify-symbols.sh b/hack/verify-symbols.sh
index f398d9ff09d25..43d865cef870b 100755
--- a/hack/verify-symbols.sh
+++ b/hack/verify-symbols.sh
@@ -23,7 +23,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 
 kube::golang::setup_env
 
-"${KUBE_ROOT}/hack/build-go.sh" cmd/hyperkube
+make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube
 
 # add other BADSYMBOLS here.
 BADSYMBOLS=(
diff --git a/hooks/pre-commit b/hooks/pre-commit
index 600f21afc9e04..bd4cbe53bc157 100755
--- a/hooks/pre-commit
+++ b/hooks/pre-commit
@@ -7,7 +7,7 @@ readonly green=$(tput bold; tput setaf 2)
 exit_code=0
 
 echo -ne "Checking that it builds... "
-if ! OUT=$("hack/build-go.sh" 2>&1); then
+if ! OUT=$(make 2>&1); then
   echo
   echo "${red}${OUT}"
   exit_code=1
diff --git a/pkg/api/ref_test.go b/pkg/api/ref_test.go
index 82f26c11b604b..8860fd5ac15b6 100644
--- a/pkg/api/ref_test.go
+++ b/pkg/api/ref_test.go
@@ -37,7 +37,7 @@ func (obj *ExtensionAPIObject) GetObjectKind() unversioned.ObjectKind { return &
 
 func TestGetReference(t *testing.T) {
 
-	// when vendoring kube, if you don't force the set of registered versions (like this hack/test-go.sh does)
+	// when vendoring kube, if you don't force the set of registered versions (like make test does)
 	// then you run into trouble because the types aren't registered in the scheme by anything. This does the
 	// register manually to allow unit test execution
 	if _, _, err := Scheme.ObjectKinds(&Pod{}); err != nil {
diff --git a/test/e2e_node/e2e_build.go b/test/e2e_node/e2e_build.go
index 9f1672fcdbd0f..769d03269e856 100644
--- a/test/e2e_node/e2e_build.go
+++ b/test/e2e_node/e2e_build.go
@@ -43,7 +43,8 @@ func buildGo() {
 	if err != nil {
 		glog.Fatalf("Failed to locate kubernetes root directory %v.", err)
 	}
-	cmd := exec.Command(filepath.Join(k8sRoot, "hack/build-go.sh"), buildTargets...)
+	targets := strings.Join(buildTargets, " ")
+	cmd := exec.Command("make", "-C", k8sRoot, fmt.Sprintf("WHAT=%s", targets))
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	err = cmd.Run()
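
Note that the new lines call strings.Join and fmt.Sprintf, so the file's import block must include strings and fmt; the hunk does not show whether those imports are updated elsewhere in the patch. The command the Go code assembles is equivalent to this shell invocation (target list is illustrative; the quoting matters because the joined targets travel as a single WHAT argument):

    make -C "${K8S_ROOT}" "WHAT=cmd/kubelet cmd/kube-apiserver"
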