From e75d150500a431e270d8fca3b16868bfd8c0aee7 Mon Sep 17 00:00:00 2001 From: Oleg Loewen Date: Mon, 24 Jun 2019 15:25:28 +0200 Subject: [PATCH] Add support for testcase groups. Merge test results of junit.xml files and e2e.log files. --- .../{Testgrid.yaml => allE2eTestgrid.yaml} | 10 +- .test-defs/conformanceTestgrid.yaml | 51 +++ .test-defs/e2eFast.yaml | 14 +- .test-defs/e2eSlow.yaml | 18 +- test/e2etest/README.md | 59 +++ test/e2etest/config/config.go | 22 +- test/e2etest/kubetest/desc_generator.go | 76 +++- .../description/1.10/conformance.json | 3 - .../description/1.11/conformance.json | 3 - .../description/1.12/conformance.json | 3 - .../description/1.13/conformance.json | 3 - .../description/1.14/conformance.json | 3 - .../description/1.14/e2e_base_fast.json | 334 -------------- .../description/1.14/e2e_base_slow.json | 161 ------- .../description/1.14/e2e_example.json | 5 - .../kubetest/description/1.14/working.json | 411 ++++++++++++++++++ test/e2etest/kubetest/kubetest_runner.go | 8 +- test/e2etest/kubetest/publisher.go | 23 +- test/e2etest/kubetest/results_evaluator.go | 62 ++- test/e2etest/kubetest/xml_junit_result.go | 51 ++- test/e2etest/main.go | 4 +- test/e2etest/util/sets/string.go | 95 +++- 22 files changed, 809 insertions(+), 610 deletions(-) rename .test-defs/{Testgrid.yaml => allE2eTestgrid.yaml} (85%) create mode 100644 .test-defs/conformanceTestgrid.yaml create mode 100644 test/e2etest/README.md delete mode 100644 test/e2etest/kubetest/description/1.10/conformance.json delete mode 100644 test/e2etest/kubetest/description/1.11/conformance.json delete mode 100644 test/e2etest/kubetest/description/1.12/conformance.json delete mode 100644 test/e2etest/kubetest/description/1.13/conformance.json delete mode 100644 test/e2etest/kubetest/description/1.14/conformance.json delete mode 100644 test/e2etest/kubetest/description/1.14/e2e_base_fast.json delete mode 100644 test/e2etest/kubetest/description/1.14/e2e_base_slow.json delete mode 100644 
test/e2etest/kubetest/description/1.14/e2e_example.json create mode 100644 test/e2etest/kubetest/description/1.14/working.json diff --git a/.test-defs/Testgrid.yaml b/.test-defs/allE2eTestgrid.yaml similarity index 85% rename from .test-defs/Testgrid.yaml rename to .test-defs/allE2eTestgrid.yaml index 77c42aac9a..4fac9f6b3f 100644 --- a/.test-defs/Testgrid.yaml +++ b/.test-defs/allE2eTestgrid.yaml @@ -27,7 +27,7 @@ spec: config: - type: env - name: PUBLISH_TO_TESTGRID + name: PUBLISH_RESULTS_TO_TESTGRID value: "true" - type: file name: GCLOUD_ACCOUNT_SECRET @@ -36,7 +36,13 @@ spec: secretKeyRef: name: testgrid-gcs-secret key: gcloud.json + - type: env + name: TESTCASE_GROUPS + value: 'fast,slow' + - type: env + name: DESCRIPTION_FILE + value: "working.json" command: [bash, -c] - args: ["TESTDESCRIPTION_NAME=conformance.desc ./test/k8s-e2e/e2e-test-execute"] + args: go run $GOPATH/src/github.com/gardener/test-infra/test/e2etest image: eu.gcr.io/gardener-project/gardener/testmachinery/base-step diff --git a/.test-defs/conformanceTestgrid.yaml b/.test-defs/conformanceTestgrid.yaml new file mode 100644 index 0000000000..775edaf0b6 --- /dev/null +++ b/.test-defs/conformanceTestgrid.yaml @@ -0,0 +1,51 @@ +# Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +kind: TestDefinition +metadata: + name: testgrid +spec: + owner: DL_5C5BE3E2970B9F404D0E2F50@sap.com + recipientsOnFailure: + - DL_5C5BE3E2970B9F404D0E2F50@sap.com + + description: Run kubernetes conformance tests and push result files (e2e.log and junit_01.xml) to testgrid repository. + + activeDeadlineSeconds: 10800 + behavior: ["serial"] + + config: + - type: env + name: PUBLISH_RESULTS_TO_TESTGRID + value: "true" + - type: file + name: GCLOUD_ACCOUNT_SECRET + path: /tmp/secrets/gardener-logs-conformance-tests.json + valueFrom: + secretKeyRef: + name: testgrid-gcs-secret + key: gcloud.json + - type: env + name: TESTCASE_GROUPS + value: 'conformance' + - type: env + name: GINKGO_PARALLEL + value: "false" + - type: env + name: DESCRIPTION_FILE + value: "working.json" + + command: [bash, -c] + args: go run $GOPATH/src/github.com/gardener/test-infra/test/e2etest + image: eu.gcr.io/gardener-project/gardener/testmachinery/base-step diff --git a/.test-defs/e2eFast.yaml b/.test-defs/e2eFast.yaml index f094075289..94a910dac6 100644 --- a/.test-defs/e2eFast.yaml +++ b/.test-defs/e2eFast.yaml @@ -16,9 +16,9 @@ kind: TestDefinition metadata: name: e2e-fast spec: - owner: DL_5C5BE3E2970B9F404D0E2F50@sap.com + owner: gardener-oq@listserv.sap.com recipientsOnFailure: - - DL_5C5BE3E2970B9F404D0E2F50@sap.com + - gardener-oq@listserv.sap.com description: Run fast kubernetes e2e tests. 
@@ -27,12 +27,12 @@ spec: config: - type: env - name: CONFORMANCE_SKIP - value: 'Serial\|Slow\|\[k8s.io\]\sProbing\scontainer\sshould\sbe\srestarted\swith\sa\s\/healthz\shttp\sliveness\sprobe\s\[NodeConformance\]\s\[Conformance\]\|\[k8s.io\]\sProbing\scontainer\sshould\s\*not\*\sbe\srestarted\swith\sa\s\/healthz\shttp\sliveness\sprobe\s\[NodeConformance\]\s\[Conformance\]\|\[k8s.io\]\sProbing\scontainer\sshould\shave\smonotonically\sincreasing\srestart\scount\s\[NodeConformance\]\s\[Conformance\]\|\[k8s.io\]\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sBurst\sscaling\sshould\srun\sto\scompletion\seven\swith\sunhealthy\spods\s\[Conformance\]\|\[k8s.io\]\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sScaling\sshould\shappen\sin\spredictable\sorder\sand\shalt\sif\sany\sstateful\spod\sis\sunhealthy\s\[Conformance\]\|\[k8s.io\]\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sshould\sperform\scanary\supdates\sand\sphased\srolling\supdates\sof\stemplate\smodifications\s\[Conformance\]\|\[k8s.io\]\sBasic\sStatefulSet\sfunctionality\s\[StatefulSetBasic\]\sshould\sperform\srolling\supdates\sand\sroll\sbacks\sof\stemplate\smodifications\s\[Conformance\]\|\[k8s.io\]\sGuestbook\sapplication\sshould\screate\sand\sstop\sa\sworking\sapplication\s\s\[Conformance\]' + name: TESTCASE_GROUPS + value: 'fast' - type: env - name: GINKGO_PARALLEL - value: "true" + name: DESCRIPTION_FILE + value: "working.json" command: [bash, -c] - args: ["TESTDESCRIPTION_NAME=e2e_base_fast.desc ./test/k8s-e2e/e2e-test-execute"] + args: go run $GOPATH/src/github.com/gardener/test-infra/test/e2etest image: eu.gcr.io/gardener-project/gardener/testmachinery/base-step \ No newline at end of file diff --git a/.test-defs/e2eSlow.yaml b/.test-defs/e2eSlow.yaml index e26abb1d5f..aba3c16407 100644 --- a/.test-defs/e2eSlow.yaml +++ b/.test-defs/e2eSlow.yaml @@ -16,9 +16,9 @@ kind: TestDefinition metadata: name: e2e-slow spec: - owner: DL_5C5BE3E2970B9F404D0E2F50@sap.com + owner: 
gardener-oq@listserv.sap.com recipientsOnFailure: - - DL_5C5BE3E2970B9F404D0E2F50@sap.com + - gardener-oq@listserv.sap.com description: Run kubernetes e2e test with tags Slow and Serial. @@ -27,13 +27,13 @@ spec: behavior: ["serial"] config: - - type: env - name: CONFORMANCE_FOCUS - value: 'Serial\|Slow' - - type: env - name: GINKGO_PARALLEL - value: "true" + - type: env + name: TESTCASE_GROUPS + value: 'slow' + - type: env + name: DESCRIPTION_FILE + value: "working.json" command: [bash, -c] - args: ["TESTDESCRIPTION_NAME=e2e_base_slow.desc ./test/k8s-e2e/e2e-test-execute"] + args: go run $GOPATH/src/github.com/gardener/test-infra/test/e2etest image: eu.gcr.io/gardener-project/gardener/testmachinery/base-step \ No newline at end of file diff --git a/test/e2etest/README.md b/test/e2etest/README.md new file mode 100644 index 0000000000..c07cc3e8e1 --- /dev/null +++ b/test/e2etest/README.md @@ -0,0 +1,59 @@ +# E2E Test Runner + +The e2e test runner leverages kubetest to execute e2e tests and has a few additional features: + +- Define description files consisting of e2e testcases to run +- Annotate testcases to run only for dedicated cloud providers +- Evaluate test results of the kubetest run and provide elastic search documents + +## Usage + +Ensure all required environment variables have been set. Create a `shoot.config` file in `EXPORT_PATH` directory and paste the kubeconfig of the kubernetes cluster to test in it. Run `e2etest` in command line to execute the e2e tests. 
+ +### Prerequisites: + +- Go installed +- Git installed +- (only for publishing results) environment variable `GCLOUD_ACCOUNT_SECRET` should point to a google cloud storage secret file + +### Parameters: + +| Environment Variable | Default | Description | +|---|---|---| +| DESCRIPTION_FILE | | **[Required]** Path to description json file, which lists the testcases to run | +| K8S_VERSION | | **[Required]** Kubernetes cluster version | +| TESTCASE_GROUPS | | **[Required]** testcase groups to run (comma separated). E.g. `fast,slow` | +| CLOUDPROVIDER | | **[Required]** Cloud provider (supported: aws, gcp, azure, alicloud, openstack) | +| EXPORT_PATH | /tmp/e2e/export | Location of `shoot.config` file and test results | +| GINKGO_PARALLEL | true | Whether to run kubetest in parallel way. Testcases that contain the `[Serial]` tag are executed serially. | +| IGNORE_FALSE_POSITIVE_LIST | false | Ignores exclusion of testcases that are listed in `false_positive.json` | +| IGNORE_SKIP_LIST | false | Ignores exclusion of testcases that are listed in `skip.json` | +| INCLUDE_UNTRACKED_TESTS | false | Executes testcases that are not mentioned in description files for given provider and kubernetes release version | +| FLAKE_ATTEMPTS | 2 | Flake attempts for kubetest: how many times a failed test should be rerun | +| PUBLISH_RESULTS_TO_TESTGRID | false | Whether to push test results to google cloud storage, for testgrid | +| RETEST_FLAGGED_ONLY | false | Runs testcases with retest flag only. Value of `DESCRIPTION_FILE` is ignored | + +### Description Files +Example: +```json +[ + { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should reject invalid sysctls", "groups": ["slow", "conformance"], "only": ["aws", "gcp"], "retest": ["aws"], "comment": "Some comment"}, + { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should support sysctls", "groups": ["slow"], "exclude": ["aws"]} +] +``` +| Field | Description | +|---|---| +| testcase | testcase name. 
Can be a substring. All testcases that have this as substring will be executed | +| groups | assigns the testcase to testcase groups | +| only | will consider the testcase only for given cloud provider | +| exclude | will not consider the testcase for given cloud provider | +| comment | is not evaluated in any way in code. Use only for additional information | +| retest | testcase will be excluded from all general test runs for given providers. Testcases with retest flag can be executed by setting `RETEST_FLAGGED_ONLY=true` | + +Existing description files: +- `working.json` consists of all working e2e testcases separated in different groups +- `skip.json` consists of testcases that are always skipped by kubetest due to reasons like: driver not supported, requires >1 nodes, etc. +- `false_positive.json` consists of testcases that are failing because of different reasons like bad code, which makes sense to test with next kubernetes release version + +### Output +You find the kubetest dump results (like e2e.log and junit_*.xml files) in the `/tmp/e2e/artifacts` directory. These artifacts are evaluated and stored as *.json files in the `EXPORT_PATH` directory. 
diff --git a/test/e2etest/config/config.go b/test/e2etest/config/config.go index 99e7c4f2c0..6a9ea89910 100644 --- a/test/e2etest/config/config.go +++ b/test/e2etest/config/config.go @@ -9,7 +9,9 @@ import ( "path/filepath" "regexp" "runtime" + "sort" "strconv" + "strings" ) var ( @@ -23,8 +25,6 @@ var ( TmpDir string ShootKubeconfigPath string GinkgoParallel bool - ConformanceTestsSkip string - ConformanceTestsFocus string DescriptionFile string K8sRelease string CloudProvider string @@ -38,8 +38,11 @@ var ( K8sReleaseMajorMinor string GardenerVersion string RetestFlaggedOnly bool + TestcaseGroup []string ) +var WORKING_DESC_FILE = "working.json" + func init() { //log.SetLevel(log.DebugLevel) @@ -69,13 +72,16 @@ func init() { log.Fatal(errors.Wrapf(err, "file %s does not exist: ", ShootKubeconfigPath)) } GinkgoParallel, _ = strconv.ParseBool(util.GetEnv("GINKGO_PARALLEL", "true")) - ConformanceTestsSkip = os.Getenv("CONFORMANCE_SKIP") - ConformanceTestsFocus = os.Getenv("CONFORMANCE_FOCUS") - DescriptionFile = os.Getenv("DESCRIPTION_FILE") + DescriptionFile = util.GetEnv("DESCRIPTION_FILE", WORKING_DESC_FILE) K8sRelease = os.Getenv("K8S_VERSION") if K8sRelease == "" { log.Fatal("K8S_VERSION environment variable not found") } + TestcaseGroup = strings.Split(os.Getenv("TESTCASE_GROUPS"), ",") + sort.Strings(TestcaseGroup) + if len(TestcaseGroup) == 0 { + log.Fatal("TESTCASE_GROUP environment variable not found") + } CloudProvider = os.Getenv("CLOUDPROVIDER") if CloudProvider == "" { log.Fatal("CLOUDPROVIDER environment variable not found") @@ -85,6 +91,9 @@ func init() { K8sReleaseMajorMinor = string(regexp.MustCompile(`^(\d+\.\d+)`).FindSubmatch([]byte(K8sRelease))[1]) DescriptionsPath = path.Join(OwnDir, "kubetest", "description", K8sReleaseMajorMinor) DescriptionFilePath = path.Join(DescriptionsPath, DescriptionFile) + if _, err := os.Stat(DescriptionFilePath); err != nil { + log.Fatal(errors.Wrapf(err, "file %s does not exist: ", DescriptionFilePath)) + } 
FlakeAttempts, _ = strconv.Atoi(util.GetEnv("FLAKE_ATTEMPTS", "2")) PublishResultsToTestgrid, _ = strconv.ParseBool(util.GetEnv("PUBLISH_RESULTS_TO_TESTGRID", "false")) IgnoreSkipList, _ = strconv.ParseBool(util.GetEnv("IGNORE_SKIP_LIST", "false")) @@ -99,8 +108,6 @@ func init() { log.Debugf("TestInfraPath: %s", TestInfraPath) log.Debugf("ShootKubeconfigPath: %s", ShootKubeconfigPath) log.Debugf("GinkgoParallel: %t", GinkgoParallel) - log.Debugf("ConformanceTestsSkip: %s", ConformanceTestsSkip) - log.Debugf("ConformanceTestsFocus: %s", ConformanceTestsFocus) log.Debugf("K8sRelease: %s", K8sRelease) log.Debugf("CloudProvider: %s", CloudProvider) log.Debugf("IgnoreFalsePositiveList: %t", IgnoreFalsePositiveList) @@ -112,4 +119,5 @@ func init() { log.Debugf("FlakeAttempts: %o", FlakeAttempts) log.Debugf("GardenerVersion: %o", GardenerVersion) log.Debugf("RetestFlaggedOnly: %o", RetestFlaggedOnly) + log.Debugf("TestcaseGroup: %o", TestcaseGroup) } diff --git a/test/e2etest/kubetest/desc_generator.go b/test/e2etest/kubetest/desc_generator.go index 78dde854da..a972235878 100644 --- a/test/e2etest/kubetest/desc_generator.go +++ b/test/e2etest/kubetest/desc_generator.go @@ -16,30 +16,47 @@ import ( ) const ( - FALSE_POSITIVES_DESC_FILE = "false_positives.json" - SKIP_DESC_FILE = "skip.json" - GENERATED_RUN_DESC_FILE = "generated_tests_to_run.txt" - SUCCESS = "success" - FAILURE = "failure" + FalsePositivesDescFile = "false_positives.json" + SkipDescFile = "skip.json" + GeneratedRunDescFile = "generated_tests_to_run.txt" + AllTestcasesFile = "all_testcases.txt" + Success = "success" + Failure = "failure" + Wildcard = "*" ) -var falsePositiveDescPath = filepath.Join(config.DescriptionsPath, FALSE_POSITIVES_DESC_FILE) -var GeneratedRunDescPath = filepath.Join(config.TmpDir, GENERATED_RUN_DESC_FILE) -var skipDescPath = filepath.Join(config.DescriptionsPath, SKIP_DESC_FILE) +var falsePositiveDescPath = filepath.Join(config.DescriptionsPath, FalsePositivesDescFile) +var 
GeneratedRunDescPath = filepath.Join(config.TmpDir, GeneratedRunDescFile) +var AllTestcasesFilePath = filepath.Join(config.TmpDir, AllTestcasesFile) +var skipDescPath = filepath.Join(config.DescriptionsPath, SkipDescFile) func Generate() (desc string) { + log.Info("Generate test description file") testcasesToRun := sets.NewStringSet() allE2eTestcases := getAllE2eTestCases() if config.DescriptionFilePath != "" { - testcasesFromDescriptionFile := getTestcaseNamesFromDesc(config.DescriptionFilePath) - testcasesToRun = allE2eTestcases.GetSetOfMatching(testcasesFromDescriptionFile) + testcasesFromDescFile := UnmarshalDescription(config.DescriptionFilePath) + for _, testcaseFromDesc := range testcasesFromDescFile { + matching := allE2eTestcases.GetMatchingForTestcase(testcaseFromDesc.Name, testcaseFromDesc.Skip, testcaseFromDesc.Focus) + if testcaseFromDesc.validForCurrentContext() { + if matching.Len() == 0 { + log.Warnf("Couldn't find testcase: '%s'", testcaseFromDesc.Name) + continue + } + testcasesToRun = testcasesToRun.Union(matching) + } else { + // this is necessary since e.g. 
all conformance testcases are added by a wildcard, but there may still be + // additionally a conformance test excluded explicitly or assigned to a group + testcasesToRun = testcasesToRun.Difference(matching) + } + } } if config.RetestFlaggedOnly { var testcasesFromDescriptionFile = sets.NewStringSet() testcasesFromDescriptionFile.Insert("[Conformance]") - testcasesToRun = allE2eTestcases.GetSetOfMatching(testcasesFromDescriptionFile) + testcasesToRun = allE2eTestcases.GetMatchingOfSet(testcasesFromDescriptionFile) } if !config.IgnoreFalsePositiveList { @@ -77,26 +94,41 @@ func Generate() (desc string) { if err := writeLinesToFile(testcasesToRun, GeneratedRunDescPath); err != nil { log.Fatal(errors.Wrapf(err, "Couldn't save testcasesToRun as file in %s", GeneratedRunDescPath)) } + log.Infof("Description file %s generated", GeneratedRunDescPath) return GeneratedRunDescPath } func getTestcaseNamesFromDesc(descPath string) sets.StringSet { - testcasesOfCurrentProvider := sets.NewStringSet() + matchedTestcases := sets.NewStringSet() testcases := UnmarshalDescription(descPath) for _, testcase := range testcases { if len(testcase.ExcludedProviders) != 0 && len(testcase.OnlyProviders) != 0 { log.Warn("fields excluded and only of description file testcase, are not allowed to be defined both at the same time. 
Skipping testcase: %s", testcase.Name) continue } - // check - excludedExplicitly := util.Contains(testcase.ExcludedProviders, config.CloudProvider) - excludedImplicitly := len(testcase.OnlyProviders) != 0 && !util.Contains(testcase.OnlyProviders, config.CloudProvider) - retestActiveForThisProviderAndTest := config.RetestFlaggedOnly && util.Contains(testcase.Retest, config.CloudProvider) - if !excludedExplicitly && !excludedImplicitly && !config.RetestFlaggedOnly || retestActiveForThisProviderAndTest { - testcasesOfCurrentProvider.Insert(testcase.Name) + if testcase.validForCurrentContext() { + matchedTestcases.Insert(testcase.Name) } } - return testcasesOfCurrentProvider + return matchedTestcases +} + +func (testcase TestcaseDesc) validForCurrentContext() bool { + validForCurrentContext := false + excludedExplicitly := util.Contains(testcase.ExcludedProviders, config.CloudProvider) + consideredByOnlyField := testcase.OnlyProviders == nil || len(testcase.OnlyProviders) != 0 && util.Contains(testcase.OnlyProviders, config.CloudProvider) + testcasesGroupMatched := false + for _, testcaseGroup := range config.TestcaseGroup { + if testcaseGroup == Wildcard || util.Contains(testcase.TestcaseGroups, testcaseGroup) { + testcasesGroupMatched = true + break + } + } + retestActiveForThisProviderAndTest := config.RetestFlaggedOnly && util.Contains(testcase.Retest, config.CloudProvider) + if !excludedExplicitly && consideredByOnlyField && !config.RetestFlaggedOnly && testcasesGroupMatched || retestActiveForThisProviderAndTest { + validForCurrentContext = true + } + return validForCurrentContext } func getAllE2eTestCases() sets.StringSet { @@ -121,6 +153,9 @@ func getAllE2eTestCases() sets.StringSet { allTestcases.Insert(testcase.Name) } } + if log.GetLevel() == log.DebugLevel { + allTestcases.WriteToFile(AllTestcasesFilePath) + } return allTestcases } @@ -172,4 +207,7 @@ type TestcaseDesc struct { ExcludedProviders []string `json:"exclude,omitempty"` OnlyProviders []string 
`json:"only,omitempty"` Retest []string `json:"retest,omitempty"` + TestcaseGroups []string `json:"groups"` + Skip string `json:"skip,omitempty"` + Focus string `json:"focus,omitempty"` } diff --git a/test/e2etest/kubetest/description/1.10/conformance.json b/test/e2etest/kubetest/description/1.10/conformance.json deleted file mode 100644 index 2e23bf151a..0000000000 --- a/test/e2etest/kubetest/description/1.10/conformance.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"testcase": "[Conformance]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.11/conformance.json b/test/e2etest/kubetest/description/1.11/conformance.json deleted file mode 100644 index 2e23bf151a..0000000000 --- a/test/e2etest/kubetest/description/1.11/conformance.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"testcase": "[Conformance]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.12/conformance.json b/test/e2etest/kubetest/description/1.12/conformance.json deleted file mode 100644 index 2e23bf151a..0000000000 --- a/test/e2etest/kubetest/description/1.12/conformance.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"testcase": "[Conformance]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.13/conformance.json b/test/e2etest/kubetest/description/1.13/conformance.json deleted file mode 100644 index 2e23bf151a..0000000000 --- a/test/e2etest/kubetest/description/1.13/conformance.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"testcase": "[Conformance]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.14/conformance.json b/test/e2etest/kubetest/description/1.14/conformance.json deleted file mode 100644 index 2e23bf151a..0000000000 --- a/test/e2etest/kubetest/description/1.14/conformance.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - {"testcase": "[Conformance]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.14/e2e_base_fast.json 
b/test/e2etest/kubetest/description/1.14/e2e_base_fast.json deleted file mode 100644 index b7e55cf528..0000000000 --- a/test/e2etest/kubetest/description/1.14/e2e_base_fast.json +++ /dev/null @@ -1,334 +0,0 @@ -[ - { "testcase": "[Conformance]"}, - { "testcase": "[k8s.io] [Feature:Example] [k8s.io] Downward API should create a pod that prints his name and namespace"}, - { "testcase": "[k8s.io] [sig-node] kubelet [k8s.io] [sig-node] Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s."}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support pod.Spec.SecurityContext.SupplementalGroups"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support seccomp alpha runtime/default annotation [Feature:Seccomp]"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support seccomp default which is unconfined [Feature:Seccomp]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]"}, - { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should not launch unsafe, but not explicitly enabled sysctls on the 
node"}, - { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should reject invalid sysctls"}, - { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should support sysctls"}, - { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should support unsafe sysctls which are actually whitelisted"}, - { "testcase": "[sig-api-machinery] Initializers [Feature:Initializers] should be invisible to controllers by default"}, - { "testcase": "[sig-api-machinery] Initializers [Feature:Initializers] will be set to nil if a patch removes the last pending initializer"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to create another node"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] A node shouldn't be able to delete another node"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] Getting a non-existent secret should exit with the Forbidden error, not a NotFound error"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] Getting a secret for a workload the node has access to should succeed"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing configmap should exit with the Forbidden error"}, - { "testcase": "[sig-auth] [Feature:NodeAuthorizer] Getting an existing secret should exit with the Forbidden error"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply apply set/view last-applied"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply should apply a new configuration to an existing RC"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply should reuse port when apply to an existing SVC"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info dump should check if cluster-info dump succeeds"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl copy should copy a file from a running Pod"}, - { 
"testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should create a quota with scopes"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should create a quota without scopes"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should reject quota with invalid scopes"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl run CronJob should create a CronJob"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Proxy server should support --unix-socket=/path [Conformance]"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should return command exit codes"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec through an HTTP proxy"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec through kubectl proxy"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support port-forward"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects a client request should support a client that connects, sends DATA, and disconnects"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects a client request should support a client that connects, sends NO DATA, and disconnects"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects NO client request should support a client that connects, sends DATA, and disconnects"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 should support forwarding over websockets"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects a client request should support a client that connects, sends DATA, and disconnects"}, - 
{ "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects a client request should support a client that connects, sends NO DATA, and disconnects"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects NO client request should support a client that connects, sends DATA, and disconnects"}, - { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost should support forwarding over websockets"}, - { "testcase": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should delete the token secret when the secret expired"}, - { "testcase": "[sig-cluster-lifecycle] [Feature:BootstrapTokens] should not delete the token secret when the secret is not expired"}, - { "testcase": "[sig-cluster-lifecycle] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to cadvisor port 4194 using proxy subresource"}, - { "testcase": "[sig-cluster-lifecycle] Ports Security Check [Feature:KubeletSecurity] should not be able to proxy to the readonly kubelet port 10255 using proxy subresource"}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class."}, - { "testcase": "[sig-scheduling] ResourceQuota 
[Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists)."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn)."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:PodPriority] should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with best effort scope using scope-selectors."}, - { "testcase": "[sig-scheduling] ResourceQuota [Feature:ScopeSelectors] should verify ResourceQuota with terminating scopes through scope selectors."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. [sig-storage]"}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim. 
[sig-storage]"}, - { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs)"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] volume on default medium should have the correct mode using FSGroup"}, - { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup"}, - { "testcase": "[sig-storage] HostPath should support r/w [NodeConformance]"}, - { "testcase": "[sig-storage] HostPath should support subPath [NodeConformance]"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound 
PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume 
and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] 
PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] 
PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": 
"[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should not set different fsGroups for two pods simultaneously"}, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1"}, - { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Projected 
downwardAPI should provide podname as non-root with fsgroup [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]"}, - { "testcase": "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull image from docker hub [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull image from gcr.io [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull non-existing image from gcr.io [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when starting a container that exits should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when starting a container that exits should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when starting 
a container that exits should report termination message from log output if TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when starting a container that exits should report termination message if TerminationMessagePath is set [NodeConformance]"}, - { "testcase": "[k8s.io] Container Runtime blackbox test when starting a container that exits should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]"}, - { "testcase": "[k8s.io] PrivilegedPod [NodeConformance] should enable privileged commands"}, - { "testcase": "[k8s.io] Security Context When creating a container with runAsUser should run the container with uid 0 [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a container with runAsUser should run the container with uid 65534 [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with privileged should run the container as unprivileged when false [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when true [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [NodeConformance]"}, - { 
"testcase": "[sig-api-machinery] CustomResourceDefinition Watch CustomResourceDefinition Watch watch on custom resource definition objects"}, - { "testcase": "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob"}, - { "testcase": "[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil"}, - { "testcase": "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources"}, - { "testcase": "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources"}, - { "testcase": "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod"}, - { "testcase": "[sig-api-machinery] Generated clientset should create v1beta1 cronJobs, delete cronJobs, watch cronJobs"}, - { "testcase": "[sig-api-machinery] Secrets should fail to create secret in volume due to empty secret key"}, - { "testcase": "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls"}, - { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata"}, - { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls"}, - { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes"}, - { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return pod details"}, - { "testcase": "[sig-api-machinery] Watchers should receive events on concurrent watches in same order"}, - { "testcase": "[sig-apps] CronJob should remove from active list jobs that have been deleted"}, - { "testcase": "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods"}, - { "testcase": 
"[sig-apps] Deployment deployment should support rollback"}, - { "testcase": "[sig-apps] Deployment iterative rollouts should eventually progress"}, - { "testcase": "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef"}, - { "testcase": "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController should create a PodDisruptionBudget"}, - { "testcase": "[sig-apps] DisruptionController should update PodDisruptionBudget status"}, - { "testcase": "[sig-apps] Job should adopt matching orphans and release non-matching pods"}, - { "testcase": "[sig-apps] Job should delete a job"}, - { "testcase": "[sig-apps] Job should exceed active deadline"}, - { "testcase": "[sig-apps] Job should exceed backoffLimit"}, - { "testcase": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted"}, - { "testcase": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted"}, - { "testcase": "[sig-apps] Job should run a job to completion when tasks succeed"}, - { "testcase": "[sig-apps] ReplicaSet should surface a failure condition on a common 
issue like exceeded quota"}, - { "testcase": "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota"}, - { "testcase": "[sig-auth] Certificates API should support building a client with a CSR"}, - { "testcase": "[sig-auth] PodSecurityPolicy should forbid pod creation when no PSP is available"}, - { "testcase": "[sig-auth] ServiceAccounts should ensure a single API token exists"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Proxy server should support --unix-socket=/path [Conformance]"}, - { "testcase": "[sig-instrumentation] Cadvisor should be healthy on every node."}, - { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from API server."}, - { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager."}, - { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet."}, - { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler."}, - { "testcase": "[sig-network] DNS should provide DNS for pods for Hostname and Subdomain"}, - { "testcase": "[sig-network] DNS should support configurable pod resolv.conf"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should allow ingress access on one named port [Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should enforce policy based on PodSelector [Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should enforce policy based on Ports [Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should support a 'default-deny' policy 
[Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should support allow-all policy [Feature:NetworkPolicy]"}, - { "testcase": "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services"}, - { "testcase": "[sig-network] Services should be able to change the type from ClusterIP to ExternalName"}, - { "testcase": "[sig-network] Services should be able to change the type from ExternalName to ClusterIP"}, - { "testcase": "[sig-network] Services should be able to change the type from ExternalName to NodePort"}, - { "testcase": "[sig-network] Services should be able to change the type from NodePort to ExternalName"}, - { "testcase": "[sig-network] Services should be able to switch session affinity for NodePort service"}, - { "testcase": "[sig-network] Services should be able to switch session affinity for service with type clusterIP"}, - { "testcase": "[sig-network] Services should be able to update NodePorts with two same port numbers but different protocols"}, - { "testcase": "[sig-network] Services should check NodePort out-of-range"}, - { "testcase": "[sig-network] Services should create endpoints for unready pods"}, - { "testcase": "[sig-network] Services should have session affinity work for NodePort service"}, - { "testcase": "[sig-network] Services should have session affinity work for service with type clusterIP"}, - { "testcase": "[sig-network] Services should prevent NodePort collisions"}, - { "testcase": "[sig-network] Services should release NodePorts on delete"}, - { "testcase": "[sig-network] Services should use same NodePort with same port but different protocols"}, - { "testcase": "[sig-node] ConfigMap should fail to create configMap in volume due to empty configmap key"}, - { "testcase": "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied."}, - { "testcase": "[sig-scheduling] ResourceQuota should 
create a ResourceQuota and capture the life of a configMap."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a pod."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a replica set."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a replication controller."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a secret."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and capture the life of a service."}, - { "testcase": "[sig-scheduling] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated."}, - { "testcase": "[sig-scheduling] ResourceQuota should verify ResourceQuota with best effort scope."}, - { "testcase": "[sig-scheduling] ResourceQuota should verify ResourceQuota with terminating scopes."}, - { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner allowedTopologies should create persistent volume in the zone specified in allowedTopologies of storageclass"}, - { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap"}, - { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected"}, - { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret"}, - { "testcase": "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately"}, - { "testcase": "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after the PVC is deleted."}, - { "testcase": "[sig-storage] PersistentVolumes NFS with 
Single PV - PVC pairs create a PV and a pre-bound PVC: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access"}, - { "testcase": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity"}, - { "testcase": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector"}, - { "testcase": "[sig-storage] Volumes ConfigMap should be mountable"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with defaults"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should 
support existing single file"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with defaults"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default 
fs)] subPath should support non-existent path"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount"}, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: 
Inline-volume (default fs)] subPath should support existing directory"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumes should be mountable"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support 
readOnly directory specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount"}, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should be mountable"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]"}, - { "testcase": "[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]"}, - { "testcase": "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction"}, - { "testcase": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction"}, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Proxy server should support --unix-socket=/path [Conformance]"}, - { "testcase": "[sig-network] Networking should provide Internet connection for 
containers [Feature:Networking-IPv4]", "exclude": [ "azure" ] }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support inline execution and attach", "exclude": [ "aws" ] }, - { "testcase": "[k8s.io] Pods should support pod readiness gates [NodeFeature:PodReadinessGate]", "only": [ "gcp", "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directory", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default 
fs)] subPath should support non-existent path", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should be mountable", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes 
[Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should be mountable", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", "only": [ "openstack" ] } -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.14/e2e_base_slow.json b/test/e2etest/kubetest/description/1.14/e2e_base_slow.json deleted file mode 100644 index fc55602d76..0000000000 --- a/test/e2etest/kubetest/description/1.14/e2e_base_slow.json +++ /dev/null @@ -1,161 +0,0 @@ -[ - { "testcase": "[Conformance]" }, - { "testcase": "[k8s.io] [sig-node] Kubelet [Serial] [Slow] [k8s.io] [sig-node] experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking] resource tracking for 100 pods per node" }, - { "testcase": "[k8s.io] [sig-node] Kubelet [Serial] [Slow] [k8s.io] [sig-node] regular resource usage tracking resource tracking for 100 pods per node" }, - { "testcase": "[k8s.io] 
EquivalenceCache [Serial] validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]" }, - { "testcase": "[k8s.io] EquivalenceCache [Serial] validates pod affinity works properly when new replica pod is scheduled" }, - { "testcase": "[k8s.io] EquivalenceCache [Serial] validates pod anti-affinity works properly when new replica pod is scheduled" }, - { "testcase": "[k8s.io] Variable Expansion should fail substituting values in a volume subpath with absolute path [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]" }, - { "testcase": "[k8s.io] Variable Expansion should fail substituting values in a volume subpath with backticks [Feature:VolumeSubpathEnvExpansion][NodeAlphaFeature:VolumeSubpathEnvExpansion][Slow]" }, - { "testcase": "[sig-api-machinery] Initializers [Feature:Initializers] don't cause replicaset controller creating extra pods if the initializer is not handled [Serial]" }, - { "testcase": "[sig-api-machinery] Initializers [Feature:Initializers] should dynamically register and apply initializers to pods [Serial]" }, - { "testcase": "[sig-api-machinery] Namespaces [Serial] should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]" }, - { "testcase": "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds)" }, - { "testcase": "[sig-apps] CronJob should not schedule jobs when suspended [Slow]" }, - { "testcase": "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow]" }, - { "testcase": "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete" }, - { "testcase": "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] Deployment Should scale from 1 pod to 3 pods and from 3 
to 5" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] Deployment Should scale from 5 pods to 3 pods and from 3 to 1" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability" }, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl taint [Serial] should remove all the taints with the same key off a node" }, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl taint [Serial] should update the taint on a node" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes" }, - { "testcase": "[sig-scheduling] NoExecuteTaintManager Single 
Pod [Serial] removing taint cancels eviction" }, - { "testcase": "[sig-scheduling] PodPriorityResolution [Serial] validates critical system priorities are created and resolved" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates MaxPods limit number of pods that are allowed to run [Slow]" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP" }, - { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol" }, - { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works" }, - { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod" }, - { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates pod anti-affinity works in preemption" }, - { "testcase": "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow]" }, - { "testcase": "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]" }, - { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] should provision storage with different parameters" }, - { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner 
delayed binding [Slow] should create persistent volumes in the same zone as node after a pod mounting the claims is started" }, - { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner delayed binding with allowedTopologies [Slow] should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started" }, - { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow]" }, - { "testcase": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow]" }, - { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow]" }, - { "testcase": "[sig-storage] PersistentVolumes-local Local volume provisioner [Serial] should create and recreate local persistent volume" }, - { "testcase": "[sig-storage] PersistentVolumes-local Local volume provisioner [Serial] should discover dynamically created local persistent volume mountpoint in discovery directory" }, - { "testcase": "[sig-storage] PersistentVolumes-local Local volume provisioner [Serial] should not create local persistent volume for filesystem volume that was not bind mounted" }, - { "testcase": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path" }, - { "testcase": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node" }, - { "testcase": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity" }, - { "testcase": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity" }, - { "testcase": "[sig-storage] 
PersistentVolumes-local Stress with local volume provisioner [Serial] should use be able to process many pods and reuse local volumes" }, - { "testcase": "[sig-storage] Projected configMap Should fail non-optional pod creation due to configMap object does not exist [Slow]" }, - { "testcase": "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]" }, - { "testcase": "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow]" }, - { "testcase": "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]" }, - { "testcase": "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow]" }, - { "testcase": "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]" }, - { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted" }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] ReplicationController light Should scale from 2 pods to 1 pod" }, - { "testcase": "[k8s.io] Pods should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]" }, - { "testcase": "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent" }, - { "testcase": "[k8s.io] Pods should cap back-off at MaxContainerBackOff [Slow][NodeConformance]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: 
csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]" }, - { "testcase": 
"[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using 
directory as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]" }, - { "testcase": "[sig-storage] In-tree 
Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]" }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC" }, - { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] ReplicationController light Should scale from 1 pod to 2 pods" }, - { "testcase": "[sig-storage] CSI Volumes CSI attach test using HostPath driver [Feature:CSIDriverRegistry] volume with no CSI driver needs VolumeAttachment" }, - { "testcase": "[sig-storage] 
CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should be mountable" }, - { "testcase": "[sig-network] DNS should provide DNS for ExternalName services" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] volumes should be mountable" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC" }, - { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources" }, - { "testcase": "[sig-apps] CronJob should schedule multiple jobs concurrently" }, - { "testcase": "[sig-apps] CronJob should replace jobs when ReplaceConcurrent" }, - { "testcase": "[sig-apps] CronJob should not emit unexpected warnings" }, - { "testcase": "[k8s.io] [Feature:Example] [k8s.io] Liveness liveness pods should be automatically restarted" }, - { "testcase": "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job" }, - { "testcase": "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP" }, - { "testcase": "[sig-network] NetworkPolicy NetworkPolicy between server and client should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]" }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Conformance]" }, - { "testcase": "[k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]" }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Conformance]" }, - { 
"testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]" }, - { "testcase": "[k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]" }, - { "testcase": "[k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [Conformance]" }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]" }, - { "testcase": "[sig-cli] Kubectl client [k8s.io] Guestbook application should create and stop a working application [Conformance]" }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-apps] StatefulSet [k8s.io] Deploy clustered applications [Feature:StatefulSet] [Slow] should creating a working CockroachDB cluster", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod should avoid nodes that have avoidPod annotation", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate", "exclude": [ "alicloud" ] }, - { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms", "exclude": [ "alicloud" ] }, - { 
"testcase": "[sig-network] Services should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", "only": [ "gcp", "openstack" ] }, - { "testcase": "[sig-network] Services should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", "only": [ "gcp", "openstack" ] }, - { "testcase": "[sig-network] Services should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", "only": [ "gcp", "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: 
Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] 
In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "only": [ "openstack" ] }, - { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume", "only": [ "openstack" ] } -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.14/e2e_example.json b/test/e2etest/kubetest/description/1.14/e2e_example.json deleted file mode 100644 index 0872bd097b..0000000000 --- a/test/e2etest/kubetest/description/1.14/e2e_example.json +++ /dev/null @@ -1,5 +0,0 @@ -[ - { "testcase": "[sig-network] Proxy version v1 should proxy through a service and a pod [Conformance]"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support pod.Spec.SecurityContext.SupplementalGroups"}, - { "testcase": "[k8s.io] [sig-node] Security Context [Feature:SecurityContext] should support seccomp alpha runtime/default annotation [Feature:Seccomp]"} -] \ No newline at end of file diff --git a/test/e2etest/kubetest/description/1.14/working.json b/test/e2etest/kubetest/description/1.14/working.json new file mode 100644 index 0000000000..3ee3209dac --- /dev/null +++ b/test/e2etest/kubetest/description/1.14/working.json @@ -0,0 +1,411 @@ +[ + { "testcase": "[Conformance]", "focus": "Serial|Slow", "groups": ["slow", "conformance"]}, + { "testcase": "[Conformance]", "skip": "Serial|Slow", "groups": ["fast", "conformance"]}, + { "testcase": "[k8s.io] [sig-node] kubelet [k8s.io] [sig-node] Clean up pods on node kubelet should be able to delete 10 pods per node in 1m0s.", "groups": ["fast"]}, + { "testcase": "[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", "groups": ["fast"]}, + 
{ "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should not launch unsafe, but not explicitly enabled sysctls on the node", "groups": ["fast"]}, + { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should reject invalid sysctls", "groups": ["fast"]}, + { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should support sysctls", "groups": ["fast"]}, + { "testcase": "[k8s.io] Sysctls [NodeFeature:Sysctls] should support unsafe sysctls which are actually whitelisted", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply apply set/view last-applied", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply should apply a new configuration to an existing RC", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl apply should reuse port when apply to an existing SVC", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl cluster-info dump should check if cluster-info dump succeeds", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl copy should copy a file from a running Pod", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should create a quota with scopes", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should create a quota without scopes", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl create quota should reject quota with invalid scopes", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl run CronJob should create a CronJob", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should return command exit codes", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec through an HTTP 
proxy", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support exec through kubectl proxy", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support port-forward", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects a client request should support a client that connects, sends DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects a client request should support a client that connects, sends NO DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 [k8s.io] that expects NO client request should support a client that connects, sends DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on 0.0.0.0 should support forwarding over websockets", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects a client request should support a client that connects, sends DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects a client request should support a client that connects, sends NO DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost [k8s.io] that expects NO client request should support a client that connects, sends DATA, and disconnects", "groups": ["fast"]}, + { "testcase": "[sig-cli] Kubectl Port forwarding [k8s.io] With a server listening on localhost should support forwarding over websockets", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota 
should create a ResourceQuota and capture the life of a persistent volume claim with a storage class. [sig-storage]", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a persistent volume claim. [sig-storage]", "groups": ["fast"]}, + { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Downward API volume should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] files with FSGroup ownership should support (root,0644,tmpfs)", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is non-root", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] new files should be created with FSGroup ownership when container is root", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] nonexistent volume subPath should have the correct mode and owner using FSGroup", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] volume on default medium should 
have the correct mode using FSGroup", "groups": ["fast"]}, + { "testcase": "[sig-storage] EmptyDir volumes when FSGroup is specified [NodeFeature:FSGroup] volume on tmpfs should have the correct mode using FSGroup", "groups": ["fast"]}, + { "testcase": "[sig-storage] HostPath should support r/w [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[sig-storage] HostPath should support subPath [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: block] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": 
"[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: blockfswithoutformat] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1", 
"groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link-bindmounted] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", 
"groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir-link] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume at the same time should be able to 
write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: dir] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and read from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] One pod requesting one prebound PVC should be able to mount volume and write from pod1", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set fsGroup for one pod", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set same fsGroup for two pods simultaneously", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume at the same time should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Two pods mounting a local volume one after the other should be able to write from pod1 and read from pod2", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup 
[NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected downwardAPI should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", "groups": ["fast"]}, + { "testcase": "[sig-storage] Projected secret should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull from private registry with secret [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull image from docker hub [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should be able to pull image from gcr.io [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull from private registry without secret [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull image from invalid registry [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] Container Runtime blackbox test when running a container with a new image should not be able to pull non-existing image from gcr.io [NodeConformance]", "groups": ["fast"]}, + { "testcase": "[k8s.io] PrivilegedPod [NodeConformance] should enable privileged commands", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] CustomResourceDefinition Watch CustomResourceDefinition Watch watch on custom resource definition objects", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Garbage collector should delete jobs and pods created by cronjob", "groups": ["fast"]}, + { "testcase": 
"[sig-api-machinery] Garbage collector should orphan pods created by rc if deleteOptions.OrphanDependents is nil", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Garbage collector should support cascading deletion of custom resources", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Garbage collector should support orphan deletion of custom resources", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Generated clientset should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Generated clientset should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Secrets should fail to create secret in volume due to empty secret key", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Servers with support for API chunking should return chunks of results for list calls", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return a 406 for a backend which does not implement metadata", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return chunks of table results for list calls", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return generic metadata details across all namespaces for nodes", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Servers with support for Table transformation should return pod details", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] Watchers should receive events on concurrent watches in same order", "groups": ["fast"]}, + { "testcase": "[sig-apps] CronJob should remove from active list jobs that have been deleted", "groups": ["fast"]}, + { "testcase": "[sig-apps] Deployment deployment reaping should cascade to its replica sets and pods", "groups": ["fast"]}, + 
{ "testcase": "[sig-apps] Deployment deployment should support rollback", "groups": ["fast"]}, + { "testcase": "[sig-apps] Deployment iterative rollouts should eventually progress", "groups": ["fast"]}, + { "testcase": "[sig-apps] Deployment test Deployment ReplicaSet orphaning and adoption regarding controllerRef", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController should create a PodDisruptionBudget", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController should update PodDisruptionBudget status", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should adopt matching orphans and release non-matching pods", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should delete a job", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should exceed active deadline", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should exceed backoffLimit", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should run a job to completion when tasks 
sometimes fail and are locally restarted", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should run a job to completion when tasks sometimes fail and are not locally restarted", "groups": ["fast"]}, + { "testcase": "[sig-apps] Job should run a job to completion when tasks succeed", "groups": ["fast"]}, + { "testcase": "[sig-apps] ReplicaSet should surface a failure condition on a common issue like exceeded quota", "groups": ["fast"]}, + { "testcase": "[sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota", "groups": ["fast"]}, + { "testcase": "[sig-auth] Certificates API should support building a client with a CSR", "groups": ["fast"]}, + { "testcase": "[sig-auth] PodSecurityPolicy should forbid pod creation when no PSP is available", "groups": ["fast"]}, + { "testcase": "[sig-auth] ServiceAccounts should ensure a single API token exists", "groups": ["fast"]}, + { "testcase": "[sig-instrumentation] Cadvisor should be healthy on every node.", "groups": ["fast"]}, + { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from API server.", "groups": ["fast"]}, + { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a ControllerManager.", "groups": ["fast"]}, + { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Kubelet.", "groups": ["fast"]}, + { "testcase": "[sig-instrumentation] MetricsGrabber should grab all metrics from a Scheduler.", "groups": ["fast"]}, + { "testcase": "[sig-network] DNS should provide DNS for pods for Hostname and Subdomain", "groups": ["fast"]}, + { "testcase": "[sig-network] DNS should support configurable pod resolv.conf", "groups": ["fast"]}, + { "testcase": "[sig-network] Networking should provide unchanging, static URL paths for kubernetes api services", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to change the type from ClusterIP to ExternalName", "groups": ["fast"]}, + 
{ "testcase": "[sig-network] Services should be able to change the type from ExternalName to ClusterIP", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to change the type from ExternalName to NodePort", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to change the type from NodePort to ExternalName", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to switch session affinity for NodePort service", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to switch session affinity for service with type clusterIP", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should be able to update NodePorts with two same port numbers but different protocols", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should check NodePort out-of-range", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should create endpoints for unready pods", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should have session affinity work for NodePort service", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should have session affinity work for service with type clusterIP", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should prevent NodePort collisions", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should release NodePorts on delete", "groups": ["fast"]}, + { "testcase": "[sig-network] Services should use same NodePort with same port but different protocols", "groups": ["fast"]}, + { "testcase": "[sig-scheduling] LimitRange should create a LimitRange with defaults and ensure pod has those defaults applied.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a configMap.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod.", "groups": ["fast"]}, + { 
"testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should create a ResourceQuota and ensure its status is promptly calculated.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope.", "groups": ["fast"]}, + { "testcase": "[sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes.", "groups": ["fast"]}, + { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner allowedTopologies should create persistent volume in the zone specified in allowedTopologies of storageclass", "groups": ["fast"]}, + { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : configmap", "groups": ["fast"]}, + { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : projected", "groups": ["fast"]}, + { "testcase": "[sig-storage] Ephemeralstorage When pod refers to non-existent ephemeral storage should allow deletion of pod with invalid volume : secret", "groups": ["fast"]}, + { "testcase": "[sig-storage] PV Protection Verify that PV bound to a PVC is not removed immediately", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS when invoking the Recycle reclaim policy should test that a PV becomes Available and is clean after 
the PVC is deleted.", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PV and a pre-bound PVC: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and a pre-bound PV: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs create a PVC and non-pre-bound PV: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with Single PV - PVC pairs should create a non-pre-bound PV and PVC: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 2 PVs and 4 PVCs: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 3 PVs and 3 PVCs: test write access", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeAffinity", "groups": ["fast"]}, + { "testcase": "[sig-storage] PersistentVolumes-local Pod with node different from PV's NodeAffinity should fail scheduling due to different NodeSelector", "groups": ["fast"]}, + { "testcase": "[sig-storage] Volumes ConfigMap should be mountable", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with defaults", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when 
readOnly specified in the volumeSource", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] provisioning should provision storage with defaults", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource", "groups": ["fast"]}, + { 
"testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing directory", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support existing single file", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support file as subpath", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support non-existent path", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing directory", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support existing single file", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree 
Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing directory", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support existing single file", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] 
volumes should be mountable", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing directory", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support existing single file", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"]}, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumes should be mountable", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: enough pods, absolute => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: enough pods, replicaSet, percentage => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: maxUnavailable allow single eviction, percentage => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] 
DisruptionController evictions: maxUnavailable deny evictions, integer => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: no PDB => should allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: too few pods, absolute => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] DisruptionController evictions: too few pods, replicaSet, percentage => should not allow an eviction", "groups": ["fast"]}, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should adopt matching orphans and release non-matching pods", "groups": ["fast"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-storage] PVC Protection Verify that PVC in active use by a pod is not removed immediately", "groups": ["fast"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-storage] PVC Protection Verify that scheduling of a pod that uses PVC that is being deleted fails and the pod becomes Unschedulable", "groups": ["fast"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Simple pod should support inline execution and attach", "groups": ["fast"], "exclude": [ "aws" ] }, + { "testcase": "[k8s.io] Pods should support pod readiness gates [NodeFeature:PodReadinessGate]", "groups": ["fast"], "only": [ "gcp", "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing directories when readOnly specified in the volumeSource", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] 
[Testpattern: Inline-volume (default fs)] subPath should support existing directory", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support existing single file", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support file as subpath", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support non-existent path", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should allow exec of files on the volume", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumes should be mountable", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should be able to unmount after the subpath directory is deleted", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] 
[Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directories when readOnly specified in the volumeSource", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing directory", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support existing single file", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support file as subpath", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support non-existent path", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly directory specified in the volumeMount", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support readOnly file specified in the volumeMount", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should allow exec of files on the volume", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumes should be mountable", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] 
In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", "groups": ["fast"], "only": [ "openstack" ] }, + { "testcase": "[k8s.io] [sig-node] Kubelet [Serial] [Slow] [k8s.io] [sig-node] regular resource usage tracking resource tracking for 100 pods per node", "groups": ["slow"] }, + { "testcase": "[k8s.io] EquivalenceCache [Serial] validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", "groups": ["slow"] }, + { "testcase": "[k8s.io] EquivalenceCache [Serial] validates pod affinity works properly when new replica pod is scheduled", "groups": ["slow"] }, + { "testcase": "[k8s.io] EquivalenceCache [Serial] validates pod anti-affinity works properly when new replica pod is scheduled", "groups": ["slow"] }, + { "testcase": "[sig-api-machinery] Namespaces [Serial] should delete fast enough (90 percent of 100 namespaces in 150 seconds)", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should not schedule jobs when suspended [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should not schedule new jobs when ForbidConcurrent [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-apps] Daemon set [Serial] should not update pod when spec was updated and update strategy is OnDelete", "groups": ["slow"] }, + { "testcase": "[sig-apps] Daemon set [Serial] should run and stop complex daemon with node affinity", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] Deployment Should scale from 1 pod to 3 pods and from 3 to 5", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] Deployment Should scale from 5 pods to 3 pods and from 3 to 1", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] 
[HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 1 pod to 3 pods and from 3 to 5", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicaSet Should scale from 5 pods to 3 pods and from 3 to 1", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicationController Should scale from 1 pod to 3 pods and from 3 to 5 and verify decision stability", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] [Serial] [Slow] ReplicationController Should scale from 5 pods to 3 pods and from 3 to 1 and verify decision stability", "groups": ["slow"] }, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl taint [Serial] should remove all the taints with the same key off a node", "groups": ["slow"] }, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Kubectl taint [Serial] should update the taint on a node", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Multiple Pods [Serial] evicts pods with minTolerationSeconds", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Multiple Pods [Serial] only evicts pods without tolerations from tainted nodes", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] doesn't evict pod with tolerations from tainted nodes", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] eventually evict pod with finite tolerations from tainted nodes", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] evicts pods from tainted nodes", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] 
removing taint cancels eviction", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] PodPriorityResolution [Serial] validates critical system priorities are created and resolved", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates MaxPods limit number of pods that are allowed to run [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that NodeAffinity is respected if not matching", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that required NodeAffinity setting is respected if matching", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if matching", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that taints-tolerations is respected if not matching", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPredicates [Serial] validates that there is no conflict between pods with same hostPort but different hostIP and protocol", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates basic preemption works", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates lower priority pod preemption by critical pod", "groups": ["slow"] }, + { "testcase": "[sig-scheduling] SchedulerPreemption [Serial] validates pod anti-affinity works in preemption", "groups": ["slow"] }, + { "testcase": "[sig-storage] ConfigMap Should fail non-optional pod creation due to configMap object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] ConfigMap Should fail non-optional pod creation due to the key 
in the configMap object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner [Slow] should provision storage with different parameters", "groups": ["slow"] }, + { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner delayed binding [Slow] should create persistent volumes in the same zone as node after a pod mounting the claims is started", "groups": ["slow"] }, + { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner delayed binding with allowedTopologies [Slow] should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", "groups": ["slow"] }, + { "testcase": "[sig-storage] Dynamic Provisioning DynamicProvisioner External should let an external dynamic provisioner create and delete persistent volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] EmptyDir wrapper volumes should not cause race condition when used for git_repo [Serial] [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes NFS with multiple PVs and PVCs all in same ns should create 4 PVs and 2 PVCs: test write access [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to non-existent path", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes-local Local volume that cannot be mounted [Slow] should fail due to wrong node", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod has affinity", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes-local StatefulSet with pod affinity [Slow] should use volumes on one node when pod management is parallel and pod has affinity", "groups": ["slow"] }, + { "testcase": "[sig-storage] Projected configMap Should fail non-optional pod creation 
due to configMap object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Projected configMap Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Projected secret Should fail non-optional pod creation due to secret object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Projected secret Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Secrets Should fail non-optional pod creation due to secret object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] Secrets Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] PersistentVolumes-local [Volume type: tmpfs] Set fsGroup for local volume should set different fsGroup for second pod if first pod is deleted", "groups": ["slow"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should implement legacy replacement when the update strategy is OnDelete", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] ReplicationController light Should scale from 2 pods to 1 pod", "groups": ["slow"] }, + { "testcase": "[k8s.io] Pods should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", "groups": ["slow"] }, + { "testcase": "[sig-api-machinery] Servers with support for API chunking should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", "groups": ["slow"] }, + { "testcase": "[k8s.io] Pods should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes 
[Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": 
["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping 
is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: emptydir] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes 
[Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPathSymlink] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using 
file as subpath [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: hostPath] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (block volmode)] volumeMode should fail in binding dynamic provisioned PV to PVC", "groups": ["slow"] }, + { "testcase": "[sig-autoscaling] [HPA] Horizontal pod autoscaling (scale resource: CPU) [sig-autoscaling] ReplicationController light Should scale from 1 pod to 2 pods", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (default fs)] volumes should be mountable", "groups": ["slow"] }, + { "testcase": "[sig-network] DNS should provide DNS for ExternalName services", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath-v0] [Testpattern: Dynamic PV (default fs)] volumes should be mountable", "groups": ["slow"] }, + { "testcase": "[sig-storage] CSI Volumes [Driver: csi-hostpath] [Testpattern: Dynamic PV (filesystem volmode)] volumeMode should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should schedule multiple jobs concurrently", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should replace jobs when ReplaceConcurrent", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should not emit unexpected warnings", "groups": ["slow"] }, + { "testcase": "[sig-apps] CronJob should delete successful finished jobs with limit of one successful job", "groups": ["slow"] }, + { "testcase": "[sig-network] Services should preserve source pod IP for traffic thru service cluster IP", "groups": ["slow"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should 
happen in predictable order and halt if any stateful pod is unhealthy [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[sig-cli] Kubectl client [k8s.io] Guestbook application should create and stop a working application [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform canary updates and phased rolling updates of template modifications [Conformance]", "groups": ["slow", "conformance"] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should not deadlock when a pod's predecessor fails", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications with PVCs", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should provide basic identity", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod 
should avoid nodes that have avoidPod annotation", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be preferably scheduled to nodes pod can tolerate", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-scheduling] SchedulerPriorities [Serial] Pod should be scheduled to node that don't match the PodAntiAffinity terms", "groups": ["slow"], "exclude": [ "alicloud" ] }, + { "testcase": "[sig-network] Services should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", "groups": ["slow"], "only": [ "gcp", "openstack" ] }, + { "testcase": "[sig-network] Services should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", "groups": ["slow"], "only": [ "gcp", "openstack" ] }, + { "testcase": "[sig-network] Services should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", "groups": ["slow"], "only": [ "gcp", "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath file is outside the volume 
[Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support creating multiple subpath from same volumes [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Inline-volume (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if non-existent subpath is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath directory is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath file is outside the volume [Slow]", "groups": ["slow"], 
"only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should fail if subpath with backstepping is outside the volume [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using directory as subpath [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] subPath should support restarting containers using file as subpath [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (default fs)] volumeIO should write files of various sizes, verify size, validate content [Slow]", "groups": ["slow"], "only": [ "openstack" ] }, + { "testcase": "[sig-storage] In-tree Volumes [Driver: ceph][Feature:Volumes] [Testpattern: Pre-provisioned PV (block volmode)] volumeMode should fail to create pod by failing to mount volume", "groups": ["slow"], "only": [ "openstack" ] } +] \ No newline at end of file diff --git a/test/e2etest/kubetest/kubetest_runner.go b/test/e2etest/kubetest/kubetest_runner.go index 96c8de84f0..1e2ca1f7b4 100644 --- a/test/e2etest/kubetest/kubetest_runner.go +++ b/test/e2etest/kubetest/kubetest_runner.go @@ -115,10 +115,14 @@ func runKubetest(args KubetestArgs) { var out bytes.Buffer var stderr bytes.Buffer cmd.Stdout = &out - cmd.Stderr = &stderr + if log.GetLevel() == log.DebugLevel { + cmd.Stderr = &stderr + } err := cmd.Run() - log.Info(fmt.Printf(stderr.String())) + if log.GetLevel() == log.DebugLevel { + log.Info(fmt.Printf(stderr.String())) + } // Output our results if out.String() != "" { diff --git a/test/e2etest/kubetest/publisher.go 
b/test/e2etest/kubetest/publisher.go index 005692b2ab..4d8d9c92fc 100644 --- a/test/e2etest/kubetest/publisher.go +++ b/test/e2etest/kubetest/publisher.go @@ -19,8 +19,9 @@ const ( gcsProjectID = "gardener" ) +// Publish creates meta files finished.json, started.json in kubetestResultsPath path and uploads them +// and additionally e2e.log and junit_01.xml files to the google cloud storage func Publish(kubetestResultsPath string, resultSummary Summary) { - kubetestResultsPath = getDirectResultsDir(kubetestResultsPath) files := make([]string, 0) finishedJsonPath := filepath.Join(kubetestResultsPath, "finished.json") startedJsonPath := filepath.Join(kubetestResultsPath, "started.json") @@ -31,27 +32,19 @@ func Publish(kubetestResultsPath string, resultSummary Summary) { startedJsonPath, ) createMetadataFiles(startedJsonPath, finishedJsonPath, resultSummary) + log.Infof("Publish to google cloud storage: %v", files) uploadTestResultFiles(files) } -func getDirectResultsDir(generalResultsPath string) string { - filesInResultPath, err := ioutil.ReadDir(generalResultsPath) - if err != nil { - log.Fatal(err) - } - generalResultsPath = filepath.Join(generalResultsPath, filesInResultPath[0].Name()) - return generalResultsPath -} - func createMetadataFiles(startedJsonPath, finishedJsonPath string, testSummary Summary) { startedJsonContent := []byte(fmt.Sprintf("{\"timestamp\": %d}", testSummary.StartTime.Unix())) if err := ioutil.WriteFile(startedJsonPath, startedJsonContent, 06444); err != nil { log.Fatal(err) } - testStatus := "FAILURE" + testStatus := "Failure" if testSummary.TestsuiteSuccessful { - testStatus = "SUCCESS" + testStatus = "Success" } finishedJsonContent := []byte(fmt.Sprintf("{\"timestamp\": %d, \"result\": \"%s\", \"metadata\": {\"shoot-k8s-release\": \"%s\", \"gardener\": \"%s\"}}", testSummary.FinishedTime.Unix(), testStatus, config.K8sRelease, config.GardenerVersion)) if err := ioutil.WriteFile(finishedJsonPath, finishedJsonContent, 06444); err != nil { 
@@ -82,7 +75,11 @@ func uploadTestResultFiles(files []string) { if filepath.Base(fileSourcePath) == "junit_01.xml" { filename = filepath.Join("artifacts", filename) } - fileTargetPath := fmt.Sprintf("ci-gardener-e2e-conformance-%s-v%s/%s/%s", provider, config.K8sReleaseMajorMinor, timestamp, filename) + bucketSuffix := "" + if len(config.TestcaseGroup) == 1 && config.TestcaseGroup[0] == "conformance" { + bucketSuffix = "-conformance" + } + fileTargetPath := fmt.Sprintf("ci-gardener-e2e%s-%s-v%s/%s/%s", bucketSuffix, provider, config.K8sReleaseMajorMinor, timestamp, filename) if err := upload(client, gcsBucket, fileSourcePath, fileTargetPath); err != nil { switch err { diff --git a/test/e2etest/kubetest/results_evaluator.go b/test/e2etest/kubetest/results_evaluator.go index 6b074fee36..188b779a3b 100644 --- a/test/e2etest/kubetest/results_evaluator.go +++ b/test/e2etest/kubetest/results_evaluator.go @@ -3,16 +3,20 @@ package kubetest import ( "bufio" "encoding/json" + "encoding/xml" "fmt" "github.com/gardener/test-infra/test/e2etest/config" "github.com/gardener/test-infra/test/e2etest/util" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "io" "io/ioutil" "os" "path" + "path/filepath" "regexp" "strconv" + "strings" "time" ) @@ -20,18 +24,26 @@ const ( E2eLogFileNamePattern = "e2e.log$" JunitXmlFileNamePattern = `junit_\d+.xml$` TestSummaryFileName = "test_summary.json" + MergedJunitXmlFile = "junit_01.xml" + MergedE2eLogFile = "e2e.log" ) +var mergedJunitXmlFilePath = filepath.Join(config.ExportPath, MergedJunitXmlFile) +var MergedE2eLogFilePath = filepath.Join(config.ExportPath, MergedE2eLogFile) + // Analyze analyzes junit.xml files and e2e.log files, which are dumped by kubetest and provides a resulting test suite summary and results for each testcase individually. These results are then written to the export dir as files. 
func Analyze(kubetestResultsPath string) Summary { + log.Info("Analyze e2e.log and junit.xml files") e2eLogFilePaths := util.GetFilesByPattern(kubetestResultsPath, E2eLogFileNamePattern) summary := analyzeE2eLogs(e2eLogFilePaths) junitXMLFilePaths := util.GetFilesByPattern(kubetestResultsPath, JunitXmlFileNamePattern) - analyzeJunitXMLs(junitXMLFilePaths) + analyzeJunitXMLs(junitXMLFilePaths, summary.TestsuiteDuration) return summary } -func analyzeJunitXMLs(junitXMLFilePaths []string) { +func analyzeJunitXMLs(junitXMLFilePaths []string, durationSec int) { + var mergedJunitXmlResult = &JunitXMLResult{FailedTests: 0, ExecutedTests: 0, DurationFloat: 0, SuccessfulTests: 0, DurationInt: durationSec} + testcaseNameToTestcase := make(map[string]TestcaseResult) for _, junitXMLPath := range junitXMLFilePaths { file, err := os.Open(junitXMLPath) if err != nil { @@ -42,10 +54,17 @@ func analyzeJunitXMLs(junitXMLFilePaths []string) { if err != nil { log.Fatal(errors.Wrapf(err, "Couldn't unmarshal %s", file.Name())) } + mergedJunitXmlResult.FailedTests += junitXml.FailedTests + mergedJunitXmlResult.ExecutedTests += junitXml.ExecutedTests + mergedJunitXmlResult.SuccessfulTests += junitXml.SuccessfulTests for _, testcase := range junitXml.Testcases { if testcase.Skipped { + if _, ok := testcaseNameToTestcase[testcase.Name]; !ok { + testcaseNameToTestcase[testcase.Name] = testcase + } continue } + testcaseNameToTestcase[testcase.Name] = testcase testcaseJSON, err := json.MarshalIndent(testcase, "", " ") if err != nil { log.Fatal(errors.Wrapf(err, "Couldn't marshal testsuite summary %s", testcaseJSON)) @@ -58,6 +77,24 @@ func analyzeJunitXMLs(junitXMLFilePaths []string) { } } } + for _, testcase := range testcaseNameToTestcase { + mergedJunitXmlResult.Testcases = append(mergedJunitXmlResult.Testcases, testcase) + } + saveJunitXmlToFile(mergedJunitXmlResult) +} + +func saveJunitXmlToFile(mergedJunitXmlResult *JunitXMLResult) { + output, err := 
xml.MarshalIndent(mergedJunitXmlResult, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + output = append([]byte(xml.Header), output...) + + file, _ := os.Create(mergedJunitXmlFilePath) + defer file.Close() + if _, err = file.Write(output); err != nil { + log.Fatal(err) + } } func analyzeE2eLogs(e2eLogFilePaths []string) Summary { @@ -89,6 +126,7 @@ func analyzeE2eLogs(e2eLogFilePaths []string) Summary { summary.Flaked = summary.FlakedTestcases != 0 } + summary.ExecutionGroup = strings.Join(config.TestcaseGroup, ",") summary.FinishedTime = time.Now() summary.StartTime = summary.FinishedTime.Add(time.Second * time.Duration(-summary.TestsuiteDuration)) file, err := json.MarshalIndent(summary, "", " ") @@ -101,9 +139,28 @@ func analyzeE2eLogs(e2eLogFilePaths []string) Summary { if err := ioutil.WriteFile(summaryFilePath, file, 0644); err != nil { log.Fatal(errors.Wrapf(err, "Couldn't write %s to file", summaryFilePath)) } + + mergeE2eLogFiles(MergedE2eLogFilePath, e2eLogFilePaths) return summary } +func mergeE2eLogFiles(dst string, e2eLogFilePaths []string) { + resultFile, _ := os.Create(dst) + + for _, e2eLogFile := range e2eLogFilePaths { + fileToAppend, err := os.Open(e2eLogFile) + if err != nil { + log.Fatalf("failed to open file %s for reading: %v", e2eLogFile, err) + } + defer fileToAppend.Close() + + if _, err := io.Copy(resultFile, fileToAppend); err != nil { + log.Fatalf("failed to append file %s to file %s: %v", e2eLogFile, dst, err) + } + } + log.Infof("Merged %d e2e log files to %s", len(e2eLogFilePaths), dst) +} + type Summary struct { ExecutedTestcases int `json:"executed_testcases"` SuccessfulTestcases int `json:"successful_testcases"` @@ -115,4 +172,5 @@ type Summary struct { DescriptionFile string `json:"test_desc_file"` StartTime time.Time `json:"-"` FinishedTime time.Time `json:"-"` + ExecutionGroup string `json:"execution_group"` } diff --git a/test/e2etest/kubetest/xml_junit_result.go
b/test/e2etest/kubetest/xml_junit_result.go index 2ea8eecead..cc36d1190d 100644 --- a/test/e2etest/kubetest/xml_junit_result.go +++ b/test/e2etest/kubetest/xml_junit_result.go @@ -4,53 +4,58 @@ import ( "encoding/xml" "github.com/gardener/test-infra/test/e2etest/config" "regexp" + "strings" ) -func (result JunitXMLResult) CalculateAdditionalFields() { +func (result *JunitXMLResult) CalculateAdditionalFields() { result.SuccessfulTests = result.ExecutedTests - result.FailedTests result.DurationInt = int(result.DurationFloat) regexpSigGroup := regexp.MustCompile(`^\[.*?]`) for i, _ := range result.Testcases { - result.Testcases[i] = result.Testcases[i].calculateAdditionalFields(regexpSigGroup) + result.Testcases[i].calculateAdditionalFields(regexpSigGroup) // TODO: why is here result.Testcases[i].calculateAdditionalFields(regexpSigGroup) not possible, why is assigning to variable is required? } } -func (testcase TestcaseResult) calculateAdditionalFields(regexpSigGroup *regexp.Regexp) TestcaseResult { +func (testcase *TestcaseResult) calculateAdditionalFields(regexpSigGroup *regexp.Regexp) { testcase.SigGroup = regexpSigGroup.FindString(testcase.Name) if testcase.SkippedRaw != nil { testcase.Skipped = true } if testcase.FailureText == "" { - testcase.Status = SUCCESS + testcase.Status = Success + testcase.Successful = true } else { - testcase.Status = FAILURE + testcase.Status = Failure + testcase.Successful = false } testcase.DurationInt = int(testcase.DurationFloat) testcase.TestDesc = config.DescriptionFile - return testcase + testcase.ExecutionGroup = strings.Join(config.TestcaseGroup, ",") } type JunitXMLResult struct { - XMLName xml.Name `xml:"testsuite"` - ExecutedTests int `xml:"tests,attr"` - FailedTests int `xml:"failures,attr"` - DurationFloat float32 `xml:"time,attr"` - DurationInt int + XMLName xml.Name `xml:"testsuite"` + ExecutedTests int `xml:"tests,attr"` + FailedTests int `xml:"failures,attr"` + DurationFloat float32 `xml:"time,attr"` + DurationInt 
int `xml:"-"` Testcases []TestcaseResult `xml:"testcase"` - SuccessfulTests int + SuccessfulTests int `xml:"-"` } type TestcaseResult struct { - XMLName xml.Name `xml:"testcase" json:"-"` - Name string `xml:"name,attr" json:"name"` - Status string `json:"status" json:"-"` - SkippedRaw *struct{} `xml:"skipped" json:"-"` - Skipped bool `json:"-"` - FailureText string `xml:"failure" json:"failure.text,omitempty"` - SystemOutput string `xml:"system-out" json:"system-out,omitempty"` - DurationFloat float32 `xml:"time,attr" json:"-"` - DurationInt int `json:"duration"` - SigGroup string `json:"sig"` - TestDesc string `json:"test_desc_file"` + XMLName xml.Name `xml:"testcase" json:"-"` + Name string `xml:"name,attr" json:"name"` + Status string `xml:"-" json:"status"` + SkippedRaw *struct{} `xml:"skipped" json:"-"` + Skipped bool `xml:"-" json:"-"` + FailureText string `xml:"failure,omitempty" json:"failure.text,omitempty"` + SystemOutput string `xml:"system-out,omitempty" json:"system-out,omitempty"` + DurationFloat float32 `xml:"time,attr" json:"-"` + DurationInt int `xml:"-" json:"duration"` + SigGroup string `xml:"-" json:"sig"` + TestDesc string `xml:"-" json:"test_desc_file"` + ExecutionGroup string `xml:"-" json:"execution_group"` + Successful bool `xml:"-" json:"successful"` } diff --git a/test/e2etest/main.go b/test/e2etest/main.go index 370d4a8dd3..cb0f7c1af9 100644 --- a/test/e2etest/main.go +++ b/test/e2etest/main.go @@ -15,7 +15,7 @@ func main() { desc := kubetest.Generate() kubetestResultsPath := kubetest.Run(desc) resultSummary := kubetest.Analyze(kubetestResultsPath) - if config.GinkgoParallel == false && config.PublishResultsToTestgrid == true && resultSummary.TestsuiteSuccessful == true && config.DescriptionFile == "conformance.json" { - kubetest.Publish(kubetestResultsPath, resultSummary) + if config.PublishResultsToTestgrid == true && resultSummary.TestsuiteSuccessful == true { + kubetest.Publish(config.ExportPath, resultSummary) } } diff --git 
a/test/e2etest/util/sets/string.go b/test/e2etest/util/sets/string.go index 7b89dc9f79..e105169802 100644 --- a/test/e2etest/util/sets/string.go +++ b/test/e2etest/util/sets/string.go @@ -1,6 +1,11 @@ package sets import ( + "fmt" + "log" + "os" + "regexp" + "sort" "strings" ) @@ -24,30 +29,70 @@ func (s StringSet) Insert(items ...string) { } } -func (s StringSet) GetSetOfMatching(substrings StringSet) StringSet { +func (s StringSet) GetMatchingOfSet(needles StringSet) StringSet { matchedItems := NewStringSet() - for substring := range substrings { - for mapItem := range s { - if strings.Contains(mapItem, substring) { - matchedItems.Insert(mapItem) + for hayItem := range s { + for needle := range needles { + if strings.Contains(hayItem, needle) { + matchedItems.Insert(hayItem) } } } return matchedItems } -func (s StringSet) GetMatching(substring string) string { +func (s StringSet) GetMatching(substring string) StringSet { + matches := NewStringSet() for mapItem := range s { if strings.Contains(mapItem, substring) { - return mapItem + matches.Insert(mapItem) + } + } + return matches +} + +func (s StringSet) GetMatchingForTestcase(testcaseName, skip, focus string) StringSet { + matches := NewStringSet() + var skipRegex *regexp.Regexp + var focusRegex *regexp.Regexp + if skip != "" { + skipRegex = regexp.MustCompile(skip) + } + if focus != "" { + focusRegex = regexp.MustCompile(focus) + } + for mapItem := range s { + if strings.Contains(mapItem, testcaseName) { + if skip == "" && focus == "" { + matches.Insert(mapItem) + continue + } + if skipRegex != nil && !skipRegex.MatchString(mapItem) { + matches.Insert(mapItem) + continue + } + if focusRegex != nil && focusRegex.MatchString(mapItem) { + matches.Insert(mapItem) + continue + } + } + } + return matches +} + +func (s StringSet) DeleteMatchingSet(needles StringSet) { + for needle := range needles { + for match := range s.GetMatching(needle) { + s.Delete(match) } } - return "" } func (s StringSet) 
DeleteMatching(items ...string) { for _, item := range items { - s.Delete(s.GetMatching(item)) + for match := range s.GetMatching(item) { + s.Delete(match) + } } } @@ -75,6 +120,22 @@ func (s1 StringSet) Union(s2 StringSet) StringSet { return result } +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s StringSet) Difference(s2 StringSet) StringSet { + result := NewStringSet() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + type sortableSliceOfString []string func (s sortableSliceOfString) Len() int { return len(s) } @@ -110,11 +171,27 @@ func (s StringSet) PopAny() (string, bool) { return zeroValue, false } +// Has returns true if and only if item is contained in the set. +func (s StringSet) Has(item string) bool { + _, contained := s[item] + return contained +} + // Len returns the size of the set. func (s StringSet) Len() int { return len(s) } +func (s StringSet) WriteToFile(filepath string) { + file, err := os.Create(filepath) + if err != nil { + log.Fatal(err) + } + for item := range s { + fmt.Fprintln(file, item) + } +} + func lessString(lhs, rhs string) bool { return lhs < rhs }