Merge pull request kubernetes#108293 from chendave/upstream_gopath
e2e: Embed DNS-related manifests into codebase
k8s-ci-robot authored Feb 24, 2022
2 parents 016b96c + cee132b commit 7588a82
Showing 5 changed files with 72 additions and 44 deletions.
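
For context, the pattern this commit adopts is Go's //go:embed directive: the manifest YAML files are compiled into the test binary and read from an embedded filesystem instead of being resolved under $GOPATH at run time. A minimal, self-contained sketch of that pattern follows; the package, variable, and function names are illustrative assumptions, not the identifiers used by the Kubernetes test framework.

package manifests

import (
	"embed"
	"fmt"
)

// The //go:embed directive bundles the cluster-dns directory (assumed to sit
// next to this source file) into the binary at build time.
//
//go:embed cluster-dns
var manifestsFS embed.FS

// readManifest loads an embedded YAML file by its path relative to this package,
// mirroring what the test's read() closure does via e2etestfiles.Read.
func readManifest(path string) (string, error) {
	data, err := manifestsFS.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("reading embedded manifest %q: %w", path, err)
	}
	return string(data), nil
}
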
68 changes: 25 additions & 43 deletions test/e2e/network/example_cluster_dns.go
@@ -19,8 +19,6 @@ package network
 import (
 	"context"
 	"fmt"
-	"io"
-	"os"
 	"path/filepath"
 	"strings"
 	"time"
@@ -38,6 +36,7 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
+	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/network/common"
 )

@@ -64,25 +63,23 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 		c = f.ClientSet
 	})
 
-	ginkgo.It("should create pod that uses dns", func() {
-		mkpath := func(file string) string {
-			return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file)
+	read := func(file string) string {
+		data, err := e2etestfiles.Read(file)
+		if err != nil {
+			framework.Fail(err.Error())
 		}
+		return string(data)
+	}
 
+	ginkgo.It("should create pod that uses dns", func() {
 		// contrary to the example, this test does not use contexts, for simplicity
 		// namespaces are passed directly.
 		// Also, for simplicity, we don't use yamls with namespaces, but we
 		// create testing namespaces instead.
 
-		backendRcYaml := mkpath("dns-backend-rc.yaml")
-		backendRcName := "dns-backend"
-		backendSvcYaml := mkpath("dns-backend-service.yaml")
-		backendSvcName := "dns-backend"
-		backendPodName := "dns-backend"
-		frontendPodYaml := mkpath("dns-frontend-pod.yaml")
-		frontendPodName := "dns-frontend"
-		frontendPodContainerName := "dns-frontend"
-
+		backendName := "dns-backend"
+		frontendName := "dns-frontend"
+		clusterDnsPath := "test/e2e/testing-manifests/cluster-dns"
 		podOutput := "Hello World!"
 
 		// we need two namespaces anyway, so let's forget about
@@ -96,30 +93,30 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 		}
 
 		for _, ns := range namespaces {
-			framework.RunKubectlOrDie(ns.Name, "create", "-f", backendRcYaml, getNsCmdFlag(ns))
+			framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-rc.yaml")), "create", "-f", "-")
 		}
 
 		for _, ns := range namespaces {
-			framework.RunKubectlOrDie(ns.Name, "create", "-f", backendSvcYaml, getNsCmdFlag(ns))
+			framework.RunKubectlOrDieInput(ns.Name, read(filepath.Join(clusterDnsPath, "dns-backend-service.yaml")), "create", "-f", "-")
 		}
 
 		// wait for objects
 		for _, ns := range namespaces {
-			e2eresource.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
-			e2enetwork.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
+			e2eresource.WaitForControlledPodsRunning(c, ns.Name, backendName, api.Kind("ReplicationController"))
+			e2enetwork.WaitForService(c, ns.Name, backendName, true, framework.Poll, framework.ServiceStartTimeout)
 		}
 		// it is not enough that pods are running because they may be set to running, but
 		// the application itself may have not been initialized. Just query the application.
 		for _, ns := range namespaces {
-			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
+			label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendName}))
 			options := metav1.ListOptions{LabelSelector: label.String()}
 			pods, err := c.CoreV1().Pods(ns.Name).List(context.TODO(), options)
 			framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
-			err = e2epod.PodsResponding(c, ns.Name, backendPodName, false, pods)
+			err = e2epod.PodsResponding(c, ns.Name, backendName, false, pods)
 			framework.ExpectNoError(err, "waiting for all pods to respond")
 			framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
 
-			err = waitForServiceResponding(c, ns.Name, backendSvcName)
+			err = waitForServiceResponding(c, ns.Name, backendName)
 			framework.ExpectNoError(err, "waiting for the service to respond")
 		}
 
@@ -131,7 +128,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 		// This complicated code may be removed if the pod itself retried after
 		// dns error or timeout.
 		// This code is probably unnecessary, but let's stay on the safe side.
-		label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
+		label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendName}))
 		options := metav1.ListOptions{LabelSelector: label.String()}
 		pods, err := c.CoreV1().Pods(namespaces[0].Name).List(context.TODO(), options)
 
@@ -140,47 +137,32 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
 		}
 		podName := pods.Items[0].Name
 
-		queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name)
+		queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendName+"."+namespaces[0].Name)
 		_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
 		framework.ExpectNoError(err, "waiting for output from pod exec")
 
-		updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
+		updatedPodYaml := strings.Replace(read(filepath.Join(clusterDnsPath, "dns-frontend-pod.yaml")), fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain), 1)
 
 		// create a pod in each namespace
 		for _, ns := range namespaces {
-			framework.NewKubectlCommand(ns.Name, "create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie(ns.Name)
+			framework.RunKubectlOrDieInput(ns.Name, updatedPodYaml, "create", "-f", "-")
 		}
 
 		// wait until the pods have been scheduler, i.e. are not Pending anymore. Remember
 		// that we cannot wait for the pods to be running because our pods terminate by themselves.
 		for _, ns := range namespaces {
-			err := e2epod.WaitForPodNotPending(c, ns.Name, frontendPodName)
+			err := e2epod.WaitForPodNotPending(c, ns.Name, frontendName)
 			framework.ExpectNoError(err)
 		}
 
 		// wait for pods to print their result
 		for _, ns := range namespaces {
-			_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
-			framework.ExpectNoError(err, "pod %s failed to print result in logs", frontendPodName)
+			_, err := framework.LookForStringInLog(ns.Name, frontendName, frontendName, podOutput, framework.PodStartTimeout)
+			framework.ExpectNoError(err, "pod %s failed to print result in logs", frontendName)
 		}
 	})
 })
 
-func getNsCmdFlag(ns *v1.Namespace) string {
-	return fmt.Sprintf("--namespace=%v", ns.Name)
-}
-
-// pass enough context with the 'old' parameter so that it replaces what your really intended.
-func prepareResourceWithReplacedString(inputFile, old, new string) string {
-	f, err := os.Open(inputFile)
-	framework.ExpectNoError(err, "failed to open file: %s", inputFile)
-	defer f.Close()
-	data, err := io.ReadAll(f)
-	framework.ExpectNoError(err, "failed to read from file: %s", inputFile)
-	podYaml := strings.Replace(string(data), old, new, 1)
-	return podYaml
-}
-
 // waitForServiceResponding waits for the service to be responding.
 func waitForServiceResponding(c clientset.Interface, ns, name string) error {
 	ginkgo.By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
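
The frontend pod manifest hard-codes the backend's in-cluster DNS name for the development namespace (dns-backend.development.svc.cluster.local), so the test rewrites that host to point at the backend service in the first generated test namespace before creating the pod. Below is a small sketch of that rewrite under assumed example values; the real test takes the namespace from the framework and the domain from framework.TestContext.ClusterDNSDomain.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed example values for illustration only.
	clusterDNSDomain := "cluster.local"
	testNamespace := "dnsexample-1234"

	frontendPodYaml := "    - http://dns-backend.development.svc.cluster.local:8000\n"

	// Replace only the first occurrence, as the updated test does, so the
	// frontend pod resolves the backend service in the test namespace.
	oldHost := fmt.Sprintf("dns-backend.development.svc.%s", clusterDNSDomain)
	newHost := fmt.Sprintf("dns-backend.%s.svc.%s", testNamespace, clusterDNSDomain)
	fmt.Print(strings.Replace(frontendPodYaml, oldHost, newHost, 1))
}
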
21 changes: 21 additions & 0 deletions test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml
@@ -0,0 +1,21 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: dns-backend
  labels:
    name: dns-backend
spec:
  replicas: 1
  selector:
    name: dns-backend
  template:
    metadata:
      labels:
        name: dns-backend
    spec:
      containers:
      - name: dns-backend
        image: k8s.gcr.io/example-dns-backend:v1
        ports:
        - name: backend-port
          containerPort: 8000
9 changes: 9 additions & 0 deletions test/e2e/testing-manifests/cluster-dns/dns-backend-service.yaml
@@ -0,0 +1,9 @@
kind: Service
apiVersion: v1
metadata:
  name: dns-backend
spec:
  ports:
  - port: 8000
  selector:
    name: dns-backend
16 changes: 16 additions & 0 deletions test/e2e/testing-manifests/cluster-dns/dns-frontend-pod.yaml
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: dns-frontend
  labels:
    name: dns-frontend
spec:
  containers:
  - name: dns-frontend
    image: k8s.gcr.io/example-dns-frontend:v1
    command:
    - python
    - client.py
    - http://dns-backend.development.svc.cluster.local:8000
    imagePullPolicy: Always
  restartPolicy: Never
2 changes: 1 addition & 1 deletion test/e2e/testing-manifests/embed.go
@@ -22,7 +22,7 @@ import (
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
 
-//go:embed flexvolume guestbook kubectl sample-device-plugin.yaml scheduling/nvidia-driver-installer.yaml statefulset storage-csi
+//go:embed cluster-dns flexvolume guestbook kubectl sample-device-plugin.yaml scheduling/nvidia-driver-installer.yaml statefulset storage-csi
 var e2eTestingManifestsFS embed.FS
 
 func GetE2ETestingManifestsFS() e2etestfiles.EmbeddedFileSource {
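
For e2etestfiles.Read to resolve paths like test/e2e/testing-manifests/cluster-dns/dns-backend-rc.yaml, the embedded filesystem returned by GetE2ETestingManifestsFS has to be registered with the testfiles package somewhere in the test binary's setup. A hedged sketch of what that wiring could look like; the AddFileSource call and the registration site are assumptions, since only GetE2ETestingManifestsFS and Read appear in this diff.

package e2esetup

import (
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
	e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests"
)

// registerEmbeddedManifests makes the embedded testing-manifests tree visible
// to e2etestfiles.Read; it would be called once before the tests run.
func registerEmbeddedManifests() {
	// Assumed API: a FileSource registration hook in the testfiles package.
	e2etestfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS())
}
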
