From 9dae8ab64a748a76913351d9fcedf994869380e2 Mon Sep 17 00:00:00 2001 From: Lu Jiajing Date: Wed, 27 Jan 2021 16:44:58 +0800 Subject: [PATCH] Fix service port naming convention (#1368) Signed-off-by: Megrez Lu --- .ci/run-e2e-tests.sh | 4 + .github/workflows/e2e-kubernetes.yaml | 2 +- Makefile | 32 +++++- pkg/service/collector.go | 12 +-- pkg/service/collector_test.go | 12 +-- test/e2e/examples1_test.go | 37 +------ test/e2e/istio_test.go | 134 ++++++++++++++++++++++++++ test/e2e/utils.go | 65 ++++++++++--- 8 files changed, 233 insertions(+), 65 deletions(-) create mode 100644 test/e2e/istio_test.go diff --git a/.ci/run-e2e-tests.sh b/.ci/run-e2e-tests.sh index 99173e1a6..f552d5c5c 100755 --- a/.ci/run-e2e-tests.sh +++ b/.ci/run-e2e-tests.sh @@ -70,6 +70,10 @@ then export SPECIFY_OTEL_IMAGES=true export SPECIFY_OTEL_CONFIG=true make e2e-tests-smoke +elif [ "${TEST_GROUP}" = "istio" ] +then + echo "Running Smoke Tests with istio" + make e2e-tests-istio else echo "Unknown TEST_GROUP [${TEST_GROUP}]"; exit 1 fi diff --git a/.github/workflows/e2e-kubernetes.yaml b/.github/workflows/e2e-kubernetes.yaml index e37caf610..3d4ec04f7 100644 --- a/.github/workflows/e2e-kubernetes.yaml +++ b/.github/workflows/e2e-kubernetes.yaml @@ -6,7 +6,7 @@ jobs: runs-on: ubuntu-16.04 strategy: matrix: - TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, es-otel, streaming-otel, smoke-otel, upgrade] + TEST_GROUP: [smoke, es, cassandra, streaming, examples1, examples2, generate, es-otel, streaming-otel, smoke-otel, upgrade, istio] steps: - uses: actions/setup-go@v1 with: diff --git a/Makefile b/Makefile index 3ee00f63b..4f55889b5 100644 --- a/Makefile +++ b/Makefile @@ -25,9 +25,13 @@ ES_OPERATOR_NAMESPACE ?= openshift-logging ES_OPERATOR_BRANCH ?= release-4.4 ES_OPERATOR_IMAGE ?= quay.io/openshift/origin-elasticsearch-operator:4.4 SDK_VERSION=v0.18.2 +ISTIO_VERSION ?= 1.8.2 +ISTIOCTL="./deploy/test/istio/bin/istioctl" GOPATH ?= "$(HOME)/go" GOROOT ?= "$(shell go 
env GOROOT)" +SED ?= "sed" + PROMETHEUS_OPERATOR_TAG ?= v0.39.0 PROMETHEUS_BUNDLE ?= https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/${PROMETHEUS_OPERATOR_TAG}/bundle.yaml @@ -115,7 +119,7 @@ prepare-e2e-tests: build docker push @cat deploy/role_binding.yaml >> deploy/test/namespace-manifests.yaml @echo "---" >> deploy/test/namespace-manifests.yaml - @sed "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml + @${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" test/operator.yaml >> deploy/test/namespace-manifests.yaml @cp deploy/crds/jaegertracing.io_jaegers_crd.yaml deploy/test/global-manifests.yaml @echo "---" >> deploy/test/global-manifests.yaml @@ -194,6 +198,11 @@ e2e-tests-upgrade: prepare-e2e-tests @echo Running Upgrade end-to-end tests... UPGRADE_TEST_VERSION=$(shell .ci/get_test_upgrade_version.sh ${JAEGER_VERSION}) go test -tags=upgrade ./test/e2e/... $(TEST_OPTIONS) +.PHONY: e2e-tests-istio +e2e-tests-istio: prepare-e2e-tests istio + @echo Running Istio end-to-end tests... + @STORAGE_NAMESPACE=$(STORAGE_NAMESPACE) KAFKA_NAMESPACE=$(KAFKA_NAMESPACE) go test -tags=istio ./test/e2e/... 
$(TEST_OPTIONS) + .PHONY: run run: crd @rm -rf /tmp/_cert* @@ -250,6 +259,19 @@ else @kubectl create -f ./test/elasticsearch.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true endif +.PHONY: istio +istio: + @echo Install istio with minimal profile + @mkdir -p deploy/test + @[ -f "${ISTIOCTL}" ] || (curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VERSION} TARGET_ARCH=x86_64 sh - && mv ./istio-${ISTIO_VERSION} ./deploy/test/istio) + @${ISTIOCTL} install --set profile=minimal -y + +.PHONY: undeploy-istio +undeploy-istio: + @[ -f "${ISTIOCTL}" ] && (${ISTIOCTL} manifest generate --set profile=demo | kubectl delete --ignore-not-found=true -f -) || true + @kubectl delete namespace istio-system --ignore-not-found=true || true + @rm -rf deploy/test/istio + .PHONY: cassandra cassandra: storage @kubectl create -f ./test/cassandra.yml --namespace $(STORAGE_NAMESPACE) 2>&1 | grep -v "already exists" || true @@ -270,7 +292,7 @@ else @kubectl create clusterrolebinding strimzi-cluster-operator-entity-operator-delegation --clusterrole=strimzi-entity-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true @kubectl create clusterrolebinding strimzi-cluster-operator-topic-operator-delegation --clusterrole=strimzi-topic-operator --serviceaccount ${KAFKA_NAMESPACE}:strimzi-cluster-operator 2>&1 | grep -v "already exists" || true @curl --fail --location $(KAFKA_YAML) --output deploy/test/kafka-operator.yaml --create-dirs - @sed 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true + @${SED} 's/namespace: .*/namespace: $(KAFKA_NAMESPACE)/' deploy/test/kafka-operator.yaml | kubectl -n $(KAFKA_NAMESPACE) apply -f - 2>&1 | grep -v "already exists" || true @kubectl set env deployment strimzi-cluster-operator -n ${KAFKA_NAMESPACE} STRIMZI_NAMESPACE="*" endif @@ -294,7 +316,7 @@ else @echo 
Creating namespace $(KAFKA_NAMESPACE) @kubectl create namespace $(KAFKA_NAMESPACE) 2>&1 | grep -v "already exists" || true @curl --fail --location $(KAFKA_EXAMPLE) --output deploy/test/kafka-example.yaml --create-dirs - @sed -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml + @${SED} -i 's/size: 100Gi/size: 10Gi/g' deploy/test/kafka-example.yaml @kubectl -n $(KAFKA_NAMESPACE) apply --dry-run=true -f deploy/test/kafka-example.yaml @kubectl -n $(KAFKA_NAMESPACE) apply -f deploy/test/kafka-example.yaml 2>&1 | grep -v "already exists" || true endif @@ -321,7 +343,7 @@ else endif .PHONY: clean -clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator +clean: undeploy-kafka undeploy-es-operator undeploy-prometheus-operator undeploy-istio @rm -f deploy/test/*.yaml @if [ -d deploy/test ]; then rmdir deploy/test ; fi @kubectl delete -f ./test/cassandra.yml --ignore-not-found=true -n $(STORAGE_NAMESPACE) || true @@ -383,7 +405,7 @@ deploy: ingress crd @kubectl apply -f deploy/service_account.yaml @kubectl apply -f deploy/cluster_role.yaml @kubectl apply -f deploy/cluster_role_binding.yaml - @sed "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f - + @${SED} "s~image: jaegertracing\/jaeger-operator\:.*~image: $(BUILD_IMAGE)~gi" deploy/operator.yaml | kubectl apply -f - .PHONY: operatorhub operatorhub: check-operatorhub-pr-template diff --git a/pkg/service/collector.go b/pkg/service/collector.go index a7adc6833..146407b58 100644 --- a/pkg/service/collector.go +++ b/pkg/service/collector.go @@ -97,12 +97,12 @@ func GetNameForHeadlessCollectorService(jaeger *v1.Jaeger) string { func GetPortNameForGRPC(jaeger *v1.Jaeger) string { if viper.GetString("platform") == v1.FlagPlatformOpenShift { // we always have TLS certs when running on OpenShift, so, TLS is always enabled - return "https-grpc" + return "grpc-https" } // if we don't have a jaeger provided, it's certainly not TLS... 
if nil == jaeger { - return "http-grpc" + return "grpc-http" } // perhaps the user has provisioned the certs and configured the CR manually? @@ -110,18 +110,18 @@ func GetPortNameForGRPC(jaeger *v1.Jaeger) string { if val, ok := jaeger.Spec.Collector.Options.Map()["collector.grpc.tls.enabled"]; ok { enabled, err := strconv.ParseBool(val) if err != nil { - return "http-grpc" // not "true", defaults to false + return "grpc-http" // not "true", defaults to false } if enabled { - return "https-grpc" // explicit true + return "grpc-https" // explicit true } - return "http-grpc" // explicit false + return "grpc-http" // explicit false } // doesn't look like we have TLS enabled - return "http-grpc" + return "grpc-http" } func getTypeForCollectorService(jaeger *v1.Jaeger) corev1.ServiceType { diff --git a/pkg/service/collector_test.go b/pkg/service/collector_test.go index f5f2cd041..02f914ae4 100644 --- a/pkg/service/collector_test.go +++ b/pkg/service/collector_test.go @@ -66,13 +66,13 @@ func TestCollectorGRPCPortName(t *testing.T) { { "nil", nil, - "http-grpc", + "grpc-http", false, // in openshift? }, { "no-tls", &v1.Jaeger{}, - "http-grpc", + "grpc-http", false, // in openshift? }, { @@ -84,7 +84,7 @@ func TestCollectorGRPCPortName(t *testing.T) { }, }, }, - "http-grpc", + "grpc-http", false, // in openshift? }, { @@ -96,7 +96,7 @@ func TestCollectorGRPCPortName(t *testing.T) { }, }, }, - "http-grpc", + "grpc-http", false, // in openshift? }, { @@ -108,13 +108,13 @@ func TestCollectorGRPCPortName(t *testing.T) { }, }, }, - "https-grpc", + "grpc-https", false, // in openshift? }, { "in-openshift", &v1.Jaeger{}, - "https-grpc", + "grpc-https", true, // in openshift? 
}, } { diff --git a/test/e2e/examples1_test.go b/test/e2e/examples1_test.go index 9ddacb0ce..d282dd036 100644 --- a/test/e2e/examples1_test.go +++ b/test/e2e/examples1_test.go @@ -3,9 +3,7 @@ package e2e import ( - "context" "encoding/json" - "errors" "io/ioutil" "net/http" "os" @@ -16,13 +14,10 @@ import ( "time" framework "github.com/operator-framework/operator-sdk/pkg/test" - log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" ) type ExamplesTestSuite struct { @@ -109,7 +104,7 @@ func (suite *ExamplesTestSuite) TestBusinessApp() { require.NoError(t, err) // Now deploy examples/business-application-injected-sidecar.yaml - businessAppCR := getBusinessAppCR(err) + businessAppCR := getBusinessAppCR() defer os.Remove(businessAppCR.Name()) cmd := exec.Command("kubectl", "create", "--namespace", namespace, "--filename", businessAppCR.Name()) output, err := cmd.CombinedOutput() @@ -126,23 +121,8 @@ func (suite *ExamplesTestSuite) TestBusinessApp() { handler := &corev1.Handler{HTTPGet: livelinessHandler} livelinessProbe := &corev1.Probe{Handler: *handler, InitialDelaySeconds: 1, FailureThreshold: 3, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 1} - err = wait.Poll(retryInterval, timeout, func() (done bool, err error) { - vertxDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Get(context.Background(), vertxDeploymentName, metav1.GetOptions{}) - require.NoError(t, err) - containers := vertxDeployment.Spec.Template.Spec.Containers - for index, container := range containers { - if container.Name == vertxDeploymentName { - vertxDeployment.Spec.Template.Spec.Containers[index].LivenessProbe = livelinessProbe - updatedVertxDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Update(context.Background(), vertxDeployment, 
metav1.UpdateOptions{}) - if err != nil { - log.Warnf("Error %v updating vertx app, retrying", err) - return false, nil - } - log.Infof("Updated deployment %v", updatedVertxDeployment.Name) - return true, nil - } - } - return false, errors.New("Vertx deployment not found") + err = waitForDeploymentAndUpdate(vertxDeploymentName, vertxDeploymentName, func(container *corev1.Container) { + container.LivenessProbe = livelinessProbe }) require.NoError(t, err) @@ -173,17 +153,6 @@ func (suite *ExamplesTestSuite) TestBusinessApp() { require.NoError(t, err, "SmokeTest failed") } -func getBusinessAppCR(err error) *os.File { - content, err := ioutil.ReadFile("../../examples/business-application-injected-sidecar.yaml") - require.NoError(t, err) - newContent := strings.Replace(string(content), "image: jaegertracing/vertx-create-span:operator-e2e-tests", "image: "+vertxExampleImage, 1) - file, err := ioutil.TempFile("", "vertx-example") - require.NoError(t, err) - err = ioutil.WriteFile(file.Name(), []byte(newContent), 0666) - require.NoError(t, err) - return file -} - func execOcCommand(args ...string) { cmd := exec.Command("oc", args...) 
output, err := cmd.CombinedOutput() diff --git a/test/e2e/istio_test.go b/test/e2e/istio_test.go new file mode 100644 index 000000000..bfee20ad3 --- /dev/null +++ b/test/e2e/istio_test.go @@ -0,0 +1,134 @@ +// +build istio + +package e2e + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "testing" + "time" + + framework "github.com/operator-framework/operator-sdk/pkg/test" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +type IstioTestSuite struct { + suite.Suite +} + +// LIFECYCLE - Suite +func (suite *IstioTestSuite) SetupSuite() { + t = suite.T() + var err error + ctx, err = prepare(t) + if err != nil { + if ctx != nil { + ctx.Cleanup() + } + require.FailNow(t, "Failed in prepare") + } + fw = framework.Global + namespace = ctx.GetID() + require.NotNil(t, namespace, "GetID failed") + + // label namespace + ns, err := framework.Global.KubeClient.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) + require.NoError(t, err, "failed to get the namespaces details: %v", err) + + nsLabels := ns.GetLabels() + if nsLabels == nil { + nsLabels = make(map[string]string) + } + nsLabels["istio-injection"] = "enabled" + ns.SetLabels(nsLabels) + + ns, err = framework.Global.KubeClient.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{}) + require.NoError(t, err, "failed to update labels of the namespace %s", namespace) + + addToFrameworkSchemeForSmokeTests(t) +} + +func (suite *IstioTestSuite) TearDownSuite() { + handleSuiteTearDown() +} + +// LIFECYCLE - Test + +func (suite *IstioTestSuite) SetupTest() { + t = suite.T() +} + +func (suite *IstioTestSuite) AfterTest(suiteName, testName string) { + handleTestFailure() +} + +func TestIstioSuite(t *testing.T) { + suite.Run(t, new(IstioTestSuite)) +} + +func (suite 
*IstioTestSuite) TestEnvoySidecar() { + // First deploy a Jaeger instance + jaegerInstanceName := "simplest" + jaegerInstance := createJaegerInstanceFromFile(jaegerInstanceName, "../../examples/simplest.yaml") + defer undeployJaegerInstance(jaegerInstance) + err := WaitForDeployment(t, fw.KubeClient, namespace, jaegerInstanceName, 1, retryInterval, timeout+(1*time.Minute)) + require.NoError(t, err) + + // Now deploy examples/business-application-injected-sidecar.yaml + businessAppCR := getBusinessAppCR() + defer os.Remove(businessAppCR.Name()) + cmd := exec.Command("kubectl", "create", "--namespace", namespace, "--filename", businessAppCR.Name()) + output, err := cmd.CombinedOutput() + if err != nil && !strings.Contains(string(output), "AlreadyExists") { + require.NoError(t, err, "Failed creating Jaeger instance with: [%s]\n", string(output)) + } + const vertxDeploymentName = "myapp" + err = WaitForDeployment(t, fw.KubeClient, namespace, vertxDeploymentName, 1, retryInterval, timeout) + require.NoError(t, err, "Failed waiting for myapp deployment") + + // Add a liveliness probe to create some traces + vertxPort := intstr.IntOrString{IntVal: 8080} + livelinessHandler := &corev1.HTTPGetAction{Path: "/", Port: vertxPort, Scheme: corev1.URISchemeHTTP} + handler := &corev1.Handler{HTTPGet: livelinessHandler} + livelinessProbe := &corev1.Probe{Handler: *handler, InitialDelaySeconds: 1, FailureThreshold: 3, PeriodSeconds: 10, SuccessThreshold: 1, TimeoutSeconds: 1} + + err = waitForDeploymentAndUpdate(vertxDeploymentName, vertxDeploymentName, func(container *corev1.Container) { + container.LivenessProbe = livelinessProbe + }) + require.NoError(t, err) + + exists := testContainerInPod(namespace, vertxDeploymentName, "istio-proxy", nil) + require.True(t, exists) + + // Confirm that we've created some traces + ports := []string{"0:16686"} + portForward, closeChan := CreatePortForward(namespace, jaegerInstanceName, "all-in-one", ports, fw.KubeConfig) + defer 
portForward.Close() + defer close(closeChan) + forwardedPorts, err := portForward.GetPorts() + require.NoError(t, err) + queryPort := strconv.Itoa(int(forwardedPorts[0].Local)) + + url := "http://localhost:" + queryPort + "/api/traces?service=order" + err = WaitAndPollForHTTPResponse(url, func(response *http.Response) (bool, error) { + body, err := ioutil.ReadAll(response.Body) + require.NoError(t, err) + + resp := &resp{} + err = json.Unmarshal(body, &resp) + require.NoError(t, err) + + return len(resp.Data) > 0 && strings.Contains(string(body), "traceID"), nil + }) + require.NoError(t, err, "SmokeTest failed") +} diff --git a/test/e2e/utils.go b/test/e2e/utils.go index a3dcf5c88..494e90df7 100644 --- a/test/e2e/utils.go +++ b/test/e2e/utils.go @@ -15,18 +15,16 @@ import ( "testing" "time" - "github.com/opentracing/opentracing-go" - "github.com/uber/jaeger-client-go/config" - - "github.com/jaegertracing/jaeger-operator/pkg/apis/kafka/v1beta1" - osv1 "github.com/openshift/api/route/v1" osv1sec "github.com/openshift/api/security/v1" + "github.com/opentracing/opentracing-go" framework "github.com/operator-framework/operator-sdk/pkg/test" "github.com/operator-framework/operator-sdk/pkg/test/e2eutil" + "github.com/prometheus/common/log" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/uber/jaeger-client-go/config" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" @@ -40,6 +38,7 @@ import ( "github.com/jaegertracing/jaeger-operator/pkg/apis" v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1" + "github.com/jaegertracing/jaeger-operator/pkg/apis/kafka/v1beta1" "github.com/jaegertracing/jaeger-operator/pkg/util" ) @@ -440,7 +439,7 @@ type services struct { Data []string `json:"data"` total int `json:"total"` limit int `json:"limit"` - offset int `json:offset` + offset int `json:"offset"` errors interface{} `json:"errors"` } @@ -640,12 +639,18 @@ func 
wasUsingOtelAllInOne(jaegerInstanceName, namespace string) bool { return false } +// verifyAgentImage asserts whether the app's jaeger-agent container is using the OTEL agent image. +func verifyAgentImage(appName, namespace string, expected bool) { - require.Equal(t, expected, wasUsingOtelAgent(appName, namespace)) + require.Equal(t, expected, testContainerInPod(namespace, appName, "jaeger-agent", func(container corev1.Container) bool { + logrus.Infof("Test %s is using agent image %s", t.Name(), container.Image) + return strings.Contains(container.Image, "jaeger-opentelemetry-agent") + })) } -// Was this Jaeger Instance using the OTEL agent? -func wasUsingOtelAgent(appName, namespace string) bool { +// testContainerInPod is a general function to test whether the named container exists in the pod +// selected by the `app` label. It returns true if and only if the container exists and, +// when a user-defined `predicate` is given, the predicate returns true. +func testContainerInPod(namespace, appName, containerName string, predicate func(corev1.Container) bool) bool { var pods *corev1.PodList var pod corev1.Pod @@ -676,13 +681,15 @@ func wasUsingOtelAgent(appName, namespace string) bool { containers := pod.Spec.Containers for _, container := range containers { - if container.Name == "jaeger-agent" { - logrus.Infof("Test %s is using agent image %s", t.Name(), container.Image) - return strings.Contains(container.Image, "jaeger-opentelemetry-agent") + if container.Name == containerName { + if predicate != nil { + return predicate(container) + } + return true } } - require.Failf(t, "Did not find an agent image for %s in namespace %s", appName, namespace) + require.Failf(t, "Did not find container %s for pod with label{app=%s} in namespace %s", containerName, appName, namespace) return false } @@ -861,3 +868,35 @@ func getTracingClientWithCollectorEndpoint(serviceName, collectorEndpoint string } return cfg.NewTracer() } + +func waitForDeploymentAndUpdate(deploymentName, containerName string, update 
func(container *corev1.Container)) error { + return wait.Poll(retryInterval, timeout, func() (done bool, err error) { + deployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Get(context.Background(), deploymentName, metav1.GetOptions{}) + require.NoError(t, err) + containers := deployment.Spec.Template.Spec.Containers + for index, container := range containers { + if container.Name == containerName { + update(&deployment.Spec.Template.Spec.Containers[index]) + updatedDeployment, err := fw.KubeClient.AppsV1().Deployments(namespace).Update(context.Background(), deployment, metav1.UpdateOptions{}) + if err != nil { + log.Warnf("Error %v updating container, retrying", err) + return false, nil + } + log.Infof("Updated deployment %v", updatedDeployment.Name) + return true, nil + } + } + return false, fmt.Errorf("container %s in deployment %s not found", containerName, deploymentName) + }) +} + +func getBusinessAppCR() *os.File { + content, err := ioutil.ReadFile("../../examples/business-application-injected-sidecar.yaml") + require.NoError(t, err) + newContent := strings.Replace(string(content), "image: jaegertracing/vertx-create-span:operator-e2e-tests", "image: "+vertxExampleImage, 1) + file, err := ioutil.TempFile("", "vertx-example") + require.NoError(t, err) + err = ioutil.WriteFile(file.Name(), []byte(newContent), 0666) + require.NoError(t, err) + return file +}