Skip to content

Commit

Permalink
Merge pull request #19 from jkyros/e2e-makefile-target
Browse files Browse the repository at this point in the history
PODAUTO-86: Pull test container dockerfile, e2e targets out of CI into keda repo, reenable CPU test
  • Loading branch information
openshift-merge-bot[bot] authored Jan 11, 2024
2 parents a47156b + 2aa3230 commit b4113ea
Show file tree
Hide file tree
Showing 4 changed files with 74 additions and 0 deletions.
14 changes: 14 additions & 0 deletions Dockerfile.tests
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# This dockerfile is intended to build something approximating the upstream test
# image, but for use in OpenShift CI. Upstream keda bumps the keda-tools image version
# when they feel like it, so we might just need to pay attention when we do releases.
FROM ghcr.io/kedacore/keda-tools:1.20.5
# Copy the whole repo into the image so the e2e test suite can be run from /src.
COPY . /src
# World-writable so the tests can run under OpenShift's arbitrary (non-root) UIDs.
RUN chmod 777 -R /src
WORKDIR /src
# GOPATH and /bin must also be writable for the arbitrary-UID user (go builds,
# helm install below).
RUN mkdir -p /go && chmod 777 -R /go
RUN mkdir -p /bin && chmod 777 -R /bin
# Keep the go build cache inside the writable /src tree.
ENV GOCACHE=/src/.cache
# The helm install script would otherwise try to sudo, which is unavailable here.
ENV USE_SUDO=false
ENV PATH=/src/helm:/src/:$PATH
# Install helm into /src rather than a system path (again, non-root friendly).
ENV HELM_INSTALL_DIR=/src
# NOTE(review): this fetches the helm installer from the master branch at build
# time, so builds are not fully reproducible — pinning a ref may be worth it.
RUN curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 && chmod 700 get_helm.sh && ./get_helm.sh
22 changes: 22 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,28 @@ e2e-test-clean-crds: ## Delete all scaled objects and jobs across all namespaces
e2e-test-clean: get-cluster-context ## Delete all namespaces labeled with type=e2e
kubectl delete ns -l type=e2e

# The OpenShift tests are split into 3 targets because when we test the CMA operator,
# we want to do the setup and cleanup with the operator, but still run the test suite
# from the test image, so we need that granularity.
.PHONY: e2e-test-openshift-setup
e2e-test-openshift-setup: ## Setup the tests for OpenShift
	@echo "--- Performing Setup ---"
	cd tests; go test -v -timeout 15m -tags e2e ./utils/setup_test.go

.PHONY: e2e-test-openshift
e2e-test-openshift: ## Run tests for OpenShift
	@echo "--- Running Internal Tests ---"
	# global_custom_ca is excluded: it needs a custom CA setup that isn't available in this environment.
	# -p 1 keeps the test binaries from running in parallel so they don't fight over cluster state.
	cd tests; go test -p 1 -v -timeout 60m -tags e2e $(shell cd tests; go list -tags e2e ./internals/... | grep -v internals/global_custom_ca)
	@echo "--- Running Scaler Tests ---"
	# Only the scalers whose backing services we can stand up in OpenShift CI are run here.
	cd tests; go test -p 1 -v -timeout 60m -tags e2e ./scalers/cpu/... ./scalers/kafka/... ./scalers/memory/... ./scalers/prometheus/...
	@echo "--- Running Sequential Tests ---"
	cd tests; go test -p 1 -v -timeout 60m -tags e2e ./sequential/...

.PHONY: e2e-test-openshift-clean
e2e-test-openshift-clean: ## Cleanup the test environment for OpenShift
	@echo "--- Cleaning Up ---"
	cd tests; go test -v -timeout 60m -tags e2e ./utils/cleanup_test.go

.PHONY: smoke-test
smoke-test: ## Run e2e tests against Kubernetes cluster configured in ~/.kube/config.
./tests/run-smoke-tests.sh
Expand Down
28 changes: 28 additions & 0 deletions tests/helper/helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"encoding/pem"
"fmt"
"io"
Expand Down Expand Up @@ -458,6 +459,33 @@ func WaitForAllPodRunningInNamespace(t *testing.T, kc *kubernetes.Clientset, nam
return false
}

// WaitForHPAMetricsToPopulate waits until the Horizontal Pod Autoscaler for the scaledObject
// reports that it has metrics available to calculate, or until the number of iterations are
// done, whichever happens first. It polls every intervalSeconds, up to iterations times, and
// returns true as soon as the HPA reports at least one real metric, false on timeout.
func WaitForHPAMetricsToPopulate(t *testing.T, kc *kubernetes.Clientset, name, namespace string,
	iterations, intervalSeconds int) bool {
	totalWaitDuration := time.Duration(iterations) * time.Duration(intervalSeconds) * time.Second
	startedWaiting := time.Now()
	for i := 0; i < iterations; i++ {
		t.Logf("Waiting up to %s for HPA to populate metrics - %s so far", totalWaitDuration, time.Since(startedWaiting).Round(time.Second))

		hpa, err := kc.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			// A transient API error (or the HPA not existing yet) shouldn't abort the
			// wait, but it should show up in the test log instead of being swallowed.
			t.Logf("Could not get HPA %s/%s (will retry): %v", namespace, name, err)
		} else if hpa.Status.CurrentMetrics != nil {
			for _, currentMetric := range hpa.Status.CurrentMetrics {
				// When testing on a kind cluster at least, an empty metricStatus object with a blank type shows up first,
				// so we need to make sure we have *actual* resource metrics before we return
				if currentMetric.Type != "" {
					// Marshal error deliberately ignored: the JSON dump is log-only detail.
					j, _ := json.MarshalIndent(hpa.Status.CurrentMetrics, " ", " ")
					t.Logf("HPA has metrics after %s: %s", time.Since(startedWaiting), j)
					return true
				}
			}
		}

		time.Sleep(time.Duration(intervalSeconds) * time.Second)
	}
	return false
}

// Waits until deployment ready replica count hits target or number of iterations are done.
func WaitForDeploymentReplicaReadyCount(t *testing.T, kc *kubernetes.Clientset, name, namespace string,
target, iterations, intervalSeconds int) bool {
Expand Down
10 changes: 10 additions & 0 deletions tests/scalers/cpu/cpu_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ import (

"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/client-go/kubernetes"

. "github.com/kedacore/keda/v2/tests/helper"
Expand All @@ -28,6 +29,7 @@ var (
testNamespace = fmt.Sprintf("%s-ns", testName)
deploymentName = fmt.Sprintf("%s-deployment", testName)
scaledObjectName = fmt.Sprintf("%s-so", testName)
hpaName = fmt.Sprintf("keda-hpa-%s-so", testName)
)

type templateData struct {
Expand Down Expand Up @@ -204,6 +206,14 @@ func scaleOut(t *testing.T, kc *kubernetes.Clientset, data templateData) {
assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 1),
"Replica count should start out as 1")

// The default metrics-server window is 30s, and that's what keda is used to, but some platforms use things like
// prometheus-adapter, and have the window tuned to a larger window of say 5m. In that case it takes 5 minutes before
// the HPA can even start scaling, and as a result we'll fail this test unless we wait for the metrics before we start.
// We'd read the window straight from the metrics-server config, but we'd have to know too much about unusual configurations,
// so we just wait up to 10 minutes for the metrics (wherever they're coming from) before we proceed with the test.
require.True(t, WaitForHPAMetricsToPopulate(t, kc, hpaName, testNamespace, 120, 5),
"HPA should populate metrics within 10 minutes")

t.Log("--- testing scale out ---")
t.Log("--- applying job ---")

Expand Down

0 comments on commit b4113ea

Please sign in to comment.