Skip to content

Commit

Permalink
gatewayapi: bump e2e tests to v1
Browse files Browse the repository at this point in the history
Bump Gateway API E2E tests to v1 and switch to Istio from Contour.

Signed-off-by: Sanskar Jaiswal <jaiswalsanskar078@gmail.com>
  • Loading branch information
aryan9600 committed Nov 29, 2023
1 parent 0d0d0ef commit 09b0937
Show file tree
Hide file tree
Showing 6 changed files with 111 additions and 90 deletions.
76 changes: 35 additions & 41 deletions test/gatewayapi/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,62 +2,56 @@

set -o errexit

CONTOUR_VER="v1.26.0"
GATEWAY_API_VER="v1beta1"
GATEWAY_API_VER="v1.0.0"
REPO_ROOT=$(git rev-parse --show-toplevel)
KUSTOMIZE_VERSION=4.5.2
OS=$(uname -s)
ARCH=$(arch)
if [[ $ARCH == "x86_64" ]]; then
ARCH="amd64"
fi
ISTIO_VER="1.20.0"

mkdir -p ${REPO_ROOT}/bin

echo ">>> Installing Contour components, Gateway API CRDs"
kubectl apply -f https://raw.githubusercontent.com/projectcontour/contour/${CONTOUR_VER}/examples/render/contour-gateway-provisioner.yaml
echo ">>> Installing Gateway API CRDs"
kubectl get crd gateways.gateway.networking.k8s.io &> /dev/null || \
{ kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=${GATEWAY_API_VER}" | kubectl apply -f -; }

kubectl -n projectcontour rollout status deployment/contour-gateway-provisioner
kubectl -n gateway-system wait --for=condition=complete job/gateway-api-admission
kubectl -n gateway-system wait --for=condition=complete job/gateway-api-admission-patch
kubectl -n gateway-system rollout status deployment/gateway-api-admission-server
kubectl -n projectcontour get all
echo ">>> Downloading Istio ${ISTIO_VER}"
cd ${REPO_ROOT}/bin && \
curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VER} sh -

echo ">>> Creating GatewayClass"
cat <<EOF | kubectl apply -f -
kind: GatewayClass
apiVersion: gateway.networking.k8s.io/v1beta1
metadata:
name: contour
spec:
controllerName: projectcontour.io/gateway-controller
EOF
echo ">>> Installing Istio ${ISTIO_VER}"
${REPO_ROOT}/bin/istio-${ISTIO_VER}/bin/istioctl install --set profile=minimal \
--set values.pilot.resources.requests.cpu=100m \
--set values.pilot.resources.requests.memory=100Mi -y

kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.20/samples/addons/prometheus.yaml
kubectl -n istio-system rollout status deployment/prometheus

echo ">>> Creating Gateway"
kubectl create ns istio-ingress
cat <<EOF | kubectl apply -f -
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1beta1
kind: Gateway
metadata:
name: contour
namespace: projectcontour
name: gateway
namespace: istio-ingress
spec:
gatewayClassName: contour
gatewayClassName: istio
listeners:
- name: http
protocol: HTTP
port: 80
allowedRoutes:
namespaces:
from: All
- name: default
hostname: "*.example.com"
port: 80
protocol: HTTP
allowedRoutes:
namespaces:
from: All
EOF

echo '>>> Installing Kustomize'
cd ${REPO_ROOT}/bin && \
curl -sL https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_${OS}_${ARCH}.tar.gz | \
tar xz

echo '>>> Installing Flagger'
${REPO_ROOT}/bin/kustomize build ${REPO_ROOT}/kustomize/gatewayapi | kubectl apply -f -
kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
kubectl create ns flagger-system
helm upgrade -i flagger ${REPO_ROOT}/charts/flagger \
--set crd.create=false \
--namespace flagger-system \
--set prometheus.install=false \
--set meshProvider=gatewayapi:v1 \
--set metricsServer=http://prometheus.istio-system:9090

kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n flagger-system rollout status deployment/flagger
18 changes: 5 additions & 13 deletions test/gatewayapi/test-ab.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/usr/bin/env bash

# This script runs e2e tests for A/B traffic shifting, Canary analysis and promotion
# Prerequisites: Kubernetes Kind and Contour with GatewayAPI
# Prerequisites: Kubernetes Kind and Istio with GatewayAPI

set -o errexit

Expand Down Expand Up @@ -33,10 +33,10 @@ spec:
port: 9898
portName: http
hosts:
- localproject.contour.io
- www.example.com
gatewayRefs:
- name: contour
namespace: projectcontour
- name: gateway
namespace: istio-ingress
analysis:
interval: 15s
threshold: 10
Expand All @@ -46,14 +46,6 @@ spec:
x-canary:
exact: "insider"
metrics:
# - name: request-success-rate
# thresholdRange:
# min: 99
# interval: 1m
# - name: request-duration
# threshold: 500
# interval: 30s
- name: error-rate
templateRef:
name: error-rate
Expand All @@ -74,7 +66,7 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io -H 'X-Canary: insider' http://envoy-contour.projectcontour/"
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com -H 'X-Canary: insider' http://gateway-istio.istio-ingress"
logCmdOutput: "true"
EOF

Expand Down
19 changes: 10 additions & 9 deletions test/gatewayapi/test-bg.sh
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
#!/usr/bin/env bash

# This script runs e2e tests for Blue/Green traffic shifting, Canary analysis and promotion
# Prerequisites: Kubernetes Kind and Contour with GatewayAPI
# Prerequisites: Kubernetes Kind and Istio with GatewayAPI

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

source ${REPO_ROOT}/test/gatewayapi/test-utils.sh

create_request_duration_metric_template

echo '>>> Deploy podinfo in bg-test namespace'
kubectl create ns bg-test
kubectl apply -f ${REPO_ROOT}/test/workloads/secret.yaml -n bg-test
Expand All @@ -30,20 +32,19 @@ spec:
port: 9898
portName: http
hosts:
- localproject.contour.io
- www.example.com
gatewayRefs:
- name: contour
namespace: projectcontour
- name: gateway
namespace: istio-ingress
analysis:
interval: 10s
threshold: 5
iterations: 5
metrics:
- name: request-success-rate
thresholdRange:
min: 99
interval: 1m
- name: request-duration
- name: custom-request-duration
templateRef:
name: request-duration
namespace: flagger-system
threshold: 500
interval: 30s
webhooks:
Expand Down
20 changes: 10 additions & 10 deletions test/gatewayapi/test-canary.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/usr/bin/env bash

# This script runs e2e tests for progressive traffic shifting, Canary analysis and promotion
# Prerequisites: Kubernetes Kind and Contour with GatewayAPI
# Prerequisites: Kubernetes Kind and Istio with GatewayAPI

set -o errexit

Expand Down Expand Up @@ -29,10 +29,10 @@ spec:
port: 9898
portName: http
hosts:
- localproject.contour.io
- www.example.com
gatewayRefs:
- name: contour
namespace: projectcontour
- name: gateway
namespace: istio-ingress
analysis:
interval: 15s
threshold: 15
Expand All @@ -59,7 +59,7 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy-contour.projectcontour/"
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress"
logCmdOutput: "true"
EOF

Expand Down Expand Up @@ -122,13 +122,13 @@ spec:
targetPort: 9898
portName: http
hosts:
- localproject.contour.io
- www.example.com
gatewayRefs:
- name: contour
namespace: projectcontour
- name: gateway
namespace: istio-ingress
analysis:
interval: 15s
threshold: 15
threshold: 6
maxWeight: 50
stepWeight: 10
metrics:
Expand All @@ -152,7 +152,7 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy-contour.projectcontour/status/500"
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress/status/500"
logCmdOutput: "true"
EOF

Expand Down
16 changes: 8 additions & 8 deletions test/gatewayapi/test-session-affinity.sh
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#!/usr/bin/env bash

# This script runs e2e tests for progressive traffic shifting with session affinity, Canary analysis and promotion
# Prerequisites: Kubernetes Kind and Contour with GatewayAPI
# Prerequisites: Kubernetes Kind and Istio with GatewayAPI

set -o errexit

Expand Down Expand Up @@ -34,10 +34,10 @@ spec:
port: 9898
portName: http
hosts:
- localproject.contour.io
- www.example.com
gatewayRefs:
- name: contour
namespace: projectcontour
- name: gateway
namespace: istio-ingress
analysis:
interval: 15s
threshold: 15
Expand Down Expand Up @@ -66,7 +66,7 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy-contour.projectcontour/"
cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress"
logCmdOutput: "true"
EOF

Expand All @@ -75,7 +75,7 @@ check_primary "sa-test"
display_httproute "sa-test"

echo '>>> Port forwarding load balancer'
kubectl port-forward -n projectcontour svc/envoy-contour 8888:80 2>&1 > /dev/null &
kubectl port-forward -n istio-ingress svc/gateway-istio 8888:80 2>&1 > /dev/null &
pf_pid=$!

cleanup() {
Expand Down Expand Up @@ -104,7 +104,7 @@ until ${ok}; do
done

echo '>>> Verifying session affinity'
if ! URL=http://localhost:8888 HOST=localproject.contour.io VERSION=6.1.0 COOKIE_NAME=flagger-cookie \
if ! URL=http://localhost:8888 HOST=www.example.com VERSION=6.1.0 COOKIE_NAME=flagger-cookie \
go run ${REPO_ROOT}/test/gatewayapi/verify_session_affinity.go; then
echo "failed to verify session affinity"
exit $?
Expand Down Expand Up @@ -145,7 +145,7 @@ done

echo '>>> Verifying cookie cleanup'
canary_cookie=$(kubectl -n sa-test get canary podinfo -o=jsonpath='{.status.previousSessionAffinityCookie}' | xargs)
response=$(curl -H "Host: localproject.contour.io" -H "Cookie: $canary_cookie" -D - http://localhost:8888)
response=$(curl -H "Host: www.example.com" -H "Cookie: $canary_cookie" -D - http://localhost:8888)

if [[ $response == *"$canary_cookie"* ]]; then
echo "✔ Found previous cookie in response"
Expand Down
52 changes: 43 additions & 9 deletions test/gatewayapi/test-utils.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,34 @@ display_httproute() {
fi
}

# Create the "request-duration" MetricTemplate in the flagger-system
# namespace if it is not already present (idempotent: safe to call from
# multiple test scripts). The template points Flagger at the Prometheus
# instance deployed from the Istio addons and computes the p99 request
# latency of the canary target workload.
create_request_duration_metric_template() {
# `kubectl get` exits non-zero when the resource is missing, which
# triggers the apply below; when it already exists the function is a no-op.
if ! kubectl -n flagger-system get metrictemplates request-duration ; then
echo '>>> Create request-duration metric template'
# Apply the manifest from an inline here-doc (unquoted EOF is fine:
# the body contains no shell-expandable `$` tokens).
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: request-duration
namespace: flagger-system
spec:
provider:
type: prometheus
# Prometheus from the Istio addons manifest (same address used for
# the Helm metricsServer value in install.sh).
address: http://prometheus.istio-system:9090
# p99 latency over the analysis interval; assumes the target app
# exports http_request_duration_seconds_bucket with an "app" label —
# NOTE(review): confirm against the podinfo workload's metrics.
query: |
histogram_quantile(0.99,
sum(
rate(
http_request_duration_seconds_bucket{
namespace=~"{{ namespace }}",
app="{{ target }}",
}[{{ interval }}]
)
) by (le)
)
EOF
fi
}

create_latency_metric_template() {
if ! kubectl -n flagger-system get metrictemplates latency; then
echo '>>> Create latency metric template'
Expand All @@ -48,13 +76,15 @@ create_latency_metric_template() {
spec:
provider:
type: prometheus
address: http://flagger-prometheus:9090
address: http://prometheus.istio-system:9090
query: |
histogram_quantile(0.99,
sum(
rate(
envoy_cluster_upstream_rq_time_bucket{
envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
istio_request_duration_milliseconds_bucket{
reporter="source",
destination_workload_namespace=~"{{ namespace }}",
destination_workload=~"{{ target }}",
}[{{ interval }}]
)
) by (le)
Expand All @@ -75,21 +105,25 @@ create_error_rate_metric_template() {
spec:
provider:
type: prometheus
address: http://flagger-prometheus:9090
address: http://prometheus.istio-system:9090
query: |
100 - sum(
rate(
envoy_cluster_upstream_rq{
envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
envoy_response_code!~"5.*"
istio_requests_total{
reporter="source",
destination_workload_namespace=~"{{ namespace }}",
destination_workload=~"{{ target }}",
response_code!~"5.*"
}[{{ interval }}]
)
)
/
sum(
rate(
envoy_cluster_upstream_rq{
envoy_cluster_name=~"{{ namespace }}_{{ target }}-canary_[0-9a-zA-Z-]+",
istio_requests_total{
reporter="source",
destination_workload_namespace=~"{{ namespace }}",
destination_workload=~"{{ target }}",
}[{{ interval }}]
)
)
Expand Down

0 comments on commit 09b0937

Please sign in to comment.