forked from istio/installer
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathnoauth.mk
89 lines (65 loc) · 4.51 KB
/
noauth.mk
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
# Targets for testing the installer in 'minimal' mode. Both configs install only pilot and optional ingress or
# manual-injected sidecars.
# Security is not enabled - this can be used for users who have ipsec or other secure VPC, or don't need the
# security features. It is also intended to verify that Istio can work without citadel for a-la-carte modes.

# These targets are commands, not files - declare them phony so a stray file
# with the same name can never mask a recipe or make it look up to date.
.PHONY: run-test-noauth run-test-noauth-micro run-test-noauth-full run-test-knative

# Umbrella target: ensure istioctl is built, then run the micro, full and knative suites.
run-test-noauth: ${TOP}/bin/istioctl run-test-noauth-micro run-test-noauth-full run-test-knative
# Run a test with the smallest/simplest install possible: cluster-wide resources
# plus a resource-trimmed pilot and ingress gateway, with sidecars injected
# manually via 'istioctl kube-inject' (no injection webhook in this profile).
run-test-noauth-micro:
	kubectl apply -k kustomize/cluster --prune -l istio=cluster
	# Verify that we can kube-inject using files ( there is no injector in this config ).
	# '|| true' keeps the target idempotent when the namespace already exists.
	kubectl create ns simple-micro || true
	# Use a kustomization to lower the alloc (to fit in circle)
	kubectl apply -k test/minimal --prune -l release=istio-system-istio-discovery
	kubectl apply -k test/minimal --prune -l release=istio-system-istio-ingress
	kubectl wait deployments istio-pilot istio-ingressgateway -n istio-system --for=condition=available --timeout=${WAIT_TIMEOUT}
	# Add a node port service, so the tests can also run from container - port is 30080
	# If running with 'local mount' - it also sets a port in the main docker container, so port is accessible from dev
	# machine. Otherwise the test should run inside the kind container node.
	kubectl apply -f test/kind/ingress-service.yaml
	# Apply an ingress, to verify ingress is configured properly
	kubectl apply -f test/simple/ingress.yaml
	istioctl kube-inject -f test/simple/servicesToBeInjected.yaml \
		-n simple-micro \
		--meshConfigFile test/simple/mesh.yaml \
		--valuesFile test/simple/values.yaml \
		--injectConfigFile istio-control/istio-autoinject/files/injection-template.yaml \
		| kubectl apply -n simple-micro -f -
	kubectl wait deployments echosrv-deployment-1 -n simple-micro --for=condition=available --timeout=${WAIT_TIMEOUT}
	kubectl wait deployments echosrv-deployment-2 -n simple-micro --for=condition=available --timeout=${WAIT_TIMEOUT}
	# Verify ingress and pilot are happy (grep fails the recipe if the UI page is not served).
	# The 'simple' fortio has a rewrite rule - so /fortio/fortio/ is the real UI
	curl -s localhost:30080/fortio/fortio/ | grep fortio_chart.js
	# This is the ingress gateway, no rewrite. Without host it hits the redirect
	curl -s localhost:30080/fortio/ -HHost:fortio-ingress.example.com | grep fortio_chart.js
# Installs minimal istio (pilot + ingressgateway) to support knative serving.
# Then installs a simple service and waits for the route to be ready.
#
# This test can be run in several environments:
# - using a 'minimal' pilot+ingress in istio-system
# - using a full istio in istio-system
# - using only a 'minimal' istio+ingress in a separate namespace - nothing in istio-system
# The last config seems to be broken in CircleCI but passes locally, still investigating.
run-test-knative: run-build-cluster run-build-minimal run-build-ingress
	kubectl apply -k kustomize/cluster --prune -l istio=cluster
	# Install Knative CRDs (istio-crds applied via install-crds)
	# Using serving seems to be flaky - no matches for kind "Image" in version "caching.internal.knative.dev/v1alpha1"
	kubectl apply --selector=knative.dev/crd-install=true --filename test/knative/crds.yaml
	kubectl wait --for=condition=Established -f test/knative/crds.yaml
	# Install pilot, ingress - using a kustomization that installs them in istio-micro instead of istio-system
	# The kustomization installs a modified istio-ingress+istio-pilot, using separate namespace
	kubectl apply -k test/knative
	kubectl wait deployments istio-ingressgateway istio-pilot -n istio-micro --for=condition=available --timeout=${WAIT_TIMEOUT}
	# Set host port 30090, for the ingressgateway in istio-micro
	kubectl apply -f test/kind/ingress-service-micro.yaml
	kubectl apply --filename test/knative/serving.yaml
	kubectl wait deployments webhook controller activator autoscaler \
		-n knative-serving --for=condition=available --timeout=${WAIT_TIMEOUT}
	kubectl apply --filename test/knative/service.yaml
	# The route may take a small period of time to be created (WAIT_TIMEOUT default is 240s).
	# 'kubectl wait' is problematic, as the resource may not exist before the command is issued,
	# so poll until it appears; the short sleep avoids hammering the API server.
	until timeout 120s kubectl get routes helloworld-go; do echo "Waiting for routes to be created..."; sleep 2; done
	kubectl wait routes helloworld-go --for=condition=ready --timeout=120s
	# Verify that ingress, pilot and knative are all happy
	#curl localhost:30090/hello -v -H Host:helloworld-go.default.example.com
# Full (non-micro) noauth profile: intentionally a no-op for now - only the micro
# profile is in scope; telemetry-lite will cover the rest.
# '@' silences command echo so the message is printed once, not twice.
run-test-noauth-full:
	@echo "Skipping - only micro profile in scope, will use telemetry-lite"