diff --git a/install/kubernetes/helm/istio/charts/certmanager/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/certmanager/templates/deployment.yaml index 27c024e35855..83a1e8bab9e3 100644 --- a/install/kubernetes/helm/istio/charts/certmanager/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/certmanager/templates/deployment.yaml @@ -25,7 +25,6 @@ spec: {{- end }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" {{- if .Values.podAnnotations }} {{ toYaml .Values.podAnnotations | indent 8 }} {{- end }} @@ -61,3 +60,4 @@ spec: {{- end }} affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/certmanager/values.yaml b/install/kubernetes/helm/istio/charts/certmanager/values.yaml index 2a4d044142fc..33c2857c7ff4 100644 --- a/install/kubernetes/helm/istio/charts/certmanager/values.yaml +++ b/install/kubernetes/helm/istio/charts/certmanager/values.yaml @@ -9,3 +9,23 @@ tag: v0.6.2 resources: {} nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} diff --git a/install/kubernetes/helm/istio/charts/galley/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/galley/templates/deployment.yaml index 302da24c3c7e..c5c07c9da006 100644 --- a/install/kubernetes/helm/istio/charts/galley/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/galley/templates/deployment.yaml @@ -25,7 +25,6 @@ spec: istio: galley annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: istio-galley-service-account {{- if .Values.global.priorityClassName }} @@ -111,3 +110,4 @@ spec: name: istio affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/galley/values.yaml b/install/kubernetes/helm/istio/charts/galley/values.yaml index 4e2271e1e320..5911982a1c0e 100644 --- a/install/kubernetes/helm/istio/charts/galley/values.yaml +++ b/install/kubernetes/helm/istio/charts/galley/values.yaml @@ -6,3 +6,23 @@ replicaCount: 1 image: galley nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. 
“soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} diff --git a/install/kubernetes/helm/istio/charts/gateways/templates/_affinity.tpl b/install/kubernetes/helm/istio/charts/gateways/templates/_affinity.tpl index 6ce92bc80fb4..f1609875a95d 100644 --- a/install/kubernetes/helm/istio/charts/gateways/templates/_affinity.tpl +++ b/install/kubernetes/helm/istio/charts/gateways/templates/_affinity.tpl @@ -41,3 +41,53 @@ {{- end }} {{- end }} {{- end }} + +{{- define "gatewaypodAntiAffinity" }} +{{- if or .podAntiAffinityLabelSelector .podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "gatewaypodAntiAffinityRequiredDuringScheduling" . }} + {{- end }} + {{- if .podAntiAffinityTermLabelSelector }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "gatewaypodAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "gatewaypodAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.value }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "gatewaypodAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if .value }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/install/kubernetes/helm/istio/charts/gateways/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/gateways/templates/deployment.yaml index d73237a9acca..153db6977067 100644 --- a/install/kubernetes/helm/istio/charts/gateways/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/gateways/templates/deployment.yaml @@ -32,7 +32,6 @@ spec: {{- end }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" {{- if $spec.podAnnotations }} {{ toYaml $spec.podAnnotations | indent 8 }} {{ end }} @@ -283,6 +282,7 @@ spec: {{- end }} affinity: {{- include "gatewaynodeaffinity" (dict "root" $ "nodeSelector" $spec.nodeSelector) | indent 6 }} + {{- include "gatewaypodAntiAffinity" (dict "podAntiAffinityLabelSelector" $spec.podAntiAffinityLabelSelector "podAntiAffinityTermLabelSelector" $spec.podAntiAffinityTermLabelSelector) | indent 6 }} --- {{- end }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/gateways/values.yaml b/install/kubernetes/helm/istio/charts/gateways/values.yaml index 36850b00a116..7cd7be85a863 100644 --- a/install/kubernetes/helm/istio/charts/gateways/values.yaml +++ 
b/install/kubernetes/helm/istio/charts/gateways/values.yaml @@ -112,6 +112,27 @@ istio-ingressgateway: ISTIO_META_ROUTER_MODE: "sni-dnat" nodeSelector: {} + # Specify the pod anti-affinity that allows you to constrain which nodes + # your pod is eligible to be scheduled based on labels on pods that are + # already running on the node rather than based on labels on nodes. + # There are currently two types of anti-affinity: + # "requiredDuringSchedulingIgnoredDuringExecution" + # "preferredDuringSchedulingIgnoredDuringExecution" + # which denote “hard” vs. “soft” requirements, you can define your values + # in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" + # correspondingly. + # For example: + # podAntiAffinityLabelSelector: + # - key: security + # operator: In + # values: S1,S2 + # topologyKey: "kubernetes.io/hostname" + # This pod anti-affinity rule says that the pod requires not to be scheduled + # onto a node if that node is already running a pod with label having key + # “security” and value “S1”. + podAntiAffinityLabelSelector: {} + podAntiAffinityTermLabelSelector: {} + istio-egressgateway: enabled: false labels: @@ -158,6 +179,27 @@ istio-egressgateway: # enable cross cluster routing. ISTIO_META_ROUTER_MODE: "sni-dnat" nodeSelector: {} + + # Specify the pod anti-affinity that allows you to constrain which nodes + # your pod is eligible to be scheduled based on labels on pods that are + # already running on the node rather than based on labels on nodes. + # There are currently two types of anti-affinity: + # "requiredDuringSchedulingIgnoredDuringExecution" + # "preferredDuringSchedulingIgnoredDuringExecution" + # which denote “hard” vs. “soft” requirements, you can define your values + # in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" + # correspondingly. + # For example: + # podAntiAffinityLabelSelector: + # - key: security + # operator: In + # values: S1,S2 + # topologyKey: "kubernetes.io/hostname" + # This pod anti-affinity rule says that the pod requires not to be scheduled + # onto a node if that node is already running a pod with label having key + # “security” and value “S1”. + podAntiAffinityLabelSelector: {} + podAntiAffinityTermLabelSelector: {} # Mesh ILB gateway creates a gateway of type InternalLoadBalancer, # for mesh expansion. It exposes the mtls ports for Pilot,CA as well diff --git a/install/kubernetes/helm/istio/charts/grafana/templates/create-custom-resources-job.yaml b/install/kubernetes/helm/istio/charts/grafana/templates/create-custom-resources-job.yaml index 6f0483426c93..2fe2c96118ad 100644 --- a/install/kubernetes/helm/istio/charts/grafana/templates/create-custom-resources-job.yaml +++ b/install/kubernetes/helm/istio/charts/grafana/templates/create-custom-resources-job.yaml @@ -89,3 +89,6 @@ spec: configMap: name: istio-grafana-custom-resources restartPolicy: OnFailure + affinity: + {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . 
| indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/grafana/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/grafana/templates/deployment.yaml index cba279ed29c7..e5672341cec2 100644 --- a/install/kubernetes/helm/istio/charts/grafana/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/grafana/templates/deployment.yaml @@ -19,7 +19,6 @@ spec: release: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: securityContext: runAsUser: 472 @@ -97,6 +96,7 @@ spec: subPath: dashboardproviders.yaml affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} volumes: - name: config configMap: diff --git a/install/kubernetes/helm/istio/charts/grafana/templates/tests/test-grafana-connection.yaml b/install/kubernetes/helm/istio/charts/grafana/templates/tests/test-grafana-connection.yaml index f31fef4eb3ac..036391bc36f9 100644 --- a/install/kubernetes/helm/istio/charts/grafana/templates/tests/test-grafana-connection.yaml +++ b/install/kubernetes/helm/istio/charts/grafana/templates/tests/test-grafana-connection.yaml @@ -11,7 +11,6 @@ metadata: heritage: {{ .Release.Service }} istio: grafana annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -27,4 +26,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . | indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/grafana/values.yaml b/install/kubernetes/helm/istio/charts/grafana/values.yaml index 196fd389a005..d5ab7bf8d024 100644 --- a/install/kubernetes/helm/istio/charts/grafana/values.yaml +++ b/install/kubernetes/helm/istio/charts/grafana/values.yaml @@ -28,6 +28,28 @@ security: usernameKey: username passphraseKey: passphrase nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. 
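# A minimal sketch of a concrete override (illustrative only, not part of this
# patch; it assumes the chart's pods carry an "app: grafana" label):
# entries under "podAntiAffinityLabelSelector" become hard
# (requiredDuringSchedulingIgnoredDuringExecution) rules, while entries under
# "podAntiAffinityTermLabelSelector" become weight-100 soft
# (preferredDuringSchedulingIgnoredDuringExecution) rules once rendered by the
# new "podAntiAffinity" helper:
# podAntiAffinityLabelSelector:
# - key: app
#   operator: In
#   values: grafana
#   topologyKey: "kubernetes.io/hostname"
# podAntiAffinityTermLabelSelector:
# - key: app
#   operator: In
#   values: grafana
#   topologyKey: "kubernetes.io/hostname"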
+podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + contextPath: /grafana service: annotations: {} diff --git a/install/kubernetes/helm/istio/charts/istiocoredns/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/istiocoredns/templates/deployment.yaml index 06bb9a96f653..51da1cf658b4 100644 --- a/install/kubernetes/helm/istio/charts/istiocoredns/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/istiocoredns/templates/deployment.yaml @@ -20,7 +20,6 @@ spec: release: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: istiocoredns-service-account {{- if .Values.global.priorityClassName }} @@ -84,3 +83,4 @@ spec: path: Corefile affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/istiocoredns/values.yaml b/install/kubernetes/helm/istio/charts/istiocoredns/values.yaml index 8484b623526f..161361ef48a3 100644 --- a/install/kubernetes/helm/istio/charts/istiocoredns/values.yaml +++ b/install/kubernetes/helm/istio/charts/istiocoredns/values.yaml @@ -9,3 +9,24 @@ coreDNSImage: coredns/coredns:1.1.2 # The plugin listens for DNS requests from coredns server at 127.0.0.1:8053 coreDNSPluginImage: istio/coredns-plugin:0.2-istio-1.1 nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} diff --git a/install/kubernetes/helm/istio/charts/kiali/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/kiali/templates/deployment.yaml index 09d3f6f5cd7c..d063122f5f80 100644 --- a/install/kubernetes/helm/istio/charts/kiali/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/kiali/templates/deployment.yaml @@ -72,3 +72,4 @@ spec: optional: true affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/kiali/templates/tests/test-kiali-connection.yaml b/install/kubernetes/helm/istio/charts/kiali/templates/tests/test-kiali-connection.yaml index 07cd155b5e79..3e458d76eae3 100644 --- a/install/kubernetes/helm/istio/charts/kiali/templates/tests/test-kiali-connection.yaml +++ b/install/kubernetes/helm/istio/charts/kiali/templates/tests/test-kiali-connection.yaml @@ -11,7 +11,6 @@ metadata: heritage: {{ .Release.Service }} istio: kiali annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -27,4 +26,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . 
| indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/kiali/values.yaml b/install/kubernetes/helm/istio/charts/kiali/values.yaml index 6b5c4a4a3d28..793cbc241b63 100644 --- a/install/kubernetes/helm/istio/charts/kiali/values.yaml +++ b/install/kubernetes/helm/istio/charts/kiali/values.yaml @@ -1,12 +1,34 @@ # # addon kiali # -enabled: false +enabled: false # Note that if using the demo or demo-auth yaml when installing via Helm, this default will be `true`. replicaCount: 1 hub: docker.io/kiali tag: v0.16 -contextPath: /kiali +contextPath: /kiali # The root context path to access the Kiali UI. nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + ingress: enabled: false ## Used to create an Ingress record. @@ -22,13 +44,11 @@ ingress: # - kiali.local dashboard: - secretName: kiali - - # Override the automatically detected Grafana URL, useful when Grafana service has no ExternalIPs - # grafanaURL: - - # Override the automatically detected Jaeger URL, useful when Jaeger service has no ExternalIPs - # jaegerURL: + secretName: kiali # You must create a secret with this name - one is not provided out-of-box. + usernameKey: username # This is the key name within the secret whose value is the actual username. + passphraseKey: passphrase # This is the key name within the secret whose value is the actual passphrase. + grafanaURL: # If you have Grafana installed and it is accessible to client browsers, then set this to its external URL. Kiali will redirect users to this URL when Grafana metrics are to be shown. + jaegerURL: # If you have Jaeger installed and it is accessible to client browsers, then set this property to its external URL. Kiali will redirect users to this URL when Jaeger tracing is to be shown. prometheusAddr: http://prometheus:9090 # When true, a secret will be created with a default username and password. Useful for demos. diff --git a/install/kubernetes/helm/istio/charts/mixer/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/mixer/templates/deployment.yaml index e5828e8ea14d..cbec617ece65 100644 --- a/install/kubernetes/helm/istio/charts/mixer/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/mixer/templates/deployment.yaml @@ -32,6 +32,7 @@ optional: true affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} containers: - name: mixer {{- if contains "/" .Values.image }} @@ -204,6 +205,7 @@ optional: true affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . 
| indent 6 }} containers: - name: mixer {{- if contains "/" .Values.image }} @@ -390,7 +392,6 @@ spec: istio-mixer-type: {{ $key }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" {{- with $.Values.podAnnotations }} {{ toYaml . | indent 8 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/mixer/values.yaml b/install/kubernetes/helm/istio/charts/mixer/values.yaml index 0ce0286accf2..62eac48fd65d 100644 --- a/install/kubernetes/helm/istio/charts/mixer/values.yaml +++ b/install/kubernetes/helm/istio/charts/mixer/values.yaml @@ -49,6 +49,27 @@ telemetry: podAnnotations: {} nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + adapters: kubernetesenv: enabled: true diff --git a/install/kubernetes/helm/istio/charts/nodeagent/templates/daemonset.yaml b/install/kubernetes/helm/istio/charts/nodeagent/templates/daemonset.yaml index c19ed797f24d..a5e88189962c 100644 --- a/install/kubernetes/helm/istio/charts/nodeagent/templates/daemonset.yaml +++ b/install/kubernetes/helm/istio/charts/nodeagent/templates/daemonset.yaml @@ -46,3 +46,4 @@ spec: path: /var/run/sds affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/nodeagent/values.yaml b/install/kubernetes/helm/istio/charts/nodeagent/values.yaml index bf4e449fdea2..66f1f3846d4e 100644 --- a/install/kubernetes/helm/istio/charts/nodeagent/values.yaml +++ b/install/kubernetes/helm/istio/charts/nodeagent/values.yaml @@ -11,3 +11,24 @@ env: # names of authentication provider's plugins. Plugins: "" nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. 
+podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} diff --git a/install/kubernetes/helm/istio/charts/pilot/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/pilot/templates/deployment.yaml index a5634b9d7d6b..8b402f5cf145 100644 --- a/install/kubernetes/helm/istio/charts/pilot/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/pilot/templates/deployment.yaml @@ -37,7 +37,6 @@ spec: istio: pilot annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: istio-pilot-service-account {{- if .Values.global.priorityClassName }} @@ -217,3 +216,4 @@ spec: optional: true affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/pilot/values.yaml b/install/kubernetes/helm/istio/charts/pilot/values.yaml index cfc84af6f789..0a7da57ea9fb 100644 --- a/install/kubernetes/helm/istio/charts/pilot/values.yaml +++ b/install/kubernetes/helm/istio/charts/pilot/values.yaml @@ -22,6 +22,27 @@ cpu: targetAverageUtilization: 80 nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + # The following is used to limit how long a sidecar can be connected # to a pilot. It balances out load across pilot instances at the cost of # increasing system churn. diff --git a/install/kubernetes/helm/istio/charts/prometheus/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/prometheus/templates/deployment.yaml index 3656bd7d078c..12cf74055005 100644 --- a/install/kubernetes/helm/istio/charts/prometheus/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/prometheus/templates/deployment.yaml @@ -23,7 +23,6 @@ spec: release: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: prometheus {{- if .Values.global.priorityClassName }} @@ -79,3 +78,4 @@ spec: secretName: istio.default affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . 
| indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/prometheus/templates/tests/test-prometheus-connection.yaml b/install/kubernetes/helm/istio/charts/prometheus/templates/tests/test-prometheus-connection.yaml index 7e47283d978c..ba2c7d8da575 100644 --- a/install/kubernetes/helm/istio/charts/prometheus/templates/tests/test-prometheus-connection.yaml +++ b/install/kubernetes/helm/istio/charts/prometheus/templates/tests/test-prometheus-connection.yaml @@ -11,7 +11,6 @@ metadata: heritage: {{ .Release.Service }} istio: prometheus annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -26,4 +25,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . | indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/prometheus/values.yaml b/install/kubernetes/helm/istio/charts/prometheus/values.yaml index fbcad8c46376..f40b3cdeb375 100644 --- a/install/kubernetes/helm/istio/charts/prometheus/values.yaml +++ b/install/kubernetes/helm/istio/charts/prometheus/values.yaml @@ -8,6 +8,27 @@ tag: v2.8.0 retention: 6h nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + # Controls the frequency of prometheus scraping scrapeInterval: 15s diff --git a/install/kubernetes/helm/istio/charts/security/templates/cleanup-secrets.yaml b/install/kubernetes/helm/istio/charts/security/templates/cleanup-secrets.yaml index 8c80175aa0d2..be6f26e9fed7 100644 --- a/install/kubernetes/helm/istio/charts/security/templates/cleanup-secrets.yaml +++ b/install/kubernetes/helm/istio/charts/security/templates/cleanup-secrets.yaml @@ -115,3 +115,4 @@ spec: restartPolicy: OnFailure affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/security/templates/create-custom-resources-job.yaml b/install/kubernetes/helm/istio/charts/security/templates/create-custom-resources-job.yaml index c5936542153d..4daacdce6c0f 100644 --- a/install/kubernetes/helm/istio/charts/security/templates/create-custom-resources-job.yaml +++ b/install/kubernetes/helm/istio/charts/security/templates/create-custom-resources-job.yaml @@ -89,5 +89,6 @@ spec: name: istio-security-custom-resources restartPolicy: OnFailure affinity: - {{- include "nodeaffinity" . | indent 6 }} + {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . 
| indent 6 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/security/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/security/templates/deployment.yaml index a6d8cc751c2d..185a8115a9df 100644 --- a/install/kubernetes/helm/istio/charts/security/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/security/templates/deployment.yaml @@ -26,7 +26,6 @@ spec: istio: citadel annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: istio-citadel-service-account {{- if .Values.global.priorityClassName }} @@ -77,3 +76,4 @@ spec: {{- end }} affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/security/templates/tests/test-citadel-connection.yaml b/install/kubernetes/helm/istio/charts/security/templates/tests/test-citadel-connection.yaml index f1173f1bd5de..2e3da06148a5 100644 --- a/install/kubernetes/helm/istio/charts/security/templates/tests/test-citadel-connection.yaml +++ b/install/kubernetes/helm/istio/charts/security/templates/tests/test-citadel-connection.yaml @@ -11,7 +11,6 @@ metadata: heritage: {{ .Release.Service }} istio: citadel annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -26,4 +25,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . | indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/security/values.yaml b/install/kubernetes/helm/istio/charts/security/values.yaml index fd95e94ff1be..01501e1fe315 100644 --- a/install/kubernetes/helm/istio/charts/security/values.yaml +++ b/install/kubernetes/helm/istio/charts/security/values.yaml @@ -7,3 +7,24 @@ image: citadel selfSigned: true # indicate if self-signed CA is used. createMeshPolicy: true nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. 
+podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} diff --git a/install/kubernetes/helm/istio/charts/servicegraph/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/servicegraph/templates/deployment.yaml index 4a9051d45e73..58d9940f1cb1 100644 --- a/install/kubernetes/helm/istio/charts/servicegraph/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/servicegraph/templates/deployment.yaml @@ -19,7 +19,6 @@ spec: release: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: {{- if .Values.global.priorityClassName }} priorityClassName: "{{ .Values.global.priorityClassName }}" @@ -58,3 +57,4 @@ spec: {{- end }} affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/servicegraph/templates/tests/test-servicegraph-connection.yaml b/install/kubernetes/helm/istio/charts/servicegraph/templates/tests/test-servicegraph-connection.yaml index 24d2d2b0b59a..1244259d80c0 100644 --- a/install/kubernetes/helm/istio/charts/servicegraph/templates/tests/test-servicegraph-connection.yaml +++ b/install/kubernetes/helm/istio/charts/servicegraph/templates/tests/test-servicegraph-connection.yaml @@ -11,7 +11,6 @@ metadata: heritage: {{ .Release.Service }} istio: servicegraph annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -27,4 +26,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . | indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/servicegraph/values.yaml b/install/kubernetes/helm/istio/charts/servicegraph/values.yaml index de6d72faab6e..037c03d85dcf 100644 --- a/install/kubernetes/helm/istio/charts/servicegraph/values.yaml +++ b/install/kubernetes/helm/istio/charts/servicegraph/values.yaml @@ -5,6 +5,28 @@ enabled: false replicaCount: 1 image: servicegraph nodeSelector: {} + +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. 
+podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + service: annotations: {} name: http diff --git a/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/templates/deployment.yaml b/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/templates/deployment.yaml index 30fbc156fca8..b6b3287103fb 100644 --- a/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/templates/deployment.yaml +++ b/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/templates/deployment.yaml @@ -25,7 +25,6 @@ spec: istio: sidecar-injector annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" spec: serviceAccountName: istio-sidecar-injector-service-account {{- if .Values.global.priorityClassName }} @@ -96,3 +95,4 @@ spec: path: config affinity: {{- include "nodeaffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} diff --git a/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/values.yaml b/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/values.yaml index 7ef47b918342..4f326eaa53ca 100644 --- a/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/values.yaml +++ b/install/kubernetes/helm/istio/charts/sidecarInjectorWebhook/values.yaml @@ -7,6 +7,27 @@ image: sidecar_injector enableNamespacesByDefault: false nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + # If true, webhook or istioctl injector will rewrite PodSpec for liveness # health check to redirect request to sidecar. This makes liveness check work # even when mTLS is enabled. diff --git a/install/kubernetes/helm/istio/charts/tracing/templates/deployment-jaeger.yaml b/install/kubernetes/helm/istio/charts/tracing/templates/deployment-jaeger.yaml index 3bb6304d1e5b..5a342f74d566 100644 --- a/install/kubernetes/helm/istio/charts/tracing/templates/deployment-jaeger.yaml +++ b/install/kubernetes/helm/istio/charts/tracing/templates/deployment-jaeger.yaml @@ -20,7 +20,6 @@ spec: release: {{ .Release.Name }} annotations: sidecar.istio.io/inject: "false" - scheduler.alpha.kubernetes.io/critical-pod: "" prometheus.io/scrape: "true" prometheus.io/port: "16686" {{- if .Values.contextPath }} @@ -79,5 +78,5 @@ spec: {{- end }} affinity: {{- include "nodeaffinity" . | indent 6 }} - + {{- include "podAntiAffinity" . 
| indent 6 }} {{ end }} diff --git a/install/kubernetes/helm/istio/charts/tracing/templates/deployment-zipkin.yaml b/install/kubernetes/helm/istio/charts/tracing/templates/deployment-zipkin.yaml index 2cb479708083..e628d5e9901d 100644 --- a/install/kubernetes/helm/istio/charts/tracing/templates/deployment-zipkin.yaml +++ b/install/kubernetes/helm/istio/charts/tracing/templates/deployment-zipkin.yaml @@ -67,5 +67,5 @@ spec: value: "{{ .Values.zipkin.maxSpans }}" affinity: {{- include "nodeaffinity" . | indent 6 }} - + {{- include "podAntiAffinity" . | indent 6 }} {{ end }} diff --git a/install/kubernetes/helm/istio/charts/tracing/templates/tests/test-tracing-connection.yaml b/install/kubernetes/helm/istio/charts/tracing/templates/tests/test-tracing-connection.yaml index 10defca55be9..9090cee0fc6b 100644 --- a/install/kubernetes/helm/istio/charts/tracing/templates/tests/test-tracing-connection.yaml +++ b/install/kubernetes/helm/istio/charts/tracing/templates/tests/test-tracing-connection.yaml @@ -10,7 +10,6 @@ metadata: release: {{ .Release.Name }} heritage: {{ .Release.Service }} annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" sidecar.istio.io/inject: "false" helm.sh/hook: test-success spec: @@ -30,4 +29,5 @@ spec: restartPolicy: Never affinity: {{- include "nodeaffinity" . | indent 4 }} + {{- include "podAntiAffinity" . | indent 4 }} {{- end }} diff --git a/install/kubernetes/helm/istio/charts/tracing/values.yaml b/install/kubernetes/helm/istio/charts/tracing/values.yaml index b12d65d40941..a97053d46f62 100644 --- a/install/kubernetes/helm/istio/charts/tracing/values.yaml +++ b/install/kubernetes/helm/istio/charts/tracing/values.yaml @@ -6,6 +6,27 @@ enabled: false provider: jaeger nodeSelector: {} +# Specify the pod anti-affinity that allows you to constrain which nodes +# your pod is eligible to be scheduled based on labels on pods that are +# already running on the node rather than based on labels on nodes. +# There are currently two types of anti-affinity: +# "requiredDuringSchedulingIgnoredDuringExecution" +# "preferredDuringSchedulingIgnoredDuringExecution" +# which denote “hard” vs. “soft” requirements, you can define your values +# in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector" +# correspondingly. +# For example: +# podAntiAffinityLabelSelector: +# - key: security +# operator: In +# values: S1,S2 +# topologyKey: "kubernetes.io/hostname" +# This pod anti-affinity rule says that the pod requires not to be scheduled +# onto a node if that node is already running a pod with label having key +# “security” and value “S1”. +podAntiAffinityLabelSelector: {} +podAntiAffinityTermLabelSelector: {} + jaeger: hub: docker.io/jaegertracing tag: 1.9 diff --git a/install/kubernetes/helm/istio/templates/_affinity.tpl b/install/kubernetes/helm/istio/templates/_affinity.tpl index 88ddede14502..da9b1191503a 100644 --- a/install/kubernetes/helm/istio/templates/_affinity.tpl +++ b/install/kubernetes/helm/istio/templates/_affinity.tpl @@ -40,4 +40,54 @@ - {{ $key }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} + +{{- define "podAntiAffinity" }} +{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .Values.podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityRequiredDuringScheduling" . 
}} + {{- end }} + {{- if or .Values.podAntiAffinityTermLabelSelector}} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "podAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.value }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if .value }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/install/kubernetes/helm/istio/templates/configmap.yaml b/install/kubernetes/helm/istio/templates/configmap.yaml index fd54c93a204b..178e67490ae9 100644 --- a/install/kubernetes/helm/istio/templates/configmap.yaml +++ b/install/kubernetes/helm/istio/templates/configmap.yaml @@ -82,6 +82,9 @@ data: ingressService: istio-ingressgateway {{- end }} + # Default connect timeout for dynamic clusters generated by Pilot and returned via XDS + connectTimeout: 10s + # DNS refresh rate for Envoy clusters of type STRICT_DNS dnsRefreshRate: {{ .Values.global.proxy.dnsRefreshRate }} @@ -115,6 +118,9 @@ data: outboundTrafficPolicy: mode: {{ .Values.global.outboundTrafficPolicy.mode }} + localityLbSetting: +{{ toYaml .Values.global.localityLbSetting | indent 6 }} + # The namespace to treat as the administrative root namespace for istio # configuration. {{- if .Values.global.configRootNamespace }} @@ -149,7 +155,8 @@ data: defaultConfig: # - # TCP connection timeout between Envoy & the application, and between Envoys. + # TCP connection timeout between Envoy & the application, and between Envoys. Used for static clusters + # defined in Envoy's configuration file connectTimeout: 10s # ### ADVANCED SETTINGS ############# diff --git a/install/kubernetes/helm/istio/templates/sidecar-injector-configmap.yaml b/install/kubernetes/helm/istio/templates/sidecar-injector-configmap.yaml index 126de656a24c..6a1335176393 100644 --- a/install/kubernetes/helm/istio/templates/sidecar-injector-configmap.yaml +++ b/install/kubernetes/helm/istio/templates/sidecar-injector-configmap.yaml @@ -53,6 +53,7 @@ data: cpu: 100m memory: 50Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/install/kubernetes/helm/istio/values.yaml b/install/kubernetes/helm/istio/values.yaml index 28a65854ad71..5cc6b531d582 100644 --- a/install/kubernetes/helm/istio/values.yaml +++ b/install/kubernetes/helm/istio/values.yaml @@ -450,6 +450,26 @@ global: # meshNetworks: {} + # Specifies the global locality load balancing settings. + # Locality-weighted load balancing allows administrators to control the distribution of traffic to + # endpoints based on the localities of where the traffic originates and where it will terminate. + # Please set either failover or distribute configuration but not both. 
+ # + # localityLbSetting: + # distribute: + # - from: "us-central1/*" + # to: + # "us-central1/*": 80 + # "us-central2/*": 20 + # + # localityLbSetting: + # failover: + # - from: us-east + # to: eu-west + # - from: us-west + # to: us-east + localityLbSetting: {} + # Specifies whether helm test is enabled or not. # This field is set to false by default, so 'helm template ...' # will ignore the helm test yaml files when generating the template diff --git a/istio.deps b/istio.deps index 7b4181dab6f8..a0b515b97d97 100644 --- a/istio.deps +++ b/istio.deps @@ -4,7 +4,7 @@ "name": "PROXY_REPO_SHA", "repoName": "proxy", "file": "", - "lastStableSHA": "a5d5a464251e940f2d59a23ec598deb0277a8dd3" + "lastStableSHA": "55c80965eab994e6bfa2227e3942fa89928d0d70" }, { "_comment": "", diff --git a/mixer/pkg/runtime/config/ephemeral_test.go b/mixer/pkg/runtime/config/ephemeral_test.go index 8210e6fda53d..22c028af1e38 100644 --- a/mixer/pkg/runtime/config/ephemeral_test.go +++ b/mixer/pkg/runtime/config/ephemeral_test.go @@ -15,7 +15,6 @@ package config import ( - "io/ioutil" "testing" "istio.io/istio/mixer/adapter/list/config" @@ -25,65 +24,12 @@ import ( func TestDynamicHandlerCorruption(t *testing.T) { adapters := data.BuildAdapters(nil) templates := data.BuildTemplates(nil) - - // need an adapter with config - listbackend, err := ioutil.ReadFile("../../../test/listbackend/nosession.yaml") - if err != nil { - t.Fatal(err) - } - - cfg := string(listbackend) - - listentry, err := ioutil.ReadFile("../../../template/listentry/template.yaml") + cfg, err := data.ReadConfigs("../../../test/listbackend/nosession.yaml", "../../../template/listentry/template.yaml") if err != nil { t.Fatal(err) } - cfg = cfg + "\n---\n" + string(listentry) - cfg = cfg + ` ---- -apiVersion: config.istio.io/v1alpha2 -kind: handler -metadata: - name: h1 - namespace: istio-system -spec: - adapter: listbackend-nosession - connection: - address: 127.0.0.1:8080 - params: - provider_url: google.com - overrides: - - a - - b - caching_interval: 5s ---- -apiVersion: config.istio.io/v1alpha2 -kind: handler -metadata: - name: h2 - namespace: istio-system -spec: - adapter: listbackend-nosession - connection: - address: 127.0.0.1:8080 - params: - refresh_interval: 5s - caching_use_count: 50 ---- -apiVersion: config.istio.io/v1alpha2 -kind: handler -metadata: - name: h3 - namespace: istio-system -spec: - adapter: listbackend-nosession - connection: - address: 127.0.0.1:8080 - params: - blacklist: true ---- -` + cfg = data.JoinConfigs(cfg, data.ListHandler1, data.ListHandler2, data.ListHandler3) s, err := GetSnapshotForTest(templates, adapters, data.ServiceConfig, cfg) if err != nil { t.Fatal(err) diff --git a/mixer/pkg/runtime/config/snapshot.go b/mixer/pkg/runtime/config/snapshot.go index 9a36843abd4a..173e977592fe 100644 --- a/mixer/pkg/runtime/config/snapshot.go +++ b/mixer/pkg/runtime/config/snapshot.go @@ -245,6 +245,11 @@ func (h HandlerStatic) AdapterParams() interface{} { return h.Params } +// ConnectionConfig returns nil for static handler +func (h HandlerStatic) ConnectionConfig() interface{} { + return nil +} + // GetName gets name func (i InstanceStatic) GetName() string { return i.Name @@ -275,6 +280,11 @@ func (h HandlerDynamic) AdapterParams() interface{} { return h.AdapterConfig } +// ConnectionConfig gets connection config of dynamic handler +func (h HandlerDynamic) ConnectionConfig() interface{} { + return h.Connection +} + // GetName gets name func (i InstanceDynamic) GetName() string { return i.Name diff --git 
a/mixer/pkg/runtime/handler/signature.go b/mixer/pkg/runtime/handler/signature.go index cbd445744614..f884548d9bc5 100644 --- a/mixer/pkg/runtime/handler/signature.go +++ b/mixer/pkg/runtime/handler/signature.go @@ -66,6 +66,10 @@ func calculateSignature(handler hndlr, insts interface{}) signature { (reflect.ValueOf(handler.AdapterParams()).Kind() != reflect.Ptr || !reflect.ValueOf(handler.AdapterParams()).IsNil()) { encoded = encoded && encode(buf, handler.AdapterParams()) } + if handler.ConnectionConfig() != nil { + encoded = encoded && encode(buf, handler.ConnectionConfig()) + } + for _, name := range instanceNames { instance := instanceMap[name] encoded = encoded && encode(buf, instance.TemplateName()) @@ -86,6 +90,7 @@ type hndlr interface { GetName() string AdapterName() string AdapterParams() interface{} + ConnectionConfig() interface{} } type inst interface { GetName() string diff --git a/mixer/pkg/runtime/handler/table_test.go b/mixer/pkg/runtime/handler/table_test.go index 50dbf0a2e49f..cd1b74b3c24f 100644 --- a/mixer/pkg/runtime/handler/table_test.go +++ b/mixer/pkg/runtime/handler/table_test.go @@ -20,9 +20,11 @@ import ( "testing" "time" + "istio.io/istio/mixer/pkg/adapter" "istio.io/istio/mixer/pkg/pool" "istio.io/istio/mixer/pkg/runtime/config" "istio.io/istio/mixer/pkg/runtime/testing/data" + "istio.io/istio/mixer/pkg/template" ) // Create a standard global config with Handler H1, Instance I1 and rule R1 referencing I1 and H1. @@ -108,6 +110,42 @@ func TestNew_NoReuse_DifferentConfig(t *testing.T) { } } +func TestNew_NoReuse_DifferentConnectionConfig(t *testing.T) { + templates := map[string]*template.Info{} + adapters := map[string]*adapter.Info{} + + // Load base dynamic config, which includes listentry template and listbackend adapter config + dynamicConfig, err := data.ReadConfigs("../../../template/listentry/template.yaml", "../../../test/listbackend/nosession.yaml") + dynamicConfig = data.JoinConfigs(dynamicConfig, data.InstanceDynamic, data.RuleDynamic) + + // Join base dynamic config with dynamic handler + config1 := data.JoinConfigs(dynamicConfig, data.ListHandler3) + if err != nil { + t.Fatalf("fail to load dynamic config: %v", err) + } + s, _ := config.GetSnapshotForTest(templates, adapters, data.ServiceConfig, config1) + table := NewTable(Empty(), s, nil) + + if len(table.entries) != 1 { + t.Fatalf("got %v entries in route table, want 1", len(table.entries)) + } + + // Join base dynamic config with dynamic handler which has different connection address + config2 := data.JoinConfigs(dynamicConfig, data.ListHandler3Addr) + // NewTable again using the slightly different config + s, _ = config.GetSnapshotForTest(templates, adapters, data.ServiceConfig, config2) + + table2 := NewTable(table, s, nil) + + if len(table2.entries) != 1 { + t.Fatalf("got %v entries in route table, want 1", len(table2.entries)) + } + + if table2.entries[data.FqdnListHandler3] == table.entries[data.FqdnListHandler3] { + t.Fatalf("got same entry %+v in route table after handler config change, want different entries", table2.entries[data.FqdnListHandler3]) + } +} + func TestTable_Get(t *testing.T) { table := &Table{ entries: make(map[string]Entry), diff --git a/mixer/pkg/runtime/testing/data/data.go b/mixer/pkg/runtime/testing/data/data.go index 7f53b50a64ec..593f58a88e27 100644 --- a/mixer/pkg/runtime/testing/data/data.go +++ b/mixer/pkg/runtime/testing/data/data.go @@ -14,7 +14,10 @@ package data -import "strings" +import ( + "io/ioutil" + "strings" +) // ServiceConfig is a standard service 
config. var ServiceConfig = ` @@ -670,7 +673,115 @@ spec: - iapa1.tapa.istio-system ` +// ListHandler1 is a dynamic handler of listentry template named as hdynlist1 +var ListHandler1 = ` +apiVersion: config.istio.io/v1alpha2 +kind: handler +metadata: + name: hdynlist1 + namespace: istio-system +spec: + adapter: listbackend-nosession + connection: + address: 127.0.0.1:8080 + params: + provider_url: google.com + overrides: + - a + - b + caching_interval: 5s +` + +// ListHandler2 is a dynamic handler of listentry template named as hdynlist2 +var ListHandler2 = ` +apiVersion: config.istio.io/v1alpha2 +kind: handler +metadata: + name: hdynlist2 + namespace: istio-system +spec: + adapter: listbackend-nosession + connection: + address: 127.0.0.1:8080 + params: + refresh_interval: 5s + caching_use_count: 50 +` + +// ListHandler3 is a dynamic handler of listentry template named as hdynlist3 +var ListHandler3 = ` +apiVersion: config.istio.io/v1alpha2 +kind: handler +metadata: + name: hdynlist3 + namespace: istio-system +spec: + adapter: listbackend-nosession + connection: + address: 127.0.0.1:8080 + params: + blacklist: true +` + +// ListHandler3Addr is a dynamic handler that is same as ListHandler3 but has different backend address +var ListHandler3Addr = ` +apiVersion: config.istio.io/v1alpha2 +kind: handler +metadata: + name: hdynlist3 + namespace: istio-system +spec: + adapter: listbackend-nosession + connection: + address: 127.0.0.1:8081 + params: + blacklist: true +` + +// FqdnListHandler3 is fqdn of ListHandler3 +var FqdnListHandler3 = "hdynlist3.handler.istio-system" + +// InstanceDynamic is the instance of the template listentry +var InstanceDynamic = ` +apiVersion: "config.istio.io/v1alpha2" +kind: instance +metadata: + name: idynlistentry1 + namespace: istio-system +spec: + template: listentry + params: +` + +// RuleDynamic is a testing rule of dynamic instance idynlistentry1 and dynamic handler hdynlist3 +var RuleDynamic = ` +apiVersion: "config.istio.io/v1alpha2" +kind: rule +metadata: + name: rdynlist1 + namespace: istio-system +spec: + actions: + - handler: hdynlist3.handler + instances: + - idynlistentry1.istio-system +` + // JoinConfigs is a utility for joining various pieces of config for consumption by store code. func JoinConfigs(configs ...string) string { return strings.Join(configs, "\n---\n") } + +// ReadConfigs reads config from the given path list and returns them as a string. 
+func ReadConfigs(paths ...string) (string, error) { + cfg := "" + for _, p := range paths { + fc, err := ioutil.ReadFile(p) + if err != nil { + return "", err + } + cfg = JoinConfigs(cfg, string(fc)) + } + + return cfg, nil +} diff --git a/pilot/pkg/kube/inject/testdata/webhook/daemonset.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/daemonset.yaml.injected index 8f2128f612bd..f4282274a117 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/daemonset.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/daemonset.yaml.injected @@ -133,6 +133,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig-multi.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig-multi.yaml.injected index 0fac71963432..e275bcfe1fb3 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig-multi.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig-multi.yaml.injected @@ -136,6 +136,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig.yaml.injected index ed2a9d759b82..77564ba022b0 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/deploymentconfig.yaml.injected @@ -136,6 +136,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/frontend.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/frontend.yaml.injected index 49a4f71aea86..a5f15b079eb4 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/frontend.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/frontend.yaml.injected @@ -138,6 +138,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/hello-config-map-name.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/hello-config-map-name.yaml.injected index 71009e5d4b43..e806599957a0 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/hello-config-map-name.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/hello-config-map-name.yaml.injected @@ -134,6 +134,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/hello-multi.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/hello-multi.yaml.injected index abf31b232e25..085167cde406 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/hello-multi.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/hello-multi.yaml.injected @@ -135,6 +135,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN @@ -286,6 +287,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/hello-probes.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/hello-probes.yaml.injected index 58555aa3fb9c..59383ddf4a78 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/hello-probes.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/hello-probes.yaml.injected @@ -154,6 +154,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git 
a/pilot/pkg/kube/inject/testdata/webhook/job.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/job.yaml.injected index eb7f28bb2b58..fc7ec726840c 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/job.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/job.yaml.injected @@ -132,6 +132,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/list-frontend.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/list-frontend.yaml.injected index 49a4f71aea86..a5f15b079eb4 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/list-frontend.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/list-frontend.yaml.injected @@ -138,6 +138,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/list.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/list.yaml.injected index abf31b232e25..085167cde406 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/list.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/list.yaml.injected @@ -135,6 +135,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN @@ -286,6 +287,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/multi-init.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/multi-init.yaml.injected index ff8d736333a8..fbe010bc26c7 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/multi-init.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/multi-init.yaml.injected @@ -145,6 +145,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/replicaset.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/replicaset.yaml.injected index f44e2a4d6861..d378178e69f0 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/replicaset.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/replicaset.yaml.injected @@ -132,6 +132,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/replicationcontroller.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/replicationcontroller.yaml.injected index fe9352cdee14..082a3234bf35 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/replicationcontroller.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/replicationcontroller.yaml.injected @@ -133,6 +133,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/resource_annotations.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/resource_annotations.yaml.injected index 20a9dc64ce48..7b4eb19768cf 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/resource_annotations.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/resource_annotations.yaml.injected @@ -131,6 +131,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/statefulset.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/statefulset.yaml.injected index 759b4cc7b3d8..9e96205f6b28 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/statefulset.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/statefulset.yaml.injected @@ -137,6 +137,7 @@ spec: cpu: 10m memory: 
10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/status_annotations.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/status_annotations.yaml.injected index c8ba23d7596a..199282cb4f25 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/status_annotations.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/status_annotations.yaml.injected @@ -137,6 +137,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-empty-includes.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-empty-includes.yaml.injected index 93fc97a8abf5..974ec4e7312b 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-empty-includes.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-empty-includes.yaml.injected @@ -136,6 +136,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-wildcards.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-wildcards.yaml.injected index be0812804072..dd9ab2310a93 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-wildcards.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations-wildcards.yaml.injected @@ -136,6 +136,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations.yaml.injected index 899d291fde2c..526c8b65deb0 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/traffic-annotations.yaml.injected @@ -136,6 +136,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/kube/inject/testdata/webhook/user-volume.yaml.injected b/pilot/pkg/kube/inject/testdata/webhook/user-volume.yaml.injected index e6db207f1044..b231032b1d37 100644 --- a/pilot/pkg/kube/inject/testdata/webhook/user-volume.yaml.injected +++ b/pilot/pkg/kube/inject/testdata/webhook/user-volume.yaml.injected @@ -141,6 +141,7 @@ spec: cpu: 10m memory: 10Mi securityContext: + runAsUser: 0 capabilities: add: - NET_ADMIN diff --git a/pilot/pkg/model/push_context.go b/pilot/pkg/model/push_context.go index f4602e37d673..f7d687d45e96 100644 --- a/pilot/pkg/model/push_context.go +++ b/pilot/pkg/model/push_context.go @@ -93,7 +93,7 @@ type PushContext struct { ServicePort2Name map[string]PortList `json:"-"` // ServiceAccounts contains a map of hostname and port to service accounts. - ServiceAccounts map[Hostname]map[int][]string + ServiceAccounts map[Hostname]map[int][]string `json:"-"` initDone bool } diff --git a/pilot/pkg/model/validation.go b/pilot/pkg/model/validation.go index e295467a23f2..dde9cea6898c 100644 --- a/pilot/pkg/model/validation.go +++ b/pilot/pkg/model/validation.go @@ -41,7 +41,7 @@ const ( dns1123LabelMaxLength int = 63 dns1123LabelFmt string = "[a-zA-Z0-9]([-a-z-A-Z0-9]*[a-zA-Z0-9])?" 
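As a quick check of the wildcard-prefix revision just below (a minimal sketch, assuming only the standard regexp and fmt packages; the constants are the ones defined here): the pattern accepts a bare "*", a "*"- or "*-"-prefixed DNS label, or a plain label, while a "*" anywhere else in the label is rejected.

// Hypothetical sanity check of wildcardPrefix against single host labels.
wildcardLabel := regexp.MustCompile("^" + wildcardPrefix + "$")
for _, s := range []string{"*", "*-foo", "*foo", "foo"} {
	fmt.Println(s, wildcardLabel.MatchString(s)) // all true
}
fmt.Println("foo*", wildcardLabel.MatchString("foo*")) // false: '*' is only valid as a prefix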
// a wild-card prefix is an '*', a normal DNS1123 label with a leading '*' or '*-', or a normal DNS1123 label - wildcardPrefix = `\*|(\*|\*-)?(` + dns1123LabelFmt + `)` + wildcardPrefix = `(\*|(\*|\*-)?` + dns1123LabelFmt + `)` // TODO: there is a stricter regex for the labels from validation.go in k8s qualifiedNameFmt string = "[-A-Za-z0-9_./]*" diff --git a/pilot/pkg/model/validation_test.go b/pilot/pkg/model/validation_test.go index df93c49dae70..5ab484e9ce84 100644 --- a/pilot/pkg/model/validation_test.go +++ b/pilot/pkg/model/validation_test.go @@ -2261,6 +2261,21 @@ func TestValidateRouteDestination(t *testing.T) { {name: "simple", routes: []*networking.RouteDestination{{ Destination: &networking.Destination{Host: "foo.baz"}, }}, valid: true}, + {name: "wildcard dash", routes: []*networking.RouteDestination{{ + Destination: &networking.Destination{Host: "*-foo.baz"}, + }}, valid: true}, + {name: "wildcard prefix", routes: []*networking.RouteDestination{{ + Destination: &networking.Destination{Host: "*foo.baz"}, + }}, valid: true}, + {name: "wildcard", routes: []*networking.RouteDestination{{ + Destination: &networking.Destination{Host: "*"}, + }}, valid: true}, + {name: "bad wildcard", routes: []*networking.RouteDestination{{ + Destination: &networking.Destination{Host: "foo.*"}, + }}, valid: false}, + {name: "bad fqdn", routes: []*networking.RouteDestination{{ + Destination: &networking.Destination{Host: "default/baz"}, + }}, valid: false}, {name: "no destination", routes: []*networking.RouteDestination{{ Destination: nil, }}, valid: false}, diff --git a/pilot/pkg/networking/core/v1alpha3/cluster.go b/pilot/pkg/networking/core/v1alpha3/cluster.go index 2eaf858dffb6..08eb8a6ebcc6 100644 --- a/pilot/pkg/networking/core/v1alpha3/cluster.go +++ b/pilot/pkg/networking/core/v1alpha3/cluster.go @@ -18,7 +18,6 @@ import ( "fmt" "strconv" "strings" - "time" apiv2 "github.com/envoyproxy/go-control-plane/envoy/api/v2" "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" @@ -139,8 +138,8 @@ func (configgen *ConfigGeneratorImpl) BuildClusters(env *model.Environment, prox // Add a blackhole and passthrough cluster for catching traffic to unresolved routes // DO NOT CALL PLUGINS for these two clusters. - clusters = append(clusters, buildBlackHoleCluster()) - clusters = append(clusters, buildDefaultPassthroughCluster()) + clusters = append(clusters, buildBlackHoleCluster(env)) + clusters = append(clusters, buildDefaultPassthroughCluster(env)) return normalizeClusters(push, proxy, clusters), nil } @@ -969,11 +968,11 @@ func setUpstreamProtocol(cluster *apiv2.Cluster, port *model.Port) { // generates a cluster that sends traffic to dummy localport 0 // This cluster is used to catch all traffic to unresolved destinations in virtual service -func buildBlackHoleCluster() *apiv2.Cluster { +func buildBlackHoleCluster(env *model.Environment) *apiv2.Cluster { cluster := &apiv2.Cluster{ Name: util.BlackHoleCluster, ClusterDiscoveryType: &apiv2.Cluster_Type{Type: apiv2.Cluster_STATIC}, - ConnectTimeout: 1 * time.Second, + ConnectTimeout: util.GogoDurationToDuration(env.Mesh.ConnectTimeout), LbPolicy: apiv2.Cluster_ROUND_ROBIN, } return cluster @@ -981,11 +980,11 @@ func buildBlackHoleCluster() *apiv2.Cluster { // generates a cluster that sends traffic to the original destination. 
// This cluster is used to catch all traffic to unknown listener ports -func buildDefaultPassthroughCluster() *apiv2.Cluster { +func buildDefaultPassthroughCluster(env *model.Environment) *apiv2.Cluster { cluster := &apiv2.Cluster{ Name: util.PassthroughCluster, ClusterDiscoveryType: &apiv2.Cluster_Type{Type: apiv2.Cluster_ORIGINAL_DST}, - ConnectTimeout: 1 * time.Second, + ConnectTimeout: util.GogoDurationToDuration(env.Mesh.ConnectTimeout), LbPolicy: apiv2.Cluster_ORIGINAL_DST_LB, } return cluster diff --git a/pilot/pkg/networking/core/v1alpha3/listener.go b/pilot/pkg/networking/core/v1alpha3/listener.go index bfcd1909c0b8..eee2ed9432f8 100644 --- a/pilot/pkg/networking/core/v1alpha3/listener.go +++ b/pilot/pkg/networking/core/v1alpha3/listener.go @@ -77,7 +77,7 @@ var ( EnvoyJSONLogFormat = &google_protobuf.Struct{ Fields: map[string]*google_protobuf.Value{ "start_time": {Kind: &google_protobuf.Value_StringValue{StringValue: "%START_TIME%"}}, - "method": {Kind: &google_protobuf.Value_StringValue{StringValue: "%START_TIME%"}}, + "method": {Kind: &google_protobuf.Value_StringValue{StringValue: "%REQ(:METHOD)%"}}, "path": {Kind: &google_protobuf.Value_StringValue{StringValue: "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"}}, "protocol": {Kind: &google_protobuf.Value_StringValue{StringValue: "%PROTOCOL%"}}, "response_code": {Kind: &google_protobuf.Value_StringValue{StringValue: "%RESPONSE_CODE%"}}, diff --git a/pilot/pkg/proxy/envoy/v2/debug.go b/pilot/pkg/proxy/envoy/v2/debug.go index 8d3bdf327943..c2966b62cb69 100644 --- a/pilot/pkg/proxy/envoy/v2/debug.go +++ b/pilot/pkg/proxy/envoy/v2/debug.go @@ -113,7 +113,6 @@ func Syncz(w http.ResponseWriter, _ *http.Request) { } w.Header().Add("Content-Type", "application/json") _, _ = w.Write(out) - w.WriteHeader(http.StatusOK) } // registryz providees debug support for registry - adding and listing model items. @@ -445,8 +444,8 @@ func (s *DiscoveryServer) PushStatusHandler(w http.ResponseWriter, req *http.Req return } w.Header().Add("Content-Type", "application/json") + _, _ = w.Write(out) - w.WriteHeader(http.StatusOK) } func writeAllADS(w io.Writer) { diff --git a/pilot/pkg/proxy/envoy/v2/eds_sh_test.go b/pilot/pkg/proxy/envoy/v2/eds_sh_test.go index 4230ae1937e4..83a21d8f925b 100644 --- a/pilot/pkg/proxy/envoy/v2/eds_sh_test.go +++ b/pilot/pkg/proxy/envoy/v2/eds_sh_test.go @@ -38,8 +38,7 @@ import ( // Testing the Split Horizon EDS. 
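Returning to the debug.go handlers above, a minimal sketch of the ordering rule they now rely on (standard net/http only, handler name is illustrative): the first Write on a ResponseWriter implicitly sends WriteHeader(http.StatusOK), so an explicit WriteHeader after Write is superfluous and triggers the "http: superfluous response.WriteHeader call" warning; dropping the trailing call keeps the 200 status and silences the warning.

// Hypothetical handler showing the order used above: set headers first,
// then Write, which commits a 200 status if none has been sent yet.
func statuszHandler(w http.ResponseWriter, _ *http.Request) {
	out := []byte(`{"ok":true}`)
	w.Header().Add("Content-Type", "application/json")
	_, _ = w.Write(out) // implies WriteHeader(http.StatusOK); no explicit call needed afterwards
}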
type expectedResults struct { - endpoints []string - weights map[string]uint32 + weights map[string]uint32 } // The test will setup 3 networks with various number of endpoints for the same service within @@ -76,8 +75,12 @@ func TestSplitHorizonEds(t *testing.T) { network: "network1", sidecarID: sidecarID("10.1.0.1", "app3"), want: expectedResults{ - endpoints: []string{"10.1.0.1", "159.122.219.2", "159.122.219.3", "179.114.119.3"}, - weights: map[string]uint32{"159.122.219.2": 2, "159.122.219.3": 3}, + weights: map[string]uint32{ + "10.1.0.1": 2, + "159.122.219.2": 4, + "159.122.219.3": 3, + "179.114.119.3": 3, + }, }, }, { @@ -86,8 +89,13 @@ func TestSplitHorizonEds(t *testing.T) { network: "network2", sidecarID: sidecarID("10.2.0.1", "app3"), want: expectedResults{ - endpoints: []string{"10.2.0.1", "10.2.0.2", "159.122.219.1", "159.122.219.3", "179.114.119.3"}, - weights: map[string]uint32{"159.122.219.1": 1, "159.122.219.3": 3, "179.114.119.3": 3}, + weights: map[string]uint32{ + "10.2.0.1": 2, + "10.2.0.2": 2, + "159.122.219.1": 2, + "159.122.219.3": 3, + "179.114.119.3": 3, + }, }, }, { @@ -96,8 +104,13 @@ func TestSplitHorizonEds(t *testing.T) { network: "network3", sidecarID: sidecarID("10.3.0.1", "app3"), want: expectedResults{ - endpoints: []string{"10.3.0.1", "10.3.0.2", "10.3.0.3", "159.122.219.1", "159.122.219.2"}, - weights: map[string]uint32{"159.122.219.1": 1, "159.122.219.2": 2}, + weights: map[string]uint32{ + "159.122.219.1": 2, + "159.122.219.2": 4, + "10.3.0.1": 2, + "10.3.0.2": 2, + "10.3.0.3": 2, + }, }, }, { @@ -106,8 +119,16 @@ func TestSplitHorizonEds(t *testing.T) { network: "network4", sidecarID: sidecarID("10.4.0.1", "app3"), want: expectedResults{ - endpoints: []string{"10.4.0.1", "10.4.0.2", "10.4.0.3", "10.4.0.4", "159.122.219.1", "159.122.219.2", "159.122.219.3", "179.114.119.3"}, - weights: map[string]uint32{"159.122.219.1": 1, "159.122.219.2": 2, "159.122.219.3": 3, "179.114.119.3": 3}, + weights: map[string]uint32{ + "10.4.0.1": 2, + "10.4.0.2": 2, + "10.4.0.3": 2, + "10.4.0.4": 2, + "159.122.219.1": 2, + "159.122.219.2": 4, + "159.122.219.3": 3, + "179.114.119.3": 3, + }, }, }, } @@ -160,8 +181,8 @@ func verifySplitHorizonResponse(t *testing.T, network string, sidecarID string, } lbEndpoints := eps[0].LbEndpoints - if len(lbEndpoints) != len(expected.endpoints) { - t.Fatal(fmt.Errorf("number of endpoints should be %d but got %d", len(expected.endpoints), len(lbEndpoints))) + if len(lbEndpoints) != len(expected.weights) { + t.Fatal(fmt.Errorf("number of endpoints should be %d but got %d", len(expected.weights), len(lbEndpoints))) } for addr, weight := range expected.weights { diff --git a/pilot/pkg/proxy/envoy/v2/eds_test.go b/pilot/pkg/proxy/envoy/v2/eds_test.go index 87997eecb22a..732ded658ffe 100644 --- a/pilot/pkg/proxy/envoy/v2/eds_test.go +++ b/pilot/pkg/proxy/envoy/v2/eds_test.go @@ -29,6 +29,8 @@ import ( "testing" "time" + endpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" + "istio.io/istio/pilot/pkg/bootstrap" "istio.io/istio/pilot/pkg/model" v2 "istio.io/istio/pilot/pkg/proxy/envoy/v2" @@ -51,18 +53,20 @@ func TestEds(t *testing.T) { os.Setenv("PILOT_ENABLE_LOCALITY_LOAD_BALANCING", "ON") addLocalityEndpoints(server) - // Add the test ads client to list of service instances in order to test the context dependent locality coloring. - addTestClientEndpoint(server) + // Add the test ads clients to list of service instances in order to test the context dependent locality coloring. 
+ addTestClientEndpoints(server) adsc := adsConnectAndWait(t, 0x0a0a0a0a) defer adsc.Close() + adsc2 := adsConnectAndWait(t, 0x0a0a0a0b) + defer adsc2.Close() t.Run("TCPEndpoints", func(t *testing.T) { testTCPEndpoints("127.0.0.1", adsc, t) testEdsz(t) }) t.Run("LocalityPrioritizedEndpoints", func(t *testing.T) { - testLocalityPrioritizedEndpoints(adsc, t) + testLocalityPrioritizedEndpoints(adsc, adsc2, t) }) t.Run("UDSEndpoints", func(t *testing.T) { testUdsEndpoints(server, adsc, t) @@ -119,8 +123,9 @@ func adsConnectAndWait(t *testing.T, ip int) *adsc.ADSC { } var asdcLocality = "region1/zone1/subzone1" +var asdc2Locality = "region2/zone2/subzone2" -func addTestClientEndpoint(server *bootstrap.Server) { +func addTestClientEndpoints(server *bootstrap.Server) { server.EnvoyXdsServer.MemRegistry.AddService("test-1.default", &model.Service{ Hostname: "test-1.default", Ports: model.PortList{ @@ -143,6 +148,18 @@ func addTestClientEndpoint(server *bootstrap.Server) { Locality: asdcLocality, }, }) + server.EnvoyXdsServer.MemRegistry.AddInstance("test-1.default", &model.ServiceInstance{ + Endpoint: model.NetworkEndpoint{ + Address: fmt.Sprintf("10.10.10.11"), + Port: 80, + ServicePort: &model.Port{ + Name: "http", + Port: 80, + Protocol: model.ProtocolHTTP, + }, + Locality: asdc2Locality, + }, + }) server.EnvoyXdsServer.Push(true, nil) } @@ -168,10 +185,15 @@ func testTCPEndpoints(expected string, adsc *adsc.ADSC, t *testing.T) { } } -func testLocalityPrioritizedEndpoints(adsc *adsc.ADSC, t *testing.T) { - items := strings.SplitN(asdcLocality, "/", 3) +func testLocalityPrioritizedEndpoints(adsc *adsc.ADSC, adsc2 *adsc.ADSC, t *testing.T) { + verifyLocalityPriorities(asdcLocality, adsc.EDS["outbound|80||locality.cluster.local"].GetEndpoints(), t) + verifyLocalityPriorities(asdc2Locality, adsc2.EDS["outbound|80||locality.cluster.local"].GetEndpoints(), t) +} + +func verifyLocalityPriorities(proxyLocality string, eps []endpoint.LocalityLbEndpoints, t *testing.T) { + items := strings.SplitN(proxyLocality, "/", 3) region, zone, subzone := items[0], items[1], items[2] - for _, ep := range adsc.EDS["outbound|80||locality.cluster.local"].GetEndpoints() { + for _, ep := range eps { if ep.GetLocality().Region == region { if ep.GetLocality().Zone == zone { if ep.GetLocality().SubZone == subzone { @@ -490,8 +512,11 @@ func addLocalityEndpoints(server *bootstrap.Server) { localities := []string{ "region1/zone1/subzone1", "region1/zone1/subzone2", - "region1/zone2/subzone2", + "region1/zone2/subzone1", "region2/zone1/subzone1", + "region2/zone1/subzone2", + "region2/zone2/subzone1", + "region2/zone2/subzone2", } for i, locality := range localities { server.EnvoyXdsServer.MemRegistry.AddInstance("locality.cluster.local", &model.ServiceInstance{ diff --git a/pilot/pkg/proxy/envoy/v2/ep_filters.go b/pilot/pkg/proxy/envoy/v2/ep_filters.go index 80820c89ed36..147cf791351e 100644 --- a/pilot/pkg/proxy/envoy/v2/ep_filters.go +++ b/pilot/pkg/proxy/envoy/v2/ep_filters.go @@ -39,6 +39,21 @@ func EndpointsByNetworkFilter(endpoints []endpoint.LocalityLbEndpoints, conn *Xd network = "" } + // calculate the multiples of weight. + // It is needed to normalize the LB Weight across different networks. 
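Using the topology from the updated ep_filters_test.go later in this patch (network1: 2 endpoints behind gateway 1.1.1.1; network2: 1 endpoint behind gateways 2.2.2.2 and 2.2.2.20), multiples works out to 2, local endpoints get weight 2, 1.1.1.1 is advertised with weight 4, and each network2 gateway with weight 1. A minimal sketch of that arithmetic (illustrative helper, not part of the change):

// normalizedGatewayWeight mirrors the normalization below: a remote network
// with w endpoints reached through k gateways advertises each gateway with
// weight w * (multiples / k), while local endpoints are scaled by multiples,
// so totals stay proportional to endpoint counts across networks.
func normalizedGatewayWeight(w, multiples, k uint32) uint32 {
	return w * (multiples / k) // integer division, matching the code below
}

// With multiples = 2:
//   local endpoints:            1 * 2 = 2
//   network1 gateway 1.1.1.1:   normalizedGatewayWeight(2, 2, 1) == 4
//   network2 gateways (each):   normalizedGatewayWeight(1, 2, 2) == 1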
+ multiples := 1 + for _, network := range env.MeshNetworks.Networks { + num := 0 + for _, gw := range network.Gateways { + if gwIP := net.ParseIP(gw.GetAddress()); gwIP != nil { + num++ + } + } + if num > 1 { + multiples *= num + } + } + // A new array of endpoints to be returned that will have both local and // remote gateways (if any) filtered := []endpoint.LocalityLbEndpoints{} @@ -56,7 +71,7 @@ func EndpointsByNetworkFilter(endpoints []endpoint.LocalityLbEndpoints, conn *Xd if epNetwork == network { // This is a local endpoint lbEp.LoadBalancingWeight = &types.UInt32Value{ - Value: uint32(1), + Value: uint32(multiples), } lbEndpoints = append(lbEndpoints, lbEp) } else { @@ -65,14 +80,6 @@ func EndpointsByNetworkFilter(endpoints []endpoint.LocalityLbEndpoints, conn *Xd } } - // If there is no MeshNetworks configuration, we don't have gateways information - // so just keep the endpoint with the local ones - if env.MeshNetworks == nil { - newEp := createLocalityLbEndpoints(&ep, lbEndpoints) - filtered = append(filtered, *newEp) - continue - } - // Add endpoints to remote networks' gateways // Iterate over all networks that have the cluster endpoint (weight>0) and @@ -81,14 +88,17 @@ func EndpointsByNetworkFilter(endpoints []endpoint.LocalityLbEndpoints, conn *Xd for n, w := range remoteEps { networkConf, found := env.MeshNetworks.Networks[n] if !found { + adsLog.Infof("the endpoints within network %s will be ignored for no network configured", n) continue } gws := networkConf.Gateways if len(gws) == 0 { + adsLog.Infof("the endpoints within network %s will be ignored for no gateways configured", n) continue } - // There may be multiple gateways for the network. Add an LbEndpoint for + gwEps := []endpoint.LbEndpoint{} + // There may be multiples gateways for the network. 
Add an LbEndpoint for // each one of them for _, gw := range gws { var gwEp *endpoint.LbEndpoint @@ -111,9 +121,14 @@ func EndpointsByNetworkFilter(endpoints []endpoint.LocalityLbEndpoints, conn *Xd } if gwEp != nil { - lbEndpoints = append(lbEndpoints, *gwEp) + gwEps = append(gwEps, *gwEp) } } + weight := w * uint32(multiples/len(gwEps)) + for _, gwEp := range gwEps { + gwEp.LoadBalancingWeight.Value = weight + lbEndpoints = append(lbEndpoints, gwEp) + } } // Found local endpoint(s) so add to the result a new one LocalityLbEndpoints @@ -147,8 +162,9 @@ func createLocalityLbEndpoints(base *endpoint.LocalityLbEndpoints, lbEndpoints [ if len(lbEndpoints) == 0 { weight = nil } else { - weight = &types.UInt32Value{ - Value: uint32(len(lbEndpoints)), + weight = &types.UInt32Value{} + for _, lbEp := range lbEndpoints { + weight.Value += lbEp.GetLoadBalancingWeight().Value } } ep := &endpoint.LocalityLbEndpoints{ diff --git a/pilot/pkg/proxy/envoy/v2/ep_filters_test.go b/pilot/pkg/proxy/envoy/v2/ep_filters_test.go index 58cc54fadaa8..4f2cecc2738d 100644 --- a/pilot/pkg/proxy/envoy/v2/ep_filters_test.go +++ b/pilot/pkg/proxy/envoy/v2/ep_filters_test.go @@ -72,12 +72,13 @@ func TestEndpointsByNetworkFilter(t *testing.T) { { lbEps: []LbEpInfo{ // 2 local endpoints - {address: "10.0.0.1", weight: 1}, - {address: "10.0.0.2", weight: 1}, + {address: "10.0.0.1", weight: 2}, + {address: "10.0.0.2", weight: 2}, // 1 endpoint to gateway of network2 with weight 1 because it has 1 endpoint {address: "2.2.2.2", weight: 1}, + {address: "2.2.2.20", weight: 1}, }, - weight: 3, + weight: 6, }, }, }, @@ -90,11 +91,11 @@ func TestEndpointsByNetworkFilter(t *testing.T) { { lbEps: []LbEpInfo{ // 1 local endpoint - {address: "20.0.0.1", weight: 1}, - // 1 endpoint to gateway of network1 with weight 2 because it has 2 endpoints - {address: "1.1.1.1", weight: 2}, + {address: "20.0.0.1", weight: 2}, + // 1 endpoint to gateway of network1 with weight 4 because it has 2 endpoints + {address: "1.1.1.1", weight: 4}, }, - weight: 2, + weight: 6, }, }, }, @@ -106,12 +107,13 @@ func TestEndpointsByNetworkFilter(t *testing.T) { want: []LocLbEpInfo{ { lbEps: []LbEpInfo{ - // 1 endpoint to gateway of network1 with weight 2 because it has 2 endpoints - {address: "1.1.1.1", weight: 2}, - // 1 endpoint to gateway of network2 with weight 1 because it has 1 endpoint + // 1 endpoint to gateway of network1 with weight 4 because it has 2 endpoints + {address: "1.1.1.1", weight: 4}, + // 1 endpoint to gateway of network2 with weight 2 because it has 1 endpoint {address: "2.2.2.2", weight: 1}, + {address: "2.2.2.20", weight: 1}, }, - weight: 2, + weight: 6, }, }, }, @@ -124,13 +126,14 @@ func TestEndpointsByNetworkFilter(t *testing.T) { { lbEps: []LbEpInfo{ // 1 local endpoint - {address: "40.0.0.1", weight: 1}, + {address: "40.0.0.1", weight: 2}, // 1 endpoint to gateway of network1 with weight 2 because it has 2 endpoints - {address: "1.1.1.1", weight: 2}, + {address: "1.1.1.1", weight: 4}, // 1 endpoint to gateway of network2 with weight 1 because it has 1 endpoint {address: "2.2.2.2", weight: 1}, + {address: "2.2.2.20", weight: 1}, }, - weight: 3, + weight: 8, }, }, }, @@ -215,6 +218,12 @@ func environment() *model.Environment { }, Port: 80, }, + { + Gw: &meshconfig.Network_IstioNetworkGateway_Address{ + Address: "2.2.2.20", + }, + Port: 80, + }, }, }, "network3": {