Update to Kubernetes v1.14
mikkeloscar committed Jul 8, 2019
1 parent ef5261a commit e4ce4c1
Showing 18 changed files with 241 additions and 202 deletions.
cluster/config-defaults.yaml (3 changes: 1 addition & 2 deletions)
@@ -75,7 +75,6 @@ image_policy: "trusted"
{{else}}
image_policy: "dev"
{{end}}
-compliance_checker_enabled: "true"

# cadvisor settings
cadvisor_cpu: "150m"
@@ -192,7 +191,7 @@ teapot_admission_controller_validate_application_label: "false"
{{end}}

{{if eq .Environment "e2e"}}
teapot_admission_controller_ignore_namespaces: "^kube-system|(e2e-tests-(downward-api|kubectl|projected|statefulset|pod-network)-.*)$"
teapot_admission_controller_ignore_namespaces: "^kube-system|((downward-api|kubectl|projected|statefulset|pod-network)-.*)$"
{{else}}
teapot_admission_controller_ignore_namespaces: "^kube-system$"
{{end}}
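The new `teapot_admission_controller_ignore_namespaces` value drops the `e2e-tests-` prefix from the ignored test namespaces, which lines up with the Kubernetes 1.14 e2e framework no longer prefixing its namespaces that way. A minimal Go sketch for sanity-checking what the new pattern covers; it assumes the webhook evaluates the value as an ordinary unanchored Go regexp match, which this diff does not show:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// New e2e pattern from config-defaults.yaml above.
	ignore := regexp.MustCompile(`^kube-system|((downward-api|kubectl|projected|statefulset|pod-network)-.*)$`)

	for _, ns := range []string{
		"kube-system",       // ignored: matches the ^kube-system alternative
		"statefulset-9512",  // ignored: 1.14-style e2e namespace, no e2e-tests- prefix
		"downward-api-3713", // ignored
		"default",           // validated: no alternative matches
	} {
		fmt.Printf("%-20s ignored=%v\n", ns, ignore.MatchString(ns))
	}
}
```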
cluster/manifests/kube-proxy/daemonset.yaml (6 changes: 3 additions & 3 deletions)
@@ -5,7 +5,7 @@ metadata:
namespace: kube-system
labels:
application: kube-proxy
-version: v1.13.7
+version: v1.14.3-1
spec:
selector:
matchLabels:
@@ -17,7 +17,7 @@ spec:
name: kube-proxy
labels:
application: kube-proxy
-version: v1.13.7
+version: v1.14.3-1
annotations:
config/hash: {{"configmap.yaml" | manifestHash}}
spec:
@@ -31,7 +31,7 @@ spec:
hostNetwork: true
containers:
- name: kube-proxy
-image: registry.opensource.zalan.do/teapot/kube-proxy:v1.13.7
+image: registry.opensource.zalan.do/teapot/kube-proxy:v1.14.3-1
args:
- --hostname-override=$(HOSTNAME_OVERRIDE)
- --config=/config/kube-proxy.yaml
cluster/node-pools/master-default/userdata.clc.yaml (20 changes: 10 additions & 10 deletions)
@@ -180,7 +180,7 @@ systemd:
After=docker.service dockercfg.service meta-data-iptables.service private-ipv4.service
[Service]
-Environment=KUBELET_IMAGE_TAG=v1.13.7
+Environment=KUBELET_IMAGE_TAG=v1.14.3-1
Environment=KUBELET_IMAGE_ARGS=--exec=/kubelet
Environment=KUBELET_IMAGE_URL=docker://registry.opensource.zalan.do/teapot/kubelet
Environment="RKT_RUN_ARGS=--insecure-options=image \
@@ -356,7 +356,7 @@ storage:
namespace: kube-system
labels:
application: kube-apiserver
-version: v1.13.7
+version: v1.14.3-1
annotations:
kubernetes-log-watcher/scalyr-parser: |
[{"container": "webhook", "parser": "json-structured-log"}]
@@ -368,7 +368,7 @@ storage:
hostNetwork: true
containers:
- name: kube-apiserver
-image: registry.opensource.zalan.do/teapot/kube-apiserver:v1.13.7
+image: registry.opensource.zalan.do/teapot/kube-apiserver:v1.14.3-1
args:
- --apiserver-count={{ .Values.apiserver_count }}
- --bind-address=0.0.0.0
@@ -611,7 +611,7 @@ storage:
cpu: 100m
memory: 20Mi
{{ end }}
-- image: registry.opensource.zalan.do/teapot/image-policy-webhook:{{if eq .Cluster.ConfigItems.compliance_checker_enabled "true"}}master-44{{else}}v0.4.1{{end}}
+- image: registry.opensource.zalan.do/teapot/image-policy-webhook:v0.5.2
name: image-policy-webhook
args:
- --policy={{ .Cluster.ConfigItems.image_policy }}
@@ -712,15 +712,15 @@ storage:
namespace: kube-system
labels:
application: kube-controller-manager
-version: v1.13.7
+version: v1.14.3-1
spec:
priorityClassName: system-node-critical
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
containers:
- name: kube-controller-manager
-image: registry.opensource.zalan.do/teapot/kube-controller-manager:v1.13.7
+image: registry.opensource.zalan.do/teapot/kube-controller-manager:v1.14.3-1
args:
- --kubeconfig=/etc/kubernetes/controller-kubeconfig
- --leader-elect=true
@@ -787,7 +787,7 @@ storage:
namespace: kube-system
labels:
application: kube-scheduler
-version: v1.13.7
+version: v1.14.3-1
spec:
priorityClassName: system-node-critical
tolerations:
@@ -796,7 +796,7 @@ storage:
hostNetwork: true
containers:
- name: kube-scheduler
-image: registry.opensource.zalan.do/teapot/kube-scheduler:v1.13.7
+image: registry.opensource.zalan.do/teapot/kube-scheduler:v1.14.3-1
args:
- --master=http://127.0.0.1:8080
- --leader-elect=true
@@ -1255,7 +1255,7 @@ storage:
--volume dns,kind=host,source=/run/systemd/resolve/resolv.conf,readOnly=true \
--mount volume=dns,target=/etc/resolv.conf \
--net=host \
-docker://registry.opensource.zalan.do/teapot/kubectl:v1.13.7 \
+docker://registry.opensource.zalan.do/teapot/kubectl:v1.14.3-1 \
--exec=/kubectl -- \
--kubeconfig=/etc/kubernetes/kubeconfig \
label node "$(hostname)" \
@@ -1268,7 +1268,7 @@ storage:
--net=host \
--volume dns,kind=host,source=/run/systemd/resolve/resolv.conf,readOnly=true \
--mount volume=dns,target=/etc/resolv.conf \
-docker://registry.opensource.zalan.do/teapot/kubectl:v1.13.7 \
+docker://registry.opensource.zalan.do/teapot/kubectl:v1.14.3-1 \
--exec=/kubectl -- \
--kubeconfig=/etc/kubernetes/kubeconfig \
drain "$(hostname)" \
cluster/node-pools/master-ubuntu-default/userdata.yaml (2 changes: 1 addition & 1 deletion)
@@ -348,7 +348,7 @@ write_files:
cpu: 100m
memory: 20Mi
{{ end }}
-- image: registry.opensource.zalan.do/teapot/image-policy-webhook:{{if eq .Cluster.ConfigItems.compliance_checker_enabled "true"}}master-44{{else}}v0.4.1{{end}}
+- image: registry.opensource.zalan.do/teapot/image-policy-webhook:v0.5.2
name: image-policy-webhook
args:
- --policy={{ .Cluster.ConfigItems.image_policy }}
cluster/node-pools/worker-default/userdata.clc.yaml (6 changes: 3 additions & 3 deletions)
@@ -179,7 +179,7 @@ systemd:
After=docker.service dockercfg.service meta-data-iptables.service private-ipv4.service collect-instance-metadata.service
[Service]
-Environment=KUBELET_IMAGE_TAG=v1.13.7
+Environment=KUBELET_IMAGE_TAG=v1.14.3-1
Environment=KUBELET_IMAGE_ARGS=--exec=/kubelet
Environment=KUBELET_IMAGE_URL=docker://registry.opensource.zalan.do/teapot/kubelet
Environment="RKT_RUN_ARGS=--insecure-options=image \
@@ -483,7 +483,7 @@ storage:
--volume dns,kind=host,source=/run/systemd/resolve/resolv.conf,readOnly=true \
--mount volume=dns,target=/etc/resolv.conf \
--net=host \
-docker://registry.opensource.zalan.do/teapot/kubectl:v1.13.7 \
+docker://registry.opensource.zalan.do/teapot/kubectl:v1.14.3-1 \
--exec=/kubectl -- \
--kubeconfig=/etc/kubernetes/kubeconfig \
label node "$(hostname)" \
@@ -496,7 +496,7 @@ storage:
--net=host \
--volume dns,kind=host,source=/run/systemd/resolve/resolv.conf,readOnly=true \
--mount volume=dns,target=/etc/resolv.conf \
-docker://registry.opensource.zalan.do/teapot/kubectl:v1.13.7 \
+docker://registry.opensource.zalan.do/teapot/kubectl:v1.14.3-1 \
--exec=/kubectl -- \
--kubeconfig=/etc/kubernetes/kubeconfig \
drain "$(hostname)" \
test/e2e/Makefile (2 changes: 1 addition & 1 deletion)
@@ -2,7 +2,7 @@

BINARY ?= kubernetes-on-aws-e2e
VERSION ?= $(shell git describe --tags --always --dirty)
-KUBE_VERSION ?= v1.13.5
+KUBE_VERSION ?= v1.14.3
IMAGE ?= registry-write.opensource.zalan.do/teapot/$(BINARY)
TAG ?= $(VERSION)
DOCKERFILE ?= Dockerfile
test/e2e/README.md (18 changes: 9 additions & 9 deletions)
@@ -85,11 +85,11 @@ scratch and test the Kubernetes type Foo.
defer func() {
By("deleting the foo)
defer GinkgoRecover()
-err2 := cs.Core().Foo(ns).Delete(foo.Name, metav1.NewDeleteOptions(0))
+err2 := cs.CoreV1().Foo(ns).Delete(foo.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()
// creates the Ingress Object
-_, err := cs.Core().Foo(ns).Create(foo)
+_, err := cs.CoreV1().Foo(ns).Create(foo)
Expect(err).NotTo(HaveOccurred())
})
})
@@ -113,10 +113,10 @@ scratch and test the Kubernetes type Foo.
defer func() {
By("deleting the pod")
defer GinkgoRecover()
-err2 := cs.Core().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
+err2 := cs.CoreV1().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()
-_, err = cs.Core().Pods(ns).Create(pod)
+_, err = cs.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
```
@@ -137,10 +137,10 @@ scratch and test the Kubernetes type Foo.
defer func() {
By("deleting the service")
defer GinkgoRecover()
-err2 := cs.Core().Services(ns).Delete(service.Name, metav1.NewDeleteOptions(0))
+err2 := cs.CoreV1().Services(ns).Delete(service.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()
-_, err := cs.Core().Services(ns).Create(service)
+_, err := cs.CoreV1().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())
```

@@ -163,14 +163,14 @@ Create Kubernetes ingress object:
defer func() {
By("deleting the ingress")
defer GinkgoRecover()
-err2 := cs.Extensions().Ingresses(ns).Delete(ing.Name, metav1.NewDeleteOptions(0))
+err2 := cs.ExtensionsV1beta1().Ingresses(ns).Delete(ing.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()
-ingressCreate, err := cs.Extensions().Ingresses(ns).Create(ing)
+ingressCreate, err := cs.ExtensionsV1beta1().Ingresses(ns).Create(ing)
Expect(err).NotTo(HaveOccurred())
addr, err := jig.WaitForIngressAddress(cs, ns, ingressCreate.Name, 3*time.Minute)
Expect(err).NotTo(HaveOccurred())
-ingress, err := cs.Extensions().Ingresses(ns).Get(ing.Name, metav1.GetOptions{ResourceVersion: "0"})
+ingress, err := cs.ExtensionsV1beta1().Ingresses(ns).Get(ing.Name, metav1.GetOptions{ResourceVersion: "0"})
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("ALB endpoint from ingress status: %s", ingress.Status.LoadBalancer.Ingress[0].Hostname))
```
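The README snippets above switch from the unversioned `cs.Core()` and `cs.Extensions()` accessors to the versioned `cs.CoreV1()` and `cs.ExtensionsV1beta1()` clients expected by the client-go that ships with Kubernetes 1.14. A minimal, self-contained sketch of the same create-and-clean-up pattern; the package name, helper name, and imports are illustrative assumptions, not part of this commit:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createAndCleanUpPod mirrors the e2e pattern above: create through the
// versioned CoreV1() client, then delete with a zero grace period.
func createAndCleanUpPod(cs kubernetes.Interface, ns string, pod *corev1.Pod) error {
	created, err := cs.CoreV1().Pods(ns).Create(pod)
	if err != nil {
		return err
	}
	// metav1.NewDeleteOptions(0) is the same zero-grace-period delete
	// used throughout the tests in this commit.
	return cs.CoreV1().Pods(ns).Delete(created.Name, metav1.NewDeleteOptions(0))
}
```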
test/e2e/apiserver.go (4 changes: 2 additions & 2 deletions)
@@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("API Server webhook tests", func() {
defer func() {
By(fmt.Sprintf("Delete a compliant deployment: %s", deployment.Name))
defer GinkgoRecover()
-err := cs.Extensions().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
+err := cs.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}()
Expect(err).NotTo(HaveOccurred())
@@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("API Server webhook tests", func() {
defer func() {
By(fmt.Sprintf("Delete a compliant deployment: %s", deployment.Name))
defer GinkgoRecover()
-err := cs.Extensions().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
+err := cs.ExtensionsV1beta1().Deployments(ns).Delete(deployment.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}()
err = framework.WaitForDeploymentWithCondition(cs, ns, deployment.Name, "FailedCreate", appsv1.DeploymentReplicaFailure)
test/e2e/audit.go (4 changes: 2 additions & 2 deletions)
@@ -118,10 +118,10 @@ func expectEvents(f *framework.Framework, expectedEvents []utils.AuditEvent) {
missingReport, err := utils.CheckAuditLines(stream, expectedEvents, auditv1.SchemeGroupVersion)
if err != nil {
framework.Logf("Failed to observe audit events: %v", err)
-} else if len(missingReport) > 0 {
+} else if len(missingReport.MissingEvents) > 0 {
framework.Logf("Events %#v not found!", missingReport)
}
-return len(missingReport) == 0, nil
+return len(missingReport.MissingEvents) == 0, nil
})
framework.ExpectNoError(err, "after %v failed to observe audit events", pollingTimeout)
}
test/e2e/egress.go (10 changes: 5 additions & 5 deletions)
@@ -50,10 +50,10 @@ var _ = framework.KubeDescribe("Static Egress creation", func() {
defer func() {
By("deleting the pod")
defer GinkgoRecover()
-cs.Core().Pods(ns).Delete(pingPod.Name, metav1.NewDeleteOptions(0))
+cs.CoreV1().Pods(ns).Delete(pingPod.Name, metav1.NewDeleteOptions(0))
// don't care about POD deletion, because it should exit by itself
}()
-_, err := cs.Core().Pods(ns).Create(pingPod)
+_, err := cs.CoreV1().Pods(ns).Create(pingPod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(f.WaitForPodRunning(pingPod.Name))

@@ -63,15 +63,15 @@ var _ = framework.KubeDescribe("Static Egress creation", func() {
defer func() {
By("deleting the configmap")
defer GinkgoRecover()
-err2 := cs.Core().ConfigMaps(ns).Delete(cmap.Name, metav1.NewDeleteOptions(0))
+err2 := cs.CoreV1().ConfigMaps(ns).Delete(cmap.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()
-_, err = cs.Core().ConfigMaps(ns).Create(cmap)
+_, err = cs.CoreV1().ConfigMaps(ns).Create(cmap)
Expect(err).NotTo(HaveOccurred())

// wait for egress route and NAT GWs ready and POD exit code 0 vs 2
for {
-p, err := cs.Core().Pods(ns).Get(pingPod.Name, metav1.GetOptions{})
+p, err := cs.CoreV1().Pods(ns).Get(pingPod.Name, metav1.GetOptions{})
if err != nil {
Expect(fmt.Errorf("Could not get POD %s", pingPod.Name)).NotTo(HaveOccurred())
return
test/e2e/external_dns.go (8 changes: 4 additions & 4 deletions)
@@ -50,26 +50,26 @@ var _ = framework.KubeDescribe("External DNS creation", func() {

By("Creating service " + serviceName + " in namespace " + ns)
defer func() {
-err := cs.Core().Services(ns).Delete(serviceName, nil)
+err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
Expect(err).NotTo(HaveOccurred())
}()

hostName := fmt.Sprintf("%s-%d.%s", serviceName, time.Now().UTC().Unix(), E2EHostedZone())
service := createServiceTypeLoadbalancer(serviceName, hostName, labels, port)

-_, err := cs.Core().Services(ns).Create(service)
+_, err := cs.CoreV1().Services(ns).Create(service)
Expect(err).NotTo(HaveOccurred())

By("Submitting the pod to kubernetes")
pod := createNginxPod(nameprefix, ns, labels, port)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
-err2 := cs.Core().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
+err2 := cs.CoreV1().Pods(ns).Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err2).NotTo(HaveOccurred())
}()

-_, err = cs.Core().Pods(ns).Create(pod)
+_, err = cs.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())

framework.ExpectNoError(f.WaitForPodRunning(pod.Name))