This repository has been archived by the owner on May 6, 2020. It is now read-only.

Update Lokomotive to Kubernetes 1.15
This commit pulls in code from upstream Typhoon, with Terraform 0.11.x still supported.

Signed-off-by: Suraj Deshmukh <suraj@kinvolk.io>
surajssd committed Sep 9, 2019
1 parent ad76e16 commit d209acf
Showing 57 changed files with 270 additions and 167 deletions.
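The diffs loaded below cover the AWS and Azure Flatcar Linux modules. Together they bump the kubelet and hyperkube images from v1.14.1 to v1.15.3 and etcd from v3.3.13 to v3.3.15, pin the bootkube assets to a newer terraform-render-bootkube commit, tighten the Terraform provider version pins, rename the flannel security rules to VXLAN rules on UDP 4789 and create them only for flannel clusters, make the kubelet cgroup driver depend on the OS channel, and enable root-volume encryption on AWS; the Azure module additionally gains a "networking" variable. A minimal sketch of consuming the updated AWS module at this commit; the module path and all values are illustrative, and only the variable names that also appear in the diffs below are taken from the source:

module "cluster" {
  # Assumed module path within this repository, pinned to this commit.
  source = "github.com/kinvolk/lokomotive-kubernetes//aws/flatcar-linux/kubernetes?ref=d209acf"

  cluster_name       = "demo"
  dns_zone           = "example.com"
  ssh_authorized_key = "ssh-rsa AAAA..."
  asset_dir          = "/home/user/clusters/demo"

  # "flannel" keeps the UDP 4789 VXLAN rules below in play.
  # Other required inputs of the module are omitted here.
  networking = "flannel"
}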
2 changes: 1 addition & 1 deletion aws/flatcar-linux/kubernetes/bootkube.tf
@@ -1,6 +1,6 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
- source = "github.com/kinvolk/terraform-render-bootkube?ref=d07243a9e7f6084cfe08b708731a79c26146badb"
+ source = "github.com/kinvolk/terraform-render-bootkube?ref=7e237ffa21fd85f76ddf2a215073aa7cd6ef2476"

cluster_name = "${var.cluster_name}"
api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
6 changes: 4 additions & 2 deletions aws/flatcar-linux/kubernetes/cl/controller.yaml.tmpl
@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
- Environment="ETCD_IMAGE_TAG=v3.3.13"
+ Environment="ETCD_IMAGE_TAG=v3.3.15"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
@@ -65,6 +65,7 @@ systemd:
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log \
--insecure-options=image"
+ Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
@@ -80,6 +81,7 @@ systemd:
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
+ --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
@@ -121,7 +123,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
- KUBELET_IMAGE_TAG=v1.14.1
+ KUBELET_IMAGE_TAG=v1.15.3
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
5 changes: 3 additions & 2 deletions aws/flatcar-linux/kubernetes/controllers.tf
@@ -31,6 +31,7 @@ resource "aws_instance" "controllers" {
volume_type = "${var.disk_type}"
volume_size = "${var.disk_size}"
iops = "${var.disk_iops}"
+ encrypted = true
}

# network
@@ -66,8 +67,8 @@ data "template_file" "controller-configs" {
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-
+ etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
+ cgroup_driver = "${local.channel == "edge" ? "systemd":"cgroupfs"}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
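Two things change for the controller instances above: the EBS root volume is now created with encrypted = true, and the kubelet cgroup driver is no longer left at its default. The channel-dependent value is rendered into the Container Linux Config, exported as KUBELET_CGROUP_DRIVER, and handed to the kubelet through --cgroup-driver. A sketch of that wiring follows; the definition of local.channel is not part of this diff, so it is assumed here to come from an OS-channel variable defined elsewhere in the module, with the Edge channel presumably needing the systemd driver while the other channels keep cgroupfs:

locals {
  # Assumption: the channel is taken from an OS-channel variable defined elsewhere.
  channel = "${var.os_channel}"
}

data "template_file" "controller-configs" {
  template = "${file("${path.module}/cl/controller.yaml.tmpl")}"

  vars = {
    # Edge selects systemd, every other channel selects cgroupfs, as in the hunk above.
    cgroup_driver = "${local.channel == "edge" ? "systemd" : "cgroupfs"}"

    # etcd_name, etcd_domain, etcd_initial_cluster, ssh_authorized_key and the
    # remaining variables from the hunk above are passed alongside.
  }
}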
6 changes: 3 additions & 3 deletions aws/flatcar-linux/kubernetes/require.tf
@@ -5,19 +5,19 @@ terraform {
}

provider "aws" {
- version = ">= 1.13, < 3.0"
+ version = "2.25.0"
}

provider "local" {
version = "~> 1.0"
}

provider "null" {
- version = "~> 1.0"
+ version = "~> 2.1"
}

provider "template" {
- version = "~> 1.0"
+ version = "~> 2.1"
}

provider "tls" {
50 changes: 29 additions & 21 deletions aws/flatcar-linux/kubernetes/security.tf
@@ -42,36 +42,40 @@ resource "aws_security_group_rule" "controller-etcd-metrics" {
source_security_group_id = "${aws_security_group.worker.id}"
}

- resource "aws_security_group_rule" "controller-apiserver" {
- security_group_id = "${aws_security_group.controller.id}"
+ resource "aws_security_group_rule" "controller-vxlan" {
+ count = "${var.networking == "flannel" ? 1 : 0}"

- type = "ingress"
- protocol = "tcp"
- from_port = 6443
- to_port = 6443
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- resource "aws_security_group_rule" "controller-flannel" {
security_group_id = "${aws_security_group.controller.id}"

type = "ingress"
protocol = "udp"
- from_port = 8472
- to_port = 8472
+ from_port = 4789
+ to_port = 4789
source_security_group_id = "${aws_security_group.worker.id}"
}

- resource "aws_security_group_rule" "controller-flannel-self" {
+ resource "aws_security_group_rule" "controller-vxlan-self" {
+ count = "${var.networking == "flannel" ? 1 : 0}"
+
security_group_id = "${aws_security_group.controller.id}"

type = "ingress"
protocol = "udp"
- from_port = 8472
- to_port = 8472
+ from_port = 4789
+ to_port = 4789
self = true
}

+ resource "aws_security_group_rule" "controller-apiserver" {
+ security_group_id = "${aws_security_group.controller.id}"
+
+ type = "ingress"
+ protocol = "tcp"
+ from_port = 6443
+ to_port = 6443
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+
# Allow Prometheus to scrape node-exporter daemonset
resource "aws_security_group_rule" "controller-node-exporter" {
security_group_id = "${aws_security_group.controller.id}"
@@ -216,23 +220,27 @@ resource "aws_security_group_rule" "worker-https" {
cidr_blocks = ["0.0.0.0/0"]
}

- resource "aws_security_group_rule" "worker-flannel" {
+ resource "aws_security_group_rule" "worker-vxlan" {
+ count = "${var.networking == "flannel" ? 1 : 0}"
+
security_group_id = "${aws_security_group.worker.id}"

type = "ingress"
protocol = "udp"
- from_port = 8472
- to_port = 8472
+ from_port = 4789
+ to_port = 4789
source_security_group_id = "${aws_security_group.controller.id}"
}

- resource "aws_security_group_rule" "worker-flannel-self" {
+ resource "aws_security_group_rule" "worker-vxlan-self" {
+ count = "${var.networking == "flannel" ? 1 : 0}"
+
security_group_id = "${aws_security_group.worker.id}"

type = "ingress"
protocol = "udp"
- from_port = 8472
- to_port = 8472
+ from_port = 4789
+ to_port = 4789
self = true
}

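Both the controller and worker rules move from UDP 8472, the Linux kernel default port that flannel's VXLAN backend uses out of the box, to UDP 4789, the IANA-assigned VXLAN port, and they are now created only when the cluster actually runs flannel. In Terraform 0.11 a conditional count is the usual switch for turning a resource on or off. The matching flannel-side port change would live in the pinned bootkube manifests and is not visible on this page. Assembled from the hunk above, the worker-side rule now reads:

resource "aws_security_group_rule" "worker-vxlan" {
  # Created only for flannel-backed clusters.
  count = "${var.networking == "flannel" ? 1 : 0}"

  security_group_id = "${aws_security_group.worker.id}"

  type                     = "ingress"
  protocol                 = "udp"
  from_port                = 4789
  to_port                  = 4789
  source_security_group_id = "${aws_security_group.controller.id}"
}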
6 changes: 4 additions & 2 deletions aws/flatcar-linux/kubernetes/workers/cl/worker.yaml.tmpl
@@ -43,6 +43,7 @@ systemd:
--volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
--mount volume=iscsiadm,target=/usr/sbin/iscsiadm \
--insecure-options=image"
+ Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
@@ -56,6 +57,7 @@ systemd:
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
+ --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
@@ -100,7 +102,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
- KUBELET_IMAGE_TAG=v1.14.1
+ KUBELET_IMAGE_TAG=v1.15.3
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
@@ -131,7 +133,7 @@ storage:
--volume config,kind=host,source=/etc/kubernetes \
--mount volume=config,target=/etc/kubernetes \
--insecure-options=image \
- docker://k8s.gcr.io/hyperkube:v1.14.1 \
+ docker://k8s.gcr.io/hyperkube:v1.15.3 \
--net=host \
--dns=host \
--exec=/kubectl -- --kubeconfig=/etc/kubernetes/kubeconfig delete node $(hostname)
2 changes: 2 additions & 0 deletions aws/flatcar-linux/kubernetes/workers/workers.tf
@@ -54,6 +54,7 @@ resource "aws_launch_configuration" "worker" {
volume_type = "${var.disk_type}"
volume_size = "${var.disk_size}"
iops = "${var.disk_iops}"
+ encrypted = true
}

# network
@@ -79,6 +80,7 @@ data "template_file" "worker-config" {

vars = {
kubeconfig = "${indent(10, var.kubeconfig)}"
+ cgroup_driver = "${local.channel == "edge" ? "systemd":"cgroupfs"}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
20 changes: 14 additions & 6 deletions azure/flatcar-linux/kubernetes/bootkube.tf
@@ -1,12 +1,20 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
- source = "git::https://github.com/kinvolk/terraform-render-bootkube.git?ref=d07243a9e7f6084cfe08b708731a79c26146badb"
+ source = "github.com/kinvolk/terraform-render-bootkube?ref=7e237ffa21fd85f76ddf2a215073aa7cd6ef2476"

- cluster_name = "${var.cluster_name}"
- api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
- etcd_servers = ["${formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)}"]
- asset_dir = "${var.asset_dir}"
+
+ networking = "${var.networking}"
+
+ # only effective with Calico networking
+ network_encapsulation = "vxlan"
+
+ # we should be able to use 1450 MTU, but in practice, 1410 was needed
+ network_mtu = "1410"
+
+ cluster_name = "${var.cluster_name}"
+ api_servers = ["${format("%s.%s", var.cluster_name, var.dns_zone)}"]
+ etcd_servers = ["${formatlist("%s.%s", azurerm_dns_a_record.etcds.*.name, var.dns_zone)}"]
+ asset_dir = "${var.asset_dir}"
- networking = "flannel"
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
cluster_domain_suffix = "${var.cluster_domain_suffix}"
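Instead of hard-coding flannel, the Azure module now forwards the new networking variable (declared in variables.tf below) and sets Calico-specific defaults: VXLAN encapsulation and a 1410-byte MTU. Reassembled from the hunk above, with alignment approximated, the head of the module call now reads:

module "bootkube" {
  source = "github.com/kinvolk/terraform-render-bootkube?ref=7e237ffa21fd85f76ddf2a215073aa7cd6ef2476"

  networking = "${var.networking}"

  # only effective with Calico networking
  network_encapsulation = "vxlan"

  # we should be able to use 1450 MTU, but in practice, 1410 was needed
  network_mtu = "1410"

  # cluster_name, api_servers, etcd_servers, asset_dir, pod_cidr, service_cidr,
  # cluster_domain_suffix and the remaining arguments follow unchanged.
}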
6 changes: 4 additions & 2 deletions azure/flatcar-linux/kubernetes/cl/controller.yaml.tmpl
@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
- Environment="ETCD_IMAGE_TAG=v3.3.13"
+ Environment="ETCD_IMAGE_TAG=v3.3.15"
Environment="ETCD_NAME=${etcd_name}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://${etcd_domain}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://${etcd_domain}:2380"
@@ -63,6 +63,7 @@ systemd:
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log \
--insecure-options=image"
+ Environment=KUBELET_CGROUP_DRIVER=${cgroup_driver}
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
@@ -77,6 +78,7 @@ systemd:
--anonymous-auth=false \
--authentication-token-webhook \
--authorization-mode=Webhook \
+ --cgroup-driver=$${KUBELET_CGROUP_DRIVER} \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns=${cluster_dns_service_ip} \
--cluster_domain=${cluster_domain_suffix} \
@@ -123,7 +125,7 @@ storage:
contents:
inline: |
KUBELET_IMAGE_URL=docker://k8s.gcr.io/hyperkube
- KUBELET_IMAGE_TAG=v1.14.1
+ KUBELET_IMAGE_TAG=v1.15.3
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
8 changes: 5 additions & 3 deletions azure/flatcar-linux/kubernetes/controllers.tf
@@ -113,7 +113,9 @@ resource "azurerm_network_interface" "controllers" {

# Add controller NICs to the controller backend address pool
resource "azurerm_network_interface_backend_address_pool_association" "controllers" {
- network_interface_id = "${azurerm_network_interface.controllers.id}"
+ count = "${var.controller_count}"
+
+ network_interface_id = "${azurerm_network_interface.controllers.*.id}"
ip_configuration_name = "ip0"
backend_address_pool_id = "${azurerm_lb_backend_address_pool.controller.id}"
}
@@ -149,8 +151,8 @@ data "template_file" "controller-configs" {
etcd_domain = "${var.cluster_name}-etcd${count.index}.${var.dns_zone}"

# etcd0=https://cluster-etcd0.example.com,etcd1=https://cluster-etcd1.example.com,...
- etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
-
+ etcd_initial_cluster = "${join(",", data.template_file.etcds.*.rendered)}"
+ cgroup_driver = "${var.os_channel == "edge" ? "systemd":"cgroupfs"}"
kubeconfig = "${indent(10, module.bootkube.kubeconfig-kubelet)}"
ssh_authorized_key = "${var.ssh_authorized_key}"
cluster_dns_service_ip = "${cidrhost(var.service_cidr, 10)}"
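The association between the controller NICs and the load balancer backend pool now carries count = var.controller_count, so one association is created per controller instead of a single one, and the Azure template picks the kubelet cgroup driver from var.os_channel just as the AWS module does from local.channel. A sketch of the per-controller association, written with the element()/count.index indexing that Terraform 0.11 normally uses for counted resources; the hunk above shows the splatted ID list directly:

resource "azurerm_network_interface_backend_address_pool_association" "controllers" {
  count = "${var.controller_count}"

  # element() with count.index picks the NIC belonging to this controller index.
  network_interface_id    = "${element(azurerm_network_interface.controllers.*.id, count.index)}"
  ip_configuration_name   = "ip0"
  backend_address_pool_id = "${azurerm_lb_backend_address_pool.controller.id}"
}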
4 changes: 2 additions & 2 deletions azure/flatcar-linux/kubernetes/require.tf
@@ -13,11 +13,11 @@ provider "local" {
}

provider "null" {
- version = "~> 1.0"
+ version = "~> 2.1"
}

provider "template" {
- version = "~> 1.0"
+ version = "~> 2.1"
}

provider "tls" {
12 changes: 6 additions & 6 deletions azure/flatcar-linux/kubernetes/security.tf
@@ -68,17 +68,17 @@ resource "azurerm_network_security_rule" "controller-apiserver" {
destination_address_prefix = "${azurerm_subnet.controller.address_prefix}"
}

- resource "azurerm_network_security_rule" "controller-flannel" {
+ resource "azurerm_network_security_rule" "controller-vxlan" {
resource_group_name = "${azurerm_resource_group.cluster.name}"

- name = "allow-flannel"
+ name = "allow-vxlan"
network_security_group_name = "${azurerm_network_security_group.controller.name}"
priority = "2020"
access = "Allow"
direction = "Inbound"
protocol = "Udp"
source_port_range = "*"
- destination_port_range = "8472"
+ destination_port_range = "4789"
source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"]
destination_address_prefix = "${azurerm_subnet.controller.address_prefix}"
}
@@ -204,17 +204,17 @@ resource "azurerm_network_security_rule" "worker-https" {
destination_address_prefix = "${azurerm_subnet.worker.address_prefix}"
}

- resource "azurerm_network_security_rule" "worker-flannel" {
+ resource "azurerm_network_security_rule" "worker-vxlan" {
resource_group_name = "${azurerm_resource_group.cluster.name}"

- name = "allow-flannel"
+ name = "allow-vxlan"
network_security_group_name = "${azurerm_network_security_group.worker.name}"
priority = "2015"
access = "Allow"
direction = "Inbound"
protocol = "Udp"
source_port_range = "*"
- destination_port_range = "8472"
+ destination_port_range = "4789"
source_address_prefixes = ["${azurerm_subnet.controller.address_prefix}", "${azurerm_subnet.worker.address_prefix}"]
destination_address_prefix = "${azurerm_subnet.worker.address_prefix}"
}
6 changes: 6 additions & 0 deletions azure/flatcar-linux/kubernetes/variables.tf
@@ -98,6 +98,12 @@ variable "asset_dir" {
type = "string"
}

+ variable "networking" {
+ description = "Choice of networking provider (flannel or calico)"
+ type = "string"
+ default = "flannel"
+ }
+
variable "host_cidr" {
description = "CIDR IPv4 range to assign to instances"
type = "string"
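The new variable defaults to flannel, so existing Azure clusters keep their current behavior, and switching to Calico becomes a one-line change for the caller. An illustrative caller snippet; the module path and the omitted inputs are placeholders, and only the networking variable and its two accepted values come from this diff:

module "cluster" {
  source = "github.com/kinvolk/lokomotive-kubernetes//azure/flatcar-linux/kubernetes?ref=d209acf"

  # "flannel" (default) or "calico"; with Calico the bootkube call above applies
  # VXLAN encapsulation and the 1410 MTU.
  networking = "calico"

  # (other required inputs omitted)
}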
(Diffs for the remaining 44 of the 57 changed files were not loaded.)
