kubeadm support #1631

Merged: 6 commits, merged Sep 13, 2017. Showing changes from 2 commits.
77 changes: 61 additions & 16 deletions .gitlab-ci.yml
@@ -53,6 +53,7 @@ before_script:
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
+ KUBEADM_ENABLED: "false"
RESOLVCONF_MODE: docker_dns
LOG_LEVEL: "-vv"
ETCD_DEPLOYMENT: "docker"
@@ -117,16 +118,17 @@ before_script:
-e bootstrap_os=${BOOTSTRAP_OS}
-e cert_management=${CERT_MGMT:-script}
-e cloud_provider=gce
- -e deploy_netchecker=true
- -e download_localhost=${DOWNLOAD_LOCALHOST}
- -e download_run_once=${DOWNLOAD_RUN_ONCE}
+ -e "{deploy_netchecker: true}"
+ -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+ -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml
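
The switch from "-e key=value" to "-e {key: value}" in these invocations is not cosmetic: key=value extra-vars always reach Ansible as strings, so a bare conditional like "when: kubeadm_enabled" would be truthy even for the value "false". The brace form is parsed as YAML/JSON and preserves booleans. A minimal sketch of the difference (playbook and variable usage are illustrative):

# ansible-playbook -e kubeadm_enabled=false demo.yml      -> string "false" (truthy!)
# ansible-playbook -e "{kubeadm_enabled: false}" demo.yml -> boolean false
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Take the kubeadm path only for a real (or explicitly cast) true
      debug:
        msg: "kubeadm path taken"
      when: kubeadm_enabled | bool   # the cast guards against string input
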
@@ -144,17 +146,20 @@ before_script:
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
- -e deploy_netchecker=true
- -e download_localhost=${DOWNLOAD_LOCALHOST}
- -e download_run_once=${DOWNLOAD_RUN_ONCE}
+ -e "{deploy_netchecker: true}"
+ -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+ -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e kubeadm_enabled=${KUBEADM_ENABLED}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"

Contributor Author: oh yes, that needs to be fixed

-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
$PLAYBOOK;
@@ -178,14 +183,18 @@ before_script:
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
- -e download_localhost=${DOWNLOAD_LOCALHOST}
- -e download_run_once=${DOWNLOAD_RUN_ONCE}
- -e deploy_netchecker=true
- -e resolvconf_mode=${RESOLVCONF_MODE}
- -e local_release_dir=${PWD}/downloads
+ -e "{deploy_netchecker: true}"
+ -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+ -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+ -e local_release_dir=${PWD}/downloads
+ -e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
+ -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
@@ -221,14 +230,18 @@ before_script:
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
- -e download_localhost=${DOWNLOAD_LOCALHOST}
- -e download_run_once=${DOWNLOAD_RUN_ONCE}
- -e deploy_netchecker=true
- -e resolvconf_mode=${RESOLVCONF_MODE}
- -e local_release_dir=${PWD}/downloads
+ -e "{deploy_netchecker: true}"
+ -e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
+ -e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+ -e local_release_dir=${PWD}/downloads
+ -e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
+ -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
@@ -280,6 +293,17 @@ before_script:
UPGRADE_TEST: "graceful"
STARTUP_SCRIPT: ""

.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"

Reviewer: Note: RBAC and the Node authorizer are always on.

Contributor Author: There is internal logic to determine if RBAC is enabled. We will refactor this soon.

CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_MACHINE_TYPE: "n1-standard-2"
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: default
KUBEADM_ENABLED: "true"
STARTUP_SCRIPT: ""

.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: weave
@@ -470,6 +494,27 @@ ubuntu-canal-ha-rbac-triggers:
when: on_success
only: ['triggers']

ubuntu-canal-kubeadm-rbac:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]

ubuntu-canal-kubeadm-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_kubeadm_variables
when: on_success
only: ['triggers']
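
These jobs are assembled entirely from YAML anchors and merge keys: each "<<: *name" splices a previously anchored mapping into variables, and explicitly listed keys override merged ones, which is how KUBEADM_ENABLED flips from its "false" default. A stripped-down sketch of the pattern (anchor and job names shortened for illustration):

.base_variables: &base_variables
  KUBE_NETWORK_PLUGIN: canal
  KUBEADM_ENABLED: "false"

demo-job:
  variables:
    <<: *base_variables
    KUBEADM_ENABLED: "true"   # explicit keys win over merged ones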

rhel7-weave:
stage: deploy-gce-part1
<<: *job
11 changes: 11 additions & 0 deletions cluster.yml
@@ -69,6 +69,17 @@
roles:
- { role: kubespray-defaults}
- { role: kubernetes/master, tags: master }

- hosts: k8s-cluster
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }

Contributor: are we going to need to skip any components w/ kubeadm enabled?

Contributor Author: kubernetes/secrets gets skipped, and the new task file roles/kubernetes/master/tasks/static-pod-setup.yml will be skipped in kubeadm mode.


- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }

4 changes: 4 additions & 0 deletions inventory/group_vars/all.yml
@@ -82,6 +82,10 @@ bin_dir: /usr/local/bin
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
#kubeadm_token: "{{ lookup('password', 'credentials/kubeadm_token length=15') }}"
#
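
The commented-out token default leans on a side effect of Ansible's password lookup: on first evaluation it generates a random 15-character string and writes it to credentials/kubeadm_token on the control host, so later runs read the file back and the join token stays stable across runs. Uncommented, the pair would look like this sketch (values and placement illustrative):

kubeadm_enabled: true
# Generated once, then reused from ./credentials/kubeadm_token on the control host:
kubeadm_token: "{{ lookup('password', 'credentials/kubeadm_token length=15') }}"
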
## Set these proxy values in order to update docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
9 changes: 6 additions & 3 deletions library/kube.py
@@ -135,24 +135,27 @@ def _execute_nofail(self, cmd):
return None
return out.splitlines()

- def create(self, check=True):
+ def create(self, check=True, force=True):

Reviewer: Is this relevant to this PR?

Contributor Author: It's related: it's for replacing kubeadm's unremovable kubedns.

Contributor: You mind posting a little more context for this? Curious.

Contributor Author: From the docs: "Delete and re-create the specified resource, when PATCH encounters conflict and has retried for 5 times." It's the only option in some upgrade scenarios.

if check and self.exists():
return []

cmd = ['apply']

if force:
cmd.append('--force')

if not self.filename:
self.module.fail_json(msg='filename required to create')

cmd.append('--filename=' + self.filename)

return self._execute(cmd)

- def replace(self):
+ def replace(self, force=True):

cmd = ['apply']

- if self.force:
+ if force:
cmd.append('--force')

if not self.filename:
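
With force defaulting to true, any apply the module issues may delete and re-create a resource that cannot be patched cleanly, which is exactly what replacing kubeadm's kube-dns requires. A hedged sketch of a playbook task driving this module (argument names follow the diff above; the manifest path and "state: latest" are assumptions about the module's existing interface):

- name: Re-apply the kubedns manifest, re-creating it on patch conflict
  kube:
    name: "kubedns"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "deploy"
    filename: "{{ kube_config_dir }}/kubedns-deploy.yml"
    state: latest
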
14 changes: 13 additions & 1 deletion roles/download/defaults/main.yml
@@ -19,6 +19,7 @@ download_always_pull: False

# Versions
kube_version: v1.7.3
+ kubeadm_version: "{{ kube_version }}"

Reviewer: Why would kubeadm version differ from k8s version here?

Contributor Author: It defaults to match kube version, but it can be overridden if desired.

Reviewer: ok, SGTM

etcd_version: v3.2.4
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
@@ -31,11 +32,13 @@ flannel_version: "v0.8.0"
flannel_cni_version: "v0.2.0"
pod_infra_version: 3.0

- # Download URL's
+ # Download URLs
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
+ kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"

Contributor: @luxas Is there a containerized kubeadm release? Is it part of hyperkube?

luxas (Sep 11, 2017): Not sure what you mean by containerized kubeadm. It is a CLI binary and has no deps, just like kubectl. It just writes down a couple of files and expects kubelet to be running. You can run kubelet however you want, be it containerized, on-host, or in an rkt pod.

luxas: And no, not part of hyperkube, by design.

Contributor: We have the option to run nearly every component in a container (including helm). I'd like to carry this model forward if possible with kubeadm. Especially in this case, since we're just downloading an unpackaged binary, it makes it interesting when managing kubeadm versions and understanding what's actually installed.

luxas: Ok. Neither kubectl nor kubeadm is packaged in container images officially. You can easily create your own image, though.

Contributor: Yeah, that would be nice to have indeed. See kubernetes/kubernetes#35041, didn't make it tho :)

Comment: All, I just wanted to understand why kubeadm can't support both systemd and containerized components. Since we are talking about kubeadm as the primary bootstrap mechanism, support for components under systemd would be a catalyst for a larger community. I would love to weigh in on any ideas or any work someone can direct me to.


# Checksums
etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"
+ kubeadm_checksum: "378e6052f8b178f8e6a38e8637681c72d389443b66b78b51b8ddc9a162c655c3"

# Containers
# Possible values: host, docker
@@ -132,6 +135,15 @@ downloads:
container: "{{ etcd_deployment_type in [ 'docker', 'rkt' ] }}"
repo: "{{ etcd_image_repo }}"
tag: "{{ etcd_image_tag }}"
kubeadm:
version: "{{ kubeadm_version }}"
dest: "kubeadm"
sha256: "{{ kubeadm_checksum }}"
source_url: "{{ kubeadm_download_url }}"
url: "{{ kubeadm_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
hyperkube:
container: true
repo: "{{ hyperkube_image_repo }}"
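
Since kubeadm ships only as a bare binary (see the thread above), the new download entry reduces to a checksummed fetch. A rough standalone equivalent of what the download role does with it (task name and destination are illustrative; the real logic lives in roles/download):

- name: Download the kubeadm binary and verify its sha256
  get_url:
    url: "{{ kubeadm_download_url }}"
    dest: "{{ local_release_dir }}/kubeadm"
    checksum: "sha256:{{ kubeadm_checksum }}"
    owner: "root"
    mode: "0755"
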
2 changes: 1 addition & 1 deletion roles/etcd/tasks/main.yml
@@ -11,7 +11,7 @@

- name: "Gen_certs | Get etcd certificate serials"
shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2"
register: "node-{{ inventory_hostname }}_serial"
register: "etcd_client_cert_serial"
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort

- include: "install_{{ etcd_deployment_type }}.yml"
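
For context on the register fix above: register takes a literal variable name, it is not templated, and dashes are not valid in Ansible identifiers, so the old value never yielded a usable variable. One fixed name suffices because registered results are stored per host; another host's value stays reachable through hostvars, e.g. (host name illustrative):

- name: Show another host's certificate serial
  debug:
    msg: "{{ hostvars['node2']['etcd_client_cert_serial'].stdout }}"
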
12 changes: 12 additions & 0 deletions roles/kubernetes-apps/ansible/tasks/main.yml
@@ -8,6 +8,18 @@
delay: 6
when: inventory_hostname == groups['kube-master'][0]

- name: kubeadm | Delete kubeadm kubedns
kube:
name: "kubedns"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "deploy"
state: absent
when:
- kubeadm_enabled|default(false)
- kubeadm_init.changed|default(false)
- inventory_hostname == groups['kube-master'][0]

- name: Kubernetes Apps | Lay Down KubeDNS Template
template:
src: "{{item.file}}"
41 changes: 41 additions & 0 deletions roles/kubernetes/kubeadm/tasks/main.yml
@@ -0,0 +1,41 @@
---
- name: Set kubeadm_discovery_address
set_fact:
kubeadm_discovery_address: >-
{%- if "127.0.0.1" or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint }}
{%- endif %}
when: not is_kube_master
tags: facts
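
Spelled out: when the configured endpoint points at the localhost nginx proxy, which does not exist yet on a node that has not joined, discovery is redirected to the first master. Example renderings (addresses invented):

# kube_apiserver_endpoint: https://127.0.0.1:443
#   -> kubeadm_discovery_address: 10.0.0.10:6443   (first_kube_master:kube_apiserver_port)
# kube_apiserver_endpoint: https://apiserver.example.com:6443
#   -> kubeadm_discovery_address: https://apiserver.example.com:6443
#      (the https:// prefix is stripped later, in the client config template)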

- name: Create kubeadm client config
template:
src: kubeadm-client.conf.j2
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: yes
when: not is_kube_master
register: kubeadm_client_conf

- name: Join to cluster if needed
command: kubeadm join --config {{ kube_config_dir }}/kubeadm-client.conf --skip-preflight-checks
register: kubeadm_join
when: not is_kube_master and kubeadm_client_conf.changed

Contributor: Is it worth running the preflight checks in an earlier play, like kubernetes/preinstall?

Contributor Author: We re-run kubeadm join any time the templated conf gets updated. It won't harm anything to run it again and again; it's idempotent.

luxas: Yeah, that's right. However, I wouldn't recommend doing that for upgrades, although it works in most cases.

Contributor Author: Actual upgrades are still the usual process:
cordon/drain
upgrade kubelet binary
restart daemon
uncordon
kubeadm doesn't play a role unless the config params changed.

luxas: Ok. (They might, though; minor issue for now.) What about master/control plane upgrades? How's that done?

Contributor Author: etcds are all upgraded on-the-fly in parallel (and pray it doesn't go bad, but make a backup on each host just in case). Since all we're doing is changing how k8s master static pods get templated, there is no effective change. The current process for upgrading a k8s master (one master at a time):
cordon/drain master
upgrade kubelet (replace binary and restart daemon)
upgrade static pods all at once (just replace manifests)
update any CNI changes (except for ones that require kubectl commands)
wait for them to be up
uncordon
At the end, if CNI components require an update, apply them with kubectl apply.

luxas: I'd reorder the kubelet upgrade and the master upgrade, as kubelets support newer masters but not vice versa in e2e tests. I see the practical reason not to, but anyway.

mattymo (Sep 11, 2017): So, upgrade all kubelets on all nodes first, then master components last? Or masters first, then kubelets?

luxas: The general k8s policy is masters first, then kubelets.

- name: Update server field in kubelet kubeconfig
replace:
path: "{{ kube_config_dir }}/kubelet.conf"
regexp: '(\s+){{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
replace: '\1{{ kube_apiserver_endpoint }}\2'
backup: yes
when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint

Contributor: This feels a bit fragile, but it's needed. Is this a feature request we can log with kubeadm, to specify the correct value?

Contributor Author: If we could set a different host for discovery and for the kube apiserver endpoint, that would work. We can't get kubelet up in such a way that it's ready for kubeadm AND deploy the nginx localhost proxy as a static pod. We could move it to a standard docker container, but I don't prefer that option.

# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes

Contributor: will this be adding a conditional to kubelet to flex on kubeconfig filename?

mattymo (Sep 11, 2017): Probably just generate a new kubeconfig file and purge the old one, after updating refs in all configs.

- name: Symlink kubelet kubeconfig for calico/canal
file:
src: "{{ kube_config_dir }}//kubelet.conf"
dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
state: link
force: yes
when: kube_network_plugin in ['calico','canal']
6 changes: 6 additions & 0 deletions roles/kubernetes/kubeadm/templates/kubeadm-client.conf.j2
@@ -0,0 +1,6 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: NodeConfiguration
caCertPath: {{ kube_config_dir }}/ssl/ca.crt
token: {{ kubeadm_token }}
discoveryTokenAPIServers:
- {{ kubeadm_discovery_address | replace("https://", "")}}
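
Rendered for a joining node, the template yields something like this (token and address invented):

apiVersion: kubeadm.k8s.io/v1alpha1
kind: NodeConfiguration
caCertPath: /etc/kubernetes/ssl/ca.crt
token: abcdef.0123456789abcdef
discoveryTokenAPIServers:
- 10.0.0.10:6443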
4 changes: 4 additions & 0 deletions roles/kubernetes/master/defaults/main.yml
@@ -66,3 +66,7 @@ apiserver_custom_flags: []
controller_mgr_custom_flags: []

scheduler_custom_flags: []

# kubeadm settings
# Value of 0 means it never expires
kubeadm_token_ttl: 0
4 changes: 4 additions & 0 deletions roles/kubernetes/master/handlers/main.yml
@@ -44,3 +44,7 @@
until: result.status == 200
retries: 20
delay: 6

- name: Master | set secret_changed
set_fact:
secret_changed: true
35 changes: 35 additions & 0 deletions roles/kubernetes/master/tasks/kubeadm-setup.yml
@@ -0,0 +1,35 @@
---
- name: kubeadm | aggregate all SANs
set_fact:
apiserver_sans: >-
kubernetes
kubernetes.default
kubernetes.default.svc
kubernetes.default.svc.{{ dns_domain }}
{{ kube_apiserver_ip }}
localhost
127.0.0.1
{{ ' '.join(groups['kube-master']) }}
{%- if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
{{ apiserver_loadbalancer_domain_name }}
{%- endif %}
{%- for host in groups['kube-master'] %}
{% if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif %}
{{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
{%- endfor %}
tags: facts
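
For a two-master cluster, the folded scalar collapses into one space-separated string covering every name and address the API server certificate must carry, along these lines (values invented):

# apiserver_sans:
#   kubernetes kubernetes.default kubernetes.default.svc
#   kubernetes.default.svc.cluster.local 10.233.0.1 localhost 127.0.0.1
#   master-1 master-2 10.0.0.10 10.0.0.11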

- name: kubeadm | Copy etcd cert dir under k8s cert dir
command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
changed_when: false

Contributor: Should we be changing our default etcd_cert_dir to be in line with this? Is it a standard enforced by kubeadm?

Contributor Author: I want kubeadm to support mounting the dir where the etcd certs are located. It lets you specify a path, but doesn't mount them. @luxas what do you think?

Contributor Author: Actually, in v1.8.0 beta1, it bind mounts the etcd cert dir. We can get rid of this command.

- name: kubeadm | Create kubeadm config
template:
src: kubeadm-config.yaml.j2
dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
register: kubeadm_config

- name: kubeadm | Initialize cluster
command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
register: kubeadm_init
when: kubeadm_config.changed
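
The kubeadm-config.yaml.j2 template itself is not part of this hunk. As a rough sketch, a v1alpha1 master configuration of this era ties together the SANs, etcd certificates, and token settings seen above (every value below is an illustrative assumption, not the PR's actual template):

apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: 10.0.0.10
etcd:
  endpoints:
  - https://10.0.0.10:2379
  caFile: /etc/kubernetes/ssl/etcd/ca.pem
  certFile: /etc/kubernetes/ssl/etcd/node-master-1.pem
  keyFile: /etc/kubernetes/ssl/etcd/node-master-1-key.pem
networking:
  dnsDomain: "{{ dns_domain }}"
  serviceSubnet: 10.233.0.0/18
kubernetesVersion: "{{ kube_version }}"
token: "{{ kubeadm_token }}"
tokenTTL: "{{ kubeadm_token_ttl }}"
apiServerCertSANs:
- 10.0.0.10
- 10.0.0.11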