kubeadm support #1631
Changes from 2 commits
@@ -53,6 +53,7 @@ before_script:
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  UPGRADE_TEST: "false"
  KUBEADM_ENABLED: "false"
  RESOLVCONF_MODE: docker_dns
  LOG_LEVEL: "-vv"
  ETCD_DEPLOYMENT: "docker"
@@ -117,16 +118,17 @@ before_script:
-e bootstrap_os=${BOOTSTRAP_OS}
-e cert_management=${CERT_MGMT:-script}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml
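One likely reason for the switch to the `-e "{key: value}"` form in the added lines above: plain `-e key=value` hands the value to Ansible as a string, so a CI variable such as `KUBEADM_ENABLED: "false"` would reach the playbooks as the non-empty (and therefore truthy) string "false", while the YAML/JSON form keeps it a real boolean. A minimal sketch with made-up variable names:

```yaml
# demo.yml -- hypothetical playbook; run with:
#   ansible-playbook demo.yml -e plain=false -e "{braced: false}"
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Runs, because "braced" arrives as a real boolean false
      debug:
        msg: "non-kubeadm code path"
      when: not braced

    - name: Skipped, because "plain" arrives as the non-empty string "false", which is truthy
      debug:
        msg: "non-kubeadm code path"
      when: not plain
```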

@@ -144,17 +146,20 @@ before_script:
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e kubeadm_enabled=${KUBEADM_ENABLED}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
$PLAYBOOK;
@@ -178,14 +183,18 @@ before_script:
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
@@ -221,14 +230,18 @@ before_script:
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_localhost=${DOWNLOAD_LOCALHOST}
-e download_run_once=${DOWNLOAD_RUN_ONCE}
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e "{deploy_netchecker: true}"
-e "{download_localhost: ${DOWNLOAD_LOCALHOST}}"
-e "{download_run_once: ${DOWNLOAD_RUN_ONCE}}"
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubedns_min_replicas=1
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
-e "${AUTHORIZATION_MODES}"
--limit "all:!fake_hosts"
cluster.yml;
@@ -280,6 +293,17 @@ before_script:
  UPGRADE_TEST: "graceful"
  STARTUP_SCRIPT: ""

.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: canal
  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
Review comment: Note: RBAC and the Node authorizer are always on.
Reply: There is internal logic to determine if RBAC is enabled. We will refactor this soon.
(A sketch of such logic follows this variables block.)
  CLOUD_IMAGE: ubuntu-1604-xenial
  CLOUD_MACHINE_TYPE: "n1-standard-2"
  CLOUD_REGION: europe-west1-b
  CLUSTER_MODE: default
  KUBEADM_ENABLED: "true"
  STARTUP_SCRIPT: ""
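Picking up the review note above about internal RBAC logic: the `authorization_modes` list passed via `-e "${AUTHORIZATION_MODES}"` presumably gets turned into a flag somewhere in kubespray-defaults. A hedged sketch of what such logic could look like — the variable names below are illustrative, not necessarily what the repo uses:

```yaml
# Sketch only -- assumes an authorization_modes list like the one set above.
authorization_modes: []
rbac_enabled: "{{ 'RBAC' in authorization_modes }}"

# RBAC-only manifests could then be gated like this:
# - name: Apply RBAC bindings
#   ...
#   when: rbac_enabled | bool
```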

.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: weave
@@ -470,6 +494,27 @@ ubuntu-canal-ha-rbac-triggers:
  when: on_success
  only: ['triggers']

ubuntu-canal-kubeadm-rbac:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_canal_kubeadm_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

ubuntu-canal-kubeadm-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_canal_kubeadm_variables
  when: on_success
  only: ['triggers']

rhel7-weave:
  stage: deploy-gce-part1
  <<: *job
@@ -69,6 +69,17 @@
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/master, tags: master }

- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
Review comment: Are we going to need to skip any components with kubeadm enabled?
Reply: kubernetes/secrets gets skipped, and the new task file roles/kubernetes/master/tasks/static-pod-setup.yml will be skipped in kubeadm mode.
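To illustrate the skip question above, the conditional wiring could look roughly like the sketch below. Only the kubernetes/kubeadm line comes from this diff; gating kubernetes/secrets this way is an assumption about how the skip might be expressed:

```yaml
# Sketch only: kubeadm mode bypasses kubespray's own cert generation,
# while the kubeadm join role runs only when kubeadm_enabled is true.
- hosts: k8s-cluster
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults }
    - { role: kubernetes/secrets, tags: secrets, when: "not kubeadm_enabled" }
    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
```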

- hosts: kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  roles:
    - { role: kubespray-defaults}
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
@@ -135,24 +135,27 @@ def _execute_nofail(self, cmd):
            return None
        return out.splitlines()

    def create(self, check=True):
    def create(self, check=True, force=True):
Review comment: Is this relevant to this PR?
Reply: It's related for replacing kubeadm's unescapable kubedns.
Review comment: You mind posting a little more context for this? Curious.
Reply: From the doc: it's the only option in some upgrade scenarios.
(A usage sketch follows this hunk.)

        if check and self.exists():
            return []

        cmd = ['apply']

        if force:
            cmd.append('--force')

        if not self.filename:
            self.module.fail_json(msg='filename required to create')

        cmd.append('--filename=' + self.filename)

        return self._execute(cmd)

    def replace(self):
    def replace(self, force=True):

        cmd = ['apply']

        if self.force:
        if force:
            cmd.append('--force')

        if not self.filename:
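On the kube-dns point in the thread above: as I understand the flag, `kubectl apply --force` deletes and recreates an object it cannot update in place, which is what lets kubespray's own kube-dns manifest take over from the one kubeadm deploys. A hedged sketch of such a task, with parameters modeled on how the in-repo kube module is typically invoked (manifest path and resource details are illustrative):

```yaml
# Sketch only -- file name and resource are placeholders.
- name: Kubernetes Apps | Replace kubeadm-managed kube-dns with kubespray's manifest
  kube:
    name: kube-dns
    namespace: kube-system
    resource: deploy
    filename: "{{ kube_config_dir }}/kubedns-deploy.yml"
    state: latest
  # with the change above, create()/replace() now append --force to "kubectl apply"
```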
@@ -19,6 +19,7 @@ download_always_pull: False

# Versions
kube_version: v1.7.3
kubeadm_version: "{{ kube_version }}"
Review comment: Why would the kubeadm version differ from the k8s version here?
Reply: It defaults to match kube_version, but it can be overridden if desired.
Review comment: OK, SGTM.
(An override example follows this hunk.)

etcd_version: v3.2.4
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
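As the reply above notes, the default simply tracks `kube_version`; an inventory override would look like this (the version numbers are only examples):

```yaml
# group_vars/k8s-cluster.yml (example values only)
kube_version: v1.7.3
kubeadm_version: v1.7.4   # set only if a different kubeadm build is wanted
```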
@@ -31,11 +32,13 @@ flannel_version: "v0.8.0"
flannel_cni_version: "v0.2.0"
pod_infra_version: 3.0

# Download URL's
# Download URLs
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/amd64/kubeadm"
Review comment: @luxas Is there a containerized kubeadm release? Is it part of hyperkube?
Reply: Not sure what you mean by containerized kubeadm. It is a CLI binary and has no deps, just like kubectl.
Reply: And no, not part of hyperkube, by design.
Reply: We have the option to run nearly every component in a container (including helm). I'd like to carry this model forward if possible with kubeadm. Especially in this case, since we're just downloading an unpackaged binary, it makes it interesting when managing kubeadm versions and understanding what's actually installed.
Reply: OK. Neither kubectl nor kubeadm is packaged in container images officially.
Reply: Yeah, that would be nice to have indeed. See kubernetes/kubernetes#35041; it didn't make it, though. :)
Reply: I just wanted to understand why kubeadm can't support both systemd and containerized components. Since we are talking about kubeadm as the primary bootstrap mechanism, support for components under systemd would be a catalyst for a larger community. I would love to weigh in on any ideas or any work someone can direct me to.

# Checksums
etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"
kubeadm_checksum: "378e6052f8b178f8e6a38e8637681c72d389443b66b78b51b8ddc9a162c655c3"

# Containers
# Possible values: host, docker
@@ -132,6 +135,15 @@ downloads:
    container: "{{ etcd_deployment_type in [ 'docker', 'rkt' ] }}"
    repo: "{{ etcd_image_repo }}"
    tag: "{{ etcd_image_tag }}"
  kubeadm:
    version: "{{ kubeadm_version }}"
    dest: "kubeadm"
    sha256: "{{ kubeadm_checksum }}"
    source_url: "{{ kubeadm_download_url }}"
    url: "{{ kubeadm_download_url }}"
    unarchive: false
    owner: "root"
    mode: "0755"
  hyperkube:
    container: true
    repo: "{{ hyperkube_image_repo }}"
@@ -0,0 +1,41 @@
---
- name: Set kubeadm_discovery_address
  set_fact:
    kubeadm_discovery_address: >-
      {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
      {{ first_kube_master }}:{{ kube_apiserver_port }}
      {%- else -%}
      {{ kube_apiserver_endpoint }}
      {%- endif %}
  when: not is_kube_master
  tags: facts

- name: Create kubeadm client config
  template:
    src: kubeadm-client.conf.j2
    dest: "{{ kube_config_dir }}/kubeadm-client.conf"
    backup: yes
  when: not is_kube_master
  register: kubeadm_client_conf

- name: Join to cluster if needed
  command: kubeadm join --config {{ kube_config_dir }}/kubeadm-client.conf --skip-preflight-checks
  register: kubeadm_join
  when: not is_kube_master and kubeadm_client_conf.changed

Review comment: Is it worth running the preflight checks in an earlier play, like kubernetes/preinstall?
Reply: We re-run kubeadm join any time the templated conf gets updated. It won't harm anything to run again and again. It's idempotent.
Reply: Yeah, that's right.
Reply: Actual upgrades are still the usual process: kubeadm doesn't play a role unless the config params changed.
Reply: OK. (They might do, though; minor issue for now.)
Reply: etcds are all upgraded on-the-fly in parallel (and pray it doesn't go bad, but make a backup on each host just in case). The current process for upgrading the k8s master: at the end, if CNI components require an update, apply them with kubectl apply.
Reply: I'd reorder the kubelet upgrade and master upgrade, as kubelets support newer masters but not vice versa in e2e tests...
Reply: So, upgrade all kubelets on all nodes first, then master components last? Or masters first, then kubelets?
Reply: The general k8s policy is master first, kubelet then.
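On the upgrade-order discussion above, the "master first, kubelet then" policy translates into play ordering roughly like the following schematic. This is not the repo's actual upgrade playbook, just an illustration of the sequencing:

```yaml
# Schematic only -- upgrade masters one at a time, then roll kubelets afterwards.
- hosts: kube-master
  serial: 1
  roles:
    - { role: kubernetes/master, tags: master }

- hosts: kube-node
  serial: "{{ serial | default('20%') }}"
  roles:
    - { role: kubernetes/node, tags: node }
```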

- name: Update server field in kubelet kubeconfig
  replace:
    path: "{{ kube_config_dir }}/kubelet.conf"
    regexp: '(\s+){{ first_kube_master }}:{{ kube_apiserver_port }}(\s+.*)?$'
    replace: '\1{{ kube_apiserver_endpoint }}\2'
    backup: yes
  when: not is_kube_master and kubeadm_discovery_address != kube_apiserver_endpoint

Review comment: This feels a bit fragile, but it's needed. Is this a feature request we can log with kubeadm to specify the correct value?
Reply: If we could set a different host for discovery and for the kube-apiserver endpoint, that would work. We can't get kubelet up in such a way that it's ready for kubeadm AND deploy the nginx localhost proxy as a static pod. We could move it to a standard docker container, but I don't prefer that option.

# FIXME(mattymo): Reconcile kubelet kubeconfig filename for both deploy modes

Review comment: Will this be adding a conditional to kubelet to flex on the kubeconfig filename?
Reply: Probably just generate a new kubeconfig file and purge the old one, after updating refs in all configs.

- name: Symlink kubelet kubeconfig for calico/canal
  file:
    src: "{{ kube_config_dir }}/kubelet.conf"
    dest: "{{ kube_config_dir }}/node-kubeconfig.yaml"
    state: link
    force: yes
  when: kube_network_plugin in ['calico','canal']
@@ -0,0 +1,6 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: NodeConfiguration
caCertPath: {{ kube_config_dir }}/ssl/ca.crt
token: {{ kubeadm_token }}
discoveryTokenAPIServers:
- {{ kubeadm_discovery_address | replace("https://", "")}}
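For illustration, the rendered client config might look like the following, assuming `kube_config_dir` is `/etc/kubernetes`; the token and address below are made-up placeholders:

```yaml
# Hypothetical rendered kubeadm-client.conf
apiVersion: kubeadm.k8s.io/v1alpha1
kind: NodeConfiguration
caCertPath: /etc/kubernetes/ssl/ca.crt
token: abcdef.0123456789abcdef
discoveryTokenAPIServers:
- 10.0.0.10:6443
```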
@@ -0,0 +1,35 @@
---
- name: kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: >-
      kubernetes
      kubernetes.default
      kubernetes.default.svc
      kubernetes.default.svc.{{ dns_domain }}
      {{ kube_apiserver_ip }}
      localhost
      127.0.0.1
      {{ ' '.join(groups['kube-master']) }}
      {%- if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
      {{ apiserver_loadbalancer_domain_name }}
      {%- endif %}
      {%- for host in groups['kube-master'] -%}
      {%- if hostvars[host]['access_ip'] is defined %}{{ hostvars[host]['access_ip'] }}{% endif -%}
      {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
      {%- endfor %}
  tags: facts

- name: kubeadm | Copy etcd cert dir under k8s cert dir
  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
  changed_when: false

Review comment: Should we be changing our default etcd_cert_dir to be in line with this? Is it a standard enforced by kubeadm?
Reply: I want kubeadm to support mounting the dir where the etcd certs are located. It lets you specify a path, but doesn't mount them. @luxas what do you think?
Reply: Actually, in v1.8.0 beta1 it bind-mounts the etcd cert dir, so we can get rid of this command.

- name: kubeadm | Create kubeadm config
  template:
    src: kubeadm-config.yaml.j2
    dest: "{{ kube_config_dir }}/kubeadm-config.yaml"
  register: kubeadm_config

- name: kubeadm | Initialize cluster
  command: timeout -k 240s 240s kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --skip-preflight-checks
  register: kubeadm_init
  when: kubeadm_config.changed
Review comment: Isn't this a duplicate of https://github.com/kubernetes-incubator/kubespray/pull/1631/files#diff-96edf7a6f008de9e928d04e1ae5e12a5R159 ?
Reply: Oh yes, that needs to be fixed.
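The kubeadm-config.yaml.j2 template referenced above is not part of this extract. For orientation, a v1alpha1 master configuration consuming the aggregated SANs might look roughly like the sketch below; all values are placeholders and the real template may differ:

```yaml
# Hypothetical sketch of what kubeadm-config.yaml could render to.
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
  advertiseAddress: 10.0.0.10
networking:
  serviceSubnet: 10.233.0.0/18
  podSubnet: 10.233.64.0/18
kubernetesVersion: v1.7.3
token: abcdef.0123456789abcdef
apiServerCertSANs:
  - kubernetes.default.svc.cluster.local
  - localhost
  - 10.0.0.10
```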