Skip to content

Commit

Permalink
[CPDEV-97376] Support of Kubernetes v1.29.1 (#576)
Browse files Browse the repository at this point in the history
* Implement kube-proxy reconfiguration

* Add services.kubeadm_kube-proxy.conntrack.min property

Reconfigure conntrack.min during upgrade.

* Support renew super-admin cert

* Support Kubernetes v1.29.0

* k8s v1.29.1
calico v3.27.0
ingress v1.9.5
crictl v1.29.0
local-path provisioner v0.0.26

* update test

* fix test

* Improvements and fixes in GitHub workflows (#591)

* Fix collecting events if integration tests are failed

* Make manifests original

* Fix check that manifests are not changed manually

* Run mypy on all python versions

* Add supported versions doc for v1.29.1

* Workaround kubernetes/ingress-nginx#10942

---------

Co-authored-by: sergey kryazhev <sergey.kryazhev@netcracker.com>
  • Loading branch information
ilia1243 and koryaga authored Jan 30, 2024
1 parent 82cdfcf commit 94eb677
Show file tree
Hide file tree
Showing 25 changed files with 6,629 additions and 69 deletions.
46 changes: 40 additions & 6 deletions documentation/Installation.md

Large diffs are not rendered by default.

3 changes: 2 additions & 1 deletion documentation/Maintenance.md
Original file line number Diff line number Diff line change
Expand Up @@ -1161,7 +1161,7 @@ The `cert_renew` procedure allows you to renew some certificates on an existing

For Kubernetes, most of the internal certificates could be updated, specifically:
`apiserver`, `apiserver-etcd-client`, `apiserver-kubelet-client`, `etcd-healthcheck-client`, `etcd-peer`, `etcd-server`,
`admin.conf`, `controller-manager.conf`, `scheduler.conf`, `front-proxy-client`.
`admin.conf`, `super-admin.conf`, `controller-manager.conf`, `scheduler.conf`, `front-proxy-client`.
Certificate used by `kubelet.conf` by default is updated automatically by Kubernetes,
link to Kubernetes docs regarding `kubelet.conf` rotation: https://kubernetes.io/docs/tasks/tls/certificate-rotation/#understanding-the-certificate-rotation-configuration.

Expand Down Expand Up @@ -1233,6 +1233,7 @@ kubernetes:
- etcd-peer
- etcd-server
- admin.conf
- super-admin.conf
- controller-manager.conf
- scheduler.conf
- front-proxy-client
Expand Down
44 changes: 14 additions & 30 deletions kubemarine/admission.py
Original file line number Diff line number Diff line change
Expand Up @@ -361,29 +361,20 @@ def restart_pods_task(cluster: KubernetesCluster) -> None:


def update_kubeadm_configmap_psp(first_control_plane: NodeGroup, target_state: str) -> str:
yaml = ruamel.yaml.YAML()

# load kubeadm config map and retrieve cluster config
result = first_control_plane.sudo("kubectl get cm kubeadm-config -n kube-system -o yaml")
kubeadm_cm = yaml.load(list(result.values())[0].stdout)
cluster_config = yaml.load(kubeadm_cm["data"]["ClusterConfiguration"])
kubeadm_cm = kubernetes.KubernetesObject(first_control_plane.cluster, 'ConfigMap', 'kubeadm-config', 'kube-system')
kubeadm_cm.reload(first_control_plane)
cluster_config = yaml.safe_load(kubeadm_cm.obj["data"]["ClusterConfiguration"])

# resolve resulting admission plugins list
final_plugins_string = resolve_final_plugins_list(cluster_config, target_state)

# update kubeadm config map with updated plugins list
cluster_config["apiServer"]["extraArgs"]["enable-admission-plugins"] = final_plugins_string
buf = io.StringIO()
yaml.dump(cluster_config, buf)
kubeadm_cm["data"]["ClusterConfiguration"] = buf.getvalue()
kubeadm_cm.obj["data"]["ClusterConfiguration"] = yaml.dump(cluster_config)

# apply updated kubeadm config map
buf = io.StringIO()
yaml.dump(kubeadm_cm, buf)
filename = uuid.uuid4().hex
first_control_plane.put(buf, "/tmp/%s.yaml" % filename)
first_control_plane.sudo("kubectl apply -f /tmp/%s.yaml" % filename)
first_control_plane.sudo("rm -f /tmp/%s.yaml" % filename)
kubeadm_cm.apply(first_control_plane)

return final_plugins_string

Expand Down Expand Up @@ -743,18 +734,18 @@ def update_kubeapi_config_pss(control_planes: NodeGroup, features_list: str) ->


def update_kubeadm_configmap_pss(first_control_plane: NodeGroup, target_state: str) -> str:
yaml = ruamel.yaml.YAML()
cluster: KubernetesCluster = first_control_plane.cluster

final_feature_list = ""

# load kubeadm config map and retrieve cluster config
result = first_control_plane.sudo("kubectl get cm kubeadm-config -n kube-system -o yaml")
kubeadm_cm = yaml.load(list(result.values())[0].stdout)
cluster_config = yaml.load(kubeadm_cm["data"]["ClusterConfiguration"])
kubeadm_cm = kubernetes.KubernetesObject(cluster, 'ConfigMap', 'kubeadm-config', 'kube-system')
kubeadm_cm.reload(first_control_plane)
cluster_config = yaml.safe_load(kubeadm_cm.obj["data"]["ClusterConfiguration"])

# update kubeadm config map with feature list
if target_state == "enabled":
if not is_pod_security_unconditional(first_control_plane.cluster):
if not is_pod_security_unconditional(cluster):
if "feature-gates" in cluster_config["apiServer"]["extraArgs"]:
enabled_admissions = cluster_config["apiServer"]["extraArgs"]["feature-gates"]
if 'PodSecurity=true' not in enabled_admissions:
Expand All @@ -771,12 +762,12 @@ def update_kubeadm_configmap_pss(first_control_plane: NodeGroup, target_state: s
final_feature_list = "PodSecurity=true"
else:
cluster_config["apiServer"]["extraArgs"]["admission-control-config-file"] = admission_path
if first_control_plane.cluster.context['initial_procedure'] == 'upgrade':
if cluster.context['initial_procedure'] == 'upgrade':
if cluster_config["apiServer"]["extraArgs"].get("feature-gates"):
del cluster_config["apiServer"]["extraArgs"]["feature-gates"]
final_feature_list = "PodSecurity deprecated in %s" % cluster_config['kubernetesVersion']
elif target_state == "disabled":
if not is_pod_security_unconditional(first_control_plane.cluster):
if not is_pod_security_unconditional(cluster):
feature_list = cluster_config["apiServer"]["extraArgs"]["feature-gates"].replace("PodSecurity=true", "")
final_feature_list = feature_list.replace(",,", ",")
if len(final_feature_list) == 0:
Expand All @@ -795,17 +786,10 @@ def update_kubeadm_configmap_pss(first_control_plane: NodeGroup, target_state: s
else:
del cluster_config["apiServer"]["extraArgs"]["admission-control-config-file"]

buf = io.StringIO()
yaml.dump(cluster_config, buf)
kubeadm_cm["data"]["ClusterConfiguration"] = buf.getvalue()
kubeadm_cm.obj["data"]["ClusterConfiguration"] = yaml.dump(cluster_config)

# apply updated kubeadm config map
buf = io.StringIO()
yaml.dump(kubeadm_cm, buf)
filename = uuid.uuid4().hex
first_control_plane.put(buf, "/tmp/%s.yaml" % filename)
first_control_plane.sudo("kubectl apply -f /tmp/%s.yaml" % filename)
first_control_plane.sudo("rm -f /tmp/%s.yaml" % filename)
kubeadm_cm.apply(first_control_plane)

return final_feature_list

Expand Down
1 change: 1 addition & 0 deletions kubemarine/core/defaults.py
Original file line number Diff line number Diff line change
Expand Up @@ -516,6 +516,7 @@ def manage_primitive_values(inventory: dict, _: KubernetesCluster) -> dict:
(['services', 'cri', 'containerdConfig',
'plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options',
'SystemdCgroup'], utils.strtobool, False),
(['services', 'kubeadm_kube-proxy', 'conntrack', 'min'], utils.strtoint, True),
(['services', 'modprobe', '*', '*'], str, True),
# kernel parameters are actually not always represented as integers
(['services', 'sysctl', '*'], utils.strtoint, True),
Expand Down
1 change: 1 addition & 0 deletions kubemarine/jinja.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ def _precompile(filter_: str, struct: str) -> str:
precompile_filters['isipv4'] = lambda ip: utils.isipv(ip, [4])
precompile_filters['minorversion'] = utils.minor_version
precompile_filters['majorversion'] = utils.major_version
precompile_filters['versionkey'] = utils.version_key
precompile_filters['b64encode'] = lambda s: base64.b64encode(s.encode()).decode()
precompile_filters['b64decode'] = lambda s: base64.b64decode(s.encode()).decode()
precompile_filters['url_quote'] = lambda u: quote_plus(u)
Expand Down
44 changes: 41 additions & 3 deletions kubemarine/kubernetes/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import uuid
from contextlib import contextmanager
from copy import deepcopy
from typing import List, Dict, Iterator, Any, Optional
from typing import List, Dict, Iterator, Any, Optional, Callable

import yaml
from jinja2 import Template
Expand All @@ -35,6 +35,7 @@
)
from kubemarine.core.errors import KME
from kubemarine.cri import containerd
from kubemarine.kubernetes.object import KubernetesObject

ERROR_DOWNGRADE='Kubernetes old version \"%s\" is greater than new one \"%s\"'
ERROR_SAME='Kubernetes old version \"%s\" is the same as new one \"%s\"'
Expand All @@ -48,6 +49,11 @@ def is_container_runtime_not_configurable(cluster: KubernetesCluster) -> bool:
return utils.version_key(kubernetes_version)[0:2] >= utils.minor_version_key("v1.27")


def kube_proxy_overwrites_higher_system_values(cluster: KubernetesCluster) -> bool:
kubernetes_version = cluster.inventory["services"]["kubeadm"]["kubernetesVersion"]
return utils.version_key(kubernetes_version)[0:2] >= utils.minor_version_key("v1.29")


def add_node_enrichment(inventory: dict, cluster: KubernetesCluster) -> dict:
if cluster.context.get('initial_procedure') != 'add_node':
return inventory
Expand Down Expand Up @@ -610,13 +616,18 @@ def init_first_control_plane(group: NodeGroup) -> None:


def wait_for_any_pods(cluster: KubernetesCluster, connection: NodeGroup, apply_filter: str = None) -> None:
plugins.expect_pods(cluster, [
wait_for_pods(cluster, connection, [
'kube-apiserver',
'kube-controller-manager',
'kube-proxy',
'kube-scheduler',
'etcd'
], node=connection, apply_filter=apply_filter,
], apply_filter=apply_filter)


def wait_for_pods(cluster: KubernetesCluster, connection: NodeGroup,
pods_list: List[str], apply_filter: str = None) -> None:
plugins.expect_pods(cluster, pods_list, node=connection, apply_filter=apply_filter,
timeout=cluster.inventory['globals']['expect']['pods']['kubernetes']['timeout'],
retries=cluster.inventory['globals']['expect']['pods']['kubernetes']['retries'])

Expand Down Expand Up @@ -799,6 +810,33 @@ def get_kubeadm_config(inventory: dict) -> str:
return f'{kubeadm_kube_proxy}---\n{kubeadm_kubelet}---\n{kubeadm}'


def reconfigure_kube_proxy_configmap(control_plane: NodeGroup, mutate_func: Callable[[dict], dict]) -> None:
cluster: KubernetesCluster = control_plane.cluster

# Load kube-proxy config map and retrieve config
kube_proxy_cm = KubernetesObject(cluster, 'ConfigMap', 'kube-proxy', 'kube-system')
kube_proxy_cm.reload(control_plane)
cluster_config: dict = yaml.safe_load(kube_proxy_cm.obj["data"]["config.conf"])

# Always perform the reconfiguration entirely even if nothing is changed.
# This is necessary because the operation is not atomic, but idempotent.
cluster_config = mutate_func(cluster_config)
kube_proxy_cm.obj["data"]["config.conf"] = yaml.dump(cluster_config)

# Apply updated kube-proxy config map
kube_proxy_cm.apply(control_plane)

for node in cluster.make_group_from_roles(['control-plane', 'worker']).get_ordered_members_list():
node_name = node.get_node_name()
control_plane.sudo(
f"kubectl delete pod -n kube-system $("
f" sudo kubectl describe node {node_name} "
f" | awk '/kube-system\\s+kube-proxy-[a-z,0-9]{{5}}/{{print $2}}'"
f")")

wait_for_pods(cluster, control_plane,['kube-proxy'], apply_filter=node_name)


def upgrade_first_control_plane(upgrade_group: NodeGroup, cluster: KubernetesCluster, **drain_kwargs: Any) -> None:
version = cluster.inventory["services"]["kubeadm"]["kubernetesVersion"]
first_control_plane = cluster.nodes['control-plane'].get_first_member()
Expand Down
17 changes: 9 additions & 8 deletions kubemarine/kubernetes/object.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
import io
import json
import uuid
from copy import deepcopy
from typing import TypeVar, Optional

import yaml
Expand All @@ -33,7 +32,7 @@ class KubernetesObject:
def __init__(self, cluster: KubernetesCluster, kind: str = None, name: str = None,
namespace: str = None, obj: dict = None) -> None:

self._cluster = cluster
self.cluster = cluster
self._reload_result: Optional[RunnersResult] = None

if not kind and not name and not namespace and not obj:
Expand All @@ -59,7 +58,7 @@ def __str__(self) -> str:

@property
def obj(self) -> dict:
return deepcopy(self._obj)
return self._obj

@property
def uid(self) -> str:
Expand Down Expand Up @@ -96,22 +95,24 @@ def is_reloaded(self) -> bool:

def reload(self: _T, control_plane: NodeGroup = None, suppress_exceptions: bool = False) -> _T:
if not control_plane:
control_plane = self._cluster.nodes['control-plane'].get_any_member()
control_plane = self.cluster.nodes['control-plane'].get_any_member()
cmd = f'kubectl get {self.kind} -n {self.namespace} {self.name} -o json'
result = control_plane.sudo(cmd, warn=suppress_exceptions)
self._cluster.log.verbose(result)
self.cluster.log.verbose(result)
self._reload_result = result.get_simple_result()
if self._reload_result:
self._obj = json.loads(self._reload_result.stdout)
return self

def apply(self, control_plane: NodeGroup = None) -> None:
if not control_plane:
control_plane = self._cluster.nodes['control-plane'].get_any_member()
control_plane = self.cluster.nodes['control-plane'].get_any_member()

json_str = self.to_json()
obj_filename = "_".join([self.kind, self.namespace, self.name, self.uid]) + '.json'
obj_path = f'/tmp/{obj_filename}'

control_plane.put(io.StringIO(json_str), obj_path, sudo=True)
control_plane.sudo(f'kubectl apply -f {obj_path} && sudo rm -f {obj_path}')
defer = control_plane.new_defer()
defer.put(io.StringIO(json_str), obj_path, sudo=True)
defer.sudo(f'kubectl apply -f {obj_path} && sudo rm -f {obj_path}')
defer.flush()
15 changes: 10 additions & 5 deletions kubemarine/patches/software_upgrade.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@
# The order of upgrade of defined by the implementation.

thirdparties:
calicoctl: []
crictl: []
calicoctl:
- v1.29.1
crictl:
- v1.29.1
packages:
docker:
version_rhel: []
Expand All @@ -34,7 +36,10 @@ packages:
version_rhel9: false
version_debian: false
plugins:
calico: []
nginx-ingress-controller: []
calico:
- v1.29.1
nginx-ingress-controller:
- v1.29.1
kubernetes-dashboard: []
local-path-provisioner: []
local-path-provisioner:
- v1.29.1
Loading

0 comments on commit 94eb677

Please sign in to comment.