Promote nodes by setting machine labels
Switch to the preferred way to promote an agent to a server in Rancher
by setting the following labels on the corresponding machines:

```
rke.cattle.io/control-plane-role: "true"
rke.cattle.io/etcd-role: "true"
```

Signed-off-by: Kiefer Chang <kiefer.chang@suse.com>
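
For illustration only, the same labels could be applied by hand with kubectl; the machine name below is a placeholder, while the fleet-local namespace matches the one used by the promote script in this change:

```
# Hypothetical example: label the CAPI machine that backs the node so that
# Rancher promotes it. "custom-abcde" stands in for the real machine name.
kubectl label -n fleet-local machines.cluster.x-k8s.io custom-abcde \
  rke.cattle.io/control-plane-role=true \
  rke.cattle.io/etcd-role=true
```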
bk201 authored and gitlawr committed Sep 14, 2021
1 parent 965a9c9 commit 6df9b48
Showing 2 changed files with 58 additions and 12 deletions.
43 changes: 43 additions & 0 deletions deploy/charts/harvester/templates/configmap.yaml
@@ -21,3 +21,46 @@ data:
mode: {{ .Values.service.vip.mode }}
hwAddress: {{ .Values.service.vip.hwAddress | quote }}
loadBalancerIP: {{ .Values.service.vip.loadBalancerIP | quote }}

---
kind: ConfigMap
apiVersion: v1
metadata:
name: harvester-helpers
labels:
{{ include "harvester.labels" . | indent 4 }}
data:
promote.sh: |-
{{`KUBECTL="/host/$(readlink /host/var/lib/rancher/rke2/bin)/kubectl"
CUSTOM_MACHINE=$($KUBECTL get node $HOSTNAME -o go-template=$'{{index .metadata.annotations "cluster.x-k8s.io/machine"}}\n')
until $KUBECTL get machines.cluster.x-k8s.io $CUSTOM_MACHINE -n fleet-local &> /dev/null
do
echo Waiting for custom machine $CUSTOM_MACHINE...
sleep 2
done
VIP=$($KUBECTL get configmap vip -n harvester-system -o=jsonpath='{.data.ip}')
cat > /host/etc/rancher/rke2/config.yaml.d/90-harvester-server.yaml <<EOF
cni: multus,canal
disable: rke2-ingress-nginx
cluster-cidr: 10.52.0.0/16
service-cidr: 10.53.0.0/16
cluster-dns: 10.53.0.10
tls-san:
- $VIP
EOF
$KUBECTL label -n fleet-local machines.cluster.x-k8s.io $CUSTOM_MACHINE rke.cattle.io/control-plane-role=true rke.cattle.io/etcd-role=true
while true
do
CONTROL_PLANE=$($KUBECTL get node $HOSTNAME -o go-template=$'{{index .metadata.labels "node-role.kubernetes.io/control-plane"}}\n' || true)
if [ "$CONTROL_PLANE" = "true" ]; then
break
fi
echo Waiting for promotion...
sleep 2
done
`}}
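
As a rough sketch, promotion can be checked afterwards from any host with kubectl access; promote.sh above waits on the same control-plane label before it finishes (the node name is a placeholder):

```
# List the machine labels set by promote.sh (control-plane and etcd roles).
kubectl get machines.cluster.x-k8s.io -n fleet-local --show-labels

# Confirm the node picked up the control-plane role label.
kubectl get node <node-name> --show-labels | grep node-role.kubernetes.io/control-plane
```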
27 changes: 15 additions & 12 deletions pkg/controller/master/node/promote_controller.go
@@ -44,17 +44,9 @@ const (
promoteImage = "busybox:1.32.0"
promoteRootMountPath = "/host"

promoteCommand = `echo start promote && \
cat > /etc/rancher/rke2/config.yaml.d/90-harvester-server.yaml <<EOF
cni: multus,canal
disable: rke2-ingress-nginx
cluster-cidr: 10.52.0.0/16
service-cidr: 10.53.0.0/16
cluster-dns: 10.53.0.10
EOF
systemctl disable --now rke2-agent && \
systemctl enable --now rke2-server && \
echo finish promote`
promoteScriptsMountPath = "/harvester-helpers"
promoteScript = "/harvester-helpers/promote.sh"
helperConfigMapName = "harvester-helpers"
)

var (
@@ -449,7 +441,17 @@ func buildPromoteJob(namespace string, node *corev1.Node) *batchv1.Job {
Path: "/", Type: &hostPathDirectory,
},
},
}, {
Name: "helpers",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: helperConfigMapName,
},
},
},
}},
ServiceAccountName: "harvester",
},
},
},
@@ -461,10 +463,11 @@ func buildPromoteJob(namespace string, node *corev1.Node) *batchv1.Job {
Name: "promote",
Image: promoteImage,
Command: []string{"sh"},
Args: []string{"-c", fmt.Sprintf(`chroot %s bash -c '%s'`, promoteRootMountPath, promoteCommand)},
Args: []string{"-e", promoteScript},
Resources: corev1.ResourceRequirements{},
VolumeMounts: []corev1.VolumeMount{
{Name: "host-root", MountPath: promoteRootMountPath},
{Name: "helpers", MountPath: promoteScriptsMountPath},
},
ImagePullPolicy: corev1.PullIfNotPresent,
SecurityContext: &corev1.SecurityContext{
