diff --git a/redhat/ocp4/4.3/4.3.cpu.manager.md b/redhat/ocp4/4.3/4.3.cpu.manager.md
index 9f3d293c5..c222707c8 100644
--- a/redhat/ocp4/4.3/4.3.cpu.manager.md
+++ b/redhat/ocp4/4.3/4.3.cpu.manager.md
@@ -1,5 +1,10 @@
 # cpu manager
 https://docs.openshift.com/container-platform/4.3/scalability_and_performance/using-cpu-manager.html
+
+video
+- https://youtu.be/gzdb2AURhvo
+- https://www.bilibili.com/video/BV1Ua4y1t7aQ/
+
 ```bash
 oc get node
 
@@ -29,7 +34,7 @@ spec:
     cpuManagerPolicy: static
     cpuManagerReconcilePeriod: 5s
 EOF
-oc create -f cpumanager-kubeletconfig.yaml
+oc apply -f cpumanager-kubeletconfig.yaml
 
 alias urldecode='python3 -c "import sys, urllib.parse as ul; \
     print(ul.unquote_plus(sys.argv[1]))"'
@@ -37,14 +42,18 @@ alias urldecode='python3 -c "import sys, urllib.parse as ul; \
 alias urlencode='python3 -c "import sys, urllib.parse as ul; \
     print (ul.quote_plus(sys.argv[1]))"'
 
-urldecode $(oc get mc 99-worker-29611144-39e5-4297-830d-026561fe377d-kubelet -o json | jq -r .spec.config.storage.files[0].contents.source | sed "s/data:text\/plain,//g") | jq
+worker_mc_kubelet_yaml=$(oc get mc | grep kubelet | grep 99 | awk '{print $1}')
+
+urldecode $(oc get mc ${worker_mc_kubelet_yaml} -o json | jq -r .spec.config.storage.files[0].contents.source | sed "s/data:text\/plain,//g") | jq
 
+oc debug node/infra0.hsc.redhat.ren
+cat /host/etc/kubernetes/kubelet.conf | grep cpuManager
 
 cat << EOF > cpumanager-pod.yaml
 apiVersion: v1
 kind: Pod
 metadata:
-  generateName: cpumanager-
+  name: cpumanager-0
 spec:
   containers:
   - name: cpumanager
@@ -59,7 +68,7 @@ spec:
   nodeSelector:
     cpumanager: "true"
 EOF
-oc create -f cpumanager-pod.yaml
+oc apply -f cpumanager-pod.yaml
 
 systemctl status
 # └─kubepods.slice
@@ -74,11 +83,11 @@ systemctl status
 cd /sys/fs/cgroup/cpuset/kubepods.slice/kubepods-podcc529083_9d0a_43aa_9d9f_1fc0dc3b626b.slice/crio-b67ba6af381740b5f9b459482e41a14d4ced2cd8e9431598d84066d20027ef06.scope
 
 for i in `ls cpuset.cpus tasks` ; do echo -n "$i "; cat $i ; done
-# cpuset.cpus 2
-# tasks 1434975
+# cpuset.cpus 12
+# tasks 30894
 
-grep Cpus_allowed_list /proc/1434975/status
-# Cpus_allowed_list: 2
+grep Cpus_allowed_list /proc/30894/status
+# Cpus_allowed_list: 12
 
 systemctl status
 # ├─kubepods-burstable.slice
@@ -104,6 +113,20 @@ cat /sys/fs/cgroup/cpuset/kubepods.slice/kubepods-burstable.slice/kubepods-burst
 
 oc describe node ip-10-0-138-181.us-west-2.compute.internal
 
+# You can see the other pods are shut out of CPU 12; the few processes left unrestricted are control processes.
+cd /sys/fs/cgroup/cpuset/kubepods.slice/kubepods-besteffort.slice
+find . -name cpuset.cpus | grep crio | xargs -I DEMO cat DEMO
+# 0-11,13-23
+# 0-23
+# 0-11,13-23
+# 0-23
+# 0-11,13-23
+# 0-23
+# 0-11,13-23
+# 0-23
+# 0-11,13-23
+# 0-23
+# 0-23
 ```
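The hunks above verify the pinned pod and the besteffort pods one cgroup at a time. As a quick cross-check, here is a minimal sketch (an illustration, not part of the patch) that walks every CRI-O container scope under kubepods.slice in one pass; it assumes the cgroup v1 cpuset layout shown above and reuses the infra0.hsc.redhat.ren node from this walkthrough:

```bash
# Print the cpuset of every CRI-O container scope under kubepods.slice.
# With cpuManagerPolicy: static, the Guaranteed pod's scope should report
# its dedicated CPU (12 here), while besteffort scopes report 0-11,13-23
# (or 0-23 for the unrestricted control processes).
oc debug node/infra0.hsc.redhat.ren -- chroot /host /bin/bash -c '
  for f in $(find /sys/fs/cgroup/cpuset/kubepods.slice -name cpuset.cpus | grep crio); do
    printf "%s %s\n" "$f" "$(cat "$f")"
  done
'
```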
diff --git a/redhat/ocp4/4.3/4.3.numa.md b/redhat/ocp4/4.3/4.3.numa.md
index ae218db5f..45213c8e2 100644
--- a/redhat/ocp4/4.3/4.3.numa.md
+++ b/redhat/ocp4/4.3/4.3.numa.md
@@ -4,6 +4,10 @@ https://docs.openshift.com/container-platform/4.3/scalability_and_performance/us
 
 https://www.sharcnet.ca/help/index.php/Using_numactl
 
+video
+- https://youtu.be/J2VQQZxk3eY
+- https://www.bilibili.com/video/BV1HK4y1r7Di/
+
 ```bash
 oc get featuregate/cluster -o yaml
@@ -33,7 +37,7 @@ cat << EOF > cpumanager-pod.yaml
 apiVersion: v1
 kind: Pod
 metadata:
-  generateName: cpumanager-
+  name: cpumanager-numa
 spec:
   containers:
   - name: cpumanager
@@ -48,8 +52,9 @@ spec:
   nodeSelector:
     cpumanager: "true"
 EOF
-oc create -f cpumanager-pod.yaml
+oc apply -f cpumanager-pod.yaml
 
+# on the worker node
 yum install numactl
 # run the command on NUMA node 0 (both its CPUs and its memory come from NUMA node 0)
 numactl --cpunodebind=0 --membind=0 COMMAND
diff --git a/redhat/training/ocp4.install.homework.calico.md b/redhat/training/ocp4.install.homework.calico.md
index 7a6015ee5..cedb7ee85 100644
--- a/redhat/training/ocp4.install.homework.calico.md
+++ b/redhat/training/ocp4.install.homework.calico.md
@@ -85,10 +85,20 @@ oc get pod -o json | jq -r '.items[].spec.containers[].image' | sort | uniq
 
 oc project calico-system
 oc get pod -o json | jq -r '.items[].spec.containers[].image' | sort | uniq
+# calico/ctl:v3.13.2
 # docker.io/calico/kube-controllers:v3.13.2
 # docker.io/calico/node:v3.13.2
 # docker.io/calico/typha:v3.13.2
 
+oc apply -f calicoctl.yaml
+
+oc exec calicoctl -n calico-system -it -- /calicoctl get node -o wide
+
+oc exec calicoctl -n calico-system -it -- /calicoctl ipam show --show-blocks
+
+oc exec calicoctl -n calico-system -it -- /calicoctl get ipPool -o wide
+
+
 ```
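The calico hunk applies a calicoctl.yaml that the diff itself does not include. For reference, below is a minimal sketch of such a manifest, modeled on the upstream calicoctl pod manifest from the Calico docs; only the calico/ctl:v3.13.2 image and the oc apply / oc exec commands come from the patch. The namespace is set to calico-system to match the oc exec commands above, and the calicoctl ServiceAccount with its RBAC (shipped alongside the upstream manifest) is assumed to already exist:

```bash
# A sketch of calicoctl.yaml (assumed content, following the upstream
# calicoctl pod manifest; the calicoctl ServiceAccount/RBAC must exist).
cat << EOF > calicoctl.yaml
apiVersion: v1
kind: Pod
metadata:
  name: calicoctl
  namespace: calico-system
spec:
  nodeSelector:
    kubernetes.io/os: linux
  hostNetwork: true
  serviceAccountName: calicoctl
  containers:
  - name: calicoctl
    image: calico/ctl:v3.13.2
    command: ["/bin/sh", "-c", "while true; do sleep 3600; done"]
    env:
    - name: DATASTORE_TYPE
      value: kubernetes
EOF
oc apply -f calicoctl.yaml

# Optional: wrap the repeated oc exec invocation in an alias.
alias calicoctl='oc exec calicoctl -n calico-system -it -- /calicoctl'
calicoctl get ipPool -o wide
```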