
cpu manager

https://docs.openshift.com/container-platform/4.3/scalability_and_performance/using-cpu-manager.html

With cpuManagerPolicy: static, the kubelet pins containers of Guaranteed QoS pods that request whole CPUs to exclusive cores; all other pods are confined to the remaining shared CPUs.

oc get node

# label the chosen worker node so the example pod below is scheduled onto it
# (use whichever node name matches your cluster)
oc label node ip-10-0-138-181.us-west-2.compute.internal cpumanager=true
oc label node worker-0 cpumanager=true
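
To confirm the label landed:

oc get nodes -l cpumanager=true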

oc get machineconfigpool worker -o yaml

# alternative: add the label by hand with `oc edit machineconfigpool worker`
# metadata:
#   creationTimestamp: 2019-xx-xxx
#   generation: 3
#   labels:
#     custom-kubelet: cpumanager-enabled
oc patch machineconfigpool worker -p '{"metadata":{"labels": { "custom-kubelet": "cpumanager-enabled" } } }' --type=merge
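
Check that the pool now carries the label the KubeletConfig below selects on:

oc get machineconfigpool worker --show-labels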

cat << EOF > cpumanager-kubeletconfig.yaml
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: cpumanager-enabled
spec:
  machineConfigPoolSelector:
    matchLabels:
      custom-kubelet: cpumanager-enabled
  kubeletConfig:
     cpuManagerPolicy: static
     cpuManagerReconcilePeriod: 5s
EOF
oc create -f cpumanager-kubeletconfig.yaml
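
The Machine Config Operator rolls the new kubelet config out to the pool one node at a time (each node is cordoned, updated, and rebooted), so wait for the pool to settle before testing:

oc get kubeletconfig cpumanager-enabled
oc get machineconfigpool worker -w
oc get nodes   # nodes briefly show SchedulingDisabled while they update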

# the rendered kubelet MachineConfig embeds kubelet.conf as a URL-encoded data: URI;
# these helpers decode/encode such payloads
alias urldecode='python3 -c "import sys, urllib.parse as ul; \
    print(ul.unquote_plus(sys.argv[1]))"'

alias urlencode='python3 -c "import sys, urllib.parse as ul; \
    print(ul.quote_plus(sys.argv[1]))"'

# decode the embedded kubelet.conf and confirm it now carries the static policy
# (the 99-worker-...-kubelet name comes from `oc get mc`)
urldecode $(oc get mc 99-worker-29611144-39e5-4297-830d-026561fe377d-kubelet -o json | jq -r .spec.config.storage.files[0].contents.source | sed "s/data:text\/plain,//g") | jq
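
The same check can be run on the node itself; a sketch, assuming the node labeled earlier:

oc debug node/ip-10-0-138-181.us-west-2.compute.internal -- chroot /host grep cpuManager /etc/kubernetes/kubelet.conf
# cpuManagerPolicy: static
# cpuManagerReconcilePeriod: 5s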


# requests == limits with an integer cpu count puts the pod in the Guaranteed
# QoS class, which is what the static policy pins
cat << EOF > cpumanager-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  generateName: cpumanager-
spec:
  containers:
  - name: cpumanager
    image: gcr.io/google_containers/pause-amd64:3.0
    resources:
      requests:
        cpu: 1
        memory: "1G"
      limits:
        cpu: 1
        memory: "1G"
  nodeSelector:
    cpumanager: "true"
EOF
oc create -f cpumanager-pod.yaml
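
Verify where the pod landed and that it is Guaranteed (the name is generated, so look it up first):

oc get pods -o wide
oc describe $(oc get pods -o name | grep cpumanager) | grep "QoS Class"
# QoS Class: Guaranteed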

# on the node (e.g. oc debug node/<node>, then chroot /host), the pod's cgroup
# tree shows the pause container and its conmon monitor
systemctl status
# └─kubepods.slice
#   ├─kubepods-podcc529083_9d0a_43aa_9d9f_1fc0dc3b626b.slice
#   │ ├─crio-conmon-b67ba6af381740b5f9b459482e41a14d4ced2cd8e9431598d84066d20027ef06.scope
#   │ │ └─1434963 /usr/libexec/crio/conmon -s -c b67ba6af381740b5f9b459482e41a14d4ced2cd8e9431598d84066d20027ef06 -n k8s_cpumanager_>
#   │ ├─crio-conmon-4ab85736504471dcca960aea960ca01ab0fa582439e444d407ac8d001d6dbd2b.scope
#   │ │ └─1434127 /usr/libexec/crio/conmon -s -c 4ab85736504471dcca960aea960ca01ab0fa582439e444d407ac8d001d6dbd2b -n k8s_POD_cpumana>
#   │ ├─crio-b67ba6af381740b5f9b459482e41a14d4ced2cd8e9431598d84066d20027ef06.scope
#   │ │ └─1434975 /pause
#   │ └─crio-4ab85736504471dcca960aea960ca01ab0fa582439e444d407ac8d001d6dbd2b.scope
#   │   └─1434151 /usr/bin/pod

# inspect the cpuset cgroup of the pinned (pause) container
cd /sys/fs/cgroup/cpuset/kubepods.slice/kubepods-podcc529083_9d0a_43aa_9d9f_1fc0dc3b626b.slice/crio-b67ba6af381740b5f9b459482e41a14d4ced2cd8e9431598d84066d20027ef06.scope

# the container was granted exclusive use of CPU 2
for i in `ls cpuset.cpus tasks` ; do echo -n "$i "; cat $i ; done
# cpuset.cpus 2
# tasks 1434975
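
Another way to read the same affinity, using the PID from the tasks file above:

taskset -pc 1434975
# pid 1434975's current affinity list: 2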

# the kernel confirms the process may only run on CPU 2
grep Cpus_allowed_list /proc/1434975/status
# Cpus_allowed_list:      2

# by contrast, containers of burstable pods land in kubepods-burstable.slice
# and share the non-dedicated CPUs
systemctl status
# ├─kubepods-burstable.slice
# │ ├─kubepods-burstable-podb8410218_65e9_4ec2_b944_6f0f1709e6a9.slice
# │ │ ├─crio-6cefa86b950deb57dac809b57246fb553e0c96fc31ae1cd7b8efa43207995749.scope
# │ │ │ └─6696 /usr/bin/configmap-reload --webhook-url=http://localhost:8080/-/reload --volume-dir=/etc/serving-certs-ca-bundle
# │ │ ├─crio-conmon-958273b72d8d6f1a06a640bd158aa1f5dcc9372b232c79af9f3731068b0bcb9f.scope
# │ │ │ └─6922 /usr/libexec/crio/conmon -s -c 958273b72d8d6f1a06a640bd158aa1f5dcc9372b232c79af9f3731068b0bcb9f -n k8s_kube-rbac-pr>
# │ │ ├─crio-conmon-dc78df658a47a6bcad1772c5f0154c058b3b517f924c842eb9ba2c878edf86a3.scope
# │ │ │ └─6256 /usr/libexec/crio/conmon -s -c dc78df658a47a6bcad1772c5f0154c058b3b517f924c842eb9ba2c878edf86a3 -n k8s_telemeter-cl>
# │ │ ├─crio-958273b72d8d6f1a06a640bd158aa1f5dcc9372b232c79af9f3731068b0bcb9f.scope
# │ │ │ └─6958 /usr/bin/kube-rbac-proxy --secure-listen-address=:8443 --upstream=http://127.0.0.1:8080/ --tls-cert-file=/etc/tls/p>
# │ │ ├─crio-conmon-7a9aaeff818804cb48c6de76ef604e1241717ef25f9d2e31502bca5e03a0a126.scope
# │ │ │ └─5215 /usr/libexec/crio/conmon -s -c 7a9aaeff818804cb48c6de76ef604e1241717ef25f9d2e31502bca5e03a0a126 -n k8s_POD_telemete>
# │ │ ├─crio-dc78df658a47a6bcad1772c5f0154c058b3b517f924c842eb9ba2c878edf86a3.scope
# │ │ │ └─6321 /usr/bin/telemeter-client --id=02b8c3b4-9aed-4268-b1b7-84c998b50184 --from=https://prometheus-k8s.openshift-monitor>
# │ │ ├─crio-conmon-6cefa86b950deb57dac809b57246fb553e0c96fc31ae1cd7b8efa43207995749.scope
# │ │ │ └─6635 /usr/libexec/crio/conmon -s -c 6cefa86b950deb57dac809b57246fb553e0c96fc31ae1cd7b8efa43207995749 -n k8s_reload_telem>
# │ │ └─crio-7a9aaeff818804cb48c6de76ef604e1241717ef25f9d2e31502bca5e03a0a126.scope
# │ │   └─5292 /usr/bin/pod

# the burstable container is excluded from the dedicated core (2)
cat /sys/fs/cgroup/cpuset/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8410218_65e9_4ec2_b944_6f0f1709e6a9.slice/crio-dc78df658a47a6bcad1772c5f0154c058b3b517f924c842eb9ba2c878edf86a3.scope/cpuset.cpus
# 0-1,3

# the node's Allocated resources should now account for the exclusively requested CPU
oc describe node ip-10-0-138-181.us-west-2.compute.internal
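
To filter the describe output down to the relevant section ("Allocated resources" is the header as printed by `oc describe node`):

oc describe node ip-10-0-138-181.us-west-2.compute.internal | grep -A 6 "Allocated resources"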