@@ -7,28 +7,42 @@ We plug in our own bootstrap provider with the goal of enabling cluster-api node
 *Note*: the name & zone matter; we need to match the values we'll create later in the CAPI resources.
 
 ```
-kops create cluster clusterapi.k8s.local --zones us-east4-a
-kops update cluster clusterapi.k8s.local --yes --admin
-kops validate cluster --wait=10m
+go run ./cmd/kops create cluster clusterapi.k8s.local --zones us-east4-a
+go run ./cmd/kops update cluster clusterapi.k8s.local --yes --admin
+go run ./cmd/kops validate cluster --wait=10m
 ```
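+
+Optionally, sanity-check that the admin kubeconfig exported by `--admin` is the current context and that the control plane is reachable:
+```
+kubectl config current-context
+kubectl get nodes -o wide
+```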

-#cd cluster-api-provider-gcp
-#REGISTRY=${USER} make docker-build docker-push
-#REGISTRY=${USER} make install-management-cluster # Doesn't yet exist in capg
-
+# Install cert-manager

+```
+kubectl apply --server-side -f https://github.com/cert-manager/cert-manager/releases/download/v1.18.2/cert-manager.yaml

-# TODO: Install cert-manager
+kubectl wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager
+kubectl wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager-cainjector
+kubectl wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager-webhook
+```
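+
+cert-manager should be healthy before applying the CAPI/CAPG manifests in the next step, since they rely on it for webhook certificates; a quick check that its CRDs registered:
+```
+kubectl get crd certificates.cert-manager.io issuers.cert-manager.io
+```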

 # Install CAPI and CAPG
 ```
-cd clusterapi
-kubectl apply --server-side -f manifests/build
+REPO_ROOT=$(git rev-parse --show-toplevel)
+kubectl apply --server-side -f ${REPO_ROOT}/clusterapi/manifests/build
 ```
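+
+Before moving on, it's worth waiting for the controllers to come up; assuming the bundled manifests keep the upstream default namespaces and deployment names (capi-system/capi-controller-manager, capg-system/capg-controller-manager), something like:
+```
+kubectl wait --for=condition=Available --timeout=5m -n capi-system deployment/capi-controller-manager
+kubectl wait --for=condition=Available --timeout=5m -n capg-system deployment/capg-controller-manager
+```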

 # Install our CRDs
 ```
-kustomize build config | kubectl apply --server-side -f -
+kustomize build ${REPO_ROOT}/k8s | kubectl apply --server-side -f -
+kustomize build ${REPO_ROOT}/clusterapi/config | kubectl apply --server-side -f -
+```
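+
+Assuming the kops CRDs register under the kops.k8s.io API group, the new resource types should now be visible:
+```
+kubectl api-resources --api-group=kops.k8s.io
+```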
+
+## Create our Cluster object
+```
+go run ./cmd/kops get cluster clusterapi.k8s.local -oyaml | kubectl apply --server-side -n kube-system -f -
+```
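+
+This stores the kops Cluster spec in the management cluster; to confirm it landed (using the fully-qualified kops.k8s.io resource name so it doesn't collide with the CAPI Cluster kind):
+```
+kubectl get clusters.kops.k8s.io -n kube-system
+```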
+
+## Create our InstanceGroup object
+
+```
+go run ./cmd/kops get ig nodes-us-east4-a --name clusterapi.k8s.local -oyaml | kubectl apply --server-side -n kube-system -f -
 ```
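+
+Likewise, the InstanceGroup should now be visible as a kops.k8s.io resource:
+```
+kubectl get instancegroups.kops.k8s.io -n kube-system
+```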

 # Remove any stuff left over from previous runs
@@ -37,9 +51,16 @@ kubectl delete machinedeployment --all
 kubectl delete gcpmachinetemplate --all
 ```

+```
+# No longer needed?
+# Very carefully create a MachineDeployment matching our configuration
+#cat ${REPO_ROOT}/clusterapi/examples/manifest.yaml | IMAGE_ID=projects/ubuntu-os-cloud/global/images/family/ubuntu-2404-lts-amd64 GCP_NODE_MACHINE_TYPE=e2-medium KUBERNETES_VERSION=v1.34.0 WORKER_MACHINE_COUNT=1 GCP_ZONE=us-east4-a GCP_REGION=us-east4 GCP_NETWORK_NAME=clusterapi-k8s-local GCP_SUBNET=us-east4-clusterapi-k8s-local GCP_PROJECT=$(gcloud config get project) CLUSTER_NAME=clusterapi-k8s-local envsubst | kubectl apply --server-side -n kube-system -f -
+```
+
+
 ```
 # Very carefully create a MachineDeployment matching our configuration
-cat examples/manifest.yaml | IMAGE_ID=projects/ubuntu-os-cloud/global/images/family/ubuntu-2204-lts GCP_NODE_MACHINE_TYPE=e2-medium KUBERNETES_VERSION=v1.28.6 WORKER_MACHINE_COUNT=1 GCP_ZONE=us-east4-a GCP_REGION=us-east4 GCP_NETWORK_NAME=clusterapi-k8s-local GCP_SUBNET=us-east4-clusterapi-k8s-local GCP_PROJECT=$(gcloud config get project) CLUSTER_NAME=clusterapi-k8s-local envsubst | kubectl apply --server-side -n kube-system -f -
+cat ${REPO_ROOT}/clusterapi/examples/machinedeployment-direct.yaml | IMAGE_ID=projects/ubuntu-os-cloud/global/images/family/ubuntu-2404-lts-amd64 GCP_NODE_MACHINE_TYPE=e2-medium KUBERNETES_VERSION=v1.34.0 WORKER_MACHINE_COUNT=1 GCP_ZONE=us-east4-a GCP_SUBNET=us-east4-clusterapi-k8s-local CLUSTER_NAME=clusterapi.k8s.local CLUSTER_NAME_ESCAPED=clusterapi-k8s-local envsubst | kubectl apply --server-side -n kube-system -f -
 ```
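+
+If everything is wired up, the MachineDeployment should create Machines that eventually register as Nodes; a rough way to watch progress (assuming the resources were created in kube-system as above):
+```
+kubectl get machinedeployments,machines -n kube-system
+kubectl get nodes -w
+```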

 # IMAGE_ID=projects/debian-cloud/global/images/family/debian-12 doesn't work with user-data (????)