WIP: tests: add test for bare-metal with ipv6
IPv6 brings some new complexities, particularly around IPAM.
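One way to see the IPAM behaviour this exercises: once the cluster is up, the pod CIDRs allocated to each node should be IPv6 ranges. A minimal check, assuming kubectl access to the test cluster:

kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDRs}{"\n"}{end}'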
justinsb committed Nov 11, 2024
1 parent 80806a8 commit 346c7ce
Showing 2 changed files with 247 additions and 0 deletions.
26 changes: 26 additions & 0 deletions .github/workflows/e2e.yml
@@ -38,3 +38,29 @@ jobs:
        with:
          name: tests-e2e-scenarios-bare-metal
          path: /tmp/artifacts/

  tests-e2e-scenarios-bare-metal-ipv6:
    runs-on: ubuntu-24.04
    timeout-minutes: 70
    steps:
      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          path: ${{ env.GOPATH }}/src/k8s.io/kops

      - name: Set up go
        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed
        with:
          go-version-file: '${{ env.GOPATH }}/src/k8s.io/kops/go.mod'

      - name: tests/e2e/scenarios/bare-metal/run-test
        working-directory: ${{ env.GOPATH }}/src/k8s.io/kops
        run: |
          timeout 60m tests/e2e/scenarios/bare-metal/scenario-ipv6
        env:
          ARTIFACTS: /tmp/artifacts
      - name: Archive production artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: tests-e2e-scenarios-bare-metal-ipv6
          path: /tmp/artifacts/
221 changes: 221 additions & 0 deletions tests/e2e/scenarios/bare-metal/scenario-ipv6
@@ -0,0 +1,221 @@
#!/usr/bin/env bash

# Copyright 2024 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail
set -o xtrace

REPO_ROOT=$(git rev-parse --show-toplevel)
cd ${REPO_ROOT}

WORKDIR=${REPO_ROOT}/.build/

BINDIR=${WORKDIR}/bin
mkdir -p "${BINDIR}"
go build -o ${BINDIR}/kops ./cmd/kops

KOPS=${BINDIR}/kops

function cleanup() {
  echo "running dump-artifacts"
  ${REPO_ROOT}/tests/e2e/scenarios/bare-metal/dump-artifacts || true

  echo "running cleanup"
  ${REPO_ROOT}/tests/e2e/scenarios/bare-metal/cleanup || true
}

if [[ -z "${SKIP_CLEANUP:-}" ]]; then
  trap cleanup EXIT
fi

# Create the directory that will back our mock s3 storage
rm -rf ${WORKDIR}/s3
mkdir -p ${WORKDIR}/s3/

# Start our VMs
${REPO_ROOT}/tests/e2e/scenarios/bare-metal/start-vms

. hack/dev-build-metal.sh

echo "Waiting 10 seconds for VMs to start"
sleep 10

# Remove from known-hosts in case of reuse
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.10 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.11 || true
ssh-keygen -f ~/.ssh/known_hosts -R 10.123.45.12 || true

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.11 uptime
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.12 uptime

cd ${REPO_ROOT}

# Enable feature flag for bare metal
export KOPS_FEATURE_FLAGS=Metal

# Set up the AWS credentials
export AWS_SECRET_ACCESS_KEY=secret
export AWS_ACCESS_KEY_ID=accesskey
export AWS_ENDPOINT_URL=http://10.123.45.1:8443
export AWS_REGION=us-east-1

export S3_ENDPOINT=${AWS_ENDPOINT_URL}
export S3_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
export S3_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}

# Create the state-store bucket in our mock s3 server
export KOPS_STATE_STORE=s3://kops-state-store/
aws --version
aws s3 ls s3://kops-state-store || aws s3 mb s3://kops-state-store

export CLUSTER_NAME=metalipv6.k8s.local

# List clusters (there should not be any yet)
${KOPS} get cluster || true

# Create a cluster
${KOPS} create cluster --cloud=metal ${CLUSTER_NAME} --zones main --networking cni --ipv6
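# (Illustrative, optional) Inspect what --ipv6 set in the cluster spec; the exact
# field names matched here (e.g. nonMasqueradeCIDR) are an assumption, so keep the
# check non-fatal.
${KOPS} get cluster ${CLUSTER_NAME} -oyaml | grep -iE 'ipv6|nonMasqueradeCIDR' || true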

# Set the API ingress IP, required for the metal cloud
# TODO: is this the best option?
${KOPS} edit cluster ${CLUSTER_NAME} --set spec.api.publicName=10.123.45.10

# Use latest etcd-manager image (while we're adding features)
${KOPS} edit cluster ${CLUSTER_NAME} --set 'spec.etcdClusters[*].manager.image=us-central1-docker.pkg.dev/k8s-staging-images/etcd-manager/etcd-manager-static:latest'

# Use 1.31 kubernetes so we get kube-apiserver fixes
export KOPS_RUN_TOO_NEW_VERSION=1
"${KOPS}" edit cluster ${CLUSTER_NAME} "--set=cluster.spec.kubernetesVersion=1.31.0"

# List clusters
${KOPS} get cluster
${KOPS} get cluster -oyaml

# List instance groups
${KOPS} get ig --name ${CLUSTER_NAME}
${KOPS} get ig --name ${CLUSTER_NAME} -oyaml

# Apply basic configuration
${KOPS} update cluster ${CLUSTER_NAME}
${KOPS} update cluster ${CLUSTER_NAME} --yes --admin

# Start an SSH agent; enroll assumes SSH connectivity to the VMs with the key in the agent
eval $(ssh-agent)
ssh-add ${REPO_ROOT}/.build/.ssh/id_ed25519

# Enroll the control-plane VM
${KOPS} toolbox enroll --cluster ${CLUSTER_NAME} --instance-group control-plane-main --host 10.123.45.10 --v=2

# Manual creation of "volumes" for etcd, and setting up peer nodes
cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 tee -a /etc/hosts
# Hosts added for etcd discovery
10.123.45.10 node0.main.${CLUSTER_NAME}
10.123.45.10 node0.events.${CLUSTER_NAME}
EOF

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 cat /etc/hosts

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 mkdir -p /mnt/disks/${CLUSTER_NAME}--main--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 touch /mnt/disks/${CLUSTER_NAME}--main--0/mnt/please-create-new-cluster

ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 mkdir -p /mnt/disks/${CLUSTER_NAME}--events--0/mnt
ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@10.123.45.10 touch /mnt/disks/${CLUSTER_NAME}--events--0/mnt/please-create-new-cluster


echo "Waiting 300 seconds for kube to start"
sleep 300

kubectl get nodes
kubectl get pods -A

# Install kindnet
kubectl create -f https://raw.githubusercontent.com/aojea/kindnet/main/install-kindnet.yaml
echo "Waiting 10 seconds for kindnet to start"
sleep 10
kubectl get nodes
kubectl get pods -A
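# (Illustrative, optional) Instead of relying only on the fixed sleeps above, node
# readiness can be polled once the CNI is installed; shown here as a non-fatal sketch.
kubectl wait --for=condition=Ready node --all --timeout=120s || true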

# For host records
kubectl create ns kops-system
kubectl apply -f ${REPO_ROOT}/k8s/crds/kops.k8s.io_hosts.yaml

# kops-controller extra permissions
kubectl apply --server-side -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kops-controller:pki-verifier
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops-controller:pki-verifier
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kops-controller:pki-verifier
rules:
- apiGroups:
  - "kops.k8s.io"
  resources:
  - hosts
  verbs:
  - get
  - list
  - watch
# Must be able to set node addresses
# TODO: Move out?
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
EOF
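# (Illustrative, optional) Sanity-check that the binding above grants the expected
# access to the identity kops-controller authenticates as; the resource name
# hosts.kops.k8s.io follows from the CRD applied earlier.
kubectl auth can-i list hosts.kops.k8s.io --as=system:serviceaccount:kube-system:kops-controller || true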

function enroll_node() {
  local node_ip=$1

  # Manual "discovery" for control-plane endpoints
  # TODO: Replace with well-known IP
  cat <<EOF | ssh -o StrictHostKeyChecking=accept-new -i ${REPO_ROOT}/.build/.ssh/id_ed25519 root@${node_ip} tee -a /etc/hosts
# Hosts added for leader discovery
10.123.45.10 kops-controller.internal.${CLUSTER_NAME}
10.123.45.10 api.internal.${CLUSTER_NAME}
EOF

  timeout 10m ${KOPS} toolbox enroll --cluster ${CLUSTER_NAME} --instance-group nodes-main --host ${node_ip} --v=2
}

enroll_node 10.123.45.11
enroll_node 10.123.45.12

echo "Waiting 30 seconds for nodes to be ready"
sleep 30

kubectl get nodes
kubectl get pods -A
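# (Illustrative, optional) List any Host records in the kops-system namespace; whether
# enrollment has populated the hosts CRD at this point is an assumption, so keep this
# non-fatal.
kubectl get hosts.kops.k8s.io -n kops-system || true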


echo "Test successful"
