test/e2e: use vSphere projects from Boskos
Signed-off-by: Stefan Büringer <buringerst@vmware.com>
sbueringer committed Jun 10, 2024
1 parent d9007c8 commit 26cbd28
Showing 2 changed files with 90 additions and 48 deletions.
136 changes: 89 additions & 47 deletions hack/e2e.sh
@@ -21,16 +21,59 @@ set -o pipefail # any non-zero exit code in a piped command causes the pipeline to fail
export PATH=${PWD}/hack/tools/bin:${PATH}
REPO_ROOT=$(git rev-parse --show-toplevel)

# In CI, ARTIFACTS is set to a different directory. This stores the value of
# ARTIFACTS in ORIGINAL_ARTIFACTS and replaces ARTIFACTS with a temporary directory
# from which credentials are scrubbed at the end of the test.
export ORIGINAL_ARTIFACTS=""
export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}"
if [[ "${ARTIFACTS}" != "${REPO_ROOT}/_artifacts" ]]; then
ORIGINAL_ARTIFACTS="${ARTIFACTS}"
ARTIFACTS=$(mktemp -d)
fi

export BOSKOS_RESOURCE_OWNER=cloud-provider-vsphere
if [[ "${JOB_NAME}" != "" ]]; then
export BOSKOS_RESOURCE_OWNER="${JOB_NAME}/${BUILD_ID}"
fi
export BOSKOS_RESOURCE_TYPE=vsphere-project-cloud-provider

on_exit() {
  # release IPClaim
  echo "Releasing IP claims"
  kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${CONTROL_PLANE_IPCLAIM_NAME}" || true
  kubectl --kubeconfig="${KUBECONFIG}" delete "ipaddressclaim.ipam.cluster.x-k8s.io" "${WORKLOAD_IPCLAIM_NAME}" || true
  # Stop boskos heartbeat
  [[ -z ${HEART_BEAT_PID:-} ]] || kill -9 "${HEART_BEAT_PID}"

  # If Boskos is being used then release the vsphere project.
  [ -z "${BOSKOS_HOST:-}" ] || docker run -e VSPHERE_USERNAME -e VSPHERE_PASSWORD gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest release --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-name="${BOSKOS_RESOURCE_NAME}" --vsphere-server="${VSPHERE_SERVER}" --vsphere-tls-thumbprint="${VSPHERE_TLS_THUMBPRINT}" --vsphere-folder="${BOSKOS_RESOURCE_FOLDER}" --vsphere-resource-pool="${BOSKOS_RESOURCE_POOL}"

  # kill the VPN
  docker kill vpn

  # no need to revoke credentials as it is GCE-provided
  # Cleanup VSPHERE_PASSWORD from temporary artifacts directory.
  if [[ "${ORIGINAL_ARTIFACTS}" != "" ]]; then
    # Delete non-text files from the artifacts directory to avoid accidentally leaking files
    find "${ARTIFACTS}" -type f -exec file --mime-type {} \; | grep -v -E -e "text/plain|text/xml|application/json|inode/x-empty" | while IFS= read -r line
    do
      file="$(echo "${line}" | cut -d ':' -f1)"
      mimetype="$(echo "${line}" | cut -d ':' -f2)"
      echo "Deleting file ${file} of type ${mimetype}"
      rm "${file}"
    done || true
    # Replace the secret and the base64-encoded secret in all files.
    if [ -n "$VSPHERE_PASSWORD" ]; then
      grep -I -r -l -e "${VSPHERE_PASSWORD}" "${ARTIFACTS}" | while IFS= read -r file
      do
        echo "Cleaning up VSPHERE_PASSWORD from file ${file}"
        sed -i "s/${VSPHERE_PASSWORD}/REDACTED/g" "${file}"
      done || true
      VSPHERE_PASSWORD_B64=$(echo -n "${VSPHERE_PASSWORD}" | base64 --wrap=0)
      grep -I -r -l -e "${VSPHERE_PASSWORD_B64}" "${ARTIFACTS}" | while IFS= read -r file
      do
        echo "Cleaning up VSPHERE_PASSWORD_B64 from file ${file}"
        sed -i "s/${VSPHERE_PASSWORD_B64}/REDACTED/g" "${file}"
      done || true
    fi
    # Move all artifacts to the original artifacts location.
    mv "${ARTIFACTS}"/* "${ORIGINAL_ARTIFACTS}/"
  fi
}

trap on_exit EXIT
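
For reference, the credential scrubbing in on_exit above can be sanity-checked after a CI run with a couple of greps. A minimal sketch, not part of the commit, assuming VSPHERE_PASSWORD and ORIGINAL_ARTIFACTS are still set in the environment (the scrubbing only runs when ORIGINAL_ARTIFACTS is non-empty):

# Sketch: confirm that neither the plaintext nor the base64-encoded password survived the cleanup.
VSPHERE_PASSWORD_B64=$(echo -n "${VSPHERE_PASSWORD}" | base64 --wrap=0)
if grep -I -r -e "${VSPHERE_PASSWORD}" -e "${VSPHERE_PASSWORD_B64}" "${ORIGINAL_ARTIFACTS}"; then
  echo "ERROR: credentials found in artifacts" 1>&2
  exit 1
fi
echo "artifacts are clean"
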
@@ -50,57 +93,56 @@ docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \
# Tail the vpn logs
docker logs vpn

# Sleep to allow vpn container to start running
sleep 30

function kubectl_get_jsonpath() {
  local OBJECT_KIND="${1}"
  local OBJECT_NAME="${2}"
  local JSON_PATH="${3}"
# Wait until the VPN connection is active.
function wait_for_vpn_up() {
  local n=0
  until [ $n -ge 30 ]; do
    OUTPUT=$(kubectl --kubeconfig="${KUBECONFIG}" get "${OBJECT_KIND}.ipam.cluster.x-k8s.io" "${OBJECT_NAME}" -o=jsonpath="${JSON_PATH}")
    if [[ "${OUTPUT}" != "" ]]; then
    curl "https://${VSPHERE_SERVER}" --connect-timeout 2 -k -v && RET=$? || RET=$?
    if [[ "$RET" -eq 0 ]]; then
      break
    fi
    n=$((n + 1))
    sleep 1
  done

  if [[ "${OUTPUT}" == "" ]]; then
    echo "Received empty output getting ${JSON_PATH} from ${OBJECT_KIND}/${OBJECT_NAME}" 1>&2
    return 1
  else
    echo "${OUTPUT}"
    return 0
  fi
}

function claim_ip() {
IPCLAIM_NAME="$1"
export IPCLAIM_NAME
sed \
-e "s/\${IPCLAIM_NAME}/${IPCLAIM_NAME}/" \
-e "s/\${BUILD_ID}/${BUILD_ID}/" \
-e "s/\${JOB_NAME}/${JOB_NAME}/" \
"${REPO_ROOT}/hack/ipclaim-template.yaml" | kubectl --kubeconfig="${KUBECONFIG}" create -f - 1>&2
IPADDRESS_NAME=$(kubectl_get_jsonpath ipaddressclaim "${IPCLAIM_NAME}" '{@.status.addressRef.name}')
kubectl --kubeconfig="${KUBECONFIG}" get "ipaddresses.ipam.cluster.x-k8s.io" "${IPADDRESS_NAME}" -o=jsonpath='{@.spec.address}'
return "$RET"
}
wait_for_vpn_up

# If BOSKOS_HOST is set then acquire a vsphere-project from Boskos.
if [ -n "${BOSKOS_HOST:-}" ]; then
  # Check out the account from Boskos and store the produced environment
  # variables in a temporary file.
  account_env_var_file="$(mktemp)"
  docker run gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest acquire --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-type="${BOSKOS_RESOURCE_TYPE}" 1>"${account_env_var_file}"
  checkout_account_status="${?}"

  # If the checkout process was a success then load the account's
  # environment variables into this process.
  # shellcheck disable=SC1090
  [ "${checkout_account_status}" = "0" ] && . "${account_env_var_file}"
  export BOSKOS_RESOURCE_NAME=${BOSKOS_RESOURCE_NAME}
  export VSPHERE_FOLDER=${BOSKOS_RESOURCE_FOLDER}
  export VSPHERE_RESOURCE_POOL=${BOSKOS_RESOURCE_POOL}
  export CONTROL_PLANE_ENDPOINT_IP="${BOSKOS_RESOURCE_IP_POOL_IP_0}"
  export WORKLOAD_CONTROL_PLANE_ENDPOINT_IP="${BOSKOS_RESOURCE_IP_POOL_IP_1}"

  # Always remove the account environment variable file. It contains
  # sensitive information.
  rm -f "${account_env_var_file}"

  if [ ! "${checkout_account_status}" = "0" ]; then
    echo "error getting vsphere project from Boskos" 1>&2
    exit "${checkout_account_status}"
  fi

export KUBECONFIG="/root/ipam-conf/capv-services.conf"


# Retrieve an IP to be used as the kube-vip IP
CONTROL_PLANE_IPCLAIM_NAME="ip-claim-$(openssl rand -hex 20)"
CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${CONTROL_PLANE_IPCLAIM_NAME}")

# Retrieve an IP to be used for the workload cluster in v1a3/v1a4 -> v1b1 upgrade tests
WORKLOAD_IPCLAIM_NAME="workload-ip-claim-$(openssl rand -hex 20)"
WORKLOAD_CONTROL_PLANE_ENDPOINT_IP=$(claim_ip "${WORKLOAD_IPCLAIM_NAME}")

export CONTROL_PLANE_ENDPOINT_IP
export WORKLOAD_CONTROL_PLANE_ENDPOINT_IP
  # Run the heartbeat to tell boskos periodically that we are still
  # using the checked out account.
  docker run gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest heartbeat --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-name="${BOSKOS_RESOURCE_NAME}" >>"${ARTIFACTS}/boskos-heartbeat.log" 2>&1 &
  HEART_BEAT_PID=$!
else
  echo "error getting vsphere project from Boskos, BOSKOS_HOST not set" 1>&2
  exit 1
fi

# build cloud-provider-vsphere image and save it as tarball
CPI_IMAGE_NAME="gcr.io/k8s-staging-cloud-pv-vsphere/cloud-provider-vsphere"
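
For context, boskosctl acquire writes the checked-out project's details to stdout as shell variable assignments, which the script captures in account_env_var_file and then sources. The sketch below shows roughly what the script expects to find there; the variable names come from the exports above, while the values and the exact file format are illustrative assumptions:

# Hypothetical contents of "${account_env_var_file}" (placeholder values, not real resources).
BOSKOS_RESOURCE_NAME="vsphere-project-cloud-provider-042"
BOSKOS_RESOURCE_FOLDER="/SDDC-Datacenter/vm/prow/vsphere-project-cloud-provider-042"
BOSKOS_RESOURCE_POOL="/SDDC-Datacenter/host/Cluster-1/Resources/prow/vsphere-project-cloud-provider-042"
BOSKOS_RESOURCE_IP_POOL_IP_0="192.168.32.10"   # exported as CONTROL_PLANE_ENDPOINT_IP
BOSKOS_RESOURCE_IP_POOL_IP_1="192.168.32.11"   # exported as WORKLOAD_CONTROL_PLANE_ENDPOINT_IP

After sourcing the file, the script maps these values into VSPHERE_FOLDER, VSPHERE_RESOURCE_POOL, and the two control-plane endpoint IPs, keeps the lease alive with the backgrounded heartbeat process, and releases the project again in on_exit.
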
2 changes: 1 addition & 1 deletion test/e2e/config/vsphere-ci.yaml
@@ -89,7 +89,7 @@ variables:
  VSPHERE_RESOURCE_POOL: "/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cloud-provider-vsphere"
  VSPHERE_DATASTORE: "WorkloadDatastore"
  VSPHERE_STORAGE_POLICY: "Cluster API vSphere Storage Policy"
-  VSPHERE_NETWORK: "sddc-cgw-network-6"
+  VSPHERE_NETWORK: "sddc-cgw-network-10"
  VSPHERE_TEMPLATE: "ubuntu-2204-kube-v1.30.0"
  FLATCAR_VSPHERE_TEMPLATE: "flatcar-stable-3815.2.2-kube-v1.30.0"
  VSPHERE_INSECURE_CSI: "true"
