diff --git a/README.md b/README.md
index 89115ef4..69a84084 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,48 @@
-# cicd-tools
-Repository containing our pr check enhancement tools.
+Utilities used to run smoke tests in an ephemeral environment within a CI/CD pipeline
+
+
+See the `examples` directory for a pr_check template and some unit test templates.
+# Scripts
+
+## bootstrap.sh
+
+Clone bonfire into the workspace, set up a Python venv, modify PATH, log in to container registries, log in to Kube/OCP, and set env vars used by the following scripts.
+
+## build.sh
+
+Using docker (on RHEL7) or podman (otherwise), build, tag, and push an image to the Quay and Red Hat registries.
+
+If the script execution was triggered by a GitHub or GitLab PR/MR, tag the image with `pr-123-SHA` and `pr-123-latest`; otherwise use a short SHA of the target repo's HEAD.
+
+## deploy_ephemeral_db.sh
+
+Deploy using `bonfire process` and `oc apply`, removing dependencies and setting up database env vars.
+
+## deploy_ephemeral_env.sh
+
+Deploy into an ephemeral namespace using `bonfire deploy`, specifying the app, component, and relevant image tag args. Passes `EXTRA_DEPLOY_ARGS`, which can be set by the caller via `pr_check.sh`.
+
+## cji_smoke_test.sh
+
+Run the iqe-tests container for the relevant app plugin using `bonfire deploy-iqe-cji`. Waits for tests to complete and fetches artifacts using minio.
+
+## post_test_results.sh
+
+Using artifacts fetched by `cji_smoke_test.sh`, add a GitHub status or GitLab comment linking to the relevant test results in Ibutsu.
+
+## smoke_test.sh
+
+DEPRECATED, use `cji_smoke_test.sh`
+
+## iqe_pod
+
+DEPRECATED, use `cji_smoke_test.sh`
+
+
+# Contributing
+
+Suggested method for testing changes to these scripts:
+- Modify `bootstrap.sh` to `git clone` your fork and branch of bonfire (see the example below).
+- Open a PR in a repo that uses bonfire pr_checks and the relevant scripts, modifying its `pr_check` script to clone your fork and branch of bonfire.
+- Observe the modified scripts running in the relevant CI/CD pipeline.
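+
+For example, rather than hard-coding your fork in the clone command, you can export the env vars that `bootstrap.sh` already reads to pick the bonfire org and branch. A minimal sketch (the fork and branch names here are hypothetical), placed near the top of the consuming repo's `pr_check.sh` before it fetches and sources `bootstrap.sh`:
+
+```sh
+# hypothetical fork/branch used while testing changes to these scripts
+export BONFIRE_REPO_ORG="my-github-user"        # default: RedHatInsights
+export BONFIRE_REPO_BRANCH="my-feature-branch"  # default: master
+```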
+# diff --git a/_common_container_logic.sh b/_common_container_logic.sh new file mode 100644 index 00000000..98d51906 --- /dev/null +++ b/_common_container_logic.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Env vars set by bootstrap.sh: +# DOCKER_CONFIG -- docker conf path + +# Env vars normally supplied by CI environment: +#QUAY_USER +#QUAY_TOKEN +#QUAY_API_TOKEN +#RH_REGISTRY_USER +#RH_REGISTRY_TOKEN + +set -e + +function login { + if test -f /etc/redhat-release && grep -q -i "release 7" /etc/redhat-release; then + # on RHEL7, use docker + docker_login + else + # on RHEL8 or anything else, use podman + podman_login + fi +} + +function docker_login { + set -x + docker login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io + docker login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io + set +x +} + +function podman_login { + podman login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io + podman login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io +} + +if [[ -z "$QUAY_USER" || -z "$QUAY_TOKEN" ]]; then + echo "QUAY_USER and QUAY_TOKEN must be set" + exit 1 +fi + +if [[ -z "$RH_REGISTRY_USER" || -z "$RH_REGISTRY_TOKEN" ]]; then + echo "RH_REGISTRY_USER and RH_REGISTRY_TOKEN must be set" + exit 1 +fi + diff --git a/_common_deploy_logic.sh b/_common_deploy_logic.sh new file mode 100644 index 00000000..28267a34 --- /dev/null +++ b/_common_deploy_logic.sh @@ -0,0 +1,128 @@ +# Env vars caller defines: +#APP_NAME="myapp" # name of app-sre "application" folder this component lives in +#COMPONENT_NAME="mycomponent" # name of app-sre "resourceTemplate" in deploy.yaml for this component +#IMAGE="quay.io/cloudservices/mycomponent" # image that this application uses +#COMPONENTS="component1 component2" # specific components to deploy (optional, default: all) +#COMPONENTS_W_RESOURCES="component1 component2" # components which should preserve resource settings (optional, default: none) +#DEPLOY_TIMEOUT="900" # bonfire deployment timeout parameter in seconds +#RELEASE_NAMESPACE="true" # release namespace after PR check ends (default: true) +#ALWAYS_COLLECT_LOGS="true" # collect logs on teardown even if tests passed (default: false) +#REF_ENV="insights-production" # name of bonfire reference environment (default: insights-production) + +# Env vars set by 'bootstrap.sh': +#IMAGE_TAG="abcd123" # image tag for the PR being tested +#GIT_COMMIT="abcd123defg456" # full git commit hash of the PR being tested +#ARTIFACTS_DIR -- directory where test run artifacts are stored + +add_cicd_bin_to_path + +function trap_proxy { + # https://stackoverflow.com/questions/9256644/identifying-received-signal-name-in-bash + func="$1"; shift + for sig; do + trap "$func $sig" "$sig" + done +} + +trap_proxy teardown EXIT ERR SIGINT SIGTERM + +set -e + +: ${COMPONENTS:=""} +: ${COMPONENTS_W_RESOURCES:=""} +: ${DEPLOY_TIMEOUT:="900"} +: ${REF_ENV:="insights-production"} +: ${RELEASE_NAMESPACE:="true"} +: ${ALWAYS_COLLECT_LOGS:="false"} + +K8S_ARTIFACTS_DIR="$ARTIFACTS_DIR/k8s_artifacts" +TEARDOWN_RAN=0 + +function get_pod_logs() { + local ns=$1 + LOGS_DIR="$K8S_ARTIFACTS_DIR/$ns/logs" + mkdir -p $LOGS_DIR + # get array of pod_name:container1,container2,..,containerN for all containers in all pods + echo "Collecting container logs..." 
+ PODS_CONTAINERS=($(oc_wrapper get pods --ignore-not-found=true -n $ns -o "jsonpath={range .items[*]}{' '}{.metadata.name}{':'}{range .spec['containers', 'initContainers'][*]}{.name}{','}")) + for pc in ${PODS_CONTAINERS[@]}; do + # https://stackoverflow.com/a/4444841 + POD=${pc%%:*} + CONTAINERS=${pc#*:} + for container in ${CONTAINERS//,/ }; do + oc_wrapper logs $POD -c $container -n $ns > $LOGS_DIR/${POD}_${container}.log 2> /dev/null || continue + oc_wrapper logs $POD -c $container --previous -n $ns > $LOGS_DIR/${POD}_${container}-previous.log 2> /dev/null || continue + done + done +} + +function collect_k8s_artifacts() { + local ns=$1 + DIR="$K8S_ARTIFACTS_DIR/$ns" + mkdir -p $DIR + get_pod_logs $ns + echo "Collecting events and k8s configs..." + oc_wrapper get events -n $ns --sort-by='.lastTimestamp' > $DIR/oc_get_events.txt + oc_wrapper get all -n $ns -o yaml > $DIR/oc_get_all.yaml + oc_wrapper get clowdapp -n $ns -o yaml > $DIR/oc_get_clowdapp.yaml + oc_wrapper get clowdenvironment env-$ns -o yaml > $DIR/oc_get_clowdenvironment.yaml + oc_wrapper get clowdjobinvocation -n $ns -o yaml > $DIR/oc_get_clowdjobinvocation.yaml +} + +function teardown { + local CAPTURED_SIGNAL="$1" + + add_cicd_bin_to_path + + set +x + [ "$TEARDOWN_RAN" -ne "0" ] && return + echo "------------------------" + echo "----- TEARING DOWN -----" + echo "------------------------" + local ns + + echo "Tear down operation triggered by signal: $CAPTURED_SIGNAL" + + # run teardown on all namespaces possibly reserved in this run + RESERVED_NAMESPACES=("${NAMESPACE}" "${DB_NAMESPACE}" "${SMOKE_NAMESPACE}") + # remove duplicates (https://stackoverflow.com/a/13648438) + UNIQUE_NAMESPACES=($(echo "${RESERVED_NAMESPACES[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + + for ns in ${UNIQUE_NAMESPACES[@]}; do + echo "Running teardown for ns: $ns" + set +e + + if [ "$ALWAYS_COLLECT_LOGS" != "true" ] && [ "$CAPTURED_SIGNAL" == "EXIT" ] && check_junit_files "${ARTIFACTS_DIR}/junit-*.xml"; then + echo "No errors or failures detected on JUnit reports, skipping K8s artifacts collection" + else + [ "$ALWAYS_COLLECT_LOGS" != "true" ] && echo "Errors or failures detected, collecting K8s artifacts" + collect_k8s_artifacts $ns + fi + + if [ "${RELEASE_NAMESPACE}" != "false" ]; then + echo "Releasing namespace reservation" + bonfire namespace release $ns -f + fi + set -e + done + TEARDOWN_RAN=1 +} + +function transform_arg { + # transform components to "$1" options for bonfire + options="" + option="$1"; shift; + components="$@" + for c in $components; do + options="$options $option $c" + done + echo "$options" +} + +if [ ! -z "$COMPONENTS" ]; then + export COMPONENTS_ARG=$(transform_arg --component $COMPONENTS) +fi + +if [ ! 
-z "$COMPONENTS_W_RESOURCES" ]; then
+    export COMPONENTS_RESOURCES_ARG=$(transform_arg --no-remove-resources $COMPONENTS_W_RESOURCES)
+fi
diff --git a/bin/check_junit_files b/bin/check_junit_files
new file mode 100755
index 00000000..e06205d5
--- /dev/null
+++ b/bin/check_junit_files
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+
+import logging
+import sys
+from glob import iglob
+from xml.etree.ElementTree import ParseError
+
+from junitparser import JUnitXml
+
+
+def errors_or_failures_found(junit_report):
+    for test_suite in junit_report:
+        if test_suite.errors or test_suite.failures:
+            return True
+    return False
+
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+
+    if len(sys.argv) == 1:
+        raise SystemExit(f"usage: {sys.argv[0]} <glob_path>")
+
+    glob_path = sys.argv[1]
+    report_paths = list(iglob(glob_path))
+
+    if report_paths:
+        junit_xml = JUnitXml()
+        for report_path in report_paths:
+            try:
+                junit_xml += JUnitXml.fromfile(report_path)
+            except ParseError as parse_error:
+                raise SystemExit(f"file {report_path} hit XML parse error: {parse_error}")
+
+        if errors_or_failures_found(junit_xml):
+            sys.exit(1)
+    else:
+        raise SystemExit(f"no junit artifacts found for '{glob_path}'")
diff --git a/bin/oc_wrapper b/bin/oc_wrapper
new file mode 100755
index 00000000..0e9aefb2
--- /dev/null
+++ b/bin/oc_wrapper
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+import logging
+import sys
+
+from ocviapy import oc
+from sh import ErrorReturnCode
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    args = sys.argv[1:]
+    try:
+        oc(*args, _silent=True, _print=True, _in=sys.stdin)
+    except ErrorReturnCode as err:
+        sys.exit(err.exit_code)
diff --git a/bootstrap.sh b/bootstrap.sh
new file mode 100644
index 00000000..81ea210f
--- /dev/null
+++ b/bootstrap.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+set -e
+
+# check that unit_test.sh complies w/ best practices
+URL="https://github.com/RedHatInsights/bonfire/tree/master/cicd/examples"
+if test -f unit_test.sh; then
+    if grep 'exit $result' unit_test.sh; then
+        echo "----------------------------"
+        echo "ERROR: unit_test.sh is calling 'exit' improperly, refer to examples at $URL"
+        echo "----------------------------"
+        exit 1
+    fi
+fi
+
+export APP_ROOT=$(pwd)
+export WORKSPACE=${WORKSPACE:-$APP_ROOT} # if running in jenkins, use the build's workspace
+export BONFIRE_ROOT=${WORKSPACE}/.bonfire
+export CICD_ROOT=${BONFIRE_ROOT}/cicd
+export IMAGE_TAG=$(git rev-parse --short=7 HEAD)
+export BONFIRE_BOT="true"
+export BONFIRE_NS_REQUESTER="${JOB_NAME}-${BUILD_NUMBER}"
+# which branch to fetch cicd scripts from in bonfire repo
+export BONFIRE_REPO_BRANCH="${BONFIRE_REPO_BRANCH:-master}"
+export BONFIRE_REPO_ORG="${BONFIRE_REPO_ORG:-RedHatInsights}"
+
+set -x
+# Set up docker cfg
+export DOCKER_CONFIG="$WORKSPACE/.docker"
+rm -fr $DOCKER_CONFIG
+mkdir $DOCKER_CONFIG
+
+# Set up podman cfg
+# No longer needed due to podman now using the DOCKER_CONFIG
+#AUTH_CONF_DIR="$WORKSPACE/.podman"
+#rm -fr $AUTH_CONF_DIR
+#mkdir $AUTH_CONF_DIR
+#export REGISTRY_AUTH_FILE="$AUTH_CONF_DIR/auth.json"
+
+# Set up kube cfg
+export KUBECONFIG_DIR="$WORKSPACE/.kube"
+export KUBECONFIG="$KUBECONFIG_DIR/config"
+rm -fr $KUBECONFIG_DIR
+mkdir $KUBECONFIG_DIR
+set +x
+
+# if this is a PR, use a different tag, since PR tags expire
+if [ ! -z "$ghprbPullId" ]; then
+    export IMAGE_TAG="pr-${ghprbPullId}-${IMAGE_TAG}"
+fi
+
+if [ !
-z "$gitlabMergeRequestIid" ]; then + export IMAGE_TAG="pr-${gitlabMergeRequestIid}-${IMAGE_TAG}" +fi + + +export GIT_COMMIT=$(git rev-parse HEAD) +export ARTIFACTS_DIR="$WORKSPACE/artifacts" + +rm -fr $ARTIFACTS_DIR && mkdir -p $ARTIFACTS_DIR + +# TODO: create custom jenkins agent image that has a lot of this stuff pre-installed +export LANG=en_US.utf-8 +export LC_ALL=en_US.utf-8 + +python3 -m venv .bonfire_venv +source .bonfire_venv/bin/activate + +pip install --upgrade pip 'setuptools<58' wheel +pip install --upgrade 'crc-bonfire>=4.10.4' + +# clone repo to download cicd scripts +rm -fr $BONFIRE_ROOT +echo "Fetching branch '$BONFIRE_REPO_BRANCH' of https://github.com/${BONFIRE_REPO_ORG}/bonfire.git" +git clone --branch "$BONFIRE_REPO_BRANCH" "https://github.com/${BONFIRE_REPO_ORG}/bonfire.git" "$BONFIRE_ROOT" + +# Do a docker login to ensure our later 'docker pull' calls have an auth file created +source ${CICD_ROOT}/_common_container_logic.sh +login + +# Gives access to helper commands such as "oc_wrapper" +add_cicd_bin_to_path() { + if ! command -v oc_wrapper; then export PATH=$PATH:${CICD_ROOT}/bin; fi +} + +check_available_server() { + echo "Checking connectivity to ephemeral cluster ..." + (curl -s $OC_LOGIN_SERVER > /dev/null) + RET_CODE=$? + if [ $RET_CODE -ge 1 ]; then echo "Connectivity check failed"; fi + return $RET_CODE +} + +# Hotswap based on availability +login_to_available_server() { + if check_available_server; then + # log in to ephemeral cluster + oc_wrapper login --token=$OC_LOGIN_TOKEN --server=$OC_LOGIN_SERVER + echo "logging in to Ephemeral cluster" + else + # switch to crcd cluster + oc_wrapper login --token=$OC_LOGIN_TOKEN_DEV --server=$OC_LOGIN_SERVER_DEV + echo "logging in to CRCD cluster" + fi +} + +add_cicd_bin_to_path + +login_to_available_server diff --git a/build.sh b/build.sh new file mode 100644 index 00000000..ca74d6c8 --- /dev/null +++ b/build.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# Env vars caller defines: +#IMAGE="quay.io/myorg/myapp" -- docker image URI to push to +#DOCKERFILE=Dockerfile.custom -- dockerfile to use (optional) +#CACHE_FROM_LATEST_IMAGE=true -- build image from cache from latest image (optional) +: ${QUAY_EXPIRE_TIME:="3d"} # sets a time to expire from when the image is built + +# Env vars set by bootstrap.sh: +#IMAGE_TAG="abcd123" -- image tag to push to +#APP_ROOT="/path/to/app/root" -- path to the cloned app repo + +# Env vars for local use +IMAGE_TAG_OPTS="-t ${IMAGE}:${IMAGE_TAG}" +set -e + +source ${CICD_ROOT}/_common_container_logic.sh + +function build { + if [ ! -f "$APP_ROOT/$DOCKERFILE" ]; then + echo "ERROR: No $DOCKERFILE found" + exit 1 + fi + + # if this is a PR, set the tag to expire in 3 days + if [ ! -z "$ghprbPullId" ] || [ ! 
-z "$gitlabMergeRequestIid" ]; then + echo "LABEL quay.expires-after=${QUAY_EXPIRE_TIME}" >> $APP_ROOT/$DOCKERFILE + # use IMAGE_TAG_LATEST later to detect extra tag to push + IMAGE_TAG_LATEST="$(cut -d "-" -f 1,2 <<< $IMAGE_TAG)-latest" + IMAGE_TAG_OPTS+=" -t ${IMAGE}:${IMAGE_TAG_LATEST}" + fi + + if test -f /etc/redhat-release && grep -q -i "release 7" /etc/redhat-release; then + # on RHEL7, use docker + docker_build + else + # on RHEL8 or anything else, use podman + podman_build + fi +} + +function docker_build { + if [ "$CACHE_FROM_LATEST_IMAGE" == "true" ]; then + echo "Attempting to build image using cache" + { + set -x + docker pull "${IMAGE}" && + docker build $IMAGE_TAG_OPTS $APP_ROOT -f $APP_ROOT/$DOCKERFILE --cache-from "${IMAGE}" + set +x + } || { + echo "Build from cache failed, attempting build without cache" + set -x + docker build $IMAGE_TAG_OPTS $APP_ROOT -f $APP_ROOT/$DOCKERFILE + set +x + } + else + set -x + docker build $IMAGE_TAG_OPTS $APP_ROOT -f $APP_ROOT/$DOCKERFILE + set +x + fi + set -x + + docker push "${IMAGE}:${IMAGE_TAG}" + if [ ! -z "$IMAGE_TAG_LATEST" ]; then + docker push "${IMAGE}:${IMAGE_TAG_LATEST}" + fi + set +x +} + +function podman_build { + set -x + podman build -f $APP_ROOT/$DOCKERFILE ${IMAGE_TAG_OPTS} $APP_ROOT + podman push "${IMAGE}:${IMAGE_TAG}" + if [ ! -z "$IMAGE_TAG_LATEST" ]; then + podman push "${IMAGE}:${IMAGE_TAG_LATEST}" + fi + set +x +} + + +: ${DOCKERFILE:="Dockerfile"} +: ${CACHE_FROM_LATEST_IMAGE:="false"} + +# Login to registry with podman/docker +login + +if [[ $IMAGE == quay.io/* ]]; then + # if using quay, check to see if this tag already exists + echo "checking if image '$IMAGE:$IMAGE_TAG' already exists in quay.io..." + QUAY_REPO=${IMAGE#"quay.io/"} + RESPONSE=$( \ + curl -Ls -H "Authorization: Bearer $QUAY_API_TOKEN" \ + "https://quay.io/api/v1/repository/$QUAY_REPO/tag/?specificTag=$IMAGE_TAG" \ + ) + # find all non-expired tags + VALID_TAGS_LENGTH=$(echo $RESPONSE | jq '[ .tags[] | select(.end_ts == null) ] | length') + if [[ "$VALID_TAGS_LENGTH" -gt 0 ]]; then + echo "$IMAGE:$IMAGE_TAG already present in quay, not rebuilding" + else + # image does not yet exist, build and push it + build + fi +else + # if not pushing to quay, always build + build +fi diff --git a/cji_smoke_test.sh b/cji_smoke_test.sh new file mode 100644 index 00000000..24a630cf --- /dev/null +++ b/cji_smoke_test.sh @@ -0,0 +1,146 @@ +# Run smoke tests as a ClowdJobInvocation deployed by bonfire + +# Env vars defined by caller: +#COMPONENT_NAME -- name of ClowdApp to run tests against / app-sre "resourceTemplate" +#IQE_CJI_TIMEOUT="10m" -- timeout value to pass to 'oc wait', should be slightly higher than expected test run time +#IQE_MARKER_EXPRESSION="something AND something_else" -- pytest marker, can be "" if no filter desired +#IQE_FILTER_EXPRESSION="something AND something_else" -- pytest filter, can be "" if no filter desired +#IQE_IMAGE_TAG="something" -- image tag to use for IQE pod, leave unset to use ClowdApp's iqePlugin value +#IQE_REQUIREMENTS="something,something_else" -- iqe requirements filter, can be "" if no filter desired +#IQE_REQUIREMENTS_PRIORITY="something,something_else" -- iqe requirements filter, can be "" if no filter desired +#IQE_TEST_IMPORTANCE="something,something_else" -- iqe test importance filter, can be "" if no filter desired +#IQE_PLUGINS="plugin1,plugin2" -- IQE plugins to run tests for, leave unset to use ClowdApp's iqePlugin value +#IQE_ENV="something" -- value to set for ENV_FOR_DYNACONF, default is "clowder_smoke" 
+#IQE_SELENIUM="true" -- whether to run IQE pod with a selenium container, default is "false" + +#NAMESPACE="mynamespace" -- namespace to deploy iqe pod into, usually already set by 'deploy_ephemeral_env.sh' + +# Env vars set by 'bootstrap.sh': +#ARTIFACTS_DIR -- directory where test run artifacts are stored + +# In order for the deploy-iqe-cji to run correctly, we must set the marker and filter to "" if they +# are not already set by caller +# https://unix.stackexchange.com/questions/122845/using-a-b-for-variable-assignment-in-scripts/122848#122848 +set -e + +: "${IQE_MARKER_EXPRESSION:='""'}" +: "${IQE_FILTER_EXPRESSION:='""'}" +: "${IQE_IMAGE_TAG:='""'}" +: "${IQE_REQUIREMENTS:='""'}" +: "${IQE_REQUIREMENTS_PRIORITY:='""'}" +: "${IQE_TEST_IMPORTANCE:='""'}" +: "${IQE_PLUGINS:='""'}" +: "${IQE_ENV:=clowder_smoke}" +: "${IQE_SELENIUM:=false}" + + +# minio client is used to fetch test artifacts from minio in the ephemeral ns +MC_IMAGE="quay.io/cloudservices/mc:latest" +echo "Running: docker pull ${MC_IMAGE}" +docker pull ${MC_IMAGE} + +CJI_NAME="$COMPONENT_NAME" + +if [[ -z $IQE_CJI_TIMEOUT ]]; then + echo "Error: no timeout set; export IQE_CJI_TIMEOUT before invoking cji_smoke_test.sh" + exit 1 +fi + +SELENIUM_ARG="" +if [ "$IQE_SELENIUM" = "true" ]; then + SELENIUM_ARG=" --selenium " +fi + +# Invoke the CJI using the options set via env vars +set -x +POD=$( + bonfire deploy-iqe-cji $COMPONENT_NAME \ + --marker "$IQE_MARKER_EXPRESSION" \ + --filter "$IQE_FILTER_EXPRESSION" \ + --image-tag "${IQE_IMAGE_TAG}" \ + --requirements "$IQE_REQUIREMENTS" \ + --requirements-priority "$IQE_REQUIREMENTS_PRIORITY" \ + --test-importance "$IQE_TEST_IMPORTANCE" \ + --plugins "$IQE_PLUGINS" \ + --env "$IQE_ENV" \ + --cji-name $CJI_NAME \ + $SELENIUM_ARG \ + --namespace $NAMESPACE) +set +x + +# Pipe logs to background to keep them rolling in jenkins +CONTAINER=$(oc_wrapper get pod $POD -n $NAMESPACE -o jsonpath="{.status.containerStatuses[0].name}") +oc_wrapper logs -n $NAMESPACE $POD -c $CONTAINER -f & + +# Wait for the job to Complete or Fail before we try to grab artifacts +# condition=complete does trigger when the job fails +set -x +oc_wrapper wait --timeout=$IQE_CJI_TIMEOUT --for=condition=JobInvocationComplete -n $NAMESPACE cji/$CJI_NAME +set +x + +# Set up port-forward for minio +set -x +LOCAL_SVC_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +oc_wrapper port-forward svc/env-$NAMESPACE-minio $LOCAL_SVC_PORT:9000 -n $NAMESPACE & +set +x +sleep 5 +PORT_FORWARD_PID=$! + +# Get the secret from the env +set -x +oc_wrapper get secret env-$NAMESPACE-minio -o json -n $NAMESPACE | jq -r '.data' > minio-creds.json +set +x + +# Grab the needed creds from the secret +export MINIO_ACCESS=$(jq -r .accessKey < minio-creds.json | base64 -d) +export MINIO_SECRET_KEY=$(jq -r .secretKey < minio-creds.json | base64 -d) +export MINIO_HOST=localhost +export MINIO_PORT=$LOCAL_SVC_PORT + +if [ -z "$MINIO_ACCESS" ] || [ -z "$MINIO_SECRET_KEY" ] || [ -z "$MINIO_PORT" ]; then + echo "Failed to fetch minio connection info when running 'oc' commands" + exit 1 +fi + +# Setup the minio client to auth to the local eph minio in the ns +echo "Fetching artifacts from minio..." 
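+# Note: the mc container below mirrors the CJI pod's minio bucket ("${POD}-artifacts") into its
+# /artifacts dir, and 'docker cp' then copies those files out to $ARTIFACTS_DIR on the host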
+ +CONTAINER_NAME="mc-${JOB_NAME}-${BUILD_NUMBER}" +BUCKET_NAME="${POD}-artifacts" +CMD="mkdir -p /artifacts && +mc --no-color --quiet alias set minio http://${MINIO_HOST}:${MINIO_PORT} ${MINIO_ACCESS} ${MINIO_SECRET_KEY} && +mc --no-color --quiet mirror --overwrite minio/${BUCKET_NAME} /artifacts/ +" + +run_mc () { + echo "running: docker run -t --net=host --name=$CONTAINER_NAME --entrypoint=\"/bin/sh\" $MC_IMAGE -c \"$CMD\"" + set +e + docker run -t --net=host --name=$CONTAINER_NAME --entrypoint="/bin/sh" $MC_IMAGE -c "$CMD" + RET_CODE=$? + docker cp $CONTAINER_NAME:/artifacts/. $ARTIFACTS_DIR + docker rm $CONTAINER_NAME + set -e + return $RET_CODE +} + +# Add retry logic for intermittent minio connection failures +MINIO_SUCCESS=false +for i in $(seq 1 5); do + if run_mc; then + MINIO_SUCCESS=true + break + else + if [ "$i" -lt "5" ]; then + echo "WARNING: minio artifact copy failed, retrying in 5sec..." + sleep 5 + fi + fi +done + +if [ "$MINIO_SUCCESS" = false ]; then + echo "ERROR: minio artifact copy failed" + exit 1 +fi + +echo "copied artifacts from iqe pod: " +ls -l $ARTIFACTS_DIR diff --git a/deploy_ephemeral_db.sh b/deploy_ephemeral_db.sh new file mode 100644 index 00000000..0188f381 --- /dev/null +++ b/deploy_ephemeral_db.sh @@ -0,0 +1,57 @@ +# Reserve a namespace, deploy your app without dependencies just to get a DB set up +# Stores database env vars + +source ${CICD_ROOT}/_common_deploy_logic.sh + +# the db that the unit test relies on can be set before 'source'ing this script via +# DB_DEPLOYMENT_NAME -- by default it is '-db' +DB_DEPLOYMENT_NAME="${DB_DEPLOYMENT_NAME:-$COMPONENT_NAME-db}" + +# Deploy k8s resources for app without its dependencies +export BONFIRE_NS_REQUESTER="${JOB_NAME}-${BUILD_NUMBER}-db" +NAMESPACE=$(bonfire namespace reserve) +DB_NAMESPACE=$NAMESPACE # track which namespace was used here for 'teardown' in common_deploy_logic +# TODO: add code to bonfire to deploy an app if it is defined in 'sharedAppDbName' on the ClowdApp +# TODO: add a bonfire command to deploy just an app's DB +set -x +bonfire process \ + $APP_NAME \ + --source=appsre \ + --ref-env ${REF_ENV} \ + --set-template-ref ${COMPONENT_NAME}=${GIT_COMMIT} \ + --set-image-tag $IMAGE=$IMAGE_TAG \ + --namespace $NAMESPACE \ + --optional-deps-method none \ + $COMPONENTS_ARG \ + $COMPONENTS_RESOURCES_ARG | oc_wrapper apply -f - -n $NAMESPACE + +bonfire namespace wait-on-resources $NAMESPACE --db-only + +# Set up port-forward for DB +LOCAL_DB_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()') +oc_wrapper port-forward svc/$DB_DEPLOYMENT_NAME $LOCAL_DB_PORT:5432 -n $NAMESPACE & +PORT_FORWARD_PID=$! 
+ +# Store database access info to env vars +oc_wrapper get secret $COMPONENT_NAME -o json -n $NAMESPACE | jq -r '.data["cdappconfig.json"]' | base64 -d | jq .database > db-creds.json +set +x + +export DATABASE_NAME=$(jq -r .name < db-creds.json) +export DATABASE_ADMIN_USERNAME=$(jq -r .adminUsername < db-creds.json) +export DATABASE_ADMIN_PASSWORD=$(jq -r .adminPassword < db-creds.json) +export DATABASE_USER=$(jq -r .username < db-creds.json) +export DATABASE_PASSWORD=$(jq -r .password < db-creds.json) +export DATABASE_HOST=localhost +export DATABASE_PORT=$LOCAL_DB_PORT + +if [ -z "$DATABASE_NAME" ]; then + echo "DATABASE_NAME is null, error with ephemeral env / clowder config, exiting" + exit 1 +else + echo "DB_DEPLOYMENT_NAME: ${DB_DEPLOYMENT_NAME}" + echo "DATABASE_NAME: ${DATABASE_NAME}" +fi + +# If we got here, the DB came up successfully, clear the k8s artifacts dir in case +# 'source deploy_ephemeral_env.sh' is called later +rm -f $K8S_ARTIFACTS_DIR diff --git a/deploy_ephemeral_env.sh b/deploy_ephemeral_env.sh new file mode 100644 index 00000000..f1529d8d --- /dev/null +++ b/deploy_ephemeral_env.sh @@ -0,0 +1,37 @@ +source ${CICD_ROOT}/_common_deploy_logic.sh + +# Caller can define any extra deploy arguments to be passed to bonfire +: ${EXTRA_DEPLOY_ARGS:=""} + +# Caller can specify the type of pool to use +: ${NAMESPACE_POOL:="default"} + +# Caller can alter the default dependency fetching method if desired +: ${OPTIONAL_DEPS_METHOD:="hybrid"} + +# Whether or not to deploy frontends (default: false) +: ${DEPLOY_FRONTENDS:="false"} + +# Deploy k8s resources for app and its dependencies (use insights-stage instead of insights-production for now) +# -> use this PR as the template ref when downloading configurations for this component +# -> use this PR's newly built image in the deployed configurations +set -x +export BONFIRE_NS_REQUESTER="${JOB_NAME}-${BUILD_NUMBER}" +export NAMESPACE=$(bonfire namespace reserve --pool ${NAMESPACE_POOL}) +SMOKE_NAMESPACE=$NAMESPACE # track which namespace was used here for 'teardown' in common_deploy_logic + + +bonfire deploy \ + ${APP_NAME} \ + --source=appsre \ + --ref-env ${REF_ENV} \ + --set-template-ref ${COMPONENT_NAME}=${GIT_COMMIT} \ + --set-image-tag ${IMAGE}=${IMAGE_TAG} \ + --namespace ${NAMESPACE} \ + --timeout ${DEPLOY_TIMEOUT} \ + --optional-deps-method ${OPTIONAL_DEPS_METHOD} \ + --frontends ${DEPLOY_FRONTENDS} \ + ${COMPONENTS_ARG} \ + ${COMPONENTS_RESOURCES_ARG} \ + ${EXTRA_DEPLOY_ARGS} +set +x diff --git a/examples/pr_check_template.sh b/examples/pr_check_template.sh new file mode 100644 index 00000000..c4f6eb11 --- /dev/null +++ b/examples/pr_check_template.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# -------------------------------------------- +# Options that must be configured by app owner +# -------------------------------------------- +APP_NAME="CHANGEME" # name of app-sre "application" folder this component lives in +COMPONENT_NAME="CHANGEME" # name of app-sre "resourceTemplate" in deploy.yaml for this component +IMAGE="quay.io/cloudservices/CHANGEME" # image location on quay + +IQE_PLUGINS="CHANGEME" # name of the IQE plugin for this app. 
+IQE_MARKER_EXPRESSION="CHANGEME" # This is the value passed to pytest -m
+IQE_FILTER_EXPRESSION="" # This is the value passed to pytest -k
+IQE_CJI_TIMEOUT="30m" # This is the time to wait for smoke test to complete or fail
+
+
+# Install bonfire repo/initialize
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/bootstrap.sh
+# This script automates the install / config of bonfire
+CICD_URL=https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd
+curl -s $CICD_URL/bootstrap.sh > .cicd_bootstrap.sh && source .cicd_bootstrap.sh
+
+# The contents of build.sh can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/build.sh
+# This script is used to build the image that is used in the PR Check
+source $CICD_ROOT/build.sh
+
+# Your APP's unit tests should be run in the unit_test.sh script. Two different
+# examples of unit_test.sh are provided in:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/examples/
+#
+# One of these scripts should be chosen based on your APP's architecture, modified, and placed
+# in your APP's git repository. The ephemeral DB example is for when the unit tests require a
+# real DB, the other is for a more traditional unit test where everything runs self-contained.
+#
+# One thing to note is that the unit test run results are expected to be in a junit XML format,
+# in the examples we demonstrate how to create a 'dummy result file' as a temporary work-around.
+source $APP_ROOT/unit_test.sh
+
+# The contents of this script can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/deploy_ephemeral_env.sh
+# This script is used to deploy the ephemeral environment for smoke tests.
+# The manual steps for this can be found in:
+# https://internal.cloud.redhat.com/docs/devprod/ephemeral/02-deploying/
+source $CICD_ROOT/deploy_ephemeral_env.sh
+
+# (DEPRECATED!) Run smoke tests using smoke_test.sh
+#
+# The contents of this script can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/smoke_test.sh
+# This script is used to run the smoke tests for a given APP. The ENV VARs
+# defined at the top in the "Options that must be configured by app owner" section
+# will control the behavior of the test.
+#source $CICD_ROOT/smoke_test.sh
+
+# Run smoke tests using a ClowdJobInvocation (preferred)
+# The contents of this script can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/cji_smoke_test.sh
+source $CICD_ROOT/cji_smoke_test.sh
+
+# Post a comment with test run IDs to the PR
+# The contents of this script can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/post_test_results.sh
+source $CICD_ROOT/post_test_results.sh
diff --git a/examples/unit_test_example.sh b/examples/unit_test_example.sh
new file mode 100644
index 00000000..d89258c8
--- /dev/null
+++ b/examples/unit_test_example.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+# Add your unit test specific code
+export GO111MODULE="on"
+export GOPATH=/var/gopath
+
+# go get stuff...
+
+# If your app requires a 'cdappconfig.json' when running unit tests, create a dummy cdappconfig
+# that has appropriate values. Store this file in your git repo. Example can be found here:
+# https://github.com/RedHatInsights/insights-ingress-go/blob/master/cdappconfig.json
+set +e
+ACG_CONFIG="$(pwd)/cdappconfig.json" go test -v -race -coverprofile=coverage.txt -covermode=atomic ./...
+result=$?
+set -e
+
+# Evaluate the test result.
+
+# If you set up a python virtual env for your tests, move back to the bonfire virtual env...
+source .bonfire_venv/bin/activate
+
+# If your unit tests store junit xml results, you should store them in a file matching format `artifacts/junit-*.xml`
+# If you have no junit file, use the below code to create a 'dummy' result file so Jenkins will not fail
+mkdir -p $ARTIFACTS_DIR
+cat << EOF > $ARTIFACTS_DIR/junit-dummy.xml
+<testsuite tests="1">
+    <testcase classname="dummy" name="dummytest"/>
+</testsuite>
+EOF
+
+if [ $result -ne 0 ]; then
+    echo '====================================='
+    echo '==== ✖ ERROR: UNIT TEST FAILED ===='
+    echo '====================================='
+    exit 1
+fi
diff --git a/examples/unit_test_example_ephemeral_db.sh b/examples/unit_test_example_ephemeral_db.sh
new file mode 100644
index 00000000..f3f1d317
--- /dev/null
+++ b/examples/unit_test_example_ephemeral_db.sh
@@ -0,0 +1,39 @@
+# This script is used to deploy an ephemeral DB for your unit tests to run against
+# This script can be found at:
+# https://raw.githubusercontent.com/RedHatInsights/bonfire/master/cicd/deploy_ephemeral_db.sh
+source $CICD_ROOT/deploy_ephemeral_db.sh
+
+# Here we remap env vars set by `deploy_ephemeral_db.sh`. APPs call the DB ENV VARs
+# by different names; if your env vars do not match what the shell script sets,
+# they should be remapped here.
+export PGPASSWORD=$DATABASE_ADMIN_PASSWORD
+
+# Run the code needed for unit tests, example below ...
+python3 -m venv app-venv
+. app-venv/bin/activate
+pip install --upgrade pip setuptools wheel pipenv tox psycopg2-binary
+set +e
+tox -r
+result=$?
+set -e
+
+# Evaluate the test result.
+
+# Move back out of the app virtual env
+source .bonfire_venv/bin/activate
+
+# If your unit tests store junit xml results, you should store them in a file matching format `artifacts/junit-*.xml`
+# If you have no junit file, use the below code to create a 'dummy' result file so Jenkins will not fail
+mkdir -p $ARTIFACTS_DIR
+cat << EOF > $ARTIFACTS_DIR/junit-dummy.xml
+<testsuite tests="1">
+    <testcase classname="dummy" name="dummytest"/>
+</testsuite>
+EOF
+
+if [ $result -ne 0 ]; then
+    echo '====================================='
+    echo '==== ✖ ERROR: UNIT TEST FAILED ===='
+    echo '====================================='
+    exit 1
+fi
diff --git a/iqe_pod/create_iqe_pod.py b/iqe_pod/create_iqe_pod.py
new file mode 100644
index 00000000..62c66ac0
--- /dev/null
+++ b/iqe_pod/create_iqe_pod.py
@@ -0,0 +1,215 @@
+import base64
+import json
+import logging
+import os
+import sys
+
+import click
+import yaml
+from env_parser import EnvParser
+from ocviapy import oc, wait_for_ready
+
+from bonfire.utils import split_equals
+
+SECRET_NAME = "iqe-settings"
+
+
+def _get_base_pod_cfg():
+    iqe_image = os.getenv("IQE_IMAGE", "quay.io/cloudservices/iqe-tests:latest")
+    return {
+        "apiVersion": "v1",
+        "kind": "Pod",
+        "metadata": {"name": "iqe-tests"},
+        "spec": {
+            "serviceAccountName": "iqe",
+            "containers": [
+                {
+                    "command": ["/bin/cat"],
+                    "image": iqe_image,
+                    "imagePullPolicy": "Always",
+                    "name": "iqe-tests",
+                    "resources": {
+                        "limits": {"cpu": "1", "memory": "2Gi"},
+                        "requests": {"cpu": "500m", "memory": "1Gi"},
+                    },
+                    "stdin": True,
+                    "tty": True,
+                    "env": [{"name": "IQE_TESTS_LOCAL_CONF_PATH", "value": "/iqe_settings"}],
+                    "volumeMounts": [{"mountPath": "/iqe_settings", "name": "iqe-settings-volume"}],
+                }
+            ],
+            "imagePullSecrets": [{"name": "quay-cloudservices-pull"}],
+            "restartPolicy": "Never",
+            "volumes": [
+                {
+                    "name": "iqe-settings-volume",
+                    "secret": {"defaultMode": 420, "secretName": SECRET_NAME},
+                }
+            ],
+        },
+    }
+
+
+def _build_test_conf(env_parser):
+
conf = {} + env_name = "clowder_smoke" + env_conf = conf[env_name] = {} + + # mq plugin configuration is now present in the plugin's settings.default.yaml + + # ingress configuration is now present in the plugin's settings.default.yaml + + # host-inventory configuration is now present in the plugin's settings.default.yaml + + if env_parser.app_present("marketplace"): + mp_storage_cfg = env_parser.get_storage_config("marketplace") + bucket = env_parser.get_bucket_config("marketplace", "marketplace-s3") + env_conf["MARKETPLACE"] = { + "aws_access_key_id": bucket.accessKey, + "aws_secret_access_key": bucket.secretKey, + "aws_s3_endpoint": f"{mp_storage_cfg.hostname}:{mp_storage_cfg.port}", + "aws_s3_bucket": bucket.name, + "aws_s3_secure": "false", + "service_objects": { + "api_v1": { + "config": { + "hostname": env_parser.get_hostname("ingress", "ingress-service"), + "port": env_parser.get_port("ingress", "ingress-service"), + "scheme": "http", + } + } + }, + } + + if env_parser.app_present("advisor"): + env_conf["ADVISOR"] = { + "kafka_dc_name": env_parser.get_kafka_hostname("advisor").split(".")[0], + "db_dc_name": "advisor-db", + "service_dc_name": "advisor-service", + "api_dc_name": "advisor-api", + "upload_dc_name": "ingress-service", + "pup_dc_name": "puptoo-processor", + "kafka_dc_port": env_parser.get_kafka_port("advisor"), + "engine_results_topic": env_parser.get_kafka_topic( + "advisor", "platform.engine.results" + ), + "inventory_events_topic": env_parser.get_kafka_topic( + "advisor", "platform.legacy-bridge.events" + ), + "payload_tracker_topic": env_parser.get_kafka_topic( + "advisor", "platform.payload-status" + ), + "kafka_hooks_topic": env_parser.get_kafka_topic("advisor", "hooks.outbox"), + "db_hostname": env_parser.get_db_config("advisor").hostname, + "db_database": env_parser.get_db_config("advisor").name, + "db_username": env_parser.get_db_config("advisor").username, + "db_password": env_parser.get_db_config("advisor").password, + "db_port": env_parser.get_db_config("advisor").port, + "service_objects": { + "api": { + "config": { + "hostname": env_parser.get_hostname("advisor", "api"), + "port": env_parser.get_port("advisor", "api"), + "scheme": "http", + } + } + }, + } + if env_parser.app_present("rbac"): + hostname = env_parser.get_hostname("rbac", "service") + port = env_parser.get_port("rbac", "service") + env_conf["ADVISOR"][ + "rbac_url" + ] = f"http://{hostname}:{port}/api/rbac/v1/access/?application=advisor" + if env_parser.app_present("host-inventory"): + env_conf["ADVISOR"]["egress_topic"] = ( + env_parser.get_kafka_topic("host-inventory", "platform.inventory.host-egress"), + ) + + if env_parser.app_present("playbook-dispatcher"): + env_conf["RHC"] = { + "kafka": { + "playbook_validation_topic": env_parser.get_kafka_topic( + "playbook-dispatcher", "platform.upload.validation" + ), + }, + "service_objects": { + "playbook_dispatcher_api_v1": { + "config": { + "hostname": env_parser.get_hostname( + "playbook-dispatcher", "playbook-dispatcher-api" + ), + "port": env_parser.get_port( + "playbook-dispatcher", "playbook-dispatcher-api" + ), + "scheme": "http", + } + }, + "playbook_dispatcher_internal_api_v1": { + "config": { + "hostname": env_parser.get_hostname( + "playbook-dispatcher", "playbook-dispatcher-api" + ), + "port": env_parser.get_port( + "playbook-dispatcher", "playbook-dispatcher-api" + ), + "scheme": "http", + } + }, + }, + } + + return conf + + +def _create_conf_secret(namespace): + env_parser = EnvParser(namespace) + conf_data = 
_build_test_conf(env_parser) + encoded_conf = base64.b64encode(yaml.dump(conf_data).encode()).decode() + secret = { + "apiVersion": "v1", + "kind": "Secret", + "metadata": {"name": SECRET_NAME}, + "data": {"settings.local.yaml": encoded_conf}, + } + oc("apply", f="-", n=namespace, _in=json.dumps(secret)) + + +def _create_pod(namespace, pod_name, env): + pod = _get_base_pod_cfg() + + pod["metadata"]["name"] = pod_name + env_vars = split_equals(env, allow_null=True) + if env_vars: + pod_env_vars = pod["spec"]["containers"][0]["env"] + for key, val in env_vars.items(): + if val: + pod_env_vars.append({"name": key, "value": val}) + + oc("apply", f="-", n=namespace, _in=json.dumps(pod)) + + +@click.command(context_settings=dict(help_option_names=["-h", "--help"])) +@click.argument("namespace", type=str, required=True) +@click.option("--pod-name", type=str, default="iqe-tests", help="name of pod (default: iqe-tests)") +@click.option( + "--env", + "-e", + type=str, + multiple=True, + help="Env var to set on container using format KEY=VAL", +) +def main(namespace, pod_name, env): + logging.basicConfig(level=logging.INFO) + logging.getLogger("sh").setLevel(logging.CRITICAL) + + _create_conf_secret(namespace) + _create_pod(namespace, pod_name, env) + + if not wait_for_ready(namespace, "pod", pod_name): + sys.exit(1) + print(pod_name) + + +if __name__ == "__main__": + main() diff --git a/iqe_pod/env_parser.py b/iqe_pod/env_parser.py new file mode 100644 index 00000000..6abc593b --- /dev/null +++ b/iqe_pod/env_parser.py @@ -0,0 +1,122 @@ +""" +Read configurations and status objects from a clowder-managed namespace to get app connection info +""" +import base64 +import json + +from app_common_python.types import AppConfig + +from bonfire.openshift import find_clowd_env_for_ns, get_json + + +class EnvParser: + def __init__(self, namespace): + self.namespace = namespace + self._status_for = {} + self._cdapp_config_for = {} + + def get_clowdenv_status(self, app_name): + if app_name not in self._status_for: + clowd_env = find_clowd_env_for_ns(self.namespace) + if not clowd_env: + raise ValueError( + f"unable to locate ClowdEnvironment associated with ns '{self.namespace}'" + ) + status = clowd_env["status"] + for app in status.get("apps", []): + self._status_for[app["name"]] = app + if app_name not in self._status_for: + raise ValueError(f"app '{app_name}' not found in status") + return self._status_for[app_name] + + def app_present(self, app_name): + try: + self.get_clowdenv_status(app_name) + return True + except ValueError: + return False + + def get_deployment_status(self, app_name, component_name): + status = self.get_clowdenv_status(app_name) + for deployment in status.get("deployments", []): + if deployment["name"] == component_name: + return deployment + raise ValueError(f"no deployment found with name '{component_name}' on app '{app_name}'") + + def get_hostname(self, app_name, component_name): + status = self.get_deployment_status(app_name, component_name) + if "hostname" not in status: + raise ValueError(f"no hostname listed for '{component_name}' on app '{app_name}'") + return status["hostname"] + + def get_port(self, app_name, component_name): + status = self.get_deployment_status(app_name, component_name) + if "port" not in status: + raise ValueError(f"no hostname listed for '{component_name}' on app '{app_name}'") + return status["port"] + + def get_cdapp_config(self, app_name): + if app_name not in self._cdapp_config_for: + secret = get_json("secret", app_name, namespace=self.namespace) + 
if not secret: + raise ValueError(f"secret '{app_name}' not found in namespace") + content = json.loads(base64.b64decode(secret["data"]["cdappconfig.json"])) + self._cdapp_config_for[app_name] = AppConfig.dictToObject(content) + return self._cdapp_config_for[app_name] + + def get_kafka_hostname(self, app_name): + try: + return self.get_cdapp_config(app_name).kafka.brokers[0].hostname + except (IndexError, TypeError): + raise ValueError(f"brokers config not present for app {app_name}") + + def get_kafka_port(self, app_name): + try: + return self.get_cdapp_config(app_name).kafka.brokers[0].port + except (IndexError, TypeError): + raise ValueError(f"brokers config not present for app {app_name}") + + def get_kafka_topic(self, app_name, topic_name): + try: + topics = self.get_cdapp_config(app_name).kafka.topics + except (TypeError): + raise ValueError(f"topics config not present for app {app_name}") + + for topic_config in topics: + if topic_config.requestedName == topic_name: + return topic_config.name + + raise ValueError( + f"no topic config found on app '{app_name}' with requestedName '{topic_name}'" + ) + + def get_db_config(self, app_name): + """ + Return app_common_python.types.DatabaseConfig if it exists for the app + """ + db_config = self.get_cdapp_config(app_name).database + if not db_config: + raise ValueError(f"no database config present for app '{app_name}'") + return db_config + + def get_storage_config(self, app_name): + """ + Return app_common_python.types.ObjectStoreConfig if it exists for the app + """ + config = self.get_cdapp_config(app_name).objectStore + if not config: + raise ValueError(f"no object storage config present for app '{app_name}'") + return config + + def get_bucket_config(self, app_name, bucket_name): + """ + Return app_common_python.types.ObjectStoreBucket for bucket with matching 'requestedName' + """ + buckets = self.get_storage_config(app_name).buckets + for b in buckets: + if b.requestedName == bucket_name: + return b + + raise ValueError( + f"no bucket config found on app '{app_name}' with requestedName '{bucket_name}'" + ) diff --git a/iqe_pod/iqe_runner.sh b/iqe_pod/iqe_runner.sh new file mode 100644 index 00000000..1427d9e5 --- /dev/null +++ b/iqe_pod/iqe_runner.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# This script is intended to be run in an iqe-tests pod + +# Env vars required for this script: + +#IQE_PLUGINS="plugin1,plugin2" +#IQE_MARKER_EXPRESSION="mymarker" +#IQE_FILTER_EXPRESSION="something AND something_else" + +set -ex + +ARTIFACTS_DIR="artifacts" + +mkdir -p $ARTIFACTS_DIR + +# The plugin *should* be pre-installed in the container +#for plugin in $IQE_PLUGINS; do +# iqe plugin install $plugin +#done + +# TODO: add vault env vars +#export ENV_FOR_DYNACONF=smoke + +# TODO: deprecate clowder_smoke env in iqe configs once everything is migrated +export ENV_FOR_DYNACONF=clowder_smoke + +PLUGIN_ARRAY=${IQE_PLUGINS//,/ } + + +set +e # test pass/fail should be determined by analyzing the junit xml artifacts left in the pod + +for plugin in $PLUGIN_ARRAY; do + _marker="" + [ -n "${IQE_MARKER_EXPRESSION}" ] && _marker="and (${IQE_MARKER_EXPRESSION})" + # run tests marked for 'parallel' + marker="parallel ${_marker}" + # Convert image name to plugin name for clowder + plugin=$(echo $plugin | tr "-" "_") + iqe tests plugin ${plugin} \ + --junitxml=${ARTIFACTS_DIR}/junit-${plugin}-parallel.xml \ + -m "${marker}" \ + -k "${IQE_FILTER_EXPRESSION}" \ + -n 2 \ + -s \ + --log-file=${ARTIFACTS_DIR}/iqe-${plugin}-parallel.log 2>&1 + + # run non-parallel tests in 
sequence + marker="not parallel ${_marker}" + iqe tests plugin ${plugin} \ + --junitxml=${ARTIFACTS_DIR}/junit-${plugin}-sequential.xml \ + -m "${marker}" \ + -k "${IQE_FILTER_EXPRESSION}" \ + -s \ + --log-file=${ARTIFACTS_DIR}/iqe-${plugin}-sequential.log 2>&1 +done + +ls -l ${ARTIFACTS_DIR}/ diff --git a/post_test_results.sh b/post_test_results.sh new file mode 100644 index 00000000..904e1fbf --- /dev/null +++ b/post_test_results.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# starting the script +echo "Posting test results" + +# getting archives uuids +UUIDS="$(ls $ARTIFACTS_DIR | grep .tar.gz | sed -e 's/\.tar.gz$//')" + +if [[ -n $UUIDS ]] +then + base_message="Test results are available in Ibutsu" + + # if it is a GitHub PR + if [[ -n $ghprbPullId ]]; then + + # set +e so that if this POST fails, the entire run will not fail + set +e + + # post a status api message for each test run separately + for uuid in $UUIDS; + do + curl \ + -X POST \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Authorization: token ${GITHUB_TOKEN}" \ + -H "Content-Type: application/json; charset=utf-8" \ + ${GITHUB_API_URL}/repos/${ghprbGhRepository}/statuses/${ghprbActualCommit} \ + -d "{\"state\":\"success\",\"target_url\":\"https://url.corp.redhat.com/ibutsu-runs-${uuid}\",\"description\":\"${base_message}\",\"context\":\"ibutsu/run-${uuid}\"}" + done + + set -e + fi + + # if it is a GitLab MR + if [[ -n $gitlabMergeRequestIid ]]; then + # construct the comment message + message="${base_message}:" + for uuid in $UUIDS + do + message="${message}\nhttps://url.corp.redhat.com/ibutsu-runs-${uuid}" + done + + # set +e so that if this POST fails, the entire run will not fail + set +e + + # post a comment to GitLab + curl \ + -X POST \ + -H "PRIVATE-TOKEN: ${GITLAB_TOKEN_IQE_BOT}" \ + -H "Content-Type: application/json; charset=utf-8" \ + ${GITLAB_HOST_IQE_BOT}/api/v4/projects/${gitlabMergeRequestTargetProjectId}/merge_requests/${gitlabMergeRequestIid}/notes \ + -d "{\"body\":\"$message\"}" -v + set -e + fi +fi + +echo "end of posting test results" diff --git a/smoke_test.sh b/smoke_test.sh new file mode 100644 index 00000000..b12d7f28 --- /dev/null +++ b/smoke_test.sh @@ -0,0 +1,41 @@ +# DEPRECATED: please use cji_smoke_test.sh + +# Spin up iqe pod and execute IQE tests in it + +# Env vars defined by caller: +#IQE_PLUGINS="plugin1,plugin2" -- pytest plugins to run separated by "," +#IQE_MARKER_EXPRESSION="mymarker" -- pytest marker expression +#IQE_FILTER_EXPRESSION="something AND something_else" -- pytest filter, can be "" if no filter desired +#NAMESPACE="mynamespace" -- namespace to deploy iqe pod into, can be set by 'deploy_ephemeral_env.sh' + +# Env vars set by 'bootstrap.sh': +#ARTIFACTS_DIR -- directory where test run artifacts are stored + +echo "*** DEPRECATED: 'smoke_test.sh' is deprecated, please switch to 'cji_smoke_test.sh' ***" + +IQE_POD_NAME="iqe-tests" + +add_cicd_bin_to_path + +# create a custom svc acct for the iqe pod to run with that has elevated permissions +SA=$(oc_wrapper get -n $NAMESPACE sa iqe --ignore-not-found -o jsonpath='{.metadata.name}') +if [ -z "$SA" ]; then + oc_wrapper create -n $NAMESPACE sa iqe +fi +oc_wrapper policy -n $NAMESPACE add-role-to-user edit system:serviceaccount:$NAMESPACE:iqe +oc_wrapper secrets -n $NAMESPACE link iqe quay-cloudservices-pull --for=pull,mount + +python $CICD_ROOT/iqe_pod/create_iqe_pod.py $NAMESPACE \ + -e IQE_PLUGINS="$IQE_PLUGINS" \ + -e IQE_MARKER_EXPRESSION="$IQE_MARKER_EXPRESSION" \ + -e IQE_FILTER_EXPRESSION="$IQE_FILTER_EXPRESSION" \ + -e 
ENV_FOR_DYNACONF=smoke \ + -e NAMESPACE=$NAMESPACE + +oc_wrapper cp -n $NAMESPACE $CICD_ROOT/iqe_pod/iqe_runner.sh $IQE_POD_NAME:/iqe_venv/iqe_runner.sh +oc_wrapper exec $IQE_POD_NAME -n $NAMESPACE -- bash /iqe_venv/iqe_runner.sh + +oc_wrapper cp -n $NAMESPACE $IQE_POD_NAME:artifacts/ $ARTIFACTS_DIR + +echo "copied artifacts from iqe pod: " +ls -l $ARTIFACTS_DIR