Adding all files from bonfire to cicd-tools
maknop committed Feb 21, 2023
1 parent d80a32a commit f940660
Showing 18 changed files with 1,361 additions and 2 deletions.
50 changes: 48 additions & 2 deletions README.md
@@ -1,2 +1,48 @@
# cicd-tools
Repository containing our pr check enhancement tools.
Utilities used to run smoke tests in an ephemeral environment within a CI/CD pipeline.


See the examples directory for a pr_check template and some unit test templates.
# Scripts

## bootstrap.sh

Clone bonfire into the workspace, set up a Python venv, modify PATH, log in to container registries and to Kube/OCP, and set envvars used by the following scripts.
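
A minimal sketch of what a consuming repository's `pr_check.sh` might look like. The fetch URL, file name, and variable values here are illustrative assumptions; only `APP_NAME`, `COMPONENT_NAME`, `IMAGE`, and `CICD_ROOT` are names these scripts actually use:

```bash
#!/bin/bash
# Hypothetical pr_check.sh in a consuming repo -- values are examples only
export APP_NAME="myapp"                          # app-sre "application" folder this component lives in
export COMPONENT_NAME="mycomponent"              # app-sre "resourceTemplate" in deploy.yaml
export IMAGE="quay.io/cloudservices/mycomponent" # image built/pushed by build.sh

# Fetch and source bootstrap.sh (retrieval method is an assumption; adjust to your pipeline)
CICD_URL="https://raw.githubusercontent.com/RedHatInsights/cicd-tools/main"
curl -s "$CICD_URL/bootstrap.sh" > .cicd_bootstrap.sh && source .cicd_bootstrap.sh

# bootstrap.sh exports CICD_ROOT, so the remaining scripts can be sourced from there
source "$CICD_ROOT/build.sh"
source "$CICD_ROOT/deploy_ephemeral_env.sh"
source "$CICD_ROOT/cji_smoke_test.sh"
source "$CICD_ROOT/post_test_results.sh"
```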

## build.sh

Using docker (on RHEL7) or podman (otherwise), build, tag, and push an image to the Quay and Red Hat registries.

If it's a GitHub or GitLab PR/MR-triggered script execution, tag the image with `pr-123-SHA` and `pr-123-testing`; otherwise, use a short SHA of the target repo's HEAD.
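
For reference, the base tag is computed in `bootstrap.sh` (included later in this commit); condensed, the logic is:

```bash
# Short SHA of HEAD by default; prefixed with pr-<id>- for PR/MR-triggered builds
export IMAGE_TAG=$(git rev-parse --short=7 HEAD)

if [ ! -z "$ghprbPullId" ]; then             # GitHub PR builds (Jenkins ghprb plugin)
    export IMAGE_TAG="pr-${ghprbPullId}-${IMAGE_TAG}"
fi

if [ ! -z "$gitlabMergeRequestIid" ]; then   # GitLab MR builds
    export IMAGE_TAG="pr-${gitlabMergeRequestIid}-${IMAGE_TAG}"
fi
```

The additional `pr-123-testing` style tag is applied by `build.sh` itself.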

## deploy_ephemeral_db.sh

Deploy using `bonfire process` and `<oc_wrapper> apply`, removing dependencies and setting up database envvars.
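
A rough sketch of that pattern (flags and file names here are illustrative assumptions, not the script's exact invocation):

```bash
# Process the app's templates with bonfire, then apply the result to the reserved namespace
bonfire process "$APP_NAME" --namespace "$NAMESPACE" > "$ARTIFACTS_DIR/processed-template.yaml"
oc_wrapper apply -f "$ARTIFACTS_DIR/processed-template.yaml" -n "$NAMESPACE"
# the real script additionally strips dependencies and exports the database env vars
```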

## deploy_ephemeral_env.sh

Deploy into an ephemeral environment using `bonfire deploy`, specifying the app, component, and relevant image tag args. Passes `EXTRA_DEPLOY_ARGS`, which can be set by the caller via pr_checks.sh.
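
A rough sketch of the deploy invocation (exact flag names are illustrative assumptions; see the script for the real arguments):

```bash
bonfire deploy "$APP_NAME" \
    --ref-env "$REF_ENV" \
    --set-image-tag "$IMAGE=$IMAGE_TAG" \
    --namespace "$NAMESPACE" \
    --timeout "$DEPLOY_TIMEOUT" \
    $COMPONENTS_ARG \
    $COMPONENTS_RESOURCES_ARG \
    $EXTRA_DEPLOY_ARGS
```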

## cji_smoke_test.sh

Run an iqe-tests container for the relevant app plugin using `bonfire deploy-iqe-cji`, wait for the tests to complete, and fetch artifacts using minio.
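
A rough sketch of the idea (options and the marker/filter variables are illustrative assumptions):

```bash
# Kick off an IQE ClowdJobInvocation for this component's test plugin
bonfire deploy-iqe-cji "$COMPONENT_NAME" \
    --marker "$IQE_MARKER_EXPRESSION" \
    --filter "$IQE_FILTER_EXPRESSION" \
    --namespace "$NAMESPACE"
# ...the script then waits for the CJI to finish and pulls junit/log artifacts
# via minio into $ARTIFACTS_DIR
```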

## post_test_results.sh

Using the artifacts fetched by `cji_smoke_test.sh`, add a GitHub status or GitLab comment linking to the relevant test results in Ibutsu.
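
As an illustration of the GitHub half, a commit status can be posted with the GitHub statuses API; the token, repo variables, and Ibutsu URL below are assumptions for this sketch, not values these scripts define:

```bash
# Hypothetical example: mark the tested commit with a status that links to Ibutsu
curl -s -X POST \
    -H "Authorization: token $GITHUB_TOKEN" \
    -d "{\"state\": \"success\", \"context\": \"ibutsu\", \"description\": \"smoke tests passed\", \"target_url\": \"https://ibutsu.example.com/runs/${IBUTSU_RUN_ID}\"}" \
    "https://api.github.com/repos/${GITHUB_ORG}/${GITHUB_REPO}/statuses/${GIT_COMMIT}"
```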

## smoke_test.sh

DEPRECATED, use `cji_smoke_test.sh`

## iqe_pod

DEPRECATED, use `cji_smoke_test.sh`


# Contributing

Suggested method for testing changes to these scripts:
- Modify `bootstrap.sh` to `git clone` your fork and branch of bonfire.
- Open a PR in a repo using bonfire pr_checks and the relevant scripts, modifying the `pr_check` script to clone your fork and branch of bonfire.
- Observe modified scripts running in the relevant CI/CD pipeline.
46 changes: 46 additions & 0 deletions _common_container_logic.sh
@@ -0,0 +1,46 @@
#!/bin/bash

# Env vars set by bootstrap.sh:
# DOCKER_CONFIG -- docker conf path

# Env vars normally supplied by CI environment:
#QUAY_USER
#QUAY_TOKEN
#QUAY_API_TOKEN
#RH_REGISTRY_USER
#RH_REGISTRY_TOKEN

set -e

function login {
    if test -f /etc/redhat-release && grep -q -i "release 7" /etc/redhat-release; then
        # on RHEL7, use docker
        docker_login
    else
        # on RHEL8 or anything else, use podman
        podman_login
    fi
}

function docker_login {
    set -x
    docker login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io
    docker login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io
    set +x
}

function podman_login {
    podman login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io
    podman login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io
}

if [[ -z "$QUAY_USER" || -z "$QUAY_TOKEN" ]]; then
echo "QUAY_USER and QUAY_TOKEN must be set"
exit 1
fi

if [[ -z "$RH_REGISTRY_USER" || -z "$RH_REGISTRY_TOKEN" ]]; then
echo "RH_REGISTRY_USER and RH_REGISTRY_TOKEN must be set"
exit 1
fi

128 changes: 128 additions & 0 deletions _common_deploy_logic.sh
@@ -0,0 +1,128 @@
# Env vars caller defines:
#APP_NAME="myapp" # name of app-sre "application" folder this component lives in
#COMPONENT_NAME="mycomponent" # name of app-sre "resourceTemplate" in deploy.yaml for this component
#IMAGE="quay.io/cloudservices/mycomponent" # image that this application uses
#COMPONENTS="component1 component2" # specific components to deploy (optional, default: all)
#COMPONENTS_W_RESOURCES="component1 component2" # components which should preserve resource settings (optional, default: none)
#DEPLOY_TIMEOUT="900" # bonfire deployment timeout parameter in seconds
#RELEASE_NAMESPACE="true" # release namespace after PR check ends (default: true)
#ALWAYS_COLLECT_LOGS="true" # collect logs on teardown even if tests passed (default: false)
#REF_ENV="insights-production" # name of bonfire reference environment (default: insights-production)

# Env vars set by 'bootstrap.sh':
#IMAGE_TAG="abcd123" # image tag for the PR being tested
#GIT_COMMIT="abcd123defg456" # full git commit hash of the PR being tested
#ARTIFACTS_DIR -- directory where test run artifacts are stored

add_cicd_bin_to_path

function trap_proxy {
    # https://stackoverflow.com/questions/9256644/identifying-received-signal-name-in-bash
    func="$1"; shift
    for sig; do
        trap "$func $sig" "$sig"
    done
}

trap_proxy teardown EXIT ERR SIGINT SIGTERM

set -e

: ${COMPONENTS:=""}
: ${COMPONENTS_W_RESOURCES:=""}
: ${DEPLOY_TIMEOUT:="900"}
: ${REF_ENV:="insights-production"}
: ${RELEASE_NAMESPACE:="true"}
: ${ALWAYS_COLLECT_LOGS:="false"}

K8S_ARTIFACTS_DIR="$ARTIFACTS_DIR/k8s_artifacts"
TEARDOWN_RAN=0

function get_pod_logs() {
    local ns=$1
    LOGS_DIR="$K8S_ARTIFACTS_DIR/$ns/logs"
    mkdir -p $LOGS_DIR
    # get array of pod_name:container1,container2,..,containerN for all containers in all pods
    echo "Collecting container logs..."
    PODS_CONTAINERS=($(oc_wrapper get pods --ignore-not-found=true -n $ns -o "jsonpath={range .items[*]}{' '}{.metadata.name}{':'}{range .spec['containers', 'initContainers'][*]}{.name}{','}"))
    for pc in ${PODS_CONTAINERS[@]}; do
        # https://stackoverflow.com/a/4444841
        POD=${pc%%:*}
        CONTAINERS=${pc#*:}
        for container in ${CONTAINERS//,/ }; do
            oc_wrapper logs $POD -c $container -n $ns > $LOGS_DIR/${POD}_${container}.log 2> /dev/null || continue
            oc_wrapper logs $POD -c $container --previous -n $ns > $LOGS_DIR/${POD}_${container}-previous.log 2> /dev/null || continue
        done
    done
}

function collect_k8s_artifacts() {
    local ns=$1
    DIR="$K8S_ARTIFACTS_DIR/$ns"
    mkdir -p $DIR
    get_pod_logs $ns
    echo "Collecting events and k8s configs..."
    oc_wrapper get events -n $ns --sort-by='.lastTimestamp' > $DIR/oc_get_events.txt
    oc_wrapper get all -n $ns -o yaml > $DIR/oc_get_all.yaml
    oc_wrapper get clowdapp -n $ns -o yaml > $DIR/oc_get_clowdapp.yaml
    oc_wrapper get clowdenvironment env-$ns -o yaml > $DIR/oc_get_clowdenvironment.yaml
    oc_wrapper get clowdjobinvocation -n $ns -o yaml > $DIR/oc_get_clowdjobinvocation.yaml
}

function teardown {
    local CAPTURED_SIGNAL="$1"

    add_cicd_bin_to_path

    set +x
    [ "$TEARDOWN_RAN" -ne "0" ] && return
    echo "------------------------"
    echo "----- TEARING DOWN -----"
    echo "------------------------"
    local ns

    echo "Tear down operation triggered by signal: $CAPTURED_SIGNAL"

    # run teardown on all namespaces possibly reserved in this run
    RESERVED_NAMESPACES=("${NAMESPACE}" "${DB_NAMESPACE}" "${SMOKE_NAMESPACE}")
    # remove duplicates (https://stackoverflow.com/a/13648438)
    UNIQUE_NAMESPACES=($(echo "${RESERVED_NAMESPACES[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' '))

    for ns in ${UNIQUE_NAMESPACES[@]}; do
        echo "Running teardown for ns: $ns"
        set +e

        if [ "$ALWAYS_COLLECT_LOGS" != "true" ] && [ "$CAPTURED_SIGNAL" == "EXIT" ] && check_junit_files "${ARTIFACTS_DIR}/junit-*.xml"; then
            echo "No errors or failures detected on JUnit reports, skipping K8s artifacts collection"
        else
            [ "$ALWAYS_COLLECT_LOGS" != "true" ] && echo "Errors or failures detected, collecting K8s artifacts"
            collect_k8s_artifacts $ns
        fi

        if [ "${RELEASE_NAMESPACE}" != "false" ]; then
            echo "Releasing namespace reservation"
            bonfire namespace release $ns -f
        fi
        set -e
    done
    TEARDOWN_RAN=1
}

function transform_arg {
    # transform components to "$1" options for bonfire
    options=""
    option="$1"; shift;
    components="$@"
    for c in $components; do
        options="$options $option $c"
    done
    echo "$options"
}

if [ ! -z "$COMPONENTS" ]; then
    export COMPONENTS_ARG=$(transform_arg --component $COMPONENTS)
fi

if [ ! -z "$COMPONENTS_W_RESOURCES" ]; then
    export COMPONENTS_RESOURCES_ARG=$(transform_arg --no-remove-resources $COMPONENTS_W_RESOURCES)
fi
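
For clarity, a quick illustration of what `transform_arg` expands the optional component lists into (values are examples):

```bash
# COMPONENTS="component1 component2" results in:
#   COMPONENTS_ARG=" --component component1 --component component2"
# COMPONENTS_W_RESOURCES="component1" results in:
#   COMPONENTS_RESOURCES_ARG=" --no-remove-resources component1"
# Both strings are passed through to the bonfire invocation by the deploy scripts.
```
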
38 changes: 38 additions & 0 deletions bin/check_junit_files
@@ -0,0 +1,38 @@
#!/usr/bin/env python3

import logging
import sys
from glob import iglob
from xml.etree.ElementTree import ParseError

from junitparser import JUnitXml


def errors_or_failures_found(junit_report):
    for test_suite in junit_report:
        if test_suite.errors or test_suite.failures:
            return True
    return False


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    if len(sys.argv) == 1:
        raise SystemExit(f"usage: {sys.argv[0]} <file path/glob expression>")

    glob_path = sys.argv[1]
    report_paths = list(iglob(glob_path))

    if report_paths:
        junit_xml = JUnitXml()
        for report_path in report_paths:
            try:
                junit_xml += JUnitXml.fromfile(report_path)
            except ParseError as parse_error:
                raise SystemExit(f"file {report_path} hit XML parse error: {parse_error}")

        if errors_or_failures_found(junit_xml):
            sys.exit(1)
    else:
        raise SystemExit(f"no junit artifacts found for '{report_paths}'")
15 changes: 15 additions & 0 deletions bin/oc_wrapper
@@ -0,0 +1,15 @@
#!/usr/bin/env python3

import logging
import sys

from ocviapy import oc
from sh import ErrorReturnCode

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = sys.argv[1:]
    try:
        oc(*args, _silent=True, _print=True, _in=sys.stdin)
    except ErrorReturnCode as err:
        sys.exit(err.exit_code)
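
`oc_wrapper` is used exactly like `oc`; the arguments are forwarded to `oc` via ocviapy (pod and namespace names below are placeholders):

```bash
oc_wrapper get pods -n "$NAMESPACE" -o wide
oc_wrapper logs "$POD" -c "$CONTAINER" -n "$NAMESPACE"
```
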
109 changes: 109 additions & 0 deletions bootstrap.sh
@@ -0,0 +1,109 @@
#!/bin/bash

set -e

# check that unit_test.sh complies w/ best practices
URL="https://github.com/RedHatInsights/bonfire/tree/master/cicd/examples"
if test -f unit_test.sh; then
    if grep 'exit $result' unit_test.sh; then
        echo "----------------------------"
        echo "ERROR: unit_test.sh is calling 'exit' improperly, refer to examples at $URL"
        echo "----------------------------"
        exit 1
    fi
fi

export APP_ROOT=$(pwd)
export WORKSPACE=${WORKSPACE:-$APP_ROOT} # if running in jenkins, use the build's workspace
export BONFIRE_ROOT=${WORKSPACE}/.bonfire
export CICD_ROOT=${BONFIRE_ROOT}/cicd
export IMAGE_TAG=$(git rev-parse --short=7 HEAD)
export BONFIRE_BOT="true"
export BONFIRE_NS_REQUESTER="${JOB_NAME}-${BUILD_NUMBER}"
# which branch to fetch cicd scripts from in bonfire repo
export BONFIRE_REPO_BRANCH="${BONFIRE_REPO_BRANCH:-master}"
export BONFIRE_REPO_ORG="${BONFIRE_REPO_ORG:-RedHatInsights}"

set -x
# Set up docker cfg
export DOCKER_CONFIG="$WORKSPACE/.docker"
rm -fr $DOCKER_CONFIG
mkdir $DOCKER_CONFIG

# Set up podman cfg
# No longer needed due to podman now using the DOCKER_CONFIG
#AUTH_CONF_DIR="$WORKSPACE/.podman"
#rm -fr $AUTH_CONF_DIR
#mkdir $AUTH_CONF_DIR
#export REGISTRY_AUTH_FILE="$AUTH_CONF_DIR/auth.json"

# Set up kube cfg
export KUBECONFIG_DIR="$WORKSPACE/.kube"
export KUBECONFIG="$KUBECONFIG_DIR/config"
rm -fr $KUBECONFIG_DIR
mkdir $KUBECONFIG_DIR
set +x

# if this is a PR, use a different tag, since PR tags expire
if [ ! -z "$ghprbPullId" ]; then
export IMAGE_TAG="pr-${ghprbPullId}-${IMAGE_TAG}"
fi

if [ ! -z "$gitlabMergeRequestIid" ]; then
export IMAGE_TAG="pr-${gitlabMergeRequestIid}-${IMAGE_TAG}"
fi


export GIT_COMMIT=$(git rev-parse HEAD)
export ARTIFACTS_DIR="$WORKSPACE/artifacts"

rm -fr $ARTIFACTS_DIR && mkdir -p $ARTIFACTS_DIR

# TODO: create custom jenkins agent image that has a lot of this stuff pre-installed
export LANG=en_US.utf-8
export LC_ALL=en_US.utf-8

python3 -m venv .bonfire_venv
source .bonfire_venv/bin/activate

pip install --upgrade pip 'setuptools<58' wheel
pip install --upgrade 'crc-bonfire>=4.10.4'

# clone repo to download cicd scripts
rm -fr $BONFIRE_ROOT
echo "Fetching branch '$BONFIRE_REPO_BRANCH' of https://github.com/${BONFIRE_REPO_ORG}/bonfire.git"
git clone --branch "$BONFIRE_REPO_BRANCH" "https://github.com/${BONFIRE_REPO_ORG}/bonfire.git" "$BONFIRE_ROOT"

# Do a docker login to ensure our later 'docker pull' calls have an auth file created
source ${CICD_ROOT}/_common_container_logic.sh
login

# Gives access to helper commands such as "oc_wrapper"
add_cicd_bin_to_path() {
    if ! command -v oc_wrapper; then export PATH=$PATH:${CICD_ROOT}/bin; fi
}

check_available_server() {
    echo "Checking connectivity to ephemeral cluster ..."
    (curl -s $OC_LOGIN_SERVER > /dev/null)
    RET_CODE=$?
    if [ $RET_CODE -ge 1 ]; then echo "Connectivity check failed"; fi
    return $RET_CODE
}

# Hotswap based on availability
login_to_available_server() {
    if check_available_server; then
        # log in to ephemeral cluster
        oc_wrapper login --token=$OC_LOGIN_TOKEN --server=$OC_LOGIN_SERVER
        echo "logging in to Ephemeral cluster"
    else
        # switch to crcd cluster
        oc_wrapper login --token=$OC_LOGIN_TOKEN_DEV --server=$OC_LOGIN_SERVER_DEV
        echo "logging in to CRCD cluster"
    fi
}

add_cicd_bin_to_path

login_to_available_server