diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 75c78ba0..666d0a9d 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -27,5 +27,8 @@ jobs: with: cluster_name: kind + - name: Check connectivity to the cluster + run: ./compiled/tutorial/scripts/kubectl get pods + - name: Run chart-testing (install) - run: ./compiled/tutorial/scripts/apply.sh + run: ./compiled/tutorial/scripts/apply \ No newline at end of file diff --git a/.kapitan b/.kapitan index 62a6962a..c13ede0a 100644 --- a/.kapitan +++ b/.kapitan @@ -1,4 +1,16 @@ -version: 0.31 +version: 0.32 compile: - prune: true - embed-refs: true + prune: true + embed-refs: true + fetch: true + yaml-dump-null-as-empty: true + compose-node-name: true + refs-path: ./system/refs + jinja2-filters: ./system/lib/jinja2_filters.py + search-paths: + - . + - ./system/ + - ./system/lib + - ./system/generators +refs: + refs-path: ./system/refs \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 95e4c2b9..7061bc0c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ -exclude: ^compiled/|^components/charts/ +exclude: ^compiled/|^system/sources/ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: trailing-whitespace - id: check-docstring-first @@ -9,18 +9,18 @@ repos: - id: check-yaml exclude: | (?x)( - ^Docs/| + ^system/sources/| ) - id: debug-statements - id: name-tests-test - id: requirements-txt-fixer - id: check-merge-conflict - repo: https://github.com/psf/black - rev: 22.8.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.12.0 hooks: - id: isort args: ["--profile", "black"] \ No newline at end of file diff --git a/README.md b/README.md index 30e4ac04..adae7ce8 100644 --- a/README.md +++ b/README.md @@ -11,23 +11,30 @@ $ git clone git@github.com:kapicorp/kapitan-reference.git kapitan-templates $ cd kapitan-templates $ ./kapitan compile -Compiled postgres-proxy (1.51s) -Compiled tesoro (1.70s) -Compiled echo-server (1.64s) -Compiled mysql (1.67s) -Compiled gke-pvm-killer (1.17s) -Compiled prod-sockshop (4.74s) -Compiled dev-sockshop (4.74s) -Compiled tutorial (1.68s) -Compiled global (0.76s) -Compiled examples (2.60s) -Compiled pritunl (2.03s) -Compiled sock-shop (4.36s) +Rendered inventory (3.45s) +Compiled pritunl (0.23s) +Compiled vault (0.27s) +Compiled examples (0.28s) +Compiled gke-pvm-killer (0.10s) +Compiled mysql (0.10s) +Compiled postgres-proxy (0.11s) +Compiled sock-shop (0.23s) +Compiled echo-server (0.11s) +Compiled global (0.09s) +Compiled guestbook-argocd (0.12s) +Compiled tutorial (0.15s) +Compiled kapicorp-project-123 (0.09s) +Compiled kapicorp-terraform-admin (0.10s) +Compiled tesoro (0.13s) +Compiled dev-sockshop (0.24s) +Compiled prod-sockshop (0.27s) +Compiled argocd (0.99s) +Compiled github-actions (6.99s) ``` -## Slow walk-through +## Generators documentation (IN PROGRESS) -[Manifest Generator Documentation](components/generators/kubernetes/README.md) +[generators.kapitan.dev](https://generators.kapitan.dev/) ### Tools @@ -37,11 +44,10 @@ For now, you can see that the [`./kapitan`](kapitan) file is a wrapper script th *Note*: For speed, if kapitan is already installed, it will prefer the non-docker version. 
-| Script | Description |
-| ------ | ----------- |
+| Script    | Description                      |
+|-----------|----------------------------------|
 | ./kapitan | Wrapper script to invoke kapitan |
-| [generate_sa_secrets.sh](templates/scripts/generate_sa_secrets.sh) | Templated script to automatically inject service accounts into refs |
-| [import_kubernetes_clusters](scripts/import_kubernetes_clusters) | Helper scripts that looks for GKE cluster and automatically imports them into the inventory |
+
 
 ### Libraries
 
@@ -49,83 +55,46 @@ This repo already packs some important libraries that you will want to have when
 
 | Name | Description | Inventory file |
 | ---- | ----------- | -------------- |
-| [kube-libsonnet](https://github.com/bitnami-labs/kube-libsonnet) | bitnami-labs kube library | [kube.yml](inventory/classes/kapitan/kube.yml) |
-| [kubernetes-generator](components/generators/kubernetes) | [Synthace](www.synthace.com) manifests generator | [generators/kubernetes.yml](inventory/classes/kapitan/generators/kubernetes.yml)|
-| [ingresses-generator](components/generators/ingresses) | [Synthace](www.synthace.com) ingresses generator | [generators/ingresses.yml](inventory/classes/kapitan/generators/ingresses.yml)|
-| [utils](lib/utils.libsonnet) | helpful utilites ||
-| [kap](lib/kap.libsonnet) | Kapitan boilerplate in one file ||
+| kgenlib | Kapitan Generators SDK | [kgenlib.yml](inventory/classes/kapitan/kgenlib.yml) |
 
 Kapitan allows you to manage external dependencies like the above libraries.
-For instance, in the [spinnaker.yml](inventory/classes/kapitan/spinnaker.yml) file, the "dependencies" directive tells Kapitan where to find the library.
-To update them, run:
+This repo enables fetching by default through the `.kapitan` file, which only fetches dependencies that do not already exist.
 
-```shell script
-./kapitan compile --fetch
-Dependency lib/kube.libjsonnet : already exists. Ignoring
-./kapitan compiledd
-Compiled tesoro (1.70s)
-Compiled echo-server (1.64s)
-Compiled mysql (1.67s)
-Compiled gke-pvm-killer (1.17s)
-Compiled prod-sockshop (4.74s)
-Compiled dev-sockshop (4.74s)
-Compiled tutorial (1.68s)
-Compiled global (0.76s)
-Compiled examples (2.60s)
-Compiled pritunl (2.03s)
-Compiled sock-shop (4.36s)
+```
+version: 0.32
+compile:
+  prune: true
+  embed-refs: true
+  fetch: true
 ```
 
-## Generators
-
-As explained in the blog post [Keep your ship together with Kapitan](https://medium.com/kapitan-blog/keep-your-ship-together-with-kapitan-d82d441cc3e7). generators are a
-powerful idea to simplify the management your setup.
-
-We will release initially generators for kubernetes manifests, terraform and spinnaker pipelines.
-
-For now, only the `manifests` and `ingresses` generators are available
-
-### Manifests generator
-
-The `manifests` generator allows you to quickly generate Kubernetes manifests from a much simpler yaml configuration.
-
-The aim for this approach is to allow you to cover the vast majority of the needs you will have for your components.
-More complex scenarios can also be achieved by expanding the library, or implementing your own template.
- -### Examples -To help you get started, please look at the following examples: - -| source | description | output | -| ------ | ----------- | ------ | -|[mysql](inventory/classes/components/mysql.yml)| Example MySQL statefulset | [manifests](compiled/mysql/manifests)| -|[echo-server](inventory/classes/components/echo-server.yml)| Example using [echo-server](https://github.com/jmalloc/echo-server) | [manifests](compiled/echo-server/manifests)| -|[gke-pvm-killer](inventory/classes/components/gke-pvm-killer.yml)| Example using [estafette-gke-preemptible-killer](https://github.com/estafette/estafette-gke-preemptible-killer)| [manifests](compiled/gke-pvm-killer/manifests)| -|[postgres-proxy](inventory/classes/components/postgres-proxy.yml)| Example using [cloud-sql-proxy](https://github.com/GoogleCloudPlatform/cloudsql-proxy) to connect to a Cloud SQL Postgres instance| [manifests](compiled/postgres-proxy/manifests)| -|[logstash](inventory/classes/components/logstash.yml)| Example of [Logstash](https://www.elastic.co/logstash) configuration | [manifests](compiled/examples/manifests) -|[tesoro](inventory/classes/components/kapicorp/tesoro.yml)| Example of [tesoro](https://github.com/kapicorp/tesoro) configuration | [manifests](compiled/tesoro/manifests) -|[pritunl](inventory/classes/components/pritunl/pritunl.yml)| Example of [pritunl](https://pritunl.com/) configuration | [manifests](compiled/pritunl/manifests) - - - -Please find the generated manifests in the [compiled](compiled) folder - - -### Ingresses generator - -The `ingresses` generator adds to the `manifests` generator the ability to easily define ingress resources. - -### Examples -To help you get started, please look at the following examples: - -| source | description | output | -| ------ | ----------- | ------ | -|[echo-server](inventory/classes/components/echo-server.yml)| Defining ingress paths using [echo-server](https://github.com/jmalloc/echo-server) | [manifests](compiled/echo-server/manifests)| - -[Documentation](components/generators/kubernetes/README.md) - -### Request or submit your examples -We have used this generator extensively, and we know it covers the majority of the use cases. -If you want a specific example, please let us know (or submit your PR) +To update them from the upstream version, force fetch by running: -By adding more example we will be able to stress test the library to make sure we really satisfy all the most common use cases. 
+```shell script +./kapitan compile --force-fetch +Dependency https://github.com/kapicorp/generators.git: saved to system/lib +Dependency https://github.com/kapicorp/generators.git: saved to system/generators/kubernetes +Dependency https://github.com/kapicorp/generators.git: saved to system/generators/terraform +Dependency argo-cd: saved to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3 +Rendered inventory (3.45s) +Compiled vault (0.27s) +Compiled pritunl (0.27s) +Compiled examples (0.32s) +Compiled gke-pvm-killer (0.10s) +Compiled mysql (0.10s) +Compiled postgres-proxy (0.10s) +Compiled sock-shop (0.23s) +Compiled echo-server (0.11s) +Compiled global (0.09s) +Compiled tutorial (0.14s) +Compiled guestbook-argocd (0.11s) +Compiled kapicorp-project-123 (0.09s) +Compiled kapicorp-terraform-admin (0.09s) +Compiled tesoro (0.13s) +Compiled dev-sockshop (0.24s) +Compiled prod-sockshop (0.27s) +Compiled argocd (0.97s) +Compiled github-actions (7.13s) +``` \ No newline at end of file diff --git a/compiled/argocd/docs/README.md b/compiled/argocd/README.md similarity index 79% rename from compiled/argocd/docs/README.md rename to compiled/argocd/README.md index 5d7f127f..2211ad03 100644 --- a/compiled/argocd/docs/README.md +++ b/compiled/argocd/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | argocd | -| **Project** | `not defined`| +| **Project** | `argocd`| | **Cluster** | 'Not defined' | | **Namespace** | `argocd` | diff --git a/compiled/argocd/manifests/argo-cd.yml b/compiled/argocd/manifests/argo-cd.yml index 043faf99..b02b7b6c 100644 --- a/compiled/argocd/manifests/argo-cd.yml +++ b/compiled/argocd/manifests/argo-cd.yml @@ -3507,7 +3507,7 @@ spec: apiVersion: v1 kind: Service metadata: - annotations: null + annotations: labels: app: redis-ha chart: redis-ha-4.12.17 @@ -3539,7 +3539,7 @@ spec: apiVersion: v1 kind: Service metadata: - annotations: null + annotations: labels: app: redis-ha chart: redis-ha-4.12.17 @@ -3812,7 +3812,7 @@ spec: runAsNonRoot: true runAsUser: 1000 serviceAccountName: argo-cd-redis-ha-haproxy - tolerations: null + tolerations: volumes: - configMap: name: argo-cd-redis-ha-configmap @@ -4538,7 +4538,7 @@ spec: name: data - mountPath: /health name: health - - args: null + - args: env: - name: REDIS_ADDR value: redis://localhost:6379 @@ -4563,7 +4563,7 @@ spec: periodSeconds: 15 timeoutSeconds: 3 resources: {} - volumeMounts: null + volumeMounts: initContainers: - args: - /readonly-config/init.sh @@ -4604,7 +4604,7 @@ spec: type: RollingUpdate volumeClaimTemplates: - metadata: - annotations: null + annotations: name: data spec: accessModes: diff --git a/compiled/argocd/scripts/bash.include b/compiled/argocd/scripts/bash.include new file mode 100644 index 00000000..e26e0336 --- /dev/null +++ b/compiled/argocd/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="argocd" +TARGET_PATH="argocd" +GCP_PROJECT_ID="argocd" +TARGET="argocd" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} 
--relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/argocd/scripts/bash.include-test.sh b/compiled/argocd/scripts/bash.include-test.sh new file mode 100755 index 00000000..fcd18e40 --- /dev/null +++ b/compiled/argocd/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "argocd" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "argocd" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/argocd/scripts/gcloud b/compiled/argocd/scripts/gcloud new file mode 100755 index 00000000..fdaa887b --- /dev/null +++ b/compiled/argocd/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project argocd "$@" diff --git a/compiled/argocd/scripts/gcloud.include b/compiled/argocd/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/argocd/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/argocd/scripts/get_project_number b/compiled/argocd/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/argocd/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/argocd/scripts/kapitan.include b/compiled/argocd/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/argocd/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? 
please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/argocd/scripts/set_reference b/compiled/argocd/scripts/set_reference new file mode 100755 index 00000000..2deed145 --- /dev/null +++ b/compiled/argocd/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/argocd/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/dev-sockshop/docs/README.md b/compiled/dev-sockshop/README.md similarity index 97% rename from compiled/dev-sockshop/docs/README.md rename to compiled/dev-sockshop/README.md index dbe7a3a7..297f8d10 100644 --- a/compiled/dev-sockshop/docs/README.md +++ b/compiled/dev-sockshop/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | dev-sockshop | -| **Project** | `not defined`| +| **Project** | `dev-sockshop`| | **Cluster** | kind | | **Namespace** | `dev-sockshop` | diff --git a/compiled/dev-sockshop/scripts/apply.sh b/compiled/dev-sockshop/scripts/apply similarity index 58% rename from compiled/dev-sockshop/scripts/apply.sh rename to compiled/dev-sockshop/scripts/apply index 0ef04979..956bafa0 100755 --- a/compiled/dev-sockshop/scripts/apply.sh +++ b/compiled/dev-sockshop/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! 
-z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/bash.include b/compiled/dev-sockshop/scripts/bash.include new file mode 100644 index 00000000..e4ea901e --- /dev/null +++ b/compiled/dev-sockshop/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="dev-sockshop" +TARGET_PATH="dev-sockshop" +GCP_PROJECT_ID="dev-sockshop" +TARGET="dev-sockshop" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/bash.include-test.sh b/compiled/dev-sockshop/scripts/bash.include-test.sh new file mode 100755 index 00000000..00bfc2e1 --- /dev/null +++ b/compiled/dev-sockshop/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "dev-sockshop" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "dev-sockshop" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/delete_completed b/compiled/dev-sockshop/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/dev-sockshop/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/gcloud b/compiled/dev-sockshop/scripts/gcloud new file mode 100755 index 00000000..924f647d --- /dev/null +++ b/compiled/dev-sockshop/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project dev-sockshop "$@" diff --git a/compiled/dev-sockshop/scripts/gcloud.include b/compiled/dev-sockshop/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/dev-sockshop/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v 
${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/get_project_number b/compiled/dev-sockshop/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/dev-sockshop/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/kapitan.include b/compiled/dev-sockshop/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/dev-sockshop/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/kubectl b/compiled/dev-sockshop/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/dev-sockshop/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/kubectl.sh b/compiled/dev-sockshop/scripts/kubectl.sh deleted file mode 100755 index febdc63f..00000000 --- a/compiled/dev-sockshop/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context dev-sockshop" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/set_reference b/compiled/dev-sockshop/scripts/set_reference new file mode 100755 index 00000000..6b66888d --- /dev/null +++ b/compiled/dev-sockshop/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/dev-sockshop/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/setup_cluster b/compiled/dev-sockshop/scripts/setup_cluster new file mode 100755 index 00000000..3934a455 --- /dev/null +++ b/compiled/dev-sockshop/scripts/setup_cluster @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +KIND="kind" +$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" 
+$KIND export kubeconfig diff --git a/compiled/dev-sockshop/scripts/setup_cluster.sh b/compiled/dev-sockshop/scripts/setup_cluster.sh deleted file mode 100755 index 14da73a8..00000000 --- a/compiled/dev-sockshop/scripts/setup_cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -KIND="kind" -$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" -$KIND export kubeconfig diff --git a/compiled/dev-sockshop/scripts/setup_context b/compiled/dev-sockshop/scripts/setup_context new file mode 100755 index 00000000..686c8bd2 --- /dev/null +++ b/compiled/dev-sockshop/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster kind-kind --user kind-kind --namespace dev-sockshop \ No newline at end of file diff --git a/compiled/dev-sockshop/scripts/setup_context.sh b/compiled/dev-sockshop/scripts/setup_context.sh deleted file mode 100755 index 906eb2ba..00000000 --- a/compiled/dev-sockshop/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context dev-sockshop --cluster kind-kind --user kind-kind --namespace dev-sockshop diff --git a/compiled/echo-server/docs/README.md b/compiled/echo-server/README.md similarity index 90% rename from compiled/echo-server/docs/README.md rename to compiled/echo-server/README.md index 4fd33aa9..cacffd74 100644 --- a/compiled/echo-server/docs/README.md +++ b/compiled/echo-server/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | echo-server | -| **Project** | `not defined`| +| **Project** | `echo-server`| | **Cluster** | 'Not defined' | | **Namespace** | `echo-server` | diff --git a/compiled/echo-server/scripts/bash.include b/compiled/echo-server/scripts/bash.include new file mode 100644 index 00000000..39147662 --- /dev/null +++ b/compiled/echo-server/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="echo-server" +TARGET_PATH="echo-server" +GCP_PROJECT_ID="echo-server" +TARGET="echo-server" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" 
+KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/echo-server/scripts/bash.include-test.sh b/compiled/echo-server/scripts/bash.include-test.sh new file mode 100755 index 00000000..22aa14ed --- /dev/null +++ b/compiled/echo-server/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "echo-server" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "echo-server" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/echo-server/scripts/gcloud b/compiled/echo-server/scripts/gcloud new file mode 100755 index 00000000..dfceeaa7 --- /dev/null +++ b/compiled/echo-server/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project echo-server "$@" diff --git a/compiled/echo-server/scripts/gcloud.include b/compiled/echo-server/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/echo-server/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v 
$HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/echo-server/scripts/get_project_number b/compiled/echo-server/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/echo-server/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/echo-server/scripts/kapitan.include b/compiled/echo-server/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/echo-server/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/echo-server/scripts/set_reference b/compiled/echo-server/scripts/set_reference new file mode 100755 index 00000000..81bf1d77 --- /dev/null +++ b/compiled/echo-server/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. 
gkms:targets/echo-server/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/examples/docs/README.md b/compiled/examples/README.md similarity index 94% rename from compiled/examples/docs/README.md rename to compiled/examples/README.md index 9eb7ce45..0dd31085 100644 --- a/compiled/examples/docs/README.md +++ b/compiled/examples/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | examples | -| **Project** | `not defined`| +| **Project** | `examples`| | **Cluster** | 'Not defined' | | **Namespace** | `examples` | diff --git a/compiled/examples/manifests/base64-as-base64-secret.yml b/compiled/examples/manifests/base64-as-base64-secret.yml index 1a83c73f..46eeed03 100644 --- a/compiled/examples/manifests/base64-as-base64-secret.yml +++ b/compiled/examples/manifests/base64-as-base64-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: base64-as-base64 name: base64-as-base64 - namespace: examples stringData: CONNECTION: xyz://?{base64:eyJkYXRhIjogIlpERndRMWt3Y0c5VGEzQjJZV2M5UFE9PSIsICJlbmNvZGluZyI6ICJiYXNlNjQiLCAidHlwZSI6ICJiYXNlNjQifQ==:embedded}-someotherstuff type: Opaque diff --git a/compiled/examples/manifests/base64-as-plain-secret.yml b/compiled/examples/manifests/base64-as-plain-secret.yml index 6b547e36..b11f0585 100644 --- a/compiled/examples/manifests/base64-as-plain-secret.yml +++ b/compiled/examples/manifests/base64-as-plain-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: base64-as-plain name: base64-as-plain - namespace: examples stringData: CONNECTION: xyz://?{base64:eyJkYXRhIjogIlUxQjJjVmh3YWtwR1NXVjFRVUozIiwgImVuY29kaW5nIjogIm9yaWdpbmFsIiwgInR5cGUiOiAiYmFzZTY0In0=:embedded}_someotherstuff type: Opaque diff --git a/compiled/examples/manifests/mysql-bundle.yml b/compiled/examples/manifests/mysql-bundle.yml index f7d7ef3c..be728353 100644 --- a/compiled/examples/manifests/mysql-bundle.yml +++ b/compiled/examples/manifests/mysql-bundle.yml @@ -57,7 +57,7 @@ spec: - name: secrets secret: defaultMode: 420 - secretName: mysql-95bc97ff + secretName: mysql-5ebf7f24 updateStrategy: rollingUpdate: partition: 0 diff --git a/compiled/examples/manifests/mysql-secret.yml b/compiled/examples/manifests/mysql-secret.yml index 97e3afc1..b542284f 100644 --- a/compiled/examples/manifests/mysql-secret.yml +++ b/compiled/examples/manifests/mysql-secret.yml @@ -6,6 +6,6 @@ kind: Secret metadata: labels: name: mysql - name: mysql-95bc97ff + name: mysql-5ebf7f24 namespace: examples type: Opaque diff --git a/compiled/examples/manifests/plain-base64-secret.yml b/compiled/examples/manifests/plain-base64-secret.yml index da5c2e02..57495420 100644 --- a/compiled/examples/manifests/plain-base64-secret.yml +++ b/compiled/examples/manifests/plain-base64-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: plain-base64 name: plain-base64 - namespace: examples stringData: CONNECTION: xyz://SW9wR3dGb2Q4M0tQTVdDWFJHUUU=_xx_someotherstuff type: Opaque diff --git a/compiled/examples/manifests/plain-plain-connection-b64-secret.yml b/compiled/examples/manifests/plain-plain-connection-b64-secret.yml index 90a85167..b7fdcad4 100644 --- a/compiled/examples/manifests/plain-plain-connection-b64-secret.yml +++ b/compiled/examples/manifests/plain-plain-connection-b64-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: plain-plain-connection-b64 name: plain-plain-connection-b64 - namespace: examples stringData: - CONNECTION: postgresql://myUser/database + CONNECTION: postgresql://myUser:myPass/database type: Opaque diff --git 
a/compiled/examples/manifests/plain-plain-connection-non-b64-secret.yml b/compiled/examples/manifests/plain-plain-connection-non-b64-secret.yml index 6aea5e86..7f27bc0a 100644 --- a/compiled/examples/manifests/plain-plain-connection-non-b64-secret.yml +++ b/compiled/examples/manifests/plain-plain-connection-non-b64-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: plain-plain-connection-non-b64 name: plain-plain-connection-non-b64 - namespace: examples stringData: - CONNECTION: postgresql://myUser/database + CONNECTION: postgresql://myUser:myPass/database type: Opaque diff --git a/compiled/examples/manifests/plain-plain-connection-secret.yml b/compiled/examples/manifests/plain-plain-connection-secret.yml index 3f65e864..abbe78f1 100644 --- a/compiled/examples/manifests/plain-plain-connection-secret.yml +++ b/compiled/examples/manifests/plain-plain-connection-secret.yml @@ -4,7 +4,6 @@ metadata: labels: name: plain-plain-connection name: plain-plain-connection - namespace: examples stringData: - CONNECTION: postgresql://myUser/database + CONNECTION: postgresql://myUser:myPass/database type: Opaque diff --git a/compiled/examples/manifests/trivy-rbac.yml b/compiled/examples/manifests/trivy-rbac.yml index d59de786..7363161b 100644 --- a/compiled/examples/manifests/trivy-rbac.yml +++ b/compiled/examples/manifests/trivy-rbac.yml @@ -36,6 +36,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: Role + name: trivy subjects: - kind: ServiceAccount name: trivy diff --git a/compiled/examples/scripts/bash.include b/compiled/examples/scripts/bash.include new file mode 100644 index 00000000..9d402c60 --- /dev/null +++ b/compiled/examples/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="examples" +TARGET_PATH="examples" +GCP_PROJECT_ID="examples" +TARGET="examples" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/examples/scripts/bash.include-test.sh b/compiled/examples/scripts/bash.include-test.sh new file mode 100755 index 00000000..05473033 --- /dev/null +++ b/compiled/examples/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "examples" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "examples" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/examples/scripts/gcloud b/compiled/examples/scripts/gcloud new file mode 100755 index 00000000..0c5ba648 --- /dev/null +++ b/compiled/examples/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project examples "$@" diff --git a/compiled/examples/scripts/gcloud.include b/compiled/examples/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/examples/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/examples/scripts/get_project_number b/compiled/examples/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ 
b/compiled/examples/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/examples/scripts/kapitan.include b/compiled/examples/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/examples/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/examples/scripts/set_reference b/compiled/examples/scripts/set_reference new file mode 100755 index 00000000..f60cd9ee --- /dev/null +++ b/compiled/examples/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/examples/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/github-actions/docs/README.md b/compiled/github-actions/README.md similarity index 81% rename from compiled/github-actions/docs/README.md rename to compiled/github-actions/README.md index 62bb866b..d56178a7 100644 --- a/compiled/github-actions/docs/README.md +++ b/compiled/github-actions/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | github-actions | -| **Project** | `not defined`| +| **Project** | `github-actions`| | **Cluster** | 'Not defined' | | **Namespace** | `actions-runner-system` | diff --git a/compiled/github-actions/manifests/github-runners-bundle.yml b/compiled/github-actions/manifests/github-runners-bundle.yml index 71ea3af9..dccf1784 100644 --- a/compiled/github-actions/manifests/github-runners-bundle.yml +++ b/compiled/github-actions/manifests/github-runners-bundle.yml @@ -41,6 +41,7 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: + creationTimestamp: name: manager-role rules: - apiGroups: @@ -484,7 +485,8 @@ kind: Issuer metadata: name: selfsigned-issuer namespace: actions-runner-system -spec: {} +spec: + selfSigned: {} --- apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration diff --git a/compiled/github-actions/manifests/github-runners-crds.yml b/compiled/github-actions/manifests/github-runners-crds.yml index 9841b326..d350839d 100644 --- a/compiled/github-actions/manifests/github-runners-crds.yml +++ b/compiled/github-actions/manifests/github-runners-crds.yml @@ -4,6 +4,7 @@ metadata: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: name: horizontalrunnerautoscalers.actions.summerwind.dev spec: group: actions.summerwind.dev @@ -306,11 +307,14 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} status: acceptedNames: kind: '' plural: '' + conditions: [] + storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 
kind: CustomResourceDefinition @@ -318,6 +322,7 @@ metadata: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: name: runnerdeployments.actions.summerwind.dev spec: group: actions.summerwind.dev @@ -9337,11 +9342,14 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} status: acceptedNames: kind: '' plural: '' + conditions: [] + storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -9349,6 +9357,7 @@ metadata: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: name: runnerreplicasets.actions.summerwind.dev spec: group: actions.summerwind.dev @@ -18355,11 +18364,14 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} status: acceptedNames: kind: '' plural: '' + conditions: [] + storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -18367,6 +18379,7 @@ metadata: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: name: runners.actions.summerwind.dev spec: group: actions.summerwind.dev @@ -26642,11 +26655,14 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} status: acceptedNames: kind: '' plural: '' + conditions: [] + storedVersions: [] --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -26654,6 +26670,7 @@ metadata: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true controller-gen.kubebuilder.io/version: v0.7.0 + creationTimestamp: name: runnersets.actions.summerwind.dev spec: group: actions.summerwind.dev @@ -34599,8 +34616,11 @@ spec: type: object served: true storage: true - subresources: {} + subresources: + status: {} status: acceptedNames: kind: '' plural: '' + conditions: [] + storedVersions: [] diff --git a/compiled/github-actions/scripts/bash.include b/compiled/github-actions/scripts/bash.include new file mode 100644 index 00000000..69d50c9a --- /dev/null +++ b/compiled/github-actions/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="github-actions" +TARGET_PATH="github-actions" +GCP_PROJECT_ID="github-actions" +TARGET="github-actions" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + 
+KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/github-actions/scripts/bash.include-test.sh b/compiled/github-actions/scripts/bash.include-test.sh new file mode 100755 index 00000000..85d23658 --- /dev/null +++ b/compiled/github-actions/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "github-actions" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "github-actions" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/github-actions/scripts/gcloud b/compiled/github-actions/scripts/gcloud new file mode 100755 index 00000000..3d34f3c9 --- /dev/null +++ b/compiled/github-actions/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project github-actions "$@" diff --git a/compiled/github-actions/scripts/gcloud.include b/compiled/github-actions/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/github-actions/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/github-actions/scripts/get_project_number b/compiled/github-actions/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/github-actions/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/github-actions/scripts/kapitan.include b/compiled/github-actions/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/github-actions/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? 
please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/github-actions/scripts/set_reference b/compiled/github-actions/scripts/set_reference new file mode 100755 index 00000000..05cc5d40 --- /dev/null +++ b/compiled/github-actions/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/github-actions/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/gke-pvm-killer/docs/README.md b/compiled/gke-pvm-killer/README.md similarity index 100% rename from compiled/gke-pvm-killer/docs/README.md rename to compiled/gke-pvm-killer/README.md diff --git a/compiled/gke-pvm-killer/scripts/bash.include b/compiled/gke-pvm-killer/scripts/bash.include new file mode 100644 index 00000000..68ef48c4 --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="gke-pvm-killer" +TARGET_PATH="gke-pvm-killer" +GCP_PROJECT_ID="example-gce-project" +TARGET="gke-pvm-killer" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/gke-pvm-killer/scripts/bash.include-test.sh b/compiled/gke-pvm-killer/scripts/bash.include-test.sh new file mode 100755 index 00000000..782002b7 --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "gke-pvm-killer" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "gke-pvm-killer" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/gke-pvm-killer/scripts/gcloud b/compiled/gke-pvm-killer/scripts/gcloud new file mode 100755 index 00000000..6341cb37 --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project example-gce-project "$@" diff --git a/compiled/gke-pvm-killer/scripts/gcloud.include b/compiled/gke-pvm-killer/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/gke-pvm-killer/scripts/generate_sa_secrets.sh 
b/compiled/gke-pvm-killer/scripts/generate_sa_secrets.sh deleted file mode 100755 index 01be6911..00000000 --- a/compiled/gke-pvm-killer/scripts/generate_sa_secrets.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -e - -TARGET=gke-pvm-killer - - -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel) -KAPITAN_COMMAND=${ROOT}/kapitan - -echo "Generating secret for gke-pvm-killer@example-gce-project.iam.gserviceaccount.com" -gcloud --project example-gce-project iam service-accounts keys \ -create - \ ---iam-account=gke-pvm-killer@example-gce-project.iam.gserviceaccount.com | ${KAPITAN_COMMAND} refs --write plain:targets/gke-pvm-killer/gke-pvm-killer-service-account --base64 -f - -t ${TARGET} - -echo "Summary of available keys (please remove obsolete ones after deploying changes)" - -gcloud --project example-gce-project iam service-accounts keys \ -list --iam-account=gke-pvm-killer@example-gce-project.iam.gserviceaccount.com - -##### diff --git a/compiled/gke-pvm-killer/scripts/get_project_number b/compiled/gke-pvm-killer/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/gke-pvm-killer/scripts/kapitan.include b/compiled/gke-pvm-killer/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/gke-pvm-killer/scripts/set_reference b/compiled/gke-pvm-killer/scripts/set_reference new file mode 100755 index 00000000..83dfd77b --- /dev/null +++ b/compiled/gke-pvm-killer/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. 
gkms:targets/gke-pvm-killer/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/global/README.md b/compiled/global/README.md index f9bfc86e..51489875 100644 --- a/compiled/global/README.md +++ b/compiled/global/README.md @@ -8,7 +8,6 @@ |[gke-pvm-killer](../gke-pvm-killer/docs/README.md)| |[global](../global/docs/README.md)| |[guestbook-argocd](../guestbook-argocd/docs/README.md)| -|[kapicorp-demo-march](../kapicorp-demo-march/docs/README.md)| |[kapicorp-project-123](../kapicorp-project-123/docs/README.md)| |[kapicorp-terraform-admin](../kapicorp-terraform-admin/docs/README.md)| |[mysql](../mysql/docs/README.md)| diff --git a/compiled/global/docs/README.md b/compiled/global/docs/README.md deleted file mode 100644 index a7dc29e3..00000000 --- a/compiled/global/docs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# global - -||| -| --- | --- | -| **Target** | global | -| **Project** | `not defined`| -| **Cluster** | 'Not defined' | -| **Namespace** | `global` | - - -## Deployments diff --git a/compiled/global/scripts/bash.include b/compiled/global/scripts/bash.include new file mode 100644 index 00000000..4ce7fff7 --- /dev/null +++ b/compiled/global/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="global" +TARGET_PATH="global" +GCP_PROJECT_ID="global" +TARGET="global" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/global/scripts/bash.include-test.sh b/compiled/global/scripts/bash.include-test.sh new file mode 100755 index 00000000..017fd415 --- /dev/null +++ b/compiled/global/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "global" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "global" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/global/scripts/gcloud b/compiled/global/scripts/gcloud new file mode 100755 index 00000000..960ec766 --- /dev/null +++ b/compiled/global/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project global "$@" diff --git a/compiled/global/scripts/gcloud.include b/compiled/global/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/global/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/global/scripts/get_project_number b/compiled/global/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ 
b/compiled/global/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/global/scripts/kapitan.include b/compiled/global/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/global/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/global/scripts/set_reference b/compiled/global/scripts/set_reference new file mode 100755 index 00000000..db49ec5c --- /dev/null +++ b/compiled/global/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/global/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/guestbook-argocd/docs/README.md b/compiled/guestbook-argocd/README.md similarity index 80% rename from compiled/guestbook-argocd/docs/README.md rename to compiled/guestbook-argocd/README.md index caa00418..8ef1a353 100644 --- a/compiled/guestbook-argocd/docs/README.md +++ b/compiled/guestbook-argocd/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | guestbook-argocd | -| **Project** | `not defined`| +| **Project** | `guestbook-argocd`| | **Cluster** | 'Not defined' | | **Namespace** | `guestbook-argocd` | diff --git a/compiled/guestbook-argocd/argocd/a-config-map.yml b/compiled/guestbook-argocd/argocd/a-config-map.yml deleted file mode 100644 index 0d24b4e0..00000000 --- a/compiled/guestbook-argocd/argocd/a-config-map.yml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -data: - xxxx: xxxyyyy - yyyy: zzzyyyy -kind: ConfigMap -metadata: - labels: - name: a-config-map - name: a-config-map - namespace: argocd diff --git a/compiled/guestbook-argocd/argocd/guestbook-app-argo-application.yml b/compiled/guestbook-argocd/argocd/guestbook-app-argo-application.yml deleted file mode 100644 index 2448e461..00000000 --- a/compiled/guestbook-argocd/argocd/guestbook-app-argo-application.yml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - labels: - name: guestbook-app - name: guestbook-app - namespace: argocd -spec: - destination: - namespace: argocd - server: https://kubernetes.default.svc - project: guestbook-project - source: - path: guestbook - plugin: - env: - - name: VAULT_ADDR - value: https://vault.example.com - - name: VAULT_TOKEN - value: MyVaultToken - - name: VAULT_CAPATH - value: /etc/ssl/certs/example.root.crt - - name: VAULT_SKIP_VERIFY - value: 'True' - name: kapitan - repoURL: https://github.com/argoproj/argocd-example-apps.git - syncPolicy: - automated: - selfHeal: true - syncOptions: - - Validate=false - targetRevision: master diff --git a/compiled/guestbook-argocd/argocd/guestbook-project-argo-appproject.yml 
b/compiled/guestbook-argocd/argocd/guestbook-project-argo-appproject.yml deleted file mode 100644 index feca86bb..00000000 --- a/compiled/guestbook-argocd/argocd/guestbook-project-argo-appproject.yml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: argoproj.io/v1alpha1 -kind: AppProject -metadata: - labels: - name: guestbook-project - name: guestbook-project - namespace: argocd -spec: - clusterResourceWhitelist: - - group: '*' - kind: '*' - destinations: - - namespace: '*' - server: '*' - sourceRepos: - - '*' diff --git a/compiled/guestbook-argocd/argocd/repo-my-repo-name.yml b/compiled/guestbook-argocd/argocd/repo-my-repo-name.yml deleted file mode 100644 index 734ff0e7..00000000 --- a/compiled/guestbook-argocd/argocd/repo-my-repo-name.yml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - annotations: - managed-by: argocd.argoproj.io - labels: - argocd.argoproj.io/secret-type: repository - name: repo-my-repo-name - name: repo-my-repo-name - namespace: argocd -stringData: - insecure: 'true' - password: mypsasword123 - type: git - url: https://github.com/argoproj/argocd-example-apps.git -type: Opaque diff --git a/compiled/guestbook-argocd/scripts/bash.include b/compiled/guestbook-argocd/scripts/bash.include new file mode 100644 index 00000000..70f6411b --- /dev/null +++ b/compiled/guestbook-argocd/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="guestbook-argocd" +TARGET_PATH="guestbook-argocd" +GCP_PROJECT_ID="guestbook-argocd" +TARGET="guestbook-argocd" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/guestbook-argocd/scripts/bash.include-test.sh b/compiled/guestbook-argocd/scripts/bash.include-test.sh new file mode 100755 index 00000000..12c7e0ba --- /dev/null +++ b/compiled/guestbook-argocd/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "guestbook-argocd" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "guestbook-argocd" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/guestbook-argocd/scripts/gcloud b/compiled/guestbook-argocd/scripts/gcloud new file mode 100755 index 00000000..a6cf5687 --- /dev/null +++ b/compiled/guestbook-argocd/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project guestbook-argocd "$@" diff --git a/compiled/guestbook-argocd/scripts/gcloud.include b/compiled/guestbook-argocd/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/guestbook-argocd/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/guestbook-argocd/scripts/get_project_number 
b/compiled/guestbook-argocd/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/guestbook-argocd/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/guestbook-argocd/scripts/kapitan.include b/compiled/guestbook-argocd/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/guestbook-argocd/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/guestbook-argocd/scripts/set_reference b/compiled/guestbook-argocd/scripts/set_reference new file mode 100755 index 00000000..c2efbd00 --- /dev/null +++ b/compiled/guestbook-argocd/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/guestbook-argocd/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/docs/README.md b/compiled/kapicorp-demo-march/docs/README.md deleted file mode 100644 index af81a58f..00000000 --- a/compiled/kapicorp-demo-march/docs/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# kapicorp-demo-march - -||| -| --- | --- | -| **Target** | kapicorp-demo-march | -| **Project** | `not defined`| -| **Cluster** | 'Not defined' | -| **Namespace** | `kapicorp-demo-march` | - - -## Deployments diff --git a/compiled/kapicorp-demo-march/manifests/kapicorp-demo-march-namespace.yml b/compiled/kapicorp-demo-march/manifests/kapicorp-demo-march-namespace.yml deleted file mode 100644 index 5bf797ff..00000000 --- a/compiled/kapicorp-demo-march/manifests/kapicorp-demo-march-namespace.yml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - name: kapicorp-demo-march - name: kapicorp-demo-march diff --git a/compiled/kapicorp-demo-march/scripts/terraform.sh b/compiled/kapicorp-demo-march/scripts/terraform.sh deleted file mode 100755 index a930dbeb..00000000 --- a/compiled/kapicorp-demo-march/scripts/terraform.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -set -e # If a command fails, the whole script exit -set -u # Treat unset variables as an error, and immediately exit. -set -o pipefail # this will make your script exit if any command in a pipeline errors - - -DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel) -TARGET_NAME=kapicorp-demo-march -GCP_PROJECT=kapicorp-demo-march - -######################################################################################## -# Check required binaries are installed - -error(){ - echo "${@}" -} - -check_installed() { - CMD=$1 - if ! $(which ${CMD} > /dev/null); then - error "${CMD} not installed. Exiting..." 
- fi -} - -check_installed terraform - -######################################################################################## -# Variables -export DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) # Folder where this script is -export TF_DIR=$(realpath ${DIR}/../terraform) # Folder where TF files are -export TF_DATA_DIR=$(realpath -m ${DIR}/../../../.TF_DATA_DIR/${GCP_PROJECT}) # Folder for TF initialization (preferable outside of compiled) -export OUTPUT_DIR=$(realpath -m ${DIR}/../../../output/${GCP_PROJECT}) # Folder for storing output files (preferable outside of compiled) -export TERRAFORM="terraform" -DEBUG=${DEBUG:-0} - -######################################################################################## -# MAIN - -if [ $DEBUG -ne 0 ]; then - debug -fi - -pushd $TF_DIR &> /dev/null - -terraform "$@" - -if [[ -f $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl ]] -then - mkdir -p $ROOT/resources/state/${TARGET_NAME}/ - cp $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl \ - $ROOT/resources/state/${TARGET_NAME}/.terraform.lock.hcl -fi \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/google_container_cluster.tf.json b/compiled/kapicorp-demo-march/terraform/google_container_cluster.tf.json deleted file mode 100644 index 1fded251..00000000 --- a/compiled/kapicorp-demo-march/terraform/google_container_cluster.tf.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "resource": { - "google_container_cluster": { - "primary": { - "name": "gke-cluster", - "location": "europe-west1", - "initial_node_count": 1, - "node_config": { - "service_account": "${google_service_account.default.email}", - "oauth_scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "labels": { - "target": "kapicorp-demo-march" - } - }, - "depends_on": [ - "google_project_service.enable_container_service" - ], - "timeouts": { - "create": "30m", - "update": "40m" - } - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/google_project.tf.json b/compiled/kapicorp-demo-march/terraform/google_project.tf.json deleted file mode 100644 index d1904f0f..00000000 --- a/compiled/kapicorp-demo-march/terraform/google_project.tf.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "resource": { - "google_project": { - "project": { - "name": "Example Terraform Project", - "project_id": "kapicorp-demo-march", - "billing_account": "017012-945270-0844F0", - "org_id": 163756623419 - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/google_project_service.tf.json b/compiled/kapicorp-demo-march/terraform/google_project_service.tf.json deleted file mode 100644 index b8e1f930..00000000 --- a/compiled/kapicorp-demo-march/terraform/google_project_service.tf.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "resource": { - "google_project_service": { - "enable_cloudbilling_service": { - "service": "cloudbilling.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_iam_service": { - "service": "iam.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_compute_service": { - "service": "storage-component.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_container_service": { - "service": "container.googleapis.com", - "project": "${google_project.project.project_id}" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/google_service_account.tf.json 
b/compiled/kapicorp-demo-march/terraform/google_service_account.tf.json deleted file mode 100644 index 5881529d..00000000 --- a/compiled/kapicorp-demo-march/terraform/google_service_account.tf.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "resource": { - "google_service_account": { - "default": { - "account_id": "gke-sa", - "display_name": "Service Account for GKE" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/provider.tf.json b/compiled/kapicorp-demo-march/terraform/provider.tf.json deleted file mode 100644 index f9f95fe0..00000000 --- a/compiled/kapicorp-demo-march/terraform/provider.tf.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provider": [ - { - "google": { - "project": "kapicorp-demo-march", - "region": "europe-west1", - "zone": "europe-west1-b", - "impersonate_service_account": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" - } - } - ] -} \ No newline at end of file diff --git a/compiled/kapicorp-demo-march/terraform/terraform.tf.json b/compiled/kapicorp-demo-march/terraform/terraform.tf.json deleted file mode 100644 index ce05835b..00000000 --- a/compiled/kapicorp-demo-march/terraform/terraform.tf.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "terraform": { - "required_providers": { - "google": { - "version": "3.46.0" - } - }, - "backend": { - "gcs": { - "bucket": "state-kapicorp-terraform-admin", - "prefix": "terraform/state/kapicorp-demo-march" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-project-123/docs/README.md b/compiled/kapicorp-project-123/README.md similarity index 80% rename from compiled/kapicorp-project-123/docs/README.md rename to compiled/kapicorp-project-123/README.md index 8161fc94..6ad58da0 100644 --- a/compiled/kapicorp-project-123/docs/README.md +++ b/compiled/kapicorp-project-123/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | kapicorp-project-123 | -| **Project** | `not defined`| +| **Project** | `kapicorp-project-123`| | **Cluster** | 'Not defined' | | **Namespace** | `kapicorp-project-123` | diff --git a/compiled/kapicorp-project-123/scripts/bash.include b/compiled/kapicorp-project-123/scripts/bash.include new file mode 100644 index 00000000..9513e864 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="kapicorp-project-123" +TARGET_PATH="kapicorp-project-123" +GCP_PROJECT_ID="kapicorp-project-123" +TARGET="kapicorp-project-123" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy 
+ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/bash.include-test.sh b/compiled/kapicorp-project-123/scripts/bash.include-test.sh new file mode 100755 index 00000000..69d33be1 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "kapicorp-project-123" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "kapicorp-project-123" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/gcloud b/compiled/kapicorp-project-123/scripts/gcloud new file mode 100755 index 00000000..dbf5aa25 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project kapicorp-project-123 "$@" diff --git a/compiled/kapicorp-project-123/scripts/gcloud.include b/compiled/kapicorp-project-123/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/get_project_number b/compiled/kapicorp-project-123/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/kapitan.include b/compiled/kapicorp-project-123/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? 
please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/set_reference b/compiled/kapicorp-project-123/scripts/set_reference new file mode 100755 index 00000000..83dded6e --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/kapicorp-project-123/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/terraform b/compiled/kapicorp-project-123/scripts/terraform new file mode 100755 index 00000000..6f41e4b9 --- /dev/null +++ b/compiled/kapicorp-project-123/scripts/terraform @@ -0,0 +1,46 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +######################################################################################## +# Check required binaries are installed + +error(){ + echo "${@}" +} + +DOCKER_ROOT=/src +TF_DIR=${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/terraform +TF_DATA_DIR="${DOCKER_ROOT}/.TF_DATA_DIR/${TARGET_PATH}" +OUTPUT_DIR="output/${TARGET_PATH}" +LOCK_FILE=${TF_DIR}/.terraform.lock.hcl +STATE_DIR=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/resources/state/${TARGET_PATH} + +DEBUG=${DEBUG:-0} +TERRAFORM_IMAGE=hashicorp/terraform:1.4 + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +TERRAFORM_BINARY="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -e TF_DATA_DIR=${TF_DATA_DIR} \ + -e TF_LOG \ + -w ${DOCKER_ROOT}/${TF_DIR} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + ${TERRAFORM_IMAGE}" + +${TERRAFORM_BINARY} "$@" + + +if [[ -f ${LOCK_FILE} ]] +then + mkdir -p ${STATE_DIR} + cp ${LOCK_FILE} ${STATE_DIR}/.terraform.lock.hcl +fi \ No newline at end of file diff --git a/compiled/kapicorp-project-123/scripts/terraform.sh b/compiled/kapicorp-project-123/scripts/terraform.sh deleted file mode 100755 index 60136968..00000000 --- a/compiled/kapicorp-project-123/scripts/terraform.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -set -e # If a command fails, the whole script exit -set -u # Treat unset variables as an error, and immediately exit. -set -o pipefail # this will make your script exit if any command in a pipeline errors - - -DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel) -TARGET_NAME=kapicorp-project-123 -GCP_PROJECT=kapicorp-project-123 - -######################################################################################## -# Check required binaries are installed - -error(){ - echo "${@}" -} - -check_installed() { - CMD=$1 - if ! $(which ${CMD} > /dev/null); then - error "${CMD} not installed. Exiting..." 
- fi -} - -check_installed terraform - -######################################################################################## -# Variables -export DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) # Folder where this script is -export TF_DIR=$(realpath ${DIR}/../terraform) # Folder where TF files are -export TF_DATA_DIR=$(realpath -m ${DIR}/../../../.TF_DATA_DIR/${GCP_PROJECT}) # Folder for TF initialization (preferable outside of compiled) -export OUTPUT_DIR=$(realpath -m ${DIR}/../../../output/${GCP_PROJECT}) # Folder for storing output files (preferable outside of compiled) -export TERRAFORM="terraform" -DEBUG=${DEBUG:-0} - -######################################################################################## -# MAIN - -if [ $DEBUG -ne 0 ]; then - debug -fi - -pushd $TF_DIR &> /dev/null - -terraform "$@" - -if [[ -f $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl ]] -then - mkdir -p $ROOT/resources/state/${TARGET_NAME}/ - cp $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl \ - $ROOT/resources/state/${TARGET_NAME}/.terraform.lock.hcl -fi \ No newline at end of file diff --git a/compiled/kapicorp-project-123/terraform/gcp_project_id.tf.json b/compiled/kapicorp-project-123/terraform/gcp_project_id.tf.json new file mode 100644 index 00000000..cbe43781 --- /dev/null +++ b/compiled/kapicorp-project-123/terraform/gcp_project_id.tf.json @@ -0,0 +1,13 @@ +{ + "resource": { + "gcp_project_id": { + "main": { + "name": "kapicorp-project-123", + "org_id": "az1oDhA50eU5d2ToHhNFrSaWNqAa1iaosXyZfd6SZQ2", + "auto_create_network": false, + "project_id": "kapicorp-project-123", + "billing_account": "jpzaR_ArxEkpIIljqRpFstsP_yw34RR07D6lAynfwIw" + } + } + } +} \ No newline at end of file diff --git a/compiled/kapicorp-project-123/terraform/gcp_project_id_service.tf.json b/compiled/kapicorp-project-123/terraform/gcp_project_id_service.tf.json new file mode 100644 index 00000000..275e5962 --- /dev/null +++ b/compiled/kapicorp-project-123/terraform/gcp_project_id_service.tf.json @@ -0,0 +1,12 @@ +{ + "resource": { + "gcp_project_id_service": { + "cloudbilling": { + "service": "cloudbilling.googleapis.com" + }, + "iam": { + "service": "iam.googleapis.com" + } + } + } +} \ No newline at end of file diff --git a/compiled/kapicorp-project-123/terraform/google_project.tf.json b/compiled/kapicorp-project-123/terraform/google_project.tf.json deleted file mode 100644 index 07ad453c..00000000 --- a/compiled/kapicorp-project-123/terraform/google_project.tf.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "resource": { - "google_project": { - "project": { - "name": "Example Terraform Project", - "project_id": "kapicorp-project-123", - "billing_account": "017012-945270-0844F0", - "org_id": 163756623419 - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-project-123/terraform/google_project_service.tf.json b/compiled/kapicorp-project-123/terraform/google_project_service.tf.json deleted file mode 100644 index 451b5f7a..00000000 --- a/compiled/kapicorp-project-123/terraform/google_project_service.tf.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "resource": { - "google_project_service": { - "enable_cloudbilling_service": { - "service": "cloudbilling.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_iam_service": { - "service": "iam.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_compute_service": { - "service": "storage-component.googleapis.com", - "project": "${google_project.project.project_id}" - } - } - } -} \ No newline 
at end of file diff --git a/compiled/kapicorp-project-123/terraform/provider.tf.json b/compiled/kapicorp-project-123/terraform/provider.tf.json index 7f87e318..a159301d 100644 --- a/compiled/kapicorp-project-123/terraform/provider.tf.json +++ b/compiled/kapicorp-project-123/terraform/provider.tf.json @@ -1,12 +1,10 @@ { - "provider": [ - { - "google": { - "project": "kapicorp-project-123", - "region": "europe-west1", - "zone": "europe-west1-b", - "impersonate_service_account": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" - } + "provider": { + "google": { + "project": "kapicorp-project-123", + "region": "europe-west1", + "zone": "europe-west1-b", + "impersonate_service_account": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" } - ] + } } \ No newline at end of file diff --git a/compiled/kapicorp-project-123/terraform/terraform.tf.json b/compiled/kapicorp-project-123/terraform/terraform.tf.json index 905eb891..2278efba 100644 --- a/compiled/kapicorp-project-123/terraform/terraform.tf.json +++ b/compiled/kapicorp-project-123/terraform/terraform.tf.json @@ -1,15 +1,15 @@ { "terraform": { - "required_providers": { - "google": { - "version": "3.46.0" - } - }, "backend": { "gcs": { - "bucket": "state-kapicorp-terraform-admin", + "bucket": "kapicorp-terraform-state", "prefix": "terraform/state/kapicorp-project-123" } + }, + "required_providers": { + "google": { + "version": "4.64.0" + } } } } \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/docs/README.md b/compiled/kapicorp-terraform-admin/README.md similarity index 80% rename from compiled/kapicorp-terraform-admin/docs/README.md rename to compiled/kapicorp-terraform-admin/README.md index 52e6c84c..1c7ba1db 100644 --- a/compiled/kapicorp-terraform-admin/docs/README.md +++ b/compiled/kapicorp-terraform-admin/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | kapicorp-terraform-admin | -| **Project** | `not defined`| +| **Project** | `kapicorp-terraform-admin`| | **Cluster** | 'Not defined' | | **Namespace** | `kapicorp-terraform-admin` | diff --git a/compiled/kapicorp-terraform-admin/scripts/bash.include b/compiled/kapicorp-terraform-admin/scripts/bash.include new file mode 100644 index 00000000..1ca68775 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="kapicorp-terraform-admin" +TARGET_PATH="kapicorp-terraform-admin" +GCP_PROJECT_ID="kapicorp-terraform-admin" +TARGET="kapicorp-terraform-admin" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + 
+KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/bash.include-test.sh b/compiled/kapicorp-terraform-admin/scripts/bash.include-test.sh new file mode 100755 index 00000000..e3748858 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "kapicorp-terraform-admin" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "kapicorp-terraform-admin" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/gcloud b/compiled/kapicorp-terraform-admin/scripts/gcloud new file mode 100755 index 00000000..a0128042 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project kapicorp-terraform-admin "$@" diff --git a/compiled/kapicorp-terraform-admin/scripts/gcloud.include b/compiled/kapicorp-terraform-admin/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/get_project_number b/compiled/kapicorp-terraform-admin/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/kapitan.include b/compiled/kapicorp-terraform-admin/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? 
please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/set_reference b/compiled/kapicorp-terraform-admin/scripts/set_reference new file mode 100755 index 00000000..36a082d9 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/kapicorp-terraform-admin/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/terraform b/compiled/kapicorp-terraform-admin/scripts/terraform new file mode 100755 index 00000000..6f41e4b9 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/scripts/terraform @@ -0,0 +1,46 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +######################################################################################## +# Check required binaries are installed + +error(){ + echo "${@}" +} + +DOCKER_ROOT=/src +TF_DIR=${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/terraform +TF_DATA_DIR="${DOCKER_ROOT}/.TF_DATA_DIR/${TARGET_PATH}" +OUTPUT_DIR="output/${TARGET_PATH}" +LOCK_FILE=${TF_DIR}/.terraform.lock.hcl +STATE_DIR=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/resources/state/${TARGET_PATH} + +DEBUG=${DEBUG:-0} +TERRAFORM_IMAGE=hashicorp/terraform:1.4 + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +TERRAFORM_BINARY="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -e TF_DATA_DIR=${TF_DATA_DIR} \ + -e TF_LOG \ + -w ${DOCKER_ROOT}/${TF_DIR} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + ${TERRAFORM_IMAGE}" + +${TERRAFORM_BINARY} "$@" + + +if [[ -f ${LOCK_FILE} ]] +then + mkdir -p ${STATE_DIR} + cp ${LOCK_FILE} ${STATE_DIR}/.terraform.lock.hcl +fi \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/scripts/terraform.sh b/compiled/kapicorp-terraform-admin/scripts/terraform.sh deleted file mode 100755 index cfdd9155..00000000 --- a/compiled/kapicorp-terraform-admin/scripts/terraform.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -set -e # If a command fails, the whole script exit -set -u # Treat unset variables as an error, and immediately exit. -set -o pipefail # this will make your script exit if any command in a pipeline errors - - -DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel) -TARGET_NAME=kapicorp-terraform-admin -GCP_PROJECT=kapicorp-terraform-admin - -######################################################################################## -# Check required binaries are installed - -error(){ - echo "${@}" -} - -check_installed() { - CMD=$1 - if ! $(which ${CMD} > /dev/null); then - error "${CMD} not installed. Exiting..." 
- fi -} - -check_installed terraform - -######################################################################################## -# Variables -export DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) # Folder where this script is -export TF_DIR=$(realpath ${DIR}/../terraform) # Folder where TF files are -export TF_DATA_DIR=$(realpath -m ${DIR}/../../../.TF_DATA_DIR/${GCP_PROJECT}) # Folder for TF initialization (preferable outside of compiled) -export OUTPUT_DIR=$(realpath -m ${DIR}/../../../output/${GCP_PROJECT}) # Folder for storing output files (preferable outside of compiled) -export TERRAFORM="terraform" -DEBUG=${DEBUG:-0} - -######################################################################################## -# MAIN - -if [ $DEBUG -ne 0 ]; then - debug -fi - -pushd $TF_DIR &> /dev/null - -terraform "$@" - -if [[ -f $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl ]] -then - mkdir -p $ROOT/resources/state/${TARGET_NAME}/ - cp $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl \ - $ROOT/resources/state/${TARGET_NAME}/.terraform.lock.hcl -fi \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/gcp_project_id.tf.json b/compiled/kapicorp-terraform-admin/terraform/gcp_project_id.tf.json new file mode 100644 index 00000000..75c6e863 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/terraform/gcp_project_id.tf.json @@ -0,0 +1,13 @@ +{ + "resource": { + "gcp_project_id": { + "main": { + "name": "kapicorp-terraform-admin", + "org_id": "az1oDhA50eU5d2ToHhNFrSaWNqAa1iaosXyZfd6SZQ2", + "auto_create_network": false, + "project_id": "kapicorp-terraform-admin", + "billing_account": "jpzaR_ArxEkpIIljqRpFstsP_yw34RR07D6lAynfwIw" + } + } + } +} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/gcp_project_id_service.tf.json b/compiled/kapicorp-terraform-admin/terraform/gcp_project_id_service.tf.json new file mode 100644 index 00000000..275e5962 --- /dev/null +++ b/compiled/kapicorp-terraform-admin/terraform/gcp_project_id_service.tf.json @@ -0,0 +1,12 @@ +{ + "resource": { + "gcp_project_id_service": { + "cloudbilling": { + "service": "cloudbilling.googleapis.com" + }, + "iam": { + "service": "iam.googleapis.com" + } + } + } +} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_organization_iam_member.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_organization_iam_member.tf.json deleted file mode 100644 index a2f9fbb9..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_organization_iam_member.tf.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "resource": { - "google_organization_iam_member": { - "terraform_owner": { - "org_id": 163756623419, - "role": "roles/owner", - "member": "serviceAccount:${google_service_account.terraform.email}" - }, - "terraform_billing": { - "org_id": 163756623419, - "role": "roles/billing.user", - "member": "serviceAccount:${google_service_account.terraform.email}" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_project.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_project.tf.json deleted file mode 100644 index c9cbb869..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_project.tf.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "resource": { - "google_project": { - "project": { - "name": "Terraform Admin Project", - "project_id": "kapicorp-terraform-admin", - "billing_account": "017012-945270-0844F0", - "org_id": 163756623419 - } - } - } -} \ 
No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_project_service.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_project_service.tf.json deleted file mode 100644 index c7b18f71..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_project_service.tf.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "resource": { - "google_project_service": { - "enable_cloudbilling_service": { - "service": "cloudbilling.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_iam_service": { - "service": "iam.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_compute_service": { - "service": "storage-component.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_container_service": { - "service": "container.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_cloudresourcemanager_service": { - "service": "cloudresourcemanager.googleapis.com", - "project": "${google_project.project.project_id}" - }, - "enable_serviceusage_service": { - "service": "serviceusage.googleapis.com", - "project": "${google_project.project.project_id}" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_service_account.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_service_account.tf.json deleted file mode 100644 index e0eeccb5..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_service_account.tf.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "resource": { - "google_service_account": { - "terraform": { - "account_id": "terraform", - "description": "Terraform Service Account" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket.tf.json deleted file mode 100644 index 154db3fb..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket.tf.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "resource": { - "google_storage_bucket": { - "terraform-state": { - "name": "state-kapicorp-terraform-admin", - "location": "EU", - "storage_class": "MULTI_REGIONAL" - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket_iam_binding.tf.json b/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket_iam_binding.tf.json deleted file mode 100644 index 5ab40bda..00000000 --- a/compiled/kapicorp-terraform-admin/terraform/google_storage_bucket_iam_binding.tf.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "resource": { - "google_storage_bucket_iam_binding": { - "binding": { - "bucket": "${google_storage_bucket.terraform-state.name}", - "role": "roles/storage.admin", - "members": [ - "serviceAccount:${google_service_account.terraform.email}" - ] - } - } - } -} \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/provider.tf.json b/compiled/kapicorp-terraform-admin/terraform/provider.tf.json index b9ed8167..ef1f7023 100644 --- a/compiled/kapicorp-terraform-admin/terraform/provider.tf.json +++ b/compiled/kapicorp-terraform-admin/terraform/provider.tf.json @@ -1,11 +1,10 @@ { - "provider": [ - { - "google": { - "project": "kapicorp-terraform-admin", - "region": "europe-west1", - "zone": "europe-west1-b" - } + "provider": { + "google": { + "project": "kapicorp-terraform-admin", + "region": "europe-west1", + "zone": "europe-west1-b", + 
"impersonate_service_account": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" } - ] + } } \ No newline at end of file diff --git a/compiled/kapicorp-terraform-admin/terraform/terraform.tf.json b/compiled/kapicorp-terraform-admin/terraform/terraform.tf.json index 0ce09c6c..1897521a 100644 --- a/compiled/kapicorp-terraform-admin/terraform/terraform.tf.json +++ b/compiled/kapicorp-terraform-admin/terraform/terraform.tf.json @@ -1,13 +1,14 @@ { "terraform": { - "required_providers": { - "google": { - "version": "3.46.0" + "backend": { + "gcs": { + "bucket": "kapicorp-terraform-state", + "prefix": "terraform/state/kapicorp-terraform-admin" } }, - "backend": { - "local": { - "path": "../../../state/terraform.tfstate" + "required_providers": { + "google": { + "version": "4.64.0" } } } diff --git a/compiled/mysql/docs/README.md b/compiled/mysql/README.md similarity index 89% rename from compiled/mysql/docs/README.md rename to compiled/mysql/README.md index c36751e9..a54f73e9 100644 --- a/compiled/mysql/docs/README.md +++ b/compiled/mysql/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | mysql | -| **Project** | `not defined`| +| **Project** | `mysql`| | **Cluster** | 'Not defined' | | **Namespace** | `mysql` | diff --git a/compiled/mysql/manifests/mysql-bundle.yml b/compiled/mysql/manifests/mysql-bundle.yml index 3b754f88..caf47875 100644 --- a/compiled/mysql/manifests/mysql-bundle.yml +++ b/compiled/mysql/manifests/mysql-bundle.yml @@ -57,7 +57,7 @@ spec: - name: secrets secret: defaultMode: 420 - secretName: mysql-13f7ba31 + secretName: mysql-a61a1e7c updateStrategy: rollingUpdate: partition: 0 diff --git a/compiled/mysql/manifests/mysql-secret.yml b/compiled/mysql/manifests/mysql-secret.yml index 474a0784..3e74a688 100644 --- a/compiled/mysql/manifests/mysql-secret.yml +++ b/compiled/mysql/manifests/mysql-secret.yml @@ -6,6 +6,6 @@ kind: Secret metadata: labels: name: mysql - name: mysql-13f7ba31 + name: mysql-a61a1e7c namespace: mysql type: Opaque diff --git a/compiled/mysql/scripts/bash.include b/compiled/mysql/scripts/bash.include new file mode 100644 index 00000000..b004c021 --- /dev/null +++ b/compiled/mysql/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="mysql" +TARGET_PATH="mysql" +GCP_PROJECT_ID="mysql" +TARGET="mysql" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source 
${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/mysql/scripts/bash.include-test.sh b/compiled/mysql/scripts/bash.include-test.sh new file mode 100755 index 00000000..0af5e8ea --- /dev/null +++ b/compiled/mysql/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "mysql" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "mysql" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/mysql/scripts/gcloud b/compiled/mysql/scripts/gcloud new file mode 100755 index 00000000..e83531a4 --- /dev/null +++ b/compiled/mysql/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project mysql "$@" diff --git a/compiled/mysql/scripts/gcloud.include b/compiled/mysql/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/mysql/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/mysql/scripts/get_project_number b/compiled/mysql/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/mysql/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/mysql/scripts/kapitan.include b/compiled/mysql/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/mysql/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/mysql/scripts/set_reference b/compiled/mysql/scripts/set_reference new file mode 100755 index 00000000..56ab433b --- /dev/null +++ b/compiled/mysql/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? 
"Pass a reference as first argument: e.g. gkms:targets/mysql/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/postgres-proxy/docs/README.md b/compiled/postgres-proxy/README.md similarity index 100% rename from compiled/postgres-proxy/docs/README.md rename to compiled/postgres-proxy/README.md diff --git a/compiled/postgres-proxy/manifests/postgres-proxy-scaling.yml b/compiled/postgres-proxy/manifests/postgres-proxy-scaling.yml index d8b48d83..6bd1f4fd 100644 --- a/compiled/postgres-proxy/manifests/postgres-proxy-scaling.yml +++ b/compiled/postgres-proxy/manifests/postgres-proxy-scaling.yml @@ -1,24 +1,3 @@ -apiVersion: autoscaling.k8s.io/v1beta2 -kind: VerticalPodAutoscaler -metadata: - labels: - app.kapicorp.dev/component: postgres-proxy - name: postgres-proxy - tier: db - name: postgres-proxy - namespace: postgres-proxy -spec: - resourcePolicy: - containerPolicies: - - containerName: istio-proxy - mode: 'Off' - targetRef: - apiVersion: apps/v1 - kind: Deployment - name: postgres-proxy - updatePolicy: - updateMode: Auto ---- apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: @@ -34,17 +13,19 @@ spec: name: postgres-proxy tier: db --- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler metadata: labels: app.kapicorp.dev/component: postgres-proxy name: postgres-proxy + tier: db name: postgres-proxy namespace: postgres-proxy spec: - minAvailable: 2 - selector: - matchLabels: - name: postgres-proxy - tier: db + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: postgres-proxy + updatePolicy: + updateMode: Auto diff --git a/compiled/postgres-proxy/scripts/bash.include b/compiled/postgres-proxy/scripts/bash.include new file mode 100644 index 00000000..5f89dc32 --- /dev/null +++ b/compiled/postgres-proxy/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="postgres-proxy" +TARGET_PATH="postgres-proxy" +GCP_PROJECT_ID="example-project" +TARGET="postgres-proxy" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + 
CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/postgres-proxy/scripts/bash.include-test.sh b/compiled/postgres-proxy/scripts/bash.include-test.sh new file mode 100755 index 00000000..0daf3ad2 --- /dev/null +++ b/compiled/postgres-proxy/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "postgres-proxy" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "postgres-proxy" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/postgres-proxy/scripts/gcloud b/compiled/postgres-proxy/scripts/gcloud new file mode 100755 index 00000000..b05160de --- /dev/null +++ b/compiled/postgres-proxy/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project example-project "$@" diff --git a/compiled/postgres-proxy/scripts/gcloud.include b/compiled/postgres-proxy/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/postgres-proxy/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + 
+GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/postgres-proxy/scripts/generate_sa_secrets.sh b/compiled/postgres-proxy/scripts/generate_sa_secrets.sh deleted file mode 100755 index 587a8520..00000000 --- a/compiled/postgres-proxy/scripts/generate_sa_secrets.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -e - -TARGET=postgres-proxy - - -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel) -KAPITAN_COMMAND=${ROOT}/kapitan - -echo "Generating secret for postgres-proxy@example-project.iam.gserviceaccount.com" -gcloud --project example-project iam service-accounts keys \ -create - \ ---iam-account=postgres-proxy@example-project.iam.gserviceaccount.com | ${KAPITAN_COMMAND} refs --write plain:targets/postgres-proxy/postgres-proxy-service-account --base64 -f - -t ${TARGET} - -echo "Summary of available keys (please remove obsolete ones after deploying changes)" - -gcloud --project example-project iam service-accounts keys \ -list --iam-account=postgres-proxy@example-project.iam.gserviceaccount.com - -##### diff --git a/compiled/postgres-proxy/scripts/get_project_number b/compiled/postgres-proxy/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/postgres-proxy/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/postgres-proxy/scripts/kapitan.include b/compiled/postgres-proxy/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/postgres-proxy/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/postgres-proxy/scripts/set_reference b/compiled/postgres-proxy/scripts/set_reference new file mode 100755 index 00000000..5a376612 --- /dev/null +++ b/compiled/postgres-proxy/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. 
gkms:targets/postgres-proxy/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/pritunl/docs/README.md b/compiled/pritunl/README.md similarity index 92% rename from compiled/pritunl/docs/README.md rename to compiled/pritunl/README.md index 0c6f4f7d..f9a3d0e6 100644 --- a/compiled/pritunl/docs/README.md +++ b/compiled/pritunl/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | pritunl | -| **Project** | `not defined`| +| **Project** | `pritunl`| | **Cluster** | kind | | **Namespace** | `pritunl` | diff --git a/compiled/pritunl/scripts/apply.sh b/compiled/pritunl/scripts/apply similarity index 58% rename from compiled/pritunl/scripts/apply.sh rename to compiled/pritunl/scripts/apply index 459a52f4..bb1a9f51 100755 --- a/compiled/pritunl/scripts/apply.sh +++ b/compiled/pritunl/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! -z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/pritunl/scripts/bash.include b/compiled/pritunl/scripts/bash.include new file mode 100644 index 00000000..e0bf23a3 --- /dev/null +++ b/compiled/pritunl/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="pritunl" +TARGET_PATH="pritunl" +GCP_PROJECT_ID="pritunl" +TARGET="pritunl" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" 
+KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/pritunl/scripts/bash.include-test.sh b/compiled/pritunl/scripts/bash.include-test.sh new file mode 100755 index 00000000..f0416deb --- /dev/null +++ b/compiled/pritunl/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "pritunl" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "pritunl" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/pritunl/scripts/delete_completed b/compiled/pritunl/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/pritunl/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/pritunl/scripts/gcloud b/compiled/pritunl/scripts/gcloud new file mode 100755 index 00000000..bc84746a --- /dev/null +++ b/compiled/pritunl/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project pritunl "$@" diff --git a/compiled/pritunl/scripts/gcloud.include b/compiled/pritunl/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/pritunl/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if 
running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/pritunl/scripts/get_project_number b/compiled/pritunl/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/pritunl/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/pritunl/scripts/kapitan.include b/compiled/pritunl/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/pritunl/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/pritunl/scripts/kubectl b/compiled/pritunl/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/pritunl/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/pritunl/scripts/kubectl.sh b/compiled/pritunl/scripts/kubectl.sh deleted file mode 100755 index 8518f541..00000000 --- a/compiled/pritunl/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context pritunl" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/pritunl/scripts/set_reference b/compiled/pritunl/scripts/set_reference new file mode 100755 index 00000000..7e8ff7b0 --- /dev/null +++ b/compiled/pritunl/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. 
gkms:targets/pritunl/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/pritunl/scripts/setup_cluster b/compiled/pritunl/scripts/setup_cluster new file mode 100755 index 00000000..3934a455 --- /dev/null +++ b/compiled/pritunl/scripts/setup_cluster @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +KIND="kind" +$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" +$KIND export kubeconfig diff --git a/compiled/pritunl/scripts/setup_cluster.sh b/compiled/pritunl/scripts/setup_cluster.sh deleted file mode 100755 index 14da73a8..00000000 --- a/compiled/pritunl/scripts/setup_cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -KIND="kind" -$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" -$KIND export kubeconfig diff --git a/compiled/pritunl/scripts/setup_context b/compiled/pritunl/scripts/setup_context new file mode 100755 index 00000000..a95f0f3a --- /dev/null +++ b/compiled/pritunl/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster kind-kind --user kind-kind --namespace pritunl \ No newline at end of file diff --git a/compiled/pritunl/scripts/setup_context.sh b/compiled/pritunl/scripts/setup_context.sh deleted file mode 100755 index 3e05af2b..00000000 --- a/compiled/pritunl/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context pritunl --cluster kind-kind --user kind-kind --namespace pritunl diff --git a/compiled/prod-sockshop/docs/README.md b/compiled/prod-sockshop/README.md similarity index 100% rename from compiled/prod-sockshop/docs/README.md rename to compiled/prod-sockshop/README.md diff --git a/compiled/prod-sockshop/manifests/sockshop.kapicorp.com-secret.yml b/compiled/prod-sockshop/manifests/sockshop.kapicorp.com-secret.yml index 9967ee28..86b654db 100644 --- a/compiled/prod-sockshop/manifests/sockshop.kapicorp.com-secret.yml +++ b/compiled/prod-sockshop/manifests/sockshop.kapicorp.com-secret.yml @@ -7,5 +7,4 @@ metadata: labels: name: sockshop.kapicorp.com name: sockshop.kapicorp.com - namespace: prod-sockshop type: kubernetes.io/tls diff --git a/compiled/prod-sockshop/scripts/apply.sh b/compiled/prod-sockshop/scripts/apply similarity index 58% rename from compiled/prod-sockshop/scripts/apply.sh rename to compiled/prod-sockshop/scripts/apply index c0eaef2b..17043674 100755 --- a/compiled/prod-sockshop/scripts/apply.sh +++ b/compiled/prod-sockshop/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! 
echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! -z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/bash.include b/compiled/prod-sockshop/scripts/bash.include new file mode 100644 index 00000000..9376d1e5 --- /dev/null +++ b/compiled/prod-sockshop/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="prod-sockshop" +TARGET_PATH="prod-sockshop" +GCP_PROJECT_ID="kapitan-demo" +TARGET="prod-sockshop" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/bash.include-test.sh b/compiled/prod-sockshop/scripts/bash.include-test.sh new file mode 100755 index 00000000..b625bbbf --- /dev/null +++ b/compiled/prod-sockshop/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "prod-sockshop" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "prod-sockshop" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/delete_completed b/compiled/prod-sockshop/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/prod-sockshop/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/gcloud b/compiled/prod-sockshop/scripts/gcloud new file mode 100755 index 00000000..faee61f8 --- /dev/null +++ b/compiled/prod-sockshop/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project kapitan-demo "$@" diff --git a/compiled/prod-sockshop/scripts/gcloud.include b/compiled/prod-sockshop/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/prod-sockshop/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v 
${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/get_project_number b/compiled/prod-sockshop/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/prod-sockshop/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/kapitan.include b/compiled/prod-sockshop/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/prod-sockshop/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/kubectl b/compiled/prod-sockshop/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/prod-sockshop/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/kubectl.sh b/compiled/prod-sockshop/scripts/kubectl.sh deleted file mode 100755 index a899bbec..00000000 --- a/compiled/prod-sockshop/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context prod-sockshop" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/set_reference b/compiled/prod-sockshop/scripts/set_reference new file mode 100755 index 00000000..8f3d5e03 --- /dev/null +++ b/compiled/prod-sockshop/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. 
gkms:targets/prod-sockshop/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/setup_cluster b/compiled/prod-sockshop/scripts/setup_cluster new file mode 100755 index 00000000..09b7e1a8 --- /dev/null +++ b/compiled/prod-sockshop/scripts/setup_cluster @@ -0,0 +1,21 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +CLUSTER_INFO=$(${KUBECTL_COMMAND} cluster-info || (setup_kubectl && ${KUBECTL_COMMAND} cluster-info)) + +CLUSTER_PUBLIC_IP=$(echo ${CLUSTER_INFO} | egrep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b" | head -n 1 | iconv -f utf-8 -t ascii//translit) +INVENTORY_CLUSTER_PUBLIC_IP="UNKNOWN" + +# Check if the cluster public IP has changed +if [[ "${CLUSTER_PUBLIC_IP}" != "${INVENTORY_CLUSTER_PUBLIC_IP}" ]] +then + setup_kubectl +fi +${GCLOUD_COMMAND} container clusters get-credentials demo --zone europe-west1-b --project kapitan-demo diff --git a/compiled/prod-sockshop/scripts/setup_cluster.sh b/compiled/prod-sockshop/scripts/setup_cluster.sh deleted file mode 100755 index 0b52ccc5..00000000 --- a/compiled/prod-sockshop/scripts/setup_cluster.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -GCLOUD="gcloud" -${GCLOUD} container clusters get-credentials demo --zone europe-west1-b --project kapitan-demo diff --git a/compiled/prod-sockshop/scripts/setup_context b/compiled/prod-sockshop/scripts/setup_context new file mode 100755 index 00000000..2a2d15dc --- /dev/null +++ b/compiled/prod-sockshop/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster gke_kapitan-demo_europe-west1-b_demo --user gke_kapitan-demo_europe-west1-b_demo --namespace prod-sockshop \ No newline at end of file diff --git a/compiled/prod-sockshop/scripts/setup_context.sh b/compiled/prod-sockshop/scripts/setup_context.sh deleted file mode 100755 index c79b6772..00000000 --- a/compiled/prod-sockshop/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context prod-sockshop --cluster gke_kapitan-demo_europe-west1-b_demo --user gke_kapitan-demo_europe-west1-b_demo --namespace prod-sockshop diff --git a/compiled/sock-shop/docs/README.md b/compiled/sock-shop/README.md similarity index 97% rename from compiled/sock-shop/docs/README.md rename to compiled/sock-shop/README.md index 25e3aad7..17928103 100644 --- a/compiled/sock-shop/docs/README.md +++ b/compiled/sock-shop/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | sock-shop | -| **Project** | `not defined`| +| **Project** | `sock-shop`| | **Cluster** | kind | | **Namespace** | `sock-shop` | diff --git a/compiled/sock-shop/scripts/apply.sh b/compiled/sock-shop/scripts/apply similarity index 58% rename from compiled/sock-shop/scripts/apply.sh rename to compiled/sock-shop/scripts/apply index 16e0345f..e57f19c2 100755 --- a/compiled/sock-shop/scripts/apply.sh +++ b/compiled/sock-shop/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports 
--no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! -z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/sock-shop/scripts/bash.include b/compiled/sock-shop/scripts/bash.include new file mode 100644 index 00000000..d7141f9d --- /dev/null +++ b/compiled/sock-shop/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="sock-shop" +TARGET_PATH="sock-shop" +GCP_PROJECT_ID="sock-shop" +TARGET="sock-shop" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/sock-shop/scripts/bash.include-test.sh b/compiled/sock-shop/scripts/bash.include-test.sh new file mode 100755 index 00000000..b17d6b7c --- /dev/null +++ b/compiled/sock-shop/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "sock-shop" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "sock-shop" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/sock-shop/scripts/delete_completed b/compiled/sock-shop/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/sock-shop/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/sock-shop/scripts/gcloud b/compiled/sock-shop/scripts/gcloud new file mode 100755 index 00000000..52201326 --- /dev/null +++ b/compiled/sock-shop/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project sock-shop "$@" diff --git a/compiled/sock-shop/scripts/gcloud.include b/compiled/sock-shop/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/sock-shop/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v 
$HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/sock-shop/scripts/get_project_number b/compiled/sock-shop/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/sock-shop/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/sock-shop/scripts/kapitan.include b/compiled/sock-shop/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/sock-shop/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/sock-shop/scripts/kubectl b/compiled/sock-shop/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/sock-shop/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/sock-shop/scripts/kubectl.sh b/compiled/sock-shop/scripts/kubectl.sh deleted file mode 100755 index e46a0a47..00000000 --- a/compiled/sock-shop/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context sock-shop" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/sock-shop/scripts/set_reference b/compiled/sock-shop/scripts/set_reference new file mode 100755 index 00000000..1b2c6a28 --- /dev/null +++ b/compiled/sock-shop/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/sock-shop/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/sock-shop/scripts/setup_cluster b/compiled/sock-shop/scripts/setup_cluster new file mode 100755 index 00000000..3934a455 --- /dev/null +++ b/compiled/sock-shop/scripts/setup_cluster @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +KIND="kind" +$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" 
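+# Note: cluster creation is allowed to fail ('|| echo ...') so this script stays
+# idempotent under 'set -o errexit' when the kind cluster already exists; the
+# kubeconfig entry is still refreshed by the export below.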
+$KIND export kubeconfig diff --git a/compiled/sock-shop/scripts/setup_cluster.sh b/compiled/sock-shop/scripts/setup_cluster.sh deleted file mode 100755 index 14da73a8..00000000 --- a/compiled/sock-shop/scripts/setup_cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -KIND="kind" -$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" -$KIND export kubeconfig diff --git a/compiled/sock-shop/scripts/setup_context b/compiled/sock-shop/scripts/setup_context new file mode 100755 index 00000000..1fb2452d --- /dev/null +++ b/compiled/sock-shop/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster kind-kind --user kind-kind --namespace sock-shop \ No newline at end of file diff --git a/compiled/sock-shop/scripts/setup_context.sh b/compiled/sock-shop/scripts/setup_context.sh deleted file mode 100755 index afb94495..00000000 --- a/compiled/sock-shop/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context sock-shop --cluster kind-kind --user kind-kind --namespace sock-shop diff --git a/compiled/tesoro/docs/README.md b/compiled/tesoro/README.md similarity index 89% rename from compiled/tesoro/docs/README.md rename to compiled/tesoro/README.md index ed9c9896..ed767f17 100644 --- a/compiled/tesoro/docs/README.md +++ b/compiled/tesoro/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | tesoro | -| **Project** | `not defined`| +| **Project** | `tesoro`| | **Cluster** | kind | | **Namespace** | `tesoro` | diff --git a/compiled/tesoro/scripts/apply.sh b/compiled/tesoro/scripts/apply similarity index 58% rename from compiled/tesoro/scripts/apply.sh rename to compiled/tesoro/scripts/apply index bde339bd..6c76c696 100755 --- a/compiled/tesoro/scripts/apply.sh +++ b/compiled/tesoro/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! 
-z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/tesoro/scripts/bash.include b/compiled/tesoro/scripts/bash.include new file mode 100644 index 00000000..c4e42ddc --- /dev/null +++ b/compiled/tesoro/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="tesoro" +TARGET_PATH="tesoro" +GCP_PROJECT_ID="tesoro" +TARGET="tesoro" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/tesoro/scripts/bash.include-test.sh b/compiled/tesoro/scripts/bash.include-test.sh new file mode 100755 index 00000000..f8368f45 --- /dev/null +++ b/compiled/tesoro/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "tesoro" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "tesoro" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/tesoro/scripts/delete_completed b/compiled/tesoro/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/tesoro/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/tesoro/scripts/gcloud b/compiled/tesoro/scripts/gcloud new file mode 100755 index 00000000..6c4cfb1a --- /dev/null +++ b/compiled/tesoro/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project tesoro "$@" diff --git a/compiled/tesoro/scripts/gcloud.include b/compiled/tesoro/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/tesoro/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ 
+ -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/tesoro/scripts/generate_tesoro_certs.sh b/compiled/tesoro/scripts/generate_tesoro_certs.sh index 355aa3a9..02a951c9 100755 --- a/compiled/tesoro/scripts/generate_tesoro_certs.sh +++ b/compiled/tesoro/scripts/generate_tesoro_certs.sh @@ -1,11 +1,9 @@ #!/bin/bash -set -e +# generated with Kapitan -SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" -trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT +source $(dirname ${BASH_SOURCE[0]})/bash.include NAMESPACE=tesoro -TARGET_NAME=tesoro # Generates new certificates CACERT_KEY=rootCA.key @@ -15,15 +13,16 @@ CERT_PEM=cert.pem CN=tesoro.${NAMESPACE}.svc pushd ${SCRIPT_TMP_DIR} -openssl genrsa -out ${CACERT_KEY} 4096 > /dev/null -openssl req -x509 -new -nodes -key ${CACERT_KEY} -subj "/CN=CA-${CN}" -sha256 -days 1024 -out ${CACERT_PEM} > /dev/null + openssl genrsa -out ${CACERT_KEY} 4096 > /dev/null + openssl req -x509 -new -nodes -key ${CACERT_KEY} -subj "/CN=CA-${CN}" -sha256 -days 1024 -out ${CACERT_PEM} > /dev/null -openssl genrsa -out ${CERT_KEY} 2048 > /dev/null -openssl req -new -sha256 -key ${CERT_KEY} -subj "/CN=${CN}" -out csr.csr >/dev/null -openssl x509 -req -in csr.csr -CA ${CACERT_PEM} -extfile <(printf "subjectAltName=DNS:${CN}") -CAkey ${CACERT_KEY} -CAcreateserial -out ${CERT_PEM} -days 500 -sha256 > /dev/null -openssl x509 -in ${CERT_PEM} -noout + openssl genrsa -out ${CERT_KEY} 2048 > /dev/null + openssl req -new -sha256 -key ${CERT_KEY} -subj "/CN=${CN}" -out csr.csr >/dev/null + openssl x509 -req -in csr.csr -CA ${CACERT_PEM} -extfile <(printf "subjectAltName=DNS:${CN}") -CAkey ${CACERT_KEY} -CAcreateserial -out ${CERT_PEM} -days 500 -sha256 > /dev/null + openssl x509 -in ${CERT_PEM} -noout popd -kapitan refs -t tesoro --write plain:targets/tesoro/kapicorp-tesoro-cert-pem --base64 -f ${SCRIPT_TMP_DIR}/${CERT_PEM} -kapitan refs -t tesoro --write plain:targets/tesoro/kapicorp-tesoro-cert-key --base64 -f ${SCRIPT_TMP_DIR}/${CERT_KEY} -kapitan refs -t tesoro --write plain:targets/tesoro/kapicorp-tesoro-cacert-pem --base64 -f ${SCRIPT_TMP_DIR}/${CACERT_PEM} \ No newline at end of file + +cat ${SCRIPT_TMP_DIR}/${CERT_PEM} | set_reference plain:targets/tesoro/kapicorp-tesoro-cert-pem --base64 +cat ${SCRIPT_TMP_DIR}/${CERT_KEY} | set_reference plain:targets/tesoro/kapicorp-tesoro-cert-key --base64 +cat ${SCRIPT_TMP_DIR}/${CACERT_PEM} | set_reference plain:targets/tesoro/kapicorp-tesoro-cacert-pem --base64 \ No newline at end of file diff --git a/compiled/tesoro/scripts/get_project_number b/compiled/tesoro/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/tesoro/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/tesoro/scripts/kapitan.include b/compiled/tesoro/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/tesoro/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? 
please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/tesoro/scripts/kubectl b/compiled/tesoro/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/tesoro/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/tesoro/scripts/kubectl.sh b/compiled/tesoro/scripts/kubectl.sh deleted file mode 100755 index 9b4573b6..00000000 --- a/compiled/tesoro/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context tesoro" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/tesoro/scripts/set_reference b/compiled/tesoro/scripts/set_reference new file mode 100755 index 00000000..a4edd9df --- /dev/null +++ b/compiled/tesoro/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/tesoro/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/tesoro/scripts/setup_cluster b/compiled/tesoro/scripts/setup_cluster new file mode 100755 index 00000000..3934a455 --- /dev/null +++ b/compiled/tesoro/scripts/setup_cluster @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +KIND="kind" +$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" +$KIND export kubeconfig diff --git a/compiled/tesoro/scripts/setup_cluster.sh b/compiled/tesoro/scripts/setup_cluster.sh deleted file mode 100755 index 14da73a8..00000000 --- a/compiled/tesoro/scripts/setup_cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -KIND="kind" -$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" 
-$KIND export kubeconfig diff --git a/compiled/tesoro/scripts/setup_context b/compiled/tesoro/scripts/setup_context new file mode 100755 index 00000000..30d9d1f9 --- /dev/null +++ b/compiled/tesoro/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster kind-kind --user kind-kind --namespace tesoro \ No newline at end of file diff --git a/compiled/tesoro/scripts/setup_context.sh b/compiled/tesoro/scripts/setup_context.sh deleted file mode 100755 index a6cdcec6..00000000 --- a/compiled/tesoro/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context tesoro --cluster kind-kind --user kind-kind --namespace tesoro diff --git a/compiled/tutorial/docs/README.md b/compiled/tutorial/README.md similarity index 89% rename from compiled/tutorial/docs/README.md rename to compiled/tutorial/README.md index bdaf2173..ecd927f1 100644 --- a/compiled/tutorial/docs/README.md +++ b/compiled/tutorial/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | tutorial | -| **Project** | `not defined`| +| **Project** | `tutorial`| | **Cluster** | kind | | **Namespace** | `tutorial` | diff --git a/compiled/tutorial/scripts/apply.sh b/compiled/tutorial/scripts/apply similarity index 59% rename from compiled/tutorial/scripts/apply.sh rename to compiled/tutorial/scripts/apply index 5f0f8e4f..db485107 100755 --- a/compiled/tutorial/scripts/apply.sh +++ b/compiled/tutorial/scripts/apply @@ -1,26 +1,15 @@ #!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" +# generated with Kapitan -FILE=${1:-} +source $(dirname ${BASH_SOURCE[0]})/bash.include -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi +FILE=${1:-} -## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${DIR}/kubectl.sh apply --recursive -f "${FILEPATH}" + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } - - - if [[ ! 
-z $FILE ]] then # Apply files passed at the command line @@ -38,7 +27,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -47,4 +36,4 @@ else apply "${DEPLOY_PATH}" fi done -fi +fi \ No newline at end of file diff --git a/compiled/tutorial/scripts/bash.include b/compiled/tutorial/scripts/bash.include new file mode 100644 index 00000000..421d8238 --- /dev/null +++ b/compiled/tutorial/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="tutorial" +TARGET_PATH="tutorial" +GCP_PROJECT_ID="tutorial" +TARGET="tutorial" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/tutorial/scripts/bash.include-test.sh b/compiled/tutorial/scripts/bash.include-test.sh new file mode 100755 index 00000000..8b47663a --- /dev/null +++ b/compiled/tutorial/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "tutorial" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "tutorial" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/tutorial/scripts/delete_completed b/compiled/tutorial/scripts/delete_completed new file mode 100755 index 00000000..6910da53 --- /dev/null +++ b/compiled/tutorial/scripts/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed \ No newline at end of file diff --git a/compiled/tutorial/scripts/gcloud b/compiled/tutorial/scripts/gcloud new file mode 100755 index 00000000..d78007c2 --- /dev/null +++ b/compiled/tutorial/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project tutorial "$@" diff --git a/compiled/tutorial/scripts/gcloud.include b/compiled/tutorial/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/tutorial/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v 
$HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/tutorial/scripts/get_project_number b/compiled/tutorial/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/tutorial/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/tutorial/scripts/kapitan.include b/compiled/tutorial/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/tutorial/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/tutorial/scripts/kubectl b/compiled/tutorial/scripts/kubectl new file mode 100755 index 00000000..af3d4574 --- /dev/null +++ b/compiled/tutorial/scripts/kubectl @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi \ No newline at end of file diff --git a/compiled/tutorial/scripts/kubectl.sh b/compiled/tutorial/scripts/kubectl.sh deleted file mode 100755 index 3dbec1cb..00000000 --- a/compiled/tutorial/scripts/kubectl.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -DIR=$(dirname ${BASH_SOURCE[0]}) -${DIR}/setup_context.sh >/dev/null -if [[ -p /dev/stdin ]] -then - INPUT=$( cat ) -fi -KUBECTL="kubectl --context tutorial" -echo "${INPUT}" | ${KUBECTL} "$@" \ No newline at end of file diff --git a/compiled/tutorial/scripts/set_reference b/compiled/tutorial/scripts/set_reference new file mode 100755 index 00000000..a8b7a688 --- /dev/null +++ b/compiled/tutorial/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/tutorial/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/compiled/tutorial/scripts/setup_cluster b/compiled/tutorial/scripts/setup_cluster new file mode 100755 index 00000000..3934a455 --- /dev/null +++ b/compiled/tutorial/scripts/setup_cluster @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + + +KIND="kind" +$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" 
+$KIND export kubeconfig diff --git a/compiled/tutorial/scripts/setup_cluster.sh b/compiled/tutorial/scripts/setup_cluster.sh deleted file mode 100755 index 14da73a8..00000000 --- a/compiled/tutorial/scripts/setup_cluster.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit - - -KIND="kind" -$KIND create cluster -q --name kind || echo "Kind cluster kind already exists!" -$KIND export kubeconfig diff --git a/compiled/tutorial/scripts/setup_context b/compiled/tutorial/scripts/setup_context new file mode 100755 index 00000000..fadc24a4 --- /dev/null +++ b/compiled/tutorial/scripts/setup_context @@ -0,0 +1,7 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster kind-kind --user kind-kind --namespace tutorial \ No newline at end of file diff --git a/compiled/tutorial/scripts/setup_context.sh b/compiled/tutorial/scripts/setup_context.sh deleted file mode 100755 index bc384e61..00000000 --- a/compiled/tutorial/scripts/setup_context.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -o nounset -o pipefail -o noclobber -o errexit -KUBECTL="kubectl" - - -${KUBECTL} config set-context tutorial --cluster kind-kind --user kind-kind --namespace tutorial diff --git a/compiled/vault/docs/README.md b/compiled/vault/README.md similarity index 89% rename from compiled/vault/docs/README.md rename to compiled/vault/README.md index 722a3726..ad98f3c8 100644 --- a/compiled/vault/docs/README.md +++ b/compiled/vault/README.md @@ -3,7 +3,7 @@ ||| | --- | --- | | **Target** | vault | -| **Project** | `not defined`| +| **Project** | `vault`| | **Cluster** | 'Not defined' | | **Namespace** | `vault` | diff --git a/compiled/vault/manifests/vault-rbac.yml b/compiled/vault/manifests/vault-rbac.yml index a654bc8c..81110b58 100644 --- a/compiled/vault/manifests/vault-rbac.yml +++ b/compiled/vault/manifests/vault-rbac.yml @@ -38,6 +38,7 @@ metadata: roleRef: apiGroup: rbac.authorization.k8s.io kind: Role + name: vault subjects: - kind: ServiceAccount name: vault diff --git a/compiled/vault/scripts/bash.include b/compiled/vault/scripts/bash.include new file mode 100644 index 00000000..be90cb1b --- /dev/null +++ b/compiled/vault/scripts/bash.include @@ -0,0 +1,57 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +TARGET_NAME="vault" +TARGET_PATH="vault" +GCP_PROJECT_ID="vault" +TARGET="vault" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} 
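+# ROOT and DIR are legacy aliases for KAPITAN_ABSOLUTE_BASEDIR and SCRIPT_RELATIVE_DIR.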
+DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT \ No newline at end of file diff --git a/compiled/vault/scripts/bash.include-test.sh b/compiled/vault/scripts/bash.include-test.sh new file mode 100755 index 00000000..bd9e903b --- /dev/null +++ b/compiled/vault/scripts/bash.include-test.sh @@ -0,0 +1,35 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + + +testTargetName() { + assertEquals ${TARGET_NAME} "vault" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "vault" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. 
${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 \ No newline at end of file diff --git a/compiled/vault/scripts/gcloud b/compiled/vault/scripts/gcloud new file mode 100755 index 00000000..c4c3d554 --- /dev/null +++ b/compiled/vault/scripts/gcloud @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + + ${GCLOUD_BINARY} --project vault "$@" diff --git a/compiled/vault/scripts/gcloud.include b/compiled/vault/scripts/gcloud.include new file mode 100644 index 00000000..5395fb45 --- /dev/null +++ b/compiled/vault/scripts/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" \ No newline at end of file diff --git a/compiled/vault/scripts/get_project_number b/compiled/vault/scripts/get_project_number new file mode 100755 index 00000000..3079638c --- /dev/null +++ b/compiled/vault/scripts/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number \ No newline at end of file diff --git a/compiled/vault/scripts/kapitan.include b/compiled/vault/scripts/kapitan.include new file mode 100644 index 00000000..a36bfdc6 --- /dev/null +++ b/compiled/vault/scripts/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} \ No newline at end of file diff --git a/compiled/vault/scripts/set_reference b/compiled/vault/scripts/set_reference new file mode 100755 index 00000000..01d9bc0a --- /dev/null +++ b/compiled/vault/scripts/set_reference @@ -0,0 +1,9 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + + +REFERENCE=${1? 
"Pass a reference as first argument: e.g. gkms:targets/vault/reference_name"} +shift + +set_reference ${REFERENCE} "$@" \ No newline at end of file diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.lock b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.lock deleted file mode 100644 index b2db77b6..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: common - repository: https://charts.bitnami.com/bitnami - version: 1.13.1 -digest: sha256:1056dac8da880ed967a191e8d9eaf04766f77bda66a5715456d5dd4494a4a942 -generated: "2022-04-29T17:57:20.256061606Z" diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.yaml deleted file mode 100644 index 6cae5f6d..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/Chart.yaml +++ /dev/null @@ -1,29 +0,0 @@ -annotations: - category: Infrastructure -apiVersion: v2 -appVersion: 1.12.1 -dependencies: -- name: common - repository: https://charts.bitnami.com/bitnami - tags: - - bitnami-common - version: 1.x.x -description: The RabbitMQ Cluster Kubernetes Operator automates provisioning, management, - and operations of RabbitMQ clusters running on Kubernetes. -home: https://github.com/rabbitmq/cluster-operator -icon: https://bitnami.com/assets/stacks/rabbitmq-cluster-operator/img/rabbitmq-cluster-operator-stack-220x234.png -keywords: -- rabbitmq -- operator -- infrastructure -- message queue -- AMQP -kubeVersion: '>= 1.19.0-0' -maintainers: -- email: containers@bitnami.com - name: Bitnami -name: rabbitmq-cluster-operator -sources: -- https://github.com/bitnami/bitnami-docker-rabbitmq-cluster-operator -- https://github.com/rabbitmq/cluster-operator -version: 2.6.1 diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/README.md b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/README.md deleted file mode 100644 index fab3fe78..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/README.md +++ /dev/null @@ -1,502 +0,0 @@ - - -# RabbitMQ Cluster Operator packaged by Bitnami - -The RabbitMQ Cluster Kubernetes Operator automates provisioning, management, and operations of RabbitMQ clusters running on Kubernetes. - -[Overview of RabbitMQ Cluster Operator](https://github.com/rabbitmq/cluster-operator) - -Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. - -## TL;DR - -```console -$ helm repo add bitnami https://charts.bitnami.com/bitnami -$ helm install my-release bitnami/rabbitmq-cluster-operator -``` - -## Introduction - -Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads. - -This chart bootstraps a [RabbitMQ Cluster Operator](https://www.rabbitmq.com/kubernetes/operator/operator-overview.html) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). 
Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. - -## Prerequisites - -- Kubernetes 1.19+ -- Helm 3.2.0+ -- PV provisioner support in the underlying infrastructure - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -helm install my-release bitnami/rabbitmq-cluster-operators -``` - -The command deploy the RabbitMQ Cluster Kubernetes Operator on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Differences between the Bitnami RabbitMQ chart and the Bitnami RabbitMQ Operator chart - -In the Bitnami catalog we offer both the *bitnami/rabbitmq* and *bitnami/rabbitmq-operator* charts. Each solution covers different needs and use cases. - -The *bitnami/rabbitmq* chart deploys a single RabbitMQ installation using a Kubernetes StatefulSet object (together with Services, PVCs, ConfigMaps, etc.). The figure below shows the deployed objects in the cluster after executing *helm install*: - -``` - +--------------+ +-----+ - | | | | - Service | RabbitMQ +<------------+ PVC | -<-------------------+ | | | - | StatefulSet | +-----+ - | | - +-----------+--+ - ^ +------------+ - | | | - +----------------+ Configmaps | - | Secrets | - +------------+ - -``` - -Its lifecycle is managed using Helm and, at the RabbitMQ container level, the following operations are automated: persistence management, configuration based on environment variables and plugin initialization. The StatefulSet do not require any ServiceAccounts with special RBAC privileges so this solution would fit better in more restricted Kubernetes installations. - -The *bitnami/rabbitmq-operator* chart deploys a RabbitMQ Operator installation using a Kubernetes Deployment. The figure below shows the RabbitMQ operator deployment after executing *helm install*: - -``` -+--------------------+ -| | +---------------+ -| RabbitMQ Operator | | | -| | | RBAC | -| Deployment | | Privileges | -+-------+------------+ +-------+-------+ - ^ | - | +-----------------+ | - +---+ Service Account +<----+ - +-----------------+ -``` - -The operator will extend the Kubernetes API with the following object: *RabbitmqCluster*. From that moment, the user will be able to deploy objects of these kinds and the previously deployed Operator will take care of deploying all the required StatefulSets, ConfigMaps and Services for running a RabbitMQ instance. Its lifecycle is managed using *kubectl* on the RabbitmqCluster objects. 
The following figure shows the deployed objects after deploying a *RabbitmqCluster* object using *kubectl*: - -``` - +--------------------+ - | | +---------------+ - | RabbitMQ Operator | | | - | | | RBAC | - | Deployment | | Privileges | - +-------+------------+ +-------+-------+ - | ^ | - | | +-----------------+ | - | +---+ Service Account +<----+ - | +-----------------+ - | - | - | - | - | ------------------------------------------------------------------------- - | | | - | | +--------------+ +-----+ | - | | | | | | | - |--->| Service | RabbitMQ +<------------+ PVC | | - | <-------------------+ | | | | - | | StatefulSet | +-----+ | - | | | | - | +-----------+--+ | - | ^ +------------+ | - | | | | | - | +----------------+ Configmaps | | - | | Secrets | | - | +------------+ | - | | - | | - ------------------------------------------------------------------------- - -``` - -This solution allows to easily deploy multiple RabbitMQ instances compared to the *bitnami/rabbitmq* chart. As the operator automatically deploys RabbitMQ installations, the RabbitMQ Operator pods will require a ServiceAccount with privileges to create and destroy multiple Kubernetes objects. This may be problematic for Kubernetes clusters with strict role-based access policies. - -## Parameters - -### Global parameters - -| Name | Description | Value | -| ------------------------- | ----------------------------------------------- | ----- | -| `global.imageRegistry` | Global Docker image registry | `""` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | - - -### Common parameters - -| Name | Description | Value | -| ------------------------ | ---------------------------------------------------- | --------------- | -| `kubeVersion` | Override Kubernetes version | `""` | -| `nameOverride` | String to partially override common.names.fullname | `""` | -| `fullnameOverride` | String to fully override common.names.fullname | `""` | -| `commonLabels` | Labels to add to all deployed objects | `{}` | -| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | -| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | -| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | -| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled) | `false` | - - -### RabbitMQ Cluster Operator Parameters - -| Name | Description | Value | -| ----------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ---------------------------------------- | -| `rabbitmqImage.registry` | RabbitMQ Image registry | `docker.io` | -| `rabbitmqImage.repository` | RabbitMQ Image repository | `bitnami/rabbitmq` | -| `rabbitmqImage.tag` | RabbitMQ Image tag (immutable tags are recommended) | `3.8.28-debian-10-r1` | -| `rabbitmqImage.pullSecrets` | RabbitMQ Image pull secrets | `[]` | -| `credentialUpdaterImage.registry` | RabbitMQ Default User Credential Updater Image registry | `docker.io` | -| `credentialUpdaterImage.repository` | RabbitMQ Default User Credential Updater Image repository | `bitnami/rmq-default-credential-updater` | -| `credentialUpdaterImage.tag` | RabbitMQ Default User Credential Updater Image tag (immutable tags are recommended) | `1.0.2-scratch-r0` | -| `credentialUpdaterImage.pullSecrets` | RabbitMQ Default User Credential Updater Image pull 
secrets | `[]` | -| `clusterOperator.image.registry` | RabbitMQ Cluster Operator image registry | `docker.io` | -| `clusterOperator.image.repository` | RabbitMQ Cluster Operator image repository | `bitnami/rabbitmq-cluster-operator` | -| `clusterOperator.image.tag` | RabbitMQ Cluster Operator image tag (immutable tags are recommended) | `1.12.1-scratch-r2` | -| `clusterOperator.image.pullPolicy` | RabbitMQ Cluster Operator image pull policy | `IfNotPresent` | -| `clusterOperator.image.pullSecrets` | RabbitMQ Cluster Operator image pull secrets | `[]` | -| `clusterOperator.replicaCount` | Number of RabbitMQ Cluster Operator replicas to deploy | `1` | -| `clusterOperator.schedulerName` | Alternative scheduler | `""` | -| `clusterOperator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `clusterOperator.terminationGracePeriodSeconds` | In seconds, time the given to the %%MAIN_CONTAINER_NAME%% pod needs to terminate gracefully | `""` | -| `clusterOperator.livenessProbe.enabled` | Enable livenessProbe on RabbitMQ Cluster Operator nodes | `true` | -| `clusterOperator.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | -| `clusterOperator.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | -| `clusterOperator.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `clusterOperator.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | -| `clusterOperator.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `clusterOperator.readinessProbe.enabled` | Enable readinessProbe on RabbitMQ Cluster Operator nodes | `true` | -| `clusterOperator.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `clusterOperator.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | -| `clusterOperator.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `clusterOperator.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | -| `clusterOperator.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `clusterOperator.startupProbe.enabled` | Enable startupProbe on RabbitMQ Cluster Operator nodes | `false` | -| `clusterOperator.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | -| `clusterOperator.startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | -| `clusterOperator.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `clusterOperator.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | -| `clusterOperator.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `clusterOperator.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `clusterOperator.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `clusterOperator.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `clusterOperator.resources.limits` | The resources limits for the RabbitMQ Cluster Operator containers | `{}` | -| `clusterOperator.resources.requests` | The requested resources for the RabbitMQ Cluster Operator containers | `{}` | -| `clusterOperator.podSecurityContext.enabled` | Enabled RabbitMQ Cluster Operator pods' Security Context | `true` | -| `clusterOperator.podSecurityContext.fsGroup` | Set RabbitMQ Cluster Operator pod's Security Context 
fsGroup | `1001` | -| `clusterOperator.containerSecurityContext.enabled` | Enabled RabbitMQ Cluster Operator containers' Security Context | `true` | -| `clusterOperator.containerSecurityContext.runAsUser` | Set RabbitMQ Cluster Operator containers' Security Context runAsUser | `1001` | -| `clusterOperator.containerSecurityContext.runAsNonRoot` | Force running the container as non root | `true` | -| `clusterOperator.containerSecurityContext.readOnlyRootFilesystem` | mount / (root) as a readonly filesystem on cluster operator containers | `true` | -| `clusterOperator.command` | Override default container command (useful when using custom images) | `[]` | -| `clusterOperator.args` | Override default container args (useful when using custom images) | `[]` | -| `clusterOperator.hostAliases` | RabbitMQ Cluster Operator pods host aliases | `[]` | -| `clusterOperator.podLabels` | Extra labels for RabbitMQ Cluster Operator pods | `{}` | -| `clusterOperator.podAnnotations` | Annotations for RabbitMQ Cluster Operator pods | `{}` | -| `clusterOperator.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `clusterOperator.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `clusterOperator.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `clusterOperator.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | -| `clusterOperator.nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | -| `clusterOperator.affinity` | Affinity for RabbitMQ Cluster Operator pods assignment | `{}` | -| `clusterOperator.nodeSelector` | Node labels for RabbitMQ Cluster Operator pods assignment | `{}` | -| `clusterOperator.tolerations` | Tolerations for RabbitMQ Cluster Operator pods assignment | `[]` | -| `clusterOperator.updateStrategy.type` | RabbitMQ Cluster Operator statefulset strategy type | `RollingUpdate` | -| `clusterOperator.priorityClassName` | RabbitMQ Cluster Operator pods' priorityClassName | `""` | -| `clusterOperator.lifecycleHooks` | for the RabbitMQ Cluster Operator container(s) to automate configuration before or after startup | `{}` | -| `clusterOperator.containerPorts.metrics` | RabbitMQ Cluster Operator container port (used for metrics) | `9782` | -| `clusterOperator.extraEnvVars` | Array with extra environment variables to add to RabbitMQ Cluster Operator nodes | `[]` | -| `clusterOperator.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for RabbitMQ Cluster Operator nodes | `""` | -| `clusterOperator.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for RabbitMQ Cluster Operator nodes | `""` | -| `clusterOperator.extraVolumes` | Optionally specify extra list of additional volumes for the RabbitMQ Cluster Operator pod(s) | `[]` | -| `clusterOperator.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the RabbitMQ Cluster Operator container(s) | `[]` | -| `clusterOperator.sidecars` | Add additional sidecar containers to the RabbitMQ Cluster Operator pod(s) | `[]` | -| `clusterOperator.initContainers` | Add additional init containers to the RabbitMQ Cluster Operator pod(s) | `[]` | -| `clusterOperator.rbac.create` | Specifies whether RBAC resources should be created | `true` | -| `clusterOperator.serviceAccount.create` | Specifies whether a 
ServiceAccount should be created | `true` | -| `clusterOperator.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | -| `clusterOperator.serviceAccount.annotations` | Add annotations | `{}` | -| `clusterOperator.serviceAccount.automountServiceAccountToken` | Automount API credentials for a service account. | `true` | - - -### RabbitMQ Cluster Operator Metrics parameters - -| Name | Description | Value | -| ---------------------------------------------------------- | --------------------------------------------------------------------------- | ------------------------ | -| `clusterOperator.metrics.enabled` | Create a service for accessing the metrics endpoint | `false` | -| `clusterOperator.metrics.service.type` | RabbitMQ Cluster Operator metrics service type | `ClusterIP` | -| `clusterOperator.metrics.service.ports.http` | RabbitMQ Cluster Operator metrics service HTTP port | `80` | -| `clusterOperator.metrics.service.nodePorts.http` | Node port for HTTP | `""` | -| `clusterOperator.metrics.service.clusterIP` | RabbitMQ Cluster Operator metrics service Cluster IP | `""` | -| `clusterOperator.metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `clusterOperator.metrics.service.loadBalancerIP` | RabbitMQ Cluster Operator metrics service Load Balancer IP | `""` | -| `clusterOperator.metrics.service.loadBalancerSourceRanges` | RabbitMQ Cluster Operator metrics service Load Balancer sources | `[]` | -| `clusterOperator.metrics.service.externalTrafficPolicy` | RabbitMQ Cluster Operator metrics service external traffic policy | `Cluster` | -| `clusterOperator.metrics.service.annotations` | Additional custom annotations for RabbitMQ Cluster Operator metrics service | `{}` | -| `clusterOperator.metrics.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `clusterOperator.metrics.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `clusterOperator.metrics.serviceMonitor.enabled` | Specify if a servicemonitor will be deployed for prometheus-operator | `false` | -| `clusterOperator.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | -| `clusterOperator.metrics.serviceMonitor.jobLabel` | Specify the jobLabel to use for the prometheus-operator | `app.kubernetes.io/name` | -| `clusterOperator.metrics.serviceMonitor.honorLabels` | Honor metrics labels | `false` | -| `clusterOperator.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `clusterOperator.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `clusterOperator.metrics.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `""` | -| `clusterOperator.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | -| `clusterOperator.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | -| `clusterOperator.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | - - -### RabbitMQ Messaging Topology Operator Parameters - -| Name | Description | Value | -| --------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ----------------------------------------- | -| `msgTopologyOperator.image.registry` | RabbitMQ Messaging Topology Operator image registry | `docker.io` | -| `msgTopologyOperator.image.repository` | RabbitMQ Messaging Topology Operator image repository | `bitnami/rmq-messaging-topology-operator` | -| `msgTopologyOperator.image.tag` | RabbitMQ Messaging Topology Operator image tag (immutable tags are recommended) | `1.5.0-scratch-r0` | -| `msgTopologyOperator.image.pullPolicy` | RabbitMQ Messaging Topology Operator image pull policy | `IfNotPresent` | -| `msgTopologyOperator.image.pullSecrets` | RabbitMQ Messaging Topology Operator image pull secrets | `[]` | -| `msgTopologyOperator.replicaCount` | Number of RabbitMQ Messaging Topology Operator replicas to deploy | `1` | -| `msgTopologyOperator.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `msgTopologyOperator.schedulerName` | Alternative scheduler | `""` | -| `msgTopologyOperator.terminationGracePeriodSeconds` | In seconds, time the given to the %%MAIN_CONTAINER_NAME%% pod needs to terminate gracefully | `""` | -| `msgTopologyOperator.livenessProbe.enabled` | Enable livenessProbe on RabbitMQ Messaging Topology Operator nodes | `true` | -| `msgTopologyOperator.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | -| `msgTopologyOperator.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `30` | -| `msgTopologyOperator.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | -| `msgTopologyOperator.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | -| `msgTopologyOperator.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `msgTopologyOperator.readinessProbe.enabled` | Enable readinessProbe on RabbitMQ Messaging Topology Operator nodes | `true` | -| `msgTopologyOperator.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | -| `msgTopologyOperator.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `30` | -| `msgTopologyOperator.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | -| `msgTopologyOperator.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | -| `msgTopologyOperator.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `msgTopologyOperator.startupProbe.enabled` | Enable startupProbe on RabbitMQ Messaging Topology Operator nodes | `false` | -| `msgTopologyOperator.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | -| `msgTopologyOperator.startupProbe.periodSeconds` | Period seconds for startupProbe | `30` | -| `msgTopologyOperator.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | -| `msgTopologyOperator.startupProbe.failureThreshold` | Failure 
threshold for startupProbe | `5` | -| `msgTopologyOperator.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `msgTopologyOperator.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `msgTopologyOperator.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `msgTopologyOperator.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `msgTopologyOperator.existingWebhookCertSecret` | name of a secret containing the certificates (use it to avoid certManager creating one) | `""` | -| `msgTopologyOperator.existingWebhookCertCABundle` | PEM-encoded CA Bundle of the existing secret provided in existingWebhookCertSecret (only if useCertManager=false) | `""` | -| `msgTopologyOperator.resources.limits` | The resources limits for the RabbitMQ Messaging Topology Operator containers | `{}` | -| `msgTopologyOperator.resources.requests` | The requested resources for the RabbitMQ Messaging Topology Operator containers | `{}` | -| `msgTopologyOperator.podSecurityContext.enabled` | Enabled RabbitMQ Messaging Topology Operator pods' Security Context | `true` | -| `msgTopologyOperator.podSecurityContext.fsGroup` | Set RabbitMQ Messaging Topology Operator pod's Security Context fsGroup | `1001` | -| `msgTopologyOperator.containerSecurityContext.enabled` | Enabled RabbitMQ Messaging Topology Operator containers' Security Context | `true` | -| `msgTopologyOperator.containerSecurityContext.runAsUser` | Set RabbitMQ Messaging Topology Operator containers' Security Context runAsUser | `1001` | -| `msgTopologyOperator.containerSecurityContext.runAsNonRoot` | Force running the container as non root | `true` | -| `msgTopologyOperator.containerSecurityContext.readOnlyRootFilesystem` | mount / (root) as a readonly filesystem on Messaging Topology Operator | `true` | -| `msgTopologyOperator.fullnameOverride` | String to fully override rmqco.msgTopologyOperator.fullname template | `""` | -| `msgTopologyOperator.command` | Override default container command (useful when using custom images) | `[]` | -| `msgTopologyOperator.args` | Override default container args (useful when using custom images) | `[]` | -| `msgTopologyOperator.hostAliases` | RabbitMQ Messaging Topology Operator pods host aliases | `[]` | -| `msgTopologyOperator.podLabels` | Extra labels for RabbitMQ Messaging Topology Operator pods | `{}` | -| `msgTopologyOperator.podAnnotations` | Annotations for RabbitMQ Messaging Topology Operator pods | `{}` | -| `msgTopologyOperator.podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `msgTopologyOperator.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `msgTopologyOperator.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `msgTopologyOperator.nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | -| `msgTopologyOperator.nodeAffinityPreset.values` | Node label values to match. 
Ignored if `affinity` is set | `[]` | -| `msgTopologyOperator.affinity` | Affinity for RabbitMQ Messaging Topology Operator pods assignment | `{}` | -| `msgTopologyOperator.nodeSelector` | Node labels for RabbitMQ Messaging Topology Operator pods assignment | `{}` | -| `msgTopologyOperator.tolerations` | Tolerations for RabbitMQ Messaging Topology Operator pods assignment | `[]` | -| `msgTopologyOperator.updateStrategy.type` | RabbitMQ Messaging Topology Operator statefulset strategy type | `RollingUpdate` | -| `msgTopologyOperator.priorityClassName` | RabbitMQ Messaging Topology Operator pods' priorityClassName | `""` | -| `msgTopologyOperator.lifecycleHooks` | for the RabbitMQ Messaging Topology Operator container(s) to automate configuration before or after startup | `{}` | -| `msgTopologyOperator.containerPorts.metrics` | RabbitMQ Messaging Topology Operator container port (used for metrics) | `8080` | -| `msgTopologyOperator.extraEnvVars` | Array with extra environment variables to add to RabbitMQ Messaging Topology Operator nodes | `[]` | -| `msgTopologyOperator.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for RabbitMQ Messaging Topology Operator nodes | `""` | -| `msgTopologyOperator.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for RabbitMQ Messaging Topology Operator nodes | `""` | -| `msgTopologyOperator.extraVolumes` | Optionally specify extra list of additional volumes for the RabbitMQ Messaging Topology Operator pod(s) | `[]` | -| `msgTopologyOperator.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the RabbitMQ Messaging Topology Operator container(s) | `[]` | -| `msgTopologyOperator.sidecars` | Add additional sidecar containers to the RabbitMQ Messaging Topology Operator pod(s) | `[]` | -| `msgTopologyOperator.initContainers` | Add additional init containers to the RabbitMQ Messaging Topology Operator pod(s) | `[]` | -| `msgTopologyOperator.service.type` | RabbitMQ Messaging Topology Operator webhook service type | `ClusterIP` | -| `msgTopologyOperator.service.ports.webhook` | RabbitMQ Messaging Topology Operator webhook service HTTP port | `443` | -| `msgTopologyOperator.service.nodePorts.http` | Node port for HTTP | `""` | -| `msgTopologyOperator.service.clusterIP` | RabbitMQ Messaging Topology Operator webhook service Cluster IP | `""` | -| `msgTopologyOperator.service.loadBalancerIP` | RabbitMQ Messaging Topology Operator webhook service Load Balancer IP | `""` | -| `msgTopologyOperator.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `msgTopologyOperator.service.loadBalancerSourceRanges` | RabbitMQ Messaging Topology Operator webhook service Load Balancer sources | `[]` | -| `msgTopologyOperator.service.externalTrafficPolicy` | RabbitMQ Messaging Topology Operator webhook service external traffic policy | `Cluster` | -| `msgTopologyOperator.service.annotations` | Additional custom annotations for RabbitMQ Messaging Topology Operator webhook service | `{}` | -| `msgTopologyOperator.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `msgTopologyOperator.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `msgTopologyOperator.rbac.create` | Specifies whether RBAC resources should be created | `true` | -| `msgTopologyOperator.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| 
`msgTopologyOperator.serviceAccount.name` | The name of the ServiceAccount to use. | `""` | -| `msgTopologyOperator.serviceAccount.annotations` | Add annotations | `{}` | -| `msgTopologyOperator.serviceAccount.automountServiceAccountToken` | Automount API credentials for a service account. | `true` | - - -### RabbitMQ Messaging Topology Operator parameters - -| Name | Description | Value | -| -------------------------------------------------------------- | --------------------------------------------------------------------------- | ------------------------ | -| `msgTopologyOperator.metrics.enabled` | Create a service for accessing the metrics endpoint | `false` | -| `msgTopologyOperator.metrics.service.type` | RabbitMQ Cluster Operator metrics service type | `ClusterIP` | -| `msgTopologyOperator.metrics.service.ports.http` | RabbitMQ Cluster Operator metrics service HTTP port | `80` | -| `msgTopologyOperator.metrics.service.nodePorts.http` | Node port for HTTP | `""` | -| `msgTopologyOperator.metrics.service.clusterIP` | RabbitMQ Cluster Operator metrics service Cluster IP | `""` | -| `msgTopologyOperator.metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `msgTopologyOperator.metrics.service.loadBalancerIP` | RabbitMQ Cluster Operator metrics service Load Balancer IP | `""` | -| `msgTopologyOperator.metrics.service.loadBalancerSourceRanges` | RabbitMQ Cluster Operator metrics service Load Balancer sources | `[]` | -| `msgTopologyOperator.metrics.service.externalTrafficPolicy` | RabbitMQ Cluster Operator metrics service external traffic policy | `Cluster` | -| `msgTopologyOperator.metrics.service.annotations` | Additional custom annotations for RabbitMQ Cluster Operator metrics service | `{}` | -| `msgTopologyOperator.metrics.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `msgTopologyOperator.metrics.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `msgTopologyOperator.metrics.serviceMonitor.enabled` | Specify if a servicemonitor will be deployed for prometheus-operator | `false` | -| `msgTopologyOperator.metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `""` | -| `msgTopologyOperator.metrics.serviceMonitor.jobLabel` | Specify the jobLabel to use for the prometheus-operator | `app.kubernetes.io/name` | -| `msgTopologyOperator.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `msgTopologyOperator.metrics.serviceMonitor.honorLabels` | Honor metrics labels | `false` | -| `msgTopologyOperator.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `msgTopologyOperator.metrics.serviceMonitor.interval` | Scrape interval. 
If not set, the Prometheus default scrape interval is used | `""` | -| `msgTopologyOperator.metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | -| `msgTopologyOperator.metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | -| `msgTopologyOperator.metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | - - -### cert-manager parameters - -| Name | Description | Value | -| ---------------- | ----------------------------------------------------------------- | ------- | -| `useCertManager` | Deploy cert-manager objects (Issuer and Certificate) for webhooks | `false` | - - -See [readme-generator-for-helm](https://github.com/bitnami-labs/readme-generator-for-helm) to create the table. - -The above parameters map to the env variables defined in [bitnami/rabbitmq-cluster-operator](https://github.com/bitnami/bitnami-docker-rabbitmq-cluster-operator). For more information please refer to the [bitnami/rabbitmq-cluster-operator](https://github.com/bitnami/bitnami-docker-rabbitmq-cluster-operator) image documentation. - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, - -```console -helm install my-release \ - --set livenessProbe.enabled=false \ - bitnami/rabbitmq-cluster-operator -``` - -The above command disables the Operator liveness probes. - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -helm install my-release -f values.yaml bitnami/rabbitmq-cluster-operator -``` - -> **Tip**: You can use the default [values.yaml](values.yaml) - -## Configuration and installation details - -### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. - -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. - -### Additional environment variables - -In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. - -```yaml -rabbitmq-cluster-operator: - extraEnvVars: - - name: LOG_LEVEL - value: error -``` - -Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. - -### Sidecars - -If additional containers are needed in the same pod as rabbitmq-cluster-operator (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter. If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter. [Learn more about configuring and using sidecar containers](https://docs.bitnami.com/kubernetes/infrastructure/rabbitmq-cluster-operator/configuration/configure-sidecar-init-containers/). - -### Pod affinity - -This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
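For example, a minimal values sketch (illustrative only; the `disktype=ssd` node label is a placeholder, not something the chart defines) that pins the cluster operator pods to labelled nodes via the documented `clusterOperator.affinity` parameter could look like:

```yaml
clusterOperator:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: disktype        # placeholder node label, adjust to your cluster
                operator: In
                values:
                  - ssd
```

As noted in the parameter tables above, the `podAffinityPreset`, `podAntiAffinityPreset` and `nodeAffinityPreset` values are ignored once `affinity` is set.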
- -As an alternative, use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/master/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. -### Deploying extra resources - -There are cases where you may want to deploy extra objects, such your custom *RabbitmqCluster* objects. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. - -For instance, to deploy your custom *RabbitmqCluster* definition, you can install the RabbitMQ Cluster Operator using the values below: - -```yaml -extraDeploy: - - apiVersion: rabbitmq.com/v1beta1 - kind: RabbitmqCluster - metadata: - name: rabbitmq-custom-configuration - spec: - replicas: 1 - rabbitmq: - additionalConfig: | - log.console.level = debug -``` - -## Troubleshooting - -Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). - -## Upgrading - -### To 2.0.0 - -This new version adds the following components: - - - RabbitMQ Messaging Topology Operator: all the settings are inside the `msgTopologyOperator` section. - - RabbitMQ Default User Credential Updater sidecar: this enables Hashicorp Vault integration for all `RabbitMQCluster` instances. - - `cert-manager` subchart: this is necessary for the RabbitMQ Messaging Topology Webhooks to work. - -As a breaking change, all `rabbitmq-cluster-operator` deployment values were moved to the `clusterOperator` section. - -No issues are expected during upgrades. - -### To 1.0.0 - -The CRD was updated according to the latest changes in the upstream project. Thanks to the improvements in the latest changes, the CRD is not templated anymore abd can be placed under the `crds` directory following [Helm best practices for CRDS](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/). - -You need to manually delete the old CRD before upgrading the release. - -```console -kubectl delete crd rabbitmqclusters.rabbitmq.com -helm upgrade my-release bitnami/rabbitmq-cluster-operator -``` - -## License - -Copyright © 2022 Bitnami - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/Chart.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/Chart.yaml deleted file mode 100644 index e8d2db9d..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -annotations: - category: Infrastructure -apiVersion: v2 -appVersion: 1.13.1 -description: A Library Helm Chart for grouping common logic between bitnami charts. - This chart is not deployable by itself. 
-home: https://github.com/bitnami/charts/tree/master/bitnami/common -icon: https://bitnami.com/downloads/logos/bitnami-mark.png -keywords: -- common -- helper -- template -- function -- bitnami -maintainers: -- email: containers@bitnami.com - name: Bitnami -name: common -sources: -- https://github.com/bitnami/charts -- https://www.bitnami.com/ -type: library -version: 1.13.1 diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/README.md b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/README.md deleted file mode 100644 index 88d13b1d..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/README.md +++ /dev/null @@ -1,347 +0,0 @@ -# Bitnami Common Library Chart - -A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. - -## TL;DR - -```yaml -dependencies: - - name: common - version: 1.x.x - repository: https://charts.bitnami.com/bitnami -``` - -```bash -$ helm dependency update -``` - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "common.names.fullname" . }} -data: - myvalue: "Hello World" -``` - -## Introduction - -This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. - -Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. - -## Prerequisites - -- Kubernetes 1.19+ -- Helm 3.2.0+ - -## Parameters - -The following table lists the helpers available in the library which are scoped in different sections. - -### Affinities - -| Helper identifier | Description | Expected Input | -|-------------------------------|------------------------------------------------------|------------------------------------------------| -| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | -| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | -| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | -| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | - -### Capabilities - -| Helper identifier | Description | Expected Input | -|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| -| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | -| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | -| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | -| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | -| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | -| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. 
| `.` Chart context | -| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | -| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | -| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | -| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | -| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | - -### Errors - -| Helper identifier | Description | Expected Input | -|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| -| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | - -### Images - -| Helper identifier | Description | Expected Input | -|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| -| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | -| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | -| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | - -### Ingress - -| Helper identifier | Description | Expected Input | -|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | -| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | -| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | -| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | - -### Labels - -| Helper identifier | Description | Expected Input | -|-----------------------------|-----------------------------------------------------------------------------|-------------------| -| 
`common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | -| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | - -### Names - -| Helper identifier | Description | Expected Input | -|--------------------------|------------------------------------------------------------|-------------------| -| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | -| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | -| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | -| `common.names.chart` | Chart name plus version | `.` Chart context | - -### Secrets - -| Helper identifier | Description | Expected Input | -|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | -| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | -| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | -| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | - -### Storage - -| Helper identifier | Description | Expected Input | -|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| -| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | - -### TplValues - -| Helper identifier | Description | Expected Input | -|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| -| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | - -### Utils - -| Helper identifier | Description | Expected Input | -|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| -| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | -| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | -| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | -| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | - -### Validations - -| Helper identifier | Description | Expected Input | -|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | -| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | -| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | -| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | -| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis™ are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | -| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | -| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. 
| - -### Warnings - -| Helper identifier | Description | Expected Input | -|------------------------------|----------------------------------|------------------------------------------------------------| -| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | - -## Special input schemas - -### ImageRoot - -```yaml -registry: - type: string - description: Docker registry where the image is located - example: docker.io - -repository: - type: string - description: Repository and image name - example: bitnami/nginx - -tag: - type: string - description: image tag - example: 1.16.1-debian-10-r63 - -pullPolicy: - type: string - description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - -pullSecrets: - type: array - items: - type: string - description: Optionally specify an array of imagePullSecrets (evaluated as templates). - -debug: - type: boolean - description: Set to true if you would like to see extra information on logs - example: false - -## An instance would be: -# registry: docker.io -# repository: bitnami/nginx -# tag: 1.16.1-debian-10-r63 -# pullPolicy: IfNotPresent -# debug: false -``` - -### Persistence - -```yaml -enabled: - type: boolean - description: Whether enable persistence. - example: true - -storageClass: - type: string - description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. - example: "-" - -accessMode: - type: string - description: Access mode for the Persistent Volume Storage. - example: ReadWriteOnce - -size: - type: string - description: Size the Persistent Volume Storage. - example: 8Gi - -path: - type: string - description: Path to be persisted. - example: /bitnami - -## An instance would be: -# enabled: true -# storageClass: "-" -# accessMode: ReadWriteOnce -# size: 8Gi -# path: /bitnami -``` - -### ExistingSecret - -```yaml -name: - type: string - description: Name of the existing secret. - example: mySecret -keyMapping: - description: Mapping between the expected key name and the name of the key in the existing secret. - type: object - -## An instance would be: -# name: mySecret -# keyMapping: -# password: myPasswordKey -``` - -#### Example of use - -When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. - -```yaml -# templates/secret.yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "common.names.fullname" . }} - labels: - app: {{ include "common.names.fullname" . }} -type: Opaque -data: - password: {{ .Values.password | b64enc | quote }} - -# templates/dpl.yaml ---- -... - env: - - name: PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} - key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} -... 
- -# values.yaml ---- -name: mySecret -keyMapping: - password: myPasswordKey -``` - -### ValidateValue - -#### NOTES.txt - -```console -{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} -{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} - -{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} -``` - -If we force those values to be empty we will see some alerts - -```console -$ helm install test mychart --set path.to.value00="",path.to.value01="" - 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: - - export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) - - 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: - - export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) -``` - -## Upgrading - -### To 1.0.0 - -[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. - -**What changes were introduced in this major version?** - -- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. -- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. -- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts - -**Considerations when upgrading to this version** - -- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues -- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore -- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 - -**Useful links** - -- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ -- https://helm.sh/docs/topics/v2_v3_migration/ -- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ - -## License - -Copyright © 2022 Bitnami - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
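Purely as an illustrative sketch of how the helpers documented above (and defined in the template sources removed below) fit together in a consuming chart, a Deployment template might combine the names, labels, affinities and images helpers as follows; the `.Values.image` key and the `app` container name are placeholders, not values defined by this library:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "common.names.fullname" . }}
  labels: {{- include "common.labels.standard" . | nindent 4 }}
spec:
  selector:
    matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels: {{- include "common.labels.standard" . | nindent 8 }}
    spec:
      affinity:
        # soft anti-affinity, spreading replicas across nodes (topologyKey kubernetes.io/hostname)
        podAntiAffinity: {{- include "common.affinities.pods" (dict "type" "soft" "context" $) | nindent 10 }}
      containers:
        - name: app  # placeholder container name
          image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
```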
diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_affinities.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_affinities.tpl deleted file mode 100644 index 189ea403..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_affinities.tpl +++ /dev/null @@ -1,102 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Return a soft nodeAffinity definition -{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes.soft" -}} -preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: {{ .key }} - operator: In - values: - {{- range .values }} - - {{ . | quote }} - {{- end }} - weight: 1 -{{- end -}} - -{{/* -Return a hard nodeAffinity definition -{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes.hard" -}} -requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .key }} - operator: In - values: - {{- range .values }} - - {{ . | quote }} - {{- end }} -{{- end -}} - -{{/* -Return a nodeAffinity definition -{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes" -}} - {{- if eq .type "soft" }} - {{- include "common.affinities.nodes.soft" . -}} - {{- else if eq .type "hard" }} - {{- include "common.affinities.nodes.hard" . -}} - {{- end -}} -{{- end -}} - -{{/* -Return a soft podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} -*/}} -{{- define "common.affinities.pods.soft" -}} -{{- $component := default "" .component -}} -{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} -preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} - {{- if not (empty $component) }} - {{ printf "app.kubernetes.io/component: %s" $component }} - {{- end }} - {{- range $key, $value := $extraMatchLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - namespaces: - - {{ .context.Release.Namespace | quote }} - topologyKey: kubernetes.io/hostname - weight: 1 -{{- end -}} - -{{/* -Return a hard podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} -*/}} -{{- define "common.affinities.pods.hard" -}} -{{- $component := default "" .component -}} -{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} -requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} - {{- if not (empty $component) }} - {{ printf "app.kubernetes.io/component: %s" $component }} - {{- end }} - {{- range $key, $value := $extraMatchLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - namespaces: - - {{ .context.Release.Namespace | quote }} - topologyKey: kubernetes.io/hostname -{{- end -}} - -{{/* -Return a podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.pods" -}} - {{- if eq .type "soft" }} - {{- include 
"common.affinities.pods.soft" . -}} - {{- else if eq .type "hard" }} - {{- include "common.affinities.pods.hard" . -}} - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_capabilities.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_capabilities.tpl deleted file mode 100644 index 4ec8321e..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_capabilities.tpl +++ /dev/null @@ -1,139 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Return the target Kubernetes version -*/}} -{{- define "common.capabilities.kubeVersion" -}} -{{- if .Values.global }} - {{- if .Values.global.kubeVersion }} - {{- .Values.global.kubeVersion -}} - {{- else }} - {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} - {{- end -}} -{{- else }} -{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for poddisruptionbudget. -*/}} -{{- define "common.capabilities.policy.apiVersion" -}} -{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "policy/v1beta1" -}} -{{- else -}} -{{- print "policy/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "common.capabilities.networkPolicy.apiVersion" -}} -{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for cronjob. -*/}} -{{- define "common.capabilities.cronjob.apiVersion" -}} -{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "batch/v1beta1" -}} -{{- else -}} -{{- print "batch/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "common.capabilities.deployment.apiVersion" -}} -{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for statefulset. -*/}} -{{- define "common.capabilities.statefulset.apiVersion" -}} -{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "apps/v1beta1" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "common.capabilities.ingress.apiVersion" -}} -{{- if .Values.ingress -}} -{{- if .Values.ingress.apiVersion -}} -{{- .Values.ingress.apiVersion -}} -{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end }} -{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for RBAC resources. 
-*/}} -{{- define "common.capabilities.rbac.apiVersion" -}} -{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "rbac.authorization.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "rbac.authorization.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for CRDs. -*/}} -{{- define "common.capabilities.crd.apiVersion" -}} -{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "apiextensions.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "apiextensions.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for APIService. -*/}} -{{- define "common.capabilities.apiService.apiVersion" -}} -{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "apiregistration.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "apiregistration.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Returns true if the used Helm version is 3.3+. -A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. -This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. -**To be removed when the catalog's minimun Helm version is 3.3** -*/}} -{{- define "common.capabilities.supportsHelmVersion" -}} -{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_errors.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_errors.tpl deleted file mode 100644 index a79cc2e3..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_errors.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Through error when upgrading using empty passwords values that must not be empty. - -Usage: -{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} -{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} -{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} - -Required password params: - - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. - - context - Context - Required. Parent context. -*/}} -{{- define "common.errors.upgrade.passwords.empty" -}} - {{- $validationErrors := join "" .validationErrors -}} - {{- if and $validationErrors .context.Release.IsUpgrade -}} - {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} - {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} - {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} - {{- $errorString = print $errorString "\n%s" -}} - {{- printf $errorString $validationErrors | fail -}} - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_images.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_images.tpl deleted file mode 100644 index 42ffbc72..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_images.tpl +++ /dev/null @@ -1,75 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Return the proper image name -{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} -*/}} -{{- define "common.images.image" -}} -{{- $registryName := .imageRoot.registry -}} -{{- $repositoryName := .imageRoot.repository -}} -{{- $tag := .imageRoot.tag | toString -}} -{{- if .global }} - {{- if .global.imageRegistry }} - {{- $registryName = .global.imageRegistry -}} - {{- end -}} -{{- end -}} -{{- if $registryName }} -{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- else -}} -{{- printf "%s:%s" $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) -{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} -*/}} -{{- define "common.images.pullSecrets" -}} - {{- $pullSecrets := list }} - - {{- if .global }} - {{- range .global.imagePullSecrets -}} - {{- $pullSecrets = append $pullSecrets . -}} - {{- end -}} - {{- end -}} - - {{- range .images -}} - {{- range .pullSecrets -}} - {{- $pullSecrets = append $pullSecrets . -}} - {{- end -}} - {{- end -}} - - {{- if (not (empty $pullSecrets)) }} -imagePullSecrets: - {{- range $pullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names evaluating values as templates -{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} -*/}} -{{- define "common.images.renderPullSecrets" -}} - {{- $pullSecrets := list }} - {{- $context := .context }} - - {{- if $context.Values.global }} - {{- range $context.Values.global.imagePullSecrets -}} - {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} - {{- end -}} - {{- end -}} - - {{- range .images -}} - {{- range .pullSecrets -}} - {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} - {{- end -}} - {{- end -}} - - {{- if (not (empty $pullSecrets)) }} -imagePullSecrets: - {{- range $pullSecrets }} - - name: {{ . 
}} - {{- end }} - {{- end }} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_ingress.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_ingress.tpl deleted file mode 100644 index 8caf73a6..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_ingress.tpl +++ /dev/null @@ -1,68 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Generate backend entry that is compatible with all Kubernetes API versions. - -Usage: -{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} - -Params: - - serviceName - String. Name of an existing service backend - - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. - - context - Dict - Required. The context for the template evaluation. -*/}} -{{- define "common.ingress.backend" -}} -{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} -{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} -serviceName: {{ .serviceName }} -servicePort: {{ .servicePort }} -{{- else -}} -service: - name: {{ .serviceName }} - port: - {{- if typeIs "string" .servicePort }} - name: {{ .servicePort }} - {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} - number: {{ .servicePort | int }} - {{- end }} -{{- end -}} -{{- end -}} - -{{/* -Print "true" if the API pathType field is supported -Usage: -{{ include "common.ingress.supportsPathType" . }} -*/}} -{{- define "common.ingress.supportsPathType" -}} -{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} -{{- print "false" -}} -{{- else -}} -{{- print "true" -}} -{{- end -}} -{{- end -}} - -{{/* -Returns true if the ingressClassname field is supported -Usage: -{{ include "common.ingress.supportsIngressClassname" . }} -*/}} -{{- define "common.ingress.supportsIngressClassname" -}} -{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "false" -}} -{{- else -}} -{{- print "true" -}} -{{- end -}} -{{- end -}} - -{{/* -Return true if cert-manager required annotations for TLS signed -certificates are set in the Ingress annotations -Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations -Usage: -{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} -*/}} -{{- define "common.ingress.certManagerRequest" -}} -{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_labels.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_labels.tpl deleted file mode 100644 index 252066c7..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_labels.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Kubernetes standard labels -*/}} -{{- define "common.labels.standard" -}} -app.kubernetes.io/name: {{ include "common.names.name" . }} -helm.sh/chart: {{ include "common.names.chart" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector -*/}} -{{- define "common.labels.matchLabels" -}} -app.kubernetes.io/name: {{ include "common.names.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_names.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_names.tpl deleted file mode 100644 index c8574d17..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_names.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "common.names.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "common.names.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "common.names.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a default fully qualified dependency name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -Usage: -{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} -*/}} -{{- define "common.names.dependency.fullname" -}} -{{- if .chartValues.fullnameOverride -}} -{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .chartName .chartValues.nameOverride -}} -{{- if contains $name .context.Release.Name -}} -{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts. -*/}} -{{- define "common.names.namespace" -}} -{{- if .Values.namespaceOverride -}} -{{- .Values.namespaceOverride -}} -{{- else -}} -{{- .Release.Namespace -}} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_secrets.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_secrets.tpl deleted file mode 100644 index a53fb44f..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_secrets.tpl +++ /dev/null @@ -1,140 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Generate secret name. 
- -Usage: -{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} - -Params: - - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user - to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. - +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret - - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. - - context - Dict - Required. The context for the template evaluation. -*/}} -{{- define "common.secrets.name" -}} -{{- $name := (include "common.names.fullname" .context) -}} - -{{- if .defaultNameSuffix -}} -{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- with .existingSecret -}} -{{- if not (typeIs "string" .) -}} -{{- with .name -}} -{{- $name = . -}} -{{- end -}} -{{- else -}} -{{- $name = . -}} -{{- end -}} -{{- end -}} - -{{- printf "%s" $name -}} -{{- end -}} - -{{/* -Generate secret key. - -Usage: -{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} - -Params: - - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user - to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. - +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret - - key - String - Required. Name of the key in the secret. -*/}} -{{- define "common.secrets.key" -}} -{{- $key := .key -}} - -{{- if .existingSecret -}} - {{- if not (typeIs "string" .existingSecret) -}} - {{- if .existingSecret.keyMapping -}} - {{- $key = index .existingSecret.keyMapping $.key -}} - {{- end -}} - {{- end }} -{{- end -}} - -{{- printf "%s" $key -}} -{{- end -}} - -{{/* -Generate secret password or retrieve one if already created. - -Usage: -{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} - -Params: - - secret - String - Required - Name of the 'Secret' resource where the password is stored. - - key - String - Required - Name of the key in the secret. - - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. - - length - int - Optional - Length of the generated random password. - - strong - Boolean - Optional - Whether to add symbols to the generated random password. - - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. - - context - Context - Required - Parent context. - -The order in which this function returns a secret password: - 1. Already existing 'Secret' resource - (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) - 2. 
Password provided via the values.yaml - (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) - 3. Randomly generated secret password - (A new random secret password with the length specified in the 'length' parameter will be generated and returned) - -*/}} -{{- define "common.secrets.passwords.manage" -}} - -{{- $password := "" }} -{{- $subchart := "" }} -{{- $chartName := default "" .chartName }} -{{- $passwordLength := default 10 .length }} -{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} -{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} -{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} -{{- if $secretData }} - {{- if hasKey $secretData .key }} - {{- $password = index $secretData .key }} - {{- else }} - {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} - {{- end -}} -{{- else if $providedPasswordValue }} - {{- $password = $providedPasswordValue | toString | b64enc | quote }} -{{- else }} - - {{- if .context.Values.enabled }} - {{- $subchart = $chartName }} - {{- end -}} - - {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} - {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} - {{- $passwordValidationErrors := list $requiredPasswordError -}} - {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} - - {{- if .strong }} - {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} - {{- $password = randAscii $passwordLength }} - {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} - {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} - {{- else }} - {{- $password = randAlphaNum $passwordLength | b64enc | quote }} - {{- end }} -{{- end -}} -{{- printf "%s" $password -}} -{{- end -}} - -{{/* -Returns whether a previous generated secret already exists - -Usage: -{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} - -Params: - - secret - String - Required - Name of the 'Secret' resource where the password is stored. - - context - Context - Required - Parent context. 
-*/}} -{{- define "common.secrets.exists" -}} -{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} -{{- if $secret }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_storage.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_storage.tpl deleted file mode 100644 index 60e2a844..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_storage.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Return the proper Storage Class -{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} -*/}} -{{- define "common.storage.class" -}} - -{{- $storageClass := .persistence.storageClass -}} -{{- if .global -}} - {{- if .global.storageClass -}} - {{- $storageClass = .global.storageClass -}} - {{- end -}} -{{- end -}} - -{{- if $storageClass -}} - {{- if (eq "-" $storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" $storageClass -}} - {{- end -}} -{{- end -}} - -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_tplvalues.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_tplvalues.tpl deleted file mode 100644 index 2db16685..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_tplvalues.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Renders a value that contains template. -Usage: -{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "common.tplvalues.render" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_utils.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_utils.tpl deleted file mode 100644 index ea083a24..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_utils.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Print instructions to get a secret value. -Usage: -{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} -*/}} -{{- define "common.utils.secret.getvalue" -}} -{{- $varname := include "common.utils.fieldToEnvVar" . -}} -export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) -{{- end -}} - -{{/* -Build env var name given a field -Usage: -{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} -*/}} -{{- define "common.utils.fieldToEnvVar" -}} - {{- $fieldNameSplit := splitList "-" .field -}} - {{- $upperCaseFieldNameSplit := list -}} - - {{- range $fieldNameSplit -}} - {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} - {{- end -}} - - {{ join "_" $upperCaseFieldNameSplit }} -{{- end -}} - -{{/* -Gets a value from .Values given -Usage: -{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} -*/}} -{{- define "common.utils.getValueFromKey" -}} -{{- $splitKey := splitList "." 
.key -}} -{{- $value := "" -}} -{{- $latestObj := $.context.Values -}} -{{- range $splitKey -}} - {{- if not $latestObj -}} - {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} - {{- end -}} - {{- $value = ( index $latestObj . ) -}} - {{- $latestObj = $value -}} -{{- end -}} -{{- printf "%v" (default "" $value) -}} -{{- end -}} - -{{/* -Returns first .Values key with a defined value or first of the list if all non-defined -Usage: -{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} -*/}} -{{- define "common.utils.getKeyFromList" -}} -{{- $key := first .keys -}} -{{- $reverseKeys := reverse .keys }} -{{- range $reverseKeys }} - {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} - {{- if $value -}} - {{- $key = . }} - {{- end -}} -{{- end -}} -{{- printf "%s" $key -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_warnings.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_warnings.tpl deleted file mode 100644 index ae10fa41..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/_warnings.tpl +++ /dev/null @@ -1,14 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Warning about using rolling tag. -Usage: -{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} -*/}} -{{- define "common.warnings.rollingTag" -}} - -{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end }} - -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_cassandra.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_cassandra.tpl deleted file mode 100644 index ded1ae3b..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_cassandra.tpl +++ /dev/null @@ -1,72 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Validate Cassandra required passwords are not empty. - -Usage: -{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} -Params: - - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" - - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false -*/}} -{{- define "common.validations.values.cassandra.passwords" -}} - {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} - {{- $enabled := include "common.cassandra.values.enabled" . -}} - {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} - {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} - - {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} - {{- $requiredPasswords := list -}} - - {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} - - {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} - - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for existingSecret. - -Usage: -{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} -Params: - - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false -*/}} -{{- define "common.cassandra.values.existingSecret" -}} - {{- if .subchart -}} - {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} - {{- else -}} - {{- .context.Values.dbUser.existingSecret | quote -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled cassandra. - -Usage: -{{ include "common.cassandra.values.enabled" (dict "context" $) }} -*/}} -{{- define "common.cassandra.values.enabled" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.cassandra.enabled -}} - {{- else -}} - {{- printf "%v" (not .context.Values.enabled) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for the key dbUser - -Usage: -{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false -*/}} -{{- define "common.cassandra.values.key.dbUser" -}} - {{- if .subchart -}} - cassandra.dbUser - {{- else -}} - dbUser - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mariadb.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mariadb.tpl deleted file mode 100644 index b6906ff7..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mariadb.tpl +++ /dev/null @@ -1,103 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Validate MariaDB required passwords are not empty. - -Usage: -{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} -Params: - - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" - - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false -*/}} -{{- define "common.validations.values.mariadb.passwords" -}} - {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} - {{- $enabled := include "common.mariadb.values.enabled" . -}} - {{- $architecture := include "common.mariadb.values.architecture" . -}} - {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} - {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} - {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} - {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} - {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} - - {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} - {{- $requiredPasswords := list -}} - - {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} - - {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} - {{- if not (empty $valueUsername) -}} - {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} - {{- end -}} - - {{- if (eq $architecture "replication") -}} - {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} - {{- end -}} - - {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} - - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for existingSecret. - -Usage: -{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false -*/}} -{{- define "common.mariadb.values.auth.existingSecret" -}} - {{- if .subchart -}} - {{- .context.Values.mariadb.auth.existingSecret | quote -}} - {{- else -}} - {{- .context.Values.auth.existingSecret | quote -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled mariadb. - -Usage: -{{ include "common.mariadb.values.enabled" (dict "context" $) }} -*/}} -{{- define "common.mariadb.values.enabled" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.mariadb.enabled -}} - {{- else -}} - {{- printf "%v" (not .context.Values.enabled) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for architecture - -Usage: -{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false -*/}} -{{- define "common.mariadb.values.architecture" -}} - {{- if .subchart -}} - {{- .context.Values.mariadb.architecture -}} - {{- else -}} - {{- .context.Values.architecture -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for the key auth - -Usage: -{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false -*/}} -{{- define "common.mariadb.values.key.auth" -}} - {{- if .subchart -}} - mariadb.auth - {{- else -}} - auth - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mongodb.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mongodb.tpl deleted file mode 100644 index a071ea4d..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_mongodb.tpl +++ /dev/null @@ -1,108 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Validate MongoDB® required passwords are not empty. - -Usage: -{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} -Params: - - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" - - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false -*/}} -{{- define "common.validations.values.mongodb.passwords" -}} - {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} - {{- $enabled := include "common.mongodb.values.enabled" . -}} - {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} - {{- $architecture := include "common.mongodb.values.architecture" . -}} - {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} - {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} - {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} - {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} - {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} - {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} - - {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} - - {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} - {{- $requiredPasswords := list -}} - - {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} - - {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} - {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} - {{- if and $valueUsername $valueDatabase -}} - {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} - {{- end -}} - - {{- if (eq $architecture "replicaset") -}} - {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} - {{- end -}} - - {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} - - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for existingSecret. - -Usage: -{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false -*/}} -{{- define "common.mongodb.values.auth.existingSecret" -}} - {{- if .subchart -}} - {{- .context.Values.mongodb.auth.existingSecret | quote -}} - {{- else -}} - {{- .context.Values.auth.existingSecret | quote -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled mongodb. - -Usage: -{{ include "common.mongodb.values.enabled" (dict "context" $) }} -*/}} -{{- define "common.mongodb.values.enabled" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.mongodb.enabled -}} - {{- else -}} - {{- printf "%v" (not .context.Values.enabled) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for the key auth - -Usage: -{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false -*/}} -{{- define "common.mongodb.values.key.auth" -}} - {{- if .subchart -}} - mongodb.auth - {{- else -}} - auth - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for architecture - -Usage: -{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false -*/}} -{{- define "common.mongodb.values.architecture" -}} - {{- if .subchart -}} - {{- .context.Values.mongodb.architecture -}} - {{- else -}} - {{- .context.Values.architecture -}} - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_postgresql.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_postgresql.tpl deleted file mode 100644 index 164ec0d0..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_postgresql.tpl +++ /dev/null @@ -1,129 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Validate PostgreSQL required passwords are not empty. - -Usage: -{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} -Params: - - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" - - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false -*/}} -{{- define "common.validations.values.postgresql.passwords" -}} - {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} - {{- $enabled := include "common.postgresql.values.enabled" . -}} - {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} - {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} - {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} - {{- $requiredPasswords := list -}} - {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} - - {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} - {{- if (eq $enabledReplication "true") -}} - {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} - {{- end -}} - - {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to decide whether evaluate global values. - -Usage: -{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} -Params: - - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" -*/}} -{{- define "common.postgresql.values.use.global" -}} - {{- if .context.Values.global -}} - {{- if .context.Values.global.postgresql -}} - {{- index .context.Values.global.postgresql .key | quote -}} - {{- end -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for existingSecret. - -Usage: -{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} -*/}} -{{- define "common.postgresql.values.existingSecret" -}} - {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} - - {{- if .subchart -}} - {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} - {{- else -}} - {{- default (.context.Values.existingSecret | quote) $globalValue -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled postgresql. - -Usage: -{{ include "common.postgresql.values.enabled" (dict "context" $) }} -*/}} -{{- define "common.postgresql.values.enabled" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.postgresql.enabled -}} - {{- else -}} - {{- printf "%v" (not .context.Values.enabled) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for the key postgressPassword. - -Usage: -{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false -*/}} -{{- define "common.postgresql.values.key.postgressPassword" -}} - {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} - - {{- if not $globalValue -}} - {{- if .subchart -}} - postgresql.postgresqlPassword - {{- else -}} - postgresqlPassword - {{- end -}} - {{- else -}} - global.postgresql.postgresqlPassword - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled.replication. - -Usage: -{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false -*/}} -{{- define "common.postgresql.values.enabled.replication" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.postgresql.replication.enabled -}} - {{- else -}} - {{- printf "%v" .context.Values.replication.enabled -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for the key replication.password. - -Usage: -{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false -*/}} -{{- define "common.postgresql.values.key.replicationPassword" -}} - {{- if .subchart -}} - postgresql.replication.password - {{- else -}} - replication.password - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_redis.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_redis.tpl deleted file mode 100644 index 5d72959b..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_redis.tpl +++ /dev/null @@ -1,76 +0,0 @@ - -{{/* vim: set filetype=mustache: */}} -{{/* -Validate Redis™ required passwords are not empty. - -Usage: -{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} -Params: - - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" - - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false -*/}} -{{- define "common.validations.values.redis.passwords" -}} - {{- $enabled := include "common.redis.values.enabled" . -}} - {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} - {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} - - {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} - {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} - - {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} - {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} - - {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} - {{- $requiredPasswords := list -}} - - {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} - {{- if eq $useAuth "true" -}} - {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} - {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} - {{- end -}} - - {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right value for enabled redis. - -Usage: -{{ include "common.redis.values.enabled" (dict "context" $) }} -*/}} -{{- define "common.redis.values.enabled" -}} - {{- if .subchart -}} - {{- printf "%v" .context.Values.redis.enabled -}} - {{- else -}} - {{- printf "%v" (not .context.Values.enabled) -}} - {{- end -}} -{{- end -}} - -{{/* -Auxiliary function to get the right prefix path for the values - -Usage: -{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} -Params: - - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false -*/}} -{{- define "common.redis.values.keys.prefix" -}} - {{- if .subchart -}}redis.{{- else -}}{{- end -}} -{{- end -}} - -{{/* -Checks whether the redis chart's includes the standarizations (version >= 14) - -Usage: -{{ include "common.redis.values.standarized.version" (dict "context" $) }} -*/}} -{{- define "common.redis.values.standarized.version" -}} - - {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} - {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} - - {{- if $standarizedAuthValues -}} - {{- true -}} - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_validations.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_validations.tpl deleted file mode 100644 index 9a814cf4..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/templates/validations/_validations.tpl +++ /dev/null @@ -1,46 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Validate values must not be empty. - -Usage: -{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} -{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} -{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} - -Validate value params: - - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" - - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" - - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" -*/}} -{{- define "common.validations.values.multiple.empty" -}} - {{- range .required -}} - {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} - {{- end -}} -{{- end -}} - -{{/* -Validate a value must not be empty. - -Usage: -{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} - -Validate value params: - - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" - - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" - - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" - - subchart - String - Optional - Name of the subchart that the validated password is part of. -*/}} -{{- define "common.validations.values.single.empty" -}} - {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} - {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} - - {{- if not $value -}} - {{- $varname := "my-value" -}} - {{- $getCurrentValue := "" -}} - {{- if and .secret .field -}} - {{- $varname = include "common.utils.fieldToEnvVar" . -}} - {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) 
-}} - {{- end -}} - {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} - {{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/values.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/values.yaml deleted file mode 100644 index f2df68e5..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/charts/common/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -## bitnami/common -## It is required by CI/CD tools and processes. -## @skip exampleValue -## -exampleValue: common-chart diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crd-rabbitmq-cluster.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crd-rabbitmq-cluster.yaml deleted file mode 100644 index 70bf432b..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crd-rabbitmq-cluster.yaml +++ /dev/null @@ -1,4115 +0,0 @@ -# Source: https://github.com/rabbitmq/cluster-operator/tree/main/config/crd -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: rabbitmqclusters.rabbitmq.com - labels: - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: RabbitmqCluster - listKind: RabbitmqClusterList - plural: rabbitmqclusters - shortNames: - - rmq - singular: rabbitmqcluster - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type == 'AllReplicasReady')].status - name: AllReplicasReady - type: string - - jsonPath: .status.conditions[?(@.type == 'ReconcileSuccess')].status - name: ReconcileSuccess - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: RabbitmqCluster is the Schema for the RabbitmqCluster API. Each instance of this object corresponds to a single RabbitMQ cluster. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec is the desired state of the RabbitmqCluster Custom Resource. - properties: - affinity: - description: Affinity scheduling rules to be applied on created Pods. - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). - properties: - preference: - description: A node selector term, associated with the corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. - items: - description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. 
- properties: - matchExpressions: - description: A list of node selector requirements by node's labels. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements by node's fields. - items: - description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: The label key that the selector applies to. - type: string - operator: - description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - type: string - values: - description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
-              # … remainder of the core/v1 Affinity schema: podAffinity and
-              #   podAntiAffinity, each with preferredDuringSchedulingIgnoredDuringExecution
-              #   (weight plus podAffinityTerm) and requiredDuringSchedulingIgnoredDuringExecution
-              #   terms built from labelSelector / namespaceSelector
-              #   (matchExpressions, matchLabels), namespaces and a required
-              #   topologyKey …
-              image:
-                description: Image is the name of the RabbitMQ docker image to use for RabbitMQ nodes in the RabbitmqCluster. Must be provided together with ImagePullSecrets in order to use an image in a private registry.
-                type: string
-              imagePullSecrets:
-                description: List of Secret resource containing access credentials to the registry for the RabbitMQ image. Required if the docker registry is private.
-                items:
-                  description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.
-                  properties:
-                    name:
-                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
-                      type: string
-                  type: object
-                type: array
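The fields above are plain pass-throughs to core Kubernetes types, so a short, hedged sketch of how they appear on a `RabbitmqCluster` resource may be easier to read than the schema; the image, registry secret and pod label below are illustrative, and `rabbitmq.com/v1beta1` is the group/version the upstream cluster operator serves, not something stated in this hunk:

```yaml
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
  name: example-rabbitmq                          # hypothetical cluster name
spec:
  image: "registry.example.com/rabbitmq:3.11"     # private image, hence the pull secret below
  imagePullSecrets:
    - name: example-registry-credentials          # LocalObjectReference: only a name
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/name: example-rabbitmq   # assumed pod label
          topologyKey: kubernetes.io/hostname            # spread brokers across nodes
```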
-              override:
-                properties:
-                  service:
-                    properties:
-                      metadata:
-                        # … annotations and labels maps …
-                      spec:
-                        # … full core/v1 ServiceSpec pass-through:
-                        #   allocateLoadBalancerNodePorts, clusterIP(s), externalIPs,
-                        #   externalName, externalTrafficPolicy, healthCheckNodePort,
-                        #   internalTrafficPolicy, ipFamilies, ipFamilyPolicy,
-                        #   loadBalancerClass, loadBalancerIP, loadBalancerSourceRanges,
-                        #   ports, publishNotReadyAddresses, selector,
-                        #   sessionAffinity(Config), type …
-                    type: object
-                  statefulSet:
-                    properties:
-                      metadata:
-                        # … annotations and labels maps …
-                      spec:
-                        # … apps/v1 StatefulSetSpec pass-through: podManagementPolicy,
-                        #   replicas, selector, serviceName, a full PodTemplateSpec
-                        #   (affinity, automountServiceAccountToken, containers,
-                        #   ephemeralContainers, initContainers, dnsConfig/dnsPolicy,
-                        #   hostAliases, imagePullSecrets, nodeName/nodeSelector, os,
-                        #   overhead, priority(ClassName), securityContext,
-                        #   serviceAccount(Name), tolerations, topologySpreadConstraints,
-                        #   every core/v1 volume source), updateStrategy and
-                        #   volumeClaimTemplates …
-                    type: object
-                type: object
- pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - storageClassName: - description: The name of the StorageClass to claim a PersistentVolume from. - type: string - type: object - rabbitmq: - description: Configuration options for RabbitMQ Pods created in the cluster. - properties: - additionalConfig: - description: Modify to add to the rabbitmq.conf file in addition to default configurations set by the operator. Modifying this property on an existing RabbitmqCluster will trigger a StatefulSet rolling restart and will cause rabbitmq downtime. For more information on this config, see https://www.rabbitmq.com/configure.html#config-file - maxLength: 2000 - type: string - additionalPlugins: - description: 'List of plugins to enable in addition to essential plugins: rabbitmq_management, rabbitmq_prometheus, and rabbitmq_peer_discovery_k8s.' - items: - description: A Plugin to enable on the RabbitmqCluster. - maxLength: 100 - pattern: ^\w+$ - type: string - maxItems: 100 - type: array - advancedConfig: - description: Specify any rabbitmq advanced.config configurations to apply to the cluster. For more information on advanced config, see https://www.rabbitmq.com/configure.html#advanced-config-file - maxLength: 100000 - type: string - envConfig: - description: Modify to add to the rabbitmq-env.conf file. Modifying this property on an existing RabbitmqCluster will trigger a StatefulSet rolling restart and will cause rabbitmq downtime. For more information on env config, see https://www.rabbitmq.com/man/rabbitmq-env.conf.5.html - maxLength: 100000 - type: string - type: object - replicas: - default: 1 - description: Replicas is the number of nodes in the RabbitMQ cluster. Each node is deployed as a Replica in a StatefulSet. Only 1, 3, 5 replicas clusters are tested. This value should be an odd number to ensure the resultant cluster can establish exactly one quorum of nodes in the event of a fragmenting network partition. - format: int32 - minimum: 0 - type: integer - resources: - default: - limits: - cpu: 2000m - memory: 2Gi - requests: - cpu: 1000m - memory: 2Gi - description: The desired compute resource requirements of Pods in the cluster. - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - secretBackend: - description: Secret backend configuration for the RabbitmqCluster. Enables to fetch default user credentials and certificates from K8s external secret stores. 
- properties: - vault: - description: VaultSpec will add Vault annotations (see https://www.vaultproject.io/docs/platform/k8s/injector/annotations) to RabbitMQ Pods. It requires a Vault Agent Sidecar Injector (https://www.vaultproject.io/docs/platform/k8s/injector) to be installed in the K8s cluster. The injector is a K8s Mutation Webhook Controller that alters RabbitMQ Pod specifications (based on the added Vault annotations) to include Vault Agent containers that render Vault secrets to the volume. - properties: - annotations: - additionalProperties: - type: string - description: Vault annotations that override the Vault annotations set by the cluster-operator. For a list of valid Vault annotations, see https://www.vaultproject.io/docs/platform/k8s/injector/annotations - type: object - defaultUserPath: - description: Path in Vault to access a KV (Key-Value) secret with the fields username and password for the default user. For example "secret/data/rabbitmq/config". - type: string - defaultUserUpdaterImage: - description: Sidecar container that updates the default user's password in RabbitMQ when it changes in Vault. Additionally, it updates /var/lib/rabbitmq/.rabbitmqadmin.conf (used by rabbitmqadmin CLI). Set to empty string to disable the sidecar container. - type: string - role: - description: Role in Vault. If vault.defaultUserPath is set, this role must have capability to read the pre-created default user credential in Vault. If vault.tls is set, this role must have capability to create and update certificates in the Vault PKI engine for the domains "" and ".svc". - type: string - tls: - properties: - altNames: - description: 'Specifies the requested Subject Alternative Names (SANs), in a comma-delimited list. These will be appended to the SANs added by the cluster-operator. The cluster-operator will add SANs: "-server-.-nodes." for each pod, e.g. "myrabbit-server-0.myrabbit-nodes.default".' - type: string - commonName: - description: Specifies the requested certificate Common Name (CN). Defaults to ..svc if not provided. - type: string - ipSans: - description: Specifies the requested IP Subject Alternative Names, in a comma-delimited list. - type: string - pkiIssuerPath: - description: Path in Vault PKI engine. For example "pki/issue/hashicorp-com". required - type: string - type: object - type: object - type: object - service: - default: - type: ClusterIP - description: The desired state of the Kubernetes Service to create for the cluster. - properties: - annotations: - additionalProperties: - type: string - description: Annotations to add to the Service. - type: object - type: - default: ClusterIP - description: 'Type of Service to create for the cluster. Must be one of: ClusterIP, LoadBalancer, NodePort. For more info see https://pkg.go.dev/k8s.io/api/core/v1#ServiceType' - enum: - - ClusterIP - - LoadBalancer - - NodePort - type: string - type: object - skipPostDeploySteps: - description: If unset, or set to false, the cluster will run `rabbitmq-queues rebalance all` whenever the cluster is updated. Set to true to prevent the operator rebalancing queue leaders after a cluster update. Has no effect if the cluster only consists of one node. For more information, see https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance - type: boolean - terminationGracePeriodSeconds: - default: 604800 - description: 'TerminationGracePeriodSeconds is the timeout that each rabbitmqcluster pod will have to terminate gracefully. 
It defaults to 604800 seconds ( a week long) to ensure that the container preStop lifecycle hook can finish running. For more information, see: https://github.com/rabbitmq/cluster-operator/blob/main/docs/design/20200520-graceful-pod-termination.md' - format: int64 - minimum: 0 - type: integer - tls: - description: TLS-related configuration for the RabbitMQ cluster. - properties: - caSecretName: - description: Name of a Secret in the same Namespace as the RabbitmqCluster, containing the Certificate Authority's public certificate for TLS. The Secret must store this as ca.crt. This Secret can be created by running `kubectl create secret generic ca-secret --from-file=ca.crt=path/to/ca.cert` Used for mTLS, and TLS for rabbitmq_web_stomp and rabbitmq_web_mqtt. - type: string - disableNonTLSListeners: - description: 'When set to true, the RabbitmqCluster disables non-TLS listeners for RabbitMQ, management plugin and for any enabled plugins in the following list: stomp, mqtt, web_stomp, web_mqtt. Only TLS-enabled clients will be able to connect.' - type: boolean - secretName: - description: Name of a Secret in the same Namespace as the RabbitmqCluster, containing the server's private key & public certificate for TLS. The Secret must store these as tls.key and tls.crt, respectively. This Secret can be created by running `kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/to/tls.key` - type: string - type: object - tolerations: - description: Tolerations is the list of Toleration resources attached to each Pod in the RabbitmqCluster. - items: - description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. - type: string - type: object - type: array - type: object - status: - description: Status presents the observed state of RabbitmqCluster - properties: - binding: - description: 'Binding exposes a secret containing the binding information for this RabbitmqCluster. It implements the service binding Provisioned Service duck type. See: https://github.com/servicebinding/spec#provisioned-service' - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' - type: string - type: object - conditions: - description: Set of Conditions describing the current state of the RabbitmqCluster - items: - properties: - lastTransitionTime: - description: The last time this Condition type changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of RabbitmqCluster status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - defaultUser: - description: Identifying information on internal resources - properties: - secretReference: - description: Reference to the Kubernetes Secret containing the credentials of the default user. - properties: - keys: - additionalProperties: - type: string - description: Key-value pairs in the Secret corresponding to `username`, `password`, `host`, and `port` - type: object - name: - description: Name of the Secret containing the default user credentials - type: string - namespace: - description: Namespace of the Secret containing the default user credentials - type: string - required: - - keys - - name - - namespace - type: object - serviceReference: - description: Reference to the Kubernetes Service serving the cluster. - properties: - name: - description: Name of the Service serving the cluster - type: string - namespace: - description: Namespace of the Service serving the cluster - type: string - required: - - name - - namespace - type: object - type: object - observedGeneration: - description: observedGeneration is the most recent successful generation observed for this RabbitmqCluster. It corresponds to the RabbitmqCluster's generation, which is updated on mutation by the API Server. - format: int64 - type: integer - required: - - conditions - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crds-messaging-topology-operator.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crds-messaging-topology-operator.yaml deleted file mode 100644 index 7443fd9b..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/crds/crds-messaging-topology-operator.yaml +++ /dev/null @@ -1,1593 +0,0 @@ -# Source: https://github.com/rabbitmq/messaging-topology-operator/tree/main/config/crd -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: bindings.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Binding - listKind: BindingList - plural: bindings - singular: binding - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Binding is the Schema for the bindings API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: BindingSpec defines the desired state of Binding - properties: - arguments: - description: Cannot be updated - type: object - x-kubernetes-preserve-unknown-fields: true - destination: - description: Cannot be updated - type: string - destinationType: - description: Cannot be updated - enum: - - exchange - - queue - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the binding will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - routingKey: - description: Cannot be updated - type: string - source: - description: Cannot be updated - type: string - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - rabbitmqClusterReference - type: object - status: - description: BindingStatus defines the observed state of Binding - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Binding. It corresponds to the Binding's generation, - which is updated on mutation by the API Server. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: exchanges.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Exchange - listKind: ExchangeList - plural: exchanges - singular: exchange - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Exchange is the Schema for the exchanges API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: ExchangeSpec defines the desired state of Exchange - properties: - arguments: - type: object - x-kubernetes-preserve-unknown-fields: true - autoDelete: - description: Cannot be updated - type: boolean - durable: - description: Cannot be updated - type: boolean - name: - description: Required property; cannot be updated - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the exchange will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - type: - default: direct - description: Cannot be updated - type: string - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - name - - rabbitmqClusterReference - type: object - status: - description: ExchangeStatus defines the observed state of Exchange - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. 
- type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Exchange. It corresponds to the Exchange's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: federations.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Federation - listKind: FederationList - plural: federations - singular: federation - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Federation is the Schema for the federations API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'FederationSpec defines the desired state of Federation For - how to configure federation upstreams, see: https://www.rabbitmq.com/federation-reference.html.' - properties: - ackMode: - enum: - - on-confirm - - on-publish - - no-ack - type: string - exchange: - type: string - expires: - type: integer - maxHops: - type: integer - messageTTL: - type: integer - name: - description: Required property; cannot be updated - type: string - prefetch-count: - type: integer - queue: - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that this federation - upstream will be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. 
- type: string - type: object - reconnectDelay: - type: integer - trustUserId: - type: boolean - uriSecret: - description: Secret contains the AMQP URI(s) for the upstream. The - Secret must contain the key `uri` or operator will error. `uri` - should be one or multiple uris separated by ','. Required property. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - name - - rabbitmqClusterReference - - uriSecret - type: object - status: - description: FederationStatus defines the observed state of Federation - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Federation. It corresponds to the Federation's - generation, which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: permissions.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Permission - listKind: PermissionList - plural: permissions - singular: permission - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Permission is the Schema for the permissions API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PermissionSpec defines the desired state of Permission - properties: - permissions: - description: 'Permissions to grant to the user in the specific vhost; - required property. 
See RabbitMQ doc for more information: https://www.rabbitmq.com/access-control.html#user-management' - properties: - configure: - type: string - read: - type: string - write: - type: string - type: object - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that both the provided - user and vhost are. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - user: - description: Name of an existing user; must provide user or userReference, - else create/update will fail; cannot be updated - type: string - userReference: - description: Reference to an existing user.rabbitmq.com object; must - provide user or userReference, else create/update will fail; cannot - be updated - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - vhost: - description: Name of an existing vhost; required property; cannot - be updated - type: string - required: - - permissions - - rabbitmqClusterReference - - vhost - type: object - status: - description: PermissionStatus defines the observed state of Permission - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Permission. It corresponds to the Permission's - generation, which is updated on mutation by the API Server. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: policies.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Policy - listKind: PolicyList - plural: policies - singular: policy - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Policy is the Schema for the policies API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PolicySpec defines the desired state of Policy https://www.rabbitmq.com/parameters.html#policies - properties: - applyTo: - default: all - description: 'What this policy applies to: ''queues'', ''exchanges'', - or ''all''. Default to ''all''.' - enum: - - queues - - exchanges - - all - type: string - definition: - description: Policy definition. Required property. - type: object - x-kubernetes-preserve-unknown-fields: true - name: - description: Required property; cannot be updated - type: string - pattern: - description: Regular expression pattern used to match queues and exchanges, - e.g. "^amq.". Required property. - type: string - priority: - default: 0 - description: Default to '0'. In the event that more than one policy - can match a given exchange or queue, the policy with the greatest - priority applies. - type: integer - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the exchange will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. 
- type: string - type: object - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - definition - - name - - pattern - - rabbitmqClusterReference - type: object - status: - description: PolicyStatus defines the observed state of Policy - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Policy. It corresponds to the Policy's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: queues.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Queue - listKind: QueueList - plural: queues - singular: queue - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Queue is the Schema for the queues API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: QueueSpec defines the desired state of Queue - properties: - arguments: - description: 'Queue arguments in the format of KEY: VALUE. e.g. x-delivery-limit: - 10000. Configuring queues through arguments is not recommended because - they cannot be updated once set; we recommend configuring queues - through policies instead.' - type: object - x-kubernetes-preserve-unknown-fields: true - autoDelete: - description: when set to true, queues that have had at least one consumer - before are deleted after the last consumer unsubscribes. - type: boolean - durable: - description: When set to false queues does not survive server restart. - type: boolean - name: - description: Name of the queue; required property. - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the queue will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. 
The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - type: - type: string - vhost: - default: / - description: Default to vhost '/' - type: string - required: - - name - - rabbitmqClusterReference - type: object - status: - description: QueueStatus defines the observed state of Queue - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Queue. It corresponds to the Queue's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: schemareplications.rabbitmq.com -spec: - group: rabbitmq.com - names: - kind: SchemaReplication - listKind: SchemaReplicationList - plural: schemareplications - singular: schemareplication - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: 'SchemaReplication is the Schema for the schemareplications API - This feature requires Tanzu RabbitMQ with schema replication plugin. For - more information, see: https://tanzu.vmware.com/rabbitmq and https://www.rabbitmq.com/definitions-standby.html.' - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: SchemaReplicationSpec defines the desired state of SchemaReplication - properties: - endpoints: - description: endpoints should be one or multiple endpoints separated - by ','. Must provide either spec.endpoints or endpoints in spec.upstreamSecret. - When endpoints are provided in both spec.endpoints and spec.upstreamSecret, - spec.endpoints takes precedence. - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that schema replication - would be set for. Must be an existing cluster. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - upstreamSecret: - description: Defines a Secret which contains credentials to be used - for schema replication. The Secret must contain the keys `username` - and `password` in its Data field, or operator will error. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - required: - - rabbitmqClusterReference - type: object - status: - description: SchemaReplicationStatus defines the observed state of SchemaReplication - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Queue. It corresponds to the Queue's generation, - which is updated on mutation by the API Server. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: shovels.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Shovel - listKind: ShovelList - plural: shovels - singular: shovel - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Shovel is the Schema for the shovels API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: 'ShovelSpec defines the desired state of Shovel For how to - configure Shovel, see: https://www.rabbitmq.com/shovel.html.' - properties: - ackMode: - enum: - - on-confirm - - on-publish - - no-ack - type: string - addForwardHeaders: - type: boolean - deleteAfter: - type: string - destAddForwardHeaders: - type: boolean - destAddTimestampHeader: - type: boolean - destAddress: - type: string - destApplicationProperties: - type: string - destExchange: - type: string - destExchangeKey: - type: string - destProperties: - type: string - destProtocol: - type: string - destPublishProperties: - type: string - destQueue: - type: string - name: - description: Required property; cannot be updated - type: string - prefetchCount: - type: integer - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that this Shovel will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - reconnectDelay: - type: integer - srcAddress: - type: string - srcDeleteAfter: - type: string - srcExchange: - type: string - srcExchangeKey: - type: string - srcPrefetchCount: - type: integer - srcProtocol: - type: string - srcQueue: - type: string - uriSecret: - description: Secret contains the AMQP URI(s) to configure Shovel destination - and source. 
The Secret must contain the key `destUri` and `srcUri` - or operator will error. Both fields should be one or multiple uris - separated by ','. Required property. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - name - - rabbitmqClusterReference - - uriSecret - type: object - status: - description: ShovelStatus defines the observed state of Shovel - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Shovel. It corresponds to the Shovel's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: superstreams.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: SuperStream - listKind: SuperStreamList - plural: superstreams - singular: superstream - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: SuperStream is the Schema for the queues API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: SuperStreamSpec defines the desired state of SuperStream - properties: - name: - description: Name of the queue; required property. - type: string - partitions: - default: 3 - description: Number of partitions to create within this super stream. - Defaults to '3'. - type: integer - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the SuperStream - will be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. 
The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - routingKeys: - description: Routing keys to use for each of the partitions in the - SuperStream If unset, the routing keys for the partitions will be - set to the index of the partitions - items: - type: string - type: array - vhost: - default: / - description: Default to vhost '/'; cannot be updated - type: string - required: - - name - - rabbitmqClusterReference - type: object - status: - description: SuperStreamStatus defines the observed state of SuperStream - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this SuperStream. It corresponds to the SuperStream's - generation, which is updated on mutation by the API Server. - format: int64 - type: integer - partitions: - description: Partitions are a list of the stream queue names which - form the partitions of this SuperStream. - items: - type: string - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: users.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: User - listKind: UserList - plural: users - singular: user - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: User is the Schema for the users API. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec configures the desired state of the User object. - properties: - importCredentialsSecret: - description: Defines a Secret used to pre-define the username and - password set for this User. User objects created with this field - set will not have randomly-generated credentials, and will instead - import the username/password values from this Secret. The Secret - must contain the keys `username` and `password` in its Data field, - or the import will fail. Note that this import only occurs at creation - time, and is ignored once a password has been set on a User. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the user will be - created for. This cluster must exist for the User object to be created. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - tags: - description: List of permissions tags to associate with the user. - This determines the level of access to the RabbitMQ management UI - granted to the user. Omitting this field will lead to a user than - can still connect to the cluster through messaging protocols, but - cannot perform any management actions. For more information, see - https://www.rabbitmq.com/management.html#permissions. - items: - description: UserTag defines the level of access to the management - UI allocated to the user. For more information, see https://www.rabbitmq.com/management.html#permissions. - enum: - - management - - policymaker - - monitoring - - administrator - type: string - type: array - required: - - rabbitmqClusterReference - type: object - status: - description: Status exposes the observed state of the User object. - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. - type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - credentials: - description: Provides a reference to a Secret object containing the - user credentials. 
- properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this User. It corresponds to the User's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - username: - description: Provide rabbitmq Username - type: string - required: - - username - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.8.0 - creationTimestamp: null - name: vhosts.rabbitmq.com -spec: - group: rabbitmq.com - names: - categories: - - all - - rabbitmq - kind: Vhost - listKind: VhostList - plural: vhosts - singular: vhost - scope: Namespaced - versions: - - name: v1beta1 - schema: - openAPIV3Schema: - description: Vhost is the Schema for the vhosts API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: VhostSpec defines the desired state of Vhost - properties: - name: - description: Name of the vhost; see https://www.rabbitmq.com/vhosts.html. - type: string - rabbitmqClusterReference: - description: Reference to the RabbitmqCluster that the vhost will - be created in. Required property. - properties: - connectionSecret: - description: Secret contains the http management uri for the RabbitMQ - cluster. The Secret must contain the key `uri`, `username` and - `password` or operator will error. Have to set either name or - connectionSecret, but not both. - properties: - name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' - type: string - type: object - name: - description: The name of the RabbitMQ cluster to reference. Have - to set either name or connectionSecret, but not both. - type: string - namespace: - description: The namespace of the RabbitMQ cluster to reference. - Defaults to the namespace of the requested resource if omitted. - type: string - type: object - tags: - items: - type: string - type: array - tracing: - type: boolean - required: - - name - - rabbitmqClusterReference - type: object - status: - description: VhostStatus defines the observed state of Vhost - properties: - conditions: - items: - properties: - lastTransitionTime: - description: The last time this Condition status changed. - format: date-time - type: string - message: - description: Full text reason for current status of the condition. 
- type: string - reason: - description: One word, camel-case reason for current status - of the condition. - type: string - status: - description: True, False, or Unknown - type: string - type: - description: Type indicates the scope of the custom resource - status addressed by the condition. - type: string - required: - - status - - type - type: object - type: array - observedGeneration: - description: observedGeneration is the most recent successful generation - observed for this Vhost. It corresponds to the Vhost's generation, - which is updated on mutation by the API Server. - format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/NOTES.txt b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/NOTES.txt deleted file mode 100644 index c723b87c..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/NOTES.txt +++ /dev/null @@ -1,14 +0,0 @@ -CHART NAME: {{ .Chart.Name }} -CHART VERSION: {{ .Chart.Version }} -APP VERSION: {{ .Chart.AppVersion }} - -** Please be patient while the chart is being deployed ** - -Watch the RabbitMQ Cluster Operator and RabbitMQ Messaging Topology Operator Deployment status using the command: - - kubectl get deploy -w --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/name={{ include "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }} - -{{ include "common.warnings.rollingTag" .Values.clusterOperator.image }} -{{ include "common.warnings.rollingTag" .Values.msgTopologyOperator.image }} -{{ include "common.warnings.rollingTag" .Values.credentialUpdaterImage }} -{{ include "common.warnings.rollingTag" .Values.rabbitmqImage }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/_helpers.tpl b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/_helpers.tpl deleted file mode 100644 index 82aa0d03..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/_helpers.tpl +++ /dev/null @@ -1,124 +0,0 @@ -{{/* -Return the proper RabbitMQ Cluster Operator fullname -Note: We use the regular common function as the chart name already contains the -the rabbitmq-cluster-operator name. -*/}} -{{- define "rmqco.clusterOperator.fullname" -}} -{{- include "common.names.fullname" . 
-}} -{{- end -}} - -{{/* -Return the proper RabbitMQ Messaging Topology Operator fullname -NOTE: Not using the common function to avoid generating too long names -*/}} -{{- define "rmqco.msgTopologyOperator.fullname" -}} -{{- if .Values.msgTopologyOperator.fullnameOverride -}} - {{- .Values.msgTopologyOperator.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else if .Values.fullnameOverride -}} - {{- printf "%s-%s" .Values.fullnameOverride "messaging-topology-operator" | trunc 63 | trimSuffix "-" -}} -{{- else -}} - {{- printf "%s-%s" .Release.Name "rabbitmq-messaging-topology-operator" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper RabbitMQ Messaging Topology Operator fullname -NOTE: Not using the common function to avoid generating too long names -*/}} -{{- define "rmqco.msgTopologyOperator.webhook.fullname" -}} -{{- if .Values.msgTopologyOperator.fullnameOverride -}} - {{- printf "%s-%s" .Values.msgTopologyOperator.fullnameOverride "webhook" | trunc 63 | trimSuffix "-" -}} -{{- else if .Values.fullnameOverride -}} - {{- printf "%s-%s" .Values.fullnameOverride "messaging-topology-operator-webhook" | trunc 63 | trimSuffix "-" -}} -{{- else -}} - {{- printf "%s-%s" .Release.Name "rabbitmq-messaging-topology-operator-webhook" | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper RabbitMQ Messaging Topology Operator fullname -*/}} -{{- define "rmqco.msgTopologyOperator.webhook.secretName" -}} -{{- if .Values.msgTopologyOperator.existingWebhookCertSecret -}} - {{- .Values.msgTopologyOperator.existingWebhookCertSecret -}} -{{- else }} - {{- include "rmqco.msgTopologyOperator.webhook.fullname" . -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper RabbitMQ Default User Credential updater image name -*/}} -{{- define "rmqco.defaultCredentialUpdater.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.credentialUpdaterImage "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper RabbitMQ Cluster Operator image name -*/}} -{{- define "rmqco.clusterOperator.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.clusterOperator.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper RabbitMQ Cluster Operator image name -*/}} -{{- define "rmqco.msgTopologyOperator.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.msgTopologyOperator.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper RabbitMQ image name -*/}} -{{- define "rmqco.rabbitmq.image" -}} -{{- include "common.images.image" ( dict "imageRoot" .Values.rabbitmqImage "global" .Values.global ) -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "rmqco.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.clusterOperator.image .Values.rabbitmqImage) "global" .Values.global) -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names as a comma separated string -*/}} -{{- define "rmqco.imagePullSecrets.string" -}} -{{- $pullSecrets := list }} -{{- if .Values.global }} - {{- range .Values.global.imagePullSecrets -}} - {{- $pullSecrets = append $pullSecrets . -}} - {{- end -}} -{{- end -}} -{{- range (list .Values.clusterOperator.image .Values.rabbitmqImage) -}} - {{- range .pullSecrets -}} - {{- $pullSecrets = append $pullSecrets . 
-}} - {{- end -}} -{{- end -}} -{{- if (not (empty $pullSecrets)) }} - {{- printf "%s" (join "," $pullSecrets) -}} -{{- end }} -{{- end }} - -{{/* -Create the name of the service account to use (Cluster Operator) -*/}} -{{- define "rmqco.clusterOperator.serviceAccountName" -}} -{{- if .Values.clusterOperator.serviceAccount.create -}} - {{ default (printf "%s" (include "rmqco.clusterOperator.fullname" .)) .Values.clusterOperator.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.clusterOperator.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to use (Messaging Topology Operator) -*/}} -{{- define "rmqco.msgTopologyOperator.serviceAccountName" -}} -{{- if .Values.msgTopologyOperator.serviceAccount.create -}} - {{ default (printf "%s" (include "rmqco.msgTopologyOperator.fullname" .)) .Values.msgTopologyOperator.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.msgTopologyOperator.serviceAccount.name }} -{{- end -}} -{{- end -}} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrole.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrole.yaml deleted file mode 100644 index 98354071..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrole.yaml +++ /dev/null @@ -1,151 +0,0 @@ -{{- if .Values.clusterOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.clusterOperator.fullname" . 
}} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - patch - - apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - pods/exec - verbs: - - create - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - "" - resources: - - services - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - delete - - get - - list - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - rabbitmqclusters - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - rabbitmqclusters/finalizers - verbs: - - update - - apiGroups: - - rabbitmq.com - resources: - - rabbitmqclusters/status - verbs: - - get - - update - - apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - get - - list - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - get - - list - - update - - watch -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrolebinding.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrolebinding.yaml deleted file mode 100644 index fc863e91..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/clusterrolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.clusterOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.clusterOperator.fullname" . }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "rmqco.clusterOperator.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "rmqco.clusterOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . 
| quote }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/deployment.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/deployment.yaml deleted file mode 100644 index b136c1f3..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/deployment.yaml +++ /dev/null @@ -1,161 +0,0 @@ -apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} -kind: Deployment -metadata: - name: {{ template "rmqco.clusterOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.clusterOperator.replicaCount }} - {{- if .Values.clusterOperator.updateStrategy }} - strategy: {{- toYaml .Values.clusterOperator.updateStrategy | nindent 4 }} - {{- end }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: rabbitmq-operator - template: - metadata: - {{- if .Values.clusterOperator.podAnnotations }} - annotations: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.podAnnotations "context" $) | nindent 8 }} - {{- end }} - labels: {{- include "common.labels.standard" . | nindent 8 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.clusterOperator.podLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.podLabels "context" $) | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ template "rmqco.clusterOperator.serviceAccountName" . }} - {{- include "rmqco.imagePullSecrets" . 
| nindent 6 }} - {{- if .Values.clusterOperator.schedulerName }} - schedulerName: {{ .Values.clusterOperator.schedulerName | quote }} - {{- end }} - {{- if .Values.clusterOperator.hostAliases }} - hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.hostAliases "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.topologySpreadConstraints }} - topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.topologySpreadConstraints "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.affinity }} - affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.affinity "context" $) | nindent 8 }} - {{- else }} - affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.clusterOperator.podAffinityPreset "component" "rabbitmq-operator" "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.clusterOperator.podAntiAffinityPreset "component" "rabbitmq-operator" "context" $) | nindent 10 }} - nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.clusterOperator.nodeAffinityPreset.type "key" .Values.clusterOperator.nodeAffinityPreset.key "values" .Values.clusterOperator.nodeAffinityPreset.values) | nindent 10 }} - {{- end }} - {{- if .Values.clusterOperator.nodeSelector }} - nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.tolerations }} - tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.tolerations "context" .) | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.priorityClassName }} - priorityClassName: {{ .Values.clusterOperator.priorityClassName | quote }} - {{- end }} - {{- if .Values.clusterOperator.podSecurityContext.enabled }} - securityContext: {{- omit .Values.clusterOperator.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ .Values.clusterOperator.terminationGracePeriodSeconds }} - {{- end }} - initContainers: - {{- if .Values.clusterOperator.initContainers }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.initContainers "context" $) | nindent 8 }} - {{- end }} - containers: - - name: rabbitmq-cluster-operator - image: {{ template "rmqco.clusterOperator.image" . }} - imagePullPolicy: {{ .Values.clusterOperator.image.pullPolicy }} - {{- if .Values.clusterOperator.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.clusterOperator.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.clusterOperator.command }} - command: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.command "context" $) | nindent 12 }} - {{- else }} - command: - - /manager - {{- end }} - {{- if .Values.clusterOperator.args }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.args "context" $) | nindent 12 }} - {{- else }} - args: - - --metrics-bind-address=:{{ .Values.clusterOperator.containerPorts.metrics }} - {{- end }} - env: - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: DEFAULT_RABBITMQ_IMAGE - value: {{ include "rmqco.rabbitmq.image" . 
}} - - name: DEFAULT_USER_UPDATER_IMAGE - value: {{ include "rmqco.defaultCredentialUpdater.image" . }} - {{- if (include "rmqco.imagePullSecrets.string" .) }} - - name: DEFAULT_IMAGE_PULL_SECRETS - value: {{ include "rmqco.imagePullSecrets.string" . | quote }} - {{- end }} - {{- if .Values.clusterOperator.extraEnvVars }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - envFrom: - {{- if .Values.clusterOperator.extraEnvVarsCM }} - - configMapRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.clusterOperator.extraEnvVarsCM "context" $) }} - {{- end }} - {{- if .Values.clusterOperator.extraEnvVarsSecret }} - - secretRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.clusterOperator.extraEnvVarsSecret "context" $) }} - {{- end }} - {{- if .Values.clusterOperator.resources }} - resources: {{- toYaml .Values.clusterOperator.resources | nindent 12 }} - {{- end }} - {{- if not .Values.diagnosticMode.enabled }} - {{- if .Values.clusterOperator.livenessProbe.enabled }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.clusterOperator.livenessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http - {{- else if .Values.clusterOperator.customLivenessProbe }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.customLivenessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.clusterOperator.readinessProbe.enabled }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.clusterOperator.readinessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http - {{- else if .Values.clusterOperator.customReadinessProbe }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.customReadinessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.clusterOperator.startupProbe.enabled }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.clusterOperator.startupProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http - {{- else if .Values.clusterOperator.customStartupProbe }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.customStartupProbe "context" $) | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.clusterOperator.lifecycleHooks }} - lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.lifecycleHooks "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.clusterOperator.extraVolumeMounts }} - volumeMounts: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.extraVolumeMounts "context" $) | nindent 12 }} - {{- end }} - ports: - - name: http - containerPort: {{ .Values.clusterOperator.containerPorts.metrics }} - protocol: TCP - {{- if .Values.clusterOperator.sidecars }} - {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.sidecars "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.extraVolumes }} - volumes: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.extraVolumes "context" $) | nindent 8 }} - {{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/metrics-service.yaml 
b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/metrics-service.yaml deleted file mode 100644 index 0cca6e33..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/metrics-service.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- if .Values.clusterOperator.metrics.enabled }} -apiVersion: v1 -kind: Service -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ printf "%s-metrics" (include "rmqco.clusterOperator.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if or .Values.commonAnnotations .Values.clusterOperator.metrics.service.annotations }} - annotations: - {{- if .Values.clusterOperator.metrics.service.annotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.metrics.service.annotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.clusterOperator.metrics.service.type }} - {{- if (or (eq .Values.clusterOperator.metrics.service.type "LoadBalancer") (eq .Values.clusterOperator.metrics.service.type "NodePort")) }} - externalTrafficPolicy: {{ .Values.clusterOperator.metrics.service.externalTrafficPolicy | quote }} - {{- end }} - {{- if .Values.clusterOperator.metrics.service.clusterIP }} - clusterIP: {{ .Values.clusterOperator.metrics.service.clusterIP }} - {{- end }} - {{- if eq .Values.clusterOperator.metrics.service.type "LoadBalancer" }} - loadBalancerSourceRanges: {{ .Values.clusterOperator.metrics.service.loadBalancerSourceRanges }} - {{- end }} - {{- if (and (eq .Values.clusterOperator.metrics.service.type "LoadBalancer") (not (empty .Values.clusterOperator.metrics.service.loadBalancerIP))) }} - loadBalancerIP: {{ .Values.clusterOperator.metrics.service.loadBalancerIP }} - {{- end }} - {{- if .Values.clusterOperator.metrics.service.sessionAffinity }} - sessionAffinity: {{ .Values.clusterOperator.metrics.service.sessionAffinity }} - {{- end }} - {{- if .Values.clusterOperator.metrics.service.sessionAffinityConfig }} - sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.metrics.service.sessionAffinityConfig "context" $) | nindent 4 }} - {{- end }} - ports: - - name: http - port: {{ .Values.clusterOperator.metrics.service.ports.http }} - targetPort: http - protocol: TCP - {{- if (and (or (eq .Values.clusterOperator.metrics.service.type "NodePort") (eq .Values.clusterOperator.metrics.service.type "LoadBalancer")) (not (empty .Values.clusterOperator.metrics.service.nodePorts.http))) }} - nodePort: {{ .Values.clusterOperator.metrics.service.nodePorts.http }} - {{- else if eq .Values.clusterOperator.metrics.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.clusterOperator.metrics.service.extraPorts }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.metrics.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/role.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/role.yaml deleted file mode 100644 index 92bb731e..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/role.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.clusterOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: Role -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.clusterOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - - apiGroups: - - "" - resources: - - events - verbs: - - create -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/rolebinding.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/rolebinding.yaml deleted file mode 100644 index d439e875..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/rolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.clusterOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: RoleBinding -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.clusterOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "rmqco.clusterOperator.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "rmqco.clusterOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . | quote }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/service-account.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/service-account.yaml deleted file mode 100644 index f14dc73f..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/service-account.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.clusterOperator.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.clusterOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if or .Values.commonAnnotations .Values.clusterOperator.serviceAccount.annotations }} - annotations: - {{- if .Values.clusterOperator.serviceAccount.annotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.serviceAccount.annotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -automountServiceAccountToken: {{ .Values.clusterOperator.serviceAccount.automountServiceAccountToken }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/servicemonitor.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/servicemonitor.yaml deleted file mode 100644 index ed1997cb..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/cluster-operator/servicemonitor.yaml +++ /dev/null @@ -1,50 +0,0 @@ -{{- if and .Values.clusterOperator.metrics.serviceMonitor.enabled .Values.clusterOperator.metrics.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "rmqco.clusterOperator.fullname" . }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: rabbitmq-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.clusterOperator.metrics.serviceMonitor.additionalLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.clusterOperator.metrics.serviceMonitor.labels }} - {{- include "common.tplvalues.render" (dict "value" .Values.clusterOperator.metrics.serviceMonitor.labels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - namespace: {{ default (include "common.names.namespace" .) .Values.clusterOperator.metrics.serviceMonitor.namespace | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - jobLabel: {{ .Values.clusterOperator.metrics.serviceMonitor.jobLabel }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: rabbitmq-operator - {{- if .Values.clusterOperator.metrics.serviceMonitor.selector }} - {{- include "common.tplvalues.render" ( dict "value" .Values.clusterOperator.metrics.serviceMonitor.selector "context" $ ) | nindent 6 }} - {{- end }} - namespaceSelector: - matchNames: - - {{ include "common.names.namespace" . 
| quote }} - endpoints: - - port: http - {{- if .Values.clusterOperator.metrics.serviceMonitor.interval }} - interval: {{ .Values.clusterOperator.metrics.serviceMonitor.interval }} - {{- end }} - {{- if .Values.clusterOperator.metrics.serviceMonitor.honorLabels }} - honorLabels: {{ .Values.clusterOperator.metrics.serviceMonitor.honorLabels }} - {{- end }} - {{- if .Values.clusterOperator.metrics.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ .Values.clusterOperator.metrics.serviceMonitor.scrapeTimeout }} - {{- end }} - {{- if .Values.clusterOperator.metrics.serviceMonitor.metricRelabelings }} - metricRelabelings: {{ toYaml .Values.clusterOperator.metrics.serviceMonitor.metricRelabelings | nindent 8 }} - {{- end }} - {{- if .Values.clusterOperator.metrics.serviceMonitor.relabelings }} - relabelings: {{ toYaml .Values.clusterOperator.metrics.serviceMonitor.relabelings | nindent 8 }} - {{- end }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/extra-list.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/extra-list.yaml deleted file mode 100644 index 9ac65f9e..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/extra-list.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{- range .Values.extraDeploy }} ---- -{{ include "common.tplvalues.render" (dict "value" . "context" $) }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/issuer.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/issuer.yaml deleted file mode 100644 index 12ba1fa2..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/issuer.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.useCertManager }} -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "common.names.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} -spec: - selfSigned: {} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/certificate.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/certificate.yaml deleted file mode 100644 index 56381da2..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/certificate.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and (.Values.useCertManager) (not .Values.msgTopologyOperator.existingWebhookCertSecret) }} -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - dnsNames: - - {{ printf "%s.%s.svc" (include "rmqco.msgTopologyOperator.webhook.fullname" .) 
(include "common.names.namespace" .) }} - - {{ printf "%s.%s.svc.%s" (include "rmqco.msgTopologyOperator.webhook.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} - issuerRef: - kind: Issuer - name: {{ template "common.names.fullname" . }} - secretName: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrole.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrole.yaml deleted file mode 100644 index 9984199d..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrole.yaml +++ /dev/null @@ -1,275 +0,0 @@ -{{- if .Values.msgTopologyOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - patch - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - get - - list - - watch - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - rabbitmq.com - resources: - - bindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - bindings/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - exchanges - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - exchanges/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - federations - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - federations/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - permissions - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - permissions/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - policies - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - policies/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - queues - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - queues/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - rabbitmqclusters - verbs: - - get - - list - - watch - - apiGroups: - - rabbitmq.com - resources: - - rabbitmqclusters/status - verbs: - - get - - apiGroups: - - rabbitmq.com - resources: - - schemareplications - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - 
apiGroups: - - rabbitmq.com - resources: - - schemareplications/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - shovels - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - shovels/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - superstreams - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - superstreams/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - users - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - users/status - verbs: - - get - - patch - - update - - apiGroups: - - rabbitmq.com - resources: - - vhosts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rabbitmq.com - resources: - - vhosts/status - verbs: - - get - - patch - - update -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrolebinding.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrolebinding.yaml deleted file mode 100644 index 9af144cc..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/clusterrolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.msgTopologyOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "rmqco.msgTopologyOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . | quote }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/deployment.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/deployment.yaml deleted file mode 100644 index 795bfc76..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/deployment.yaml +++ /dev/null @@ -1,165 +0,0 @@ -apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} -kind: Deployment -metadata: - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.msgTopologyOperator.replicaCount }} - {{- if .Values.msgTopologyOperator.updateStrategy }} - strategy: {{- toYaml .Values.msgTopologyOperator.updateStrategy | nindent 4 }} - {{- end }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: messaging-topology-operator - template: - metadata: - {{- if .Values.msgTopologyOperator.podAnnotations }} - annotations: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.podAnnotations "context" $) | nindent 8 }} - {{- end }} - labels: {{- include "common.labels.standard" . | nindent 8 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.msgTopologyOperator.podLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.podLabels "context" $) | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ template "rmqco.msgTopologyOperator.serviceAccountName" . }} - {{- include "rmqco.imagePullSecrets" . | nindent 6 }} - {{- if .Values.msgTopologyOperator.hostAliases }} - hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.hostAliases "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.topologySpreadConstraints }} - topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.topologySpreadConstraints "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.schedulerName }} - schedulerName: {{ .Values.msgTopologyOperator.schedulerName | quote }} - {{- end }} - {{- if .Values.msgTopologyOperator.affinity }} - affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.affinity "context" $) | nindent 8 }} - {{- else }} - affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.msgTopologyOperator.podAffinityPreset "component" "messaging-topology-operator" "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.msgTopologyOperator.podAntiAffinityPreset "component" "messaging-topology-operator" "context" $) | nindent 10 }} - nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.msgTopologyOperator.nodeAffinityPreset.type "key" .Values.msgTopologyOperator.nodeAffinityPreset.key "values" .Values.msgTopologyOperator.nodeAffinityPreset.values) | nindent 10 }} - {{- end }} - {{- if .Values.msgTopologyOperator.nodeSelector }} - nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.nodeSelector "context" $) | nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.tolerations }} - tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.tolerations "context" .) 
| nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.priorityClassName }} - priorityClassName: {{ .Values.msgTopologyOperator.priorityClassName | quote }} - {{- end }} - {{- if .Values.msgTopologyOperator.podSecurityContext.enabled }} - securityContext: {{- omit .Values.msgTopologyOperator.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ .Values.msgTopologyOperator.terminationGracePeriodSeconds }} - {{- end }} - initContainers: - {{- if .Values.msgTopologyOperator.initContainers }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.initContainers "context" $) | nindent 8 }} - {{- end }} - containers: - - name: rabbitmq-cluster-operator - image: {{ template "rmqco.msgTopologyOperator.image" . }} - imagePullPolicy: {{ .Values.msgTopologyOperator.image.pullPolicy }} - {{- if .Values.msgTopologyOperator.containerSecurityContext.enabled }} - securityContext: {{- omit .Values.msgTopologyOperator.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if .Values.msgTopologyOperator.command }} - command: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.command "context" $) | nindent 12 }} - {{- else }} - command: - - /manager - {{- end }} - {{- if .Values.msgTopologyOperator.args }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.args "context" $) | nindent 12 }} - {{- else }} - args: - - --metrics-bind-address=:{{ .Values.msgTopologyOperator.containerPorts.metrics }} - {{- end }} - env: - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- if .Values.msgTopologyOperator.extraEnvVars }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - envFrom: - {{- if .Values.msgTopologyOperator.extraEnvVarsCM }} - - configMapRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.extraEnvVarsCM "context" $) }} - {{- end }} - {{- if .Values.msgTopologyOperator.extraEnvVarsSecret }} - - secretRef: - name: {{ include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.extraEnvVarsSecret "context" $) }} - {{- end }} - {{- if .Values.msgTopologyOperator.resources }} - resources: {{- toYaml .Values.msgTopologyOperator.resources | nindent 12 }} - {{- end }} - ports: - - name: http-webhook - containerPort: 9443 - protocol: TCP - - name: http-metrics - containerPort: {{ .Values.msgTopologyOperator.containerPorts.metrics }} - protocol: TCP - {{- if not .Values.diagnosticMode.enabled }} - {{- if .Values.msgTopologyOperator.livenessProbe.enabled }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.msgTopologyOperator.livenessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http-metrics - {{- else if .Values.msgTopologyOperator.customLivenessProbe }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.customLivenessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.msgTopologyOperator.readinessProbe.enabled }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.msgTopologyOperator.readinessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http-metrics - {{- else if 
.Values.msgTopologyOperator.customReadinessProbe }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.customReadinessProbe "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.msgTopologyOperator.startupProbe.enabled }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.msgTopologyOperator.startupProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /metrics - port: http-metrics - {{- else if .Values.msgTopologyOperator.customStartupProbe }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.customStartupProbe "context" $) | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.msgTopologyOperator.lifecycleHooks }} - lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.lifecycleHooks "context" $) | nindent 12 }} - {{- end }} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - {{- if .Values.msgTopologyOperator.extraVolumeMounts }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.extraVolumeMounts "context" $) | nindent 12 }} - {{- end }} - {{- if .Values.msgTopologyOperator.sidecars }} - {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: {{ template "rmqco.msgTopologyOperator.webhook.secretName" . }} - {{- if .Values.msgTopologyOperator.extraVolumes }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.extraVolumes "context" $) | nindent 8 }} - {{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/metrics-service.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/metrics-service.yaml deleted file mode 100644 index a59115fb..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/metrics-service.yaml +++ /dev/null @@ -1,58 +0,0 @@ -{{- if .Values.msgTopologyOperator.metrics.enabled }} -apiVersion: v1 -kind: Service -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - type: metrics - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ printf "%s-metrics" (include "rmqco.msgTopologyOperator.fullname" .) }} - namespace: {{ include "common.names.namespace" . 
| quote }} - {{- if or .Values.commonAnnotations .Values.msgTopologyOperator.metrics.service.annotations }} - annotations: - {{- if .Values.msgTopologyOperator.metrics.service.annotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.metrics.service.annotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.msgTopologyOperator.metrics.service.type }} - {{- if (or (eq .Values.msgTopologyOperator.metrics.service.type "LoadBalancer") (eq .Values.msgTopologyOperator.metrics.service.type "NodePort")) }} - externalTrafficPolicy: {{ .Values.msgTopologyOperator.metrics.service.externalTrafficPolicy | quote }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.service.clusterIP }} - clusterIP: {{ .Values.msgTopologyOperator.metrics.service.clusterIP }} - {{- end }} - {{- if eq .Values.msgTopologyOperator.metrics.service.type "LoadBalancer" }} - loadBalancerSourceRanges: {{ .Values.msgTopologyOperator.metrics.service.loadBalancerSourceRanges }} - {{- end }} - {{- if (and (eq .Values.msgTopologyOperator.metrics.service.type "LoadBalancer") (not (empty .Values.msgTopologyOperator.metrics.service.loadBalancerIP))) }} - loadBalancerIP: {{ .Values.msgTopologyOperator.metrics.service.loadBalancerIP }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.service.sessionAffinity }} - sessionAffinity: {{ .Values.msgTopologyOperator.metrics.service.sessionAffinity }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.service.sessionAffinityConfig }} - sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.metrics.service.sessionAffinityConfig "context" $) | nindent 4 }} - {{- end }} - ports: - - name: http - port: {{ .Values.msgTopologyOperator.metrics.service.ports.http }} - targetPort: http-metrics - protocol: TCP - {{- if (and (or (eq .Values.msgTopologyOperator.metrics.service.type "NodePort") (eq .Values.msgTopologyOperator.metrics.service.type "LoadBalancer")) (not (empty .Values.msgTopologyOperator.metrics.service.nodePorts.http))) }} - nodePort: {{ .Values.msgTopologyOperator.metrics.service.nodePorts.http }} - {{- else if eq .Values.msgTopologyOperator.metrics.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.service.extraPorts }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.metrics.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - selector: {{- include "common.labels.matchLabels" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/role.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/role.yaml deleted file mode 100644 index e629efb8..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/role.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if .Values.msgTopologyOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: Role -metadata: - labels: {{- include "common.labels.standard" . 
| nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -rules: - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - - apiGroups: - - "" - resources: - - events - verbs: - - create - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/rolebinding.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/rolebinding.yaml deleted file mode 100644 index f4b94e47..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/rolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if .Values.msgTopologyOperator.rbac.create }} -apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} -kind: RoleBinding -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} -subjects: - - kind: ServiceAccount - name: {{ template "rmqco.msgTopologyOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . | quote }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/service-account.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/service-account.yaml deleted file mode 100644 index cbaf5cd3..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/service-account.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.msgTopologyOperator.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . 
| quote }} - {{- if or .Values.commonAnnotations .Values.msgTopologyOperator.serviceAccount.annotations }} - annotations: - {{- if .Values.msgTopologyOperator.serviceAccount.annotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.serviceAccount.annotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -automountServiceAccountToken: {{ .Values.msgTopologyOperator.serviceAccount.automountServiceAccountToken }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/servicemonitor.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/servicemonitor.yaml deleted file mode 100644 index c0220074..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/servicemonitor.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if and .Values.msgTopologyOperator.metrics.serviceMonitor.enabled .Values.msgTopologyOperator.metrics.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "rmqco.msgTopologyOperator.fullname" . }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.additionalLabels }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.metrics.serviceMonitor.additionalLabels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.labels }} - {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.metrics.serviceMonitor.labels "context" $) | nindent 4 }} - {{- end }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - namespace: {{ default (include "common.names.namespace" .) .Values.msgTopologyOperator.metrics.serviceMonitor.namespace | quote }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - jobLabel: {{ .Values.msgTopologyOperator.metrics.serviceMonitor.jobLabel }} - selector: - matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: messaging-topology-operator - # We need an extra label for the ServiceMonitor to scrape it correctly - type: metrics - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.selector }} - {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.metrics.serviceMonitor.selector "context" $ ) | nindent 6 }} - {{- end }} - namespaceSelector: - matchNames: - - {{ include "common.names.namespace" . 
| quote }} - endpoints: - - port: http - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.interval }} - interval: {{ .Values.msgTopologyOperator.metrics.serviceMonitor.interval }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.honorLabels }} - honorLabels: {{ .Values.msgTopologyOperator.metrics.serviceMonitor.honorLabels }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ .Values.msgTopologyOperator.metrics.serviceMonitor.scrapeTimeout }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.metricRelabelings }} - metricRelabelings: {{ toYaml .Values.msgTopologyOperator.metrics.serviceMonitor.metricRelabelings | nindent 8 }} - {{- end }} - {{- if .Values.msgTopologyOperator.metrics.serviceMonitor.relabelings }} - relabelings: {{ toYaml .Values.msgTopologyOperator.metrics.serviceMonitor.relabelings | nindent 8 }} - {{- end }} -{{- end }} diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/validating-webhook-configuration.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/validating-webhook-configuration.yaml deleted file mode 100644 index 1543a1a1..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/validating-webhook-configuration.yaml +++ /dev/null @@ -1,297 +0,0 @@ -{{/* - If the user does not have cert-manager and is not providing a secret with the certificates, the chart needs to generate the secret - */}} -{{- $ca := genCA "rmq-msg-topology-ca" 365 }} -{{- $cert := genSignedCert (include "rmqco.msgTopologyOperator.fullname" .) nil (list (printf "%s.%s.svc" (include "rmqco.msgTopologyOperator.webhook.fullname" .) (include "common.names.namespace" .)) (printf "%s.%s.svc.%s" (include "rmqco.msgTopologyOperator.webhook.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain)) 365 $ca }} - -{{- if and (not .Values.useCertManager) (not .Values.msgTopologyOperator.existingWebhookCertSecret) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" . | nindent 4 }} - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -type: kubernetes.io/tls -data: - tls.crt: {{ $cert.Cert | b64enc | quote }} - tls.key: {{ $cert.Key | b64enc | quote }} - ca.crt: {{ $ca.Cert | b64enc | quote }} -{{- end }} ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - annotations: - {{- if .Values.useCertManager }} - cert-manager.io/inject-ca-from: {{ printf "%s/%s" (include "common.names.namespace" .) ( include "rmqco.msgTopologyOperator.webhook.secretName" . 
) }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} -webhooks: - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-binding - failurePolicy: Fail - name: vbinding.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - bindings - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-exchange - failurePolicy: Fail - name: vexchange.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - exchanges - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-federation - failurePolicy: Fail - name: vfederation.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - federations - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1alpha1-superstream - failurePolicy: Fail - name: vsuperstream.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1alpha1 - operations: - - CREATE - - UPDATE - resources: - - superstreams - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . 
| quote }} - path: /validate-rabbitmq-com-v1beta1-permission - failurePolicy: Fail - name: vpermission.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - permissions - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-policy - failurePolicy: Fail - name: vpolicy.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - policies - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-queue - failurePolicy: Fail - name: vqueue.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - queues - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-schemareplication - failurePolicy: Fail - name: vschemareplication.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - schemareplications - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-shovel - failurePolicy: Fail - name: vshovel.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - shovels - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-user - failurePolicy: Fail - name: vuser.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - users - sideEffects: None - - admissionReviewVersions: - - v1 - clientConfig: - {{- if not .Values.useCertManager }} - caBundle: {{ default $ca.Cert .Values.msgTopologyOperator.existingWebhookCertCABundle | b64enc | quote }} - {{- end }} - service: - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . 
}} - namespace: {{ include "common.names.namespace" . | quote }} - path: /validate-rabbitmq-com-v1beta1-vhost - failurePolicy: Fail - name: vvhost.kb.io - rules: - - apiGroups: - - rabbitmq.com - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - vhosts - sideEffects: None diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/webhook-service.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/webhook-service.yaml deleted file mode 100644 index e48706e2..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/templates/messaging-topology-operator/webhook-service.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: {{- include "common.labels.standard" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator - app.kubernetes.io/part-of: rabbitmq - {{- if .Values.commonLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - name: {{ template "rmqco.msgTopologyOperator.webhook.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - {{- if or .Values.commonAnnotations .Values.msgTopologyOperator.service.annotations }} - annotations: - {{- if .Values.msgTopologyOperator.service.annotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.msgTopologyOperator.service.annotations "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ .Values.msgTopologyOperator.service.type }} - {{- if (or (eq .Values.msgTopologyOperator.service.type "LoadBalancer") (eq .Values.msgTopologyOperator.service.type "NodePort")) }} - externalTrafficPolicy: {{ .Values.msgTopologyOperator.service.externalTrafficPolicy | quote }} - {{- end }} - {{- if .Values.msgTopologyOperator.service.clusterIP }} - clusterIP: {{ .Values.msgTopologyOperator.service.clusterIP }} - {{- end }} - {{- if eq .Values.msgTopologyOperator.service.type "LoadBalancer" }} - loadBalancerSourceRanges: {{ .Values.msgTopologyOperator.service.loadBalancerSourceRanges }} - {{- end }} - {{- if (and (eq .Values.msgTopologyOperator.service.type "LoadBalancer") (not (empty .Values.msgTopologyOperator.service.loadBalancerIP))) }} - loadBalancerIP: {{ .Values.msgTopologyOperator.service.loadBalancerIP }} - {{- end }} - {{- if .Values.msgTopologyOperator.service.sessionAffinity }} - sessionAffinity: {{ .Values.msgTopologyOperator.service.sessionAffinity }} - {{- end }} - {{- if .Values.msgTopologyOperator.service.sessionAffinityConfig }} - sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.service.sessionAffinityConfig "context" $) | nindent 4 }} - {{- end }} - ports: - - name: http - port: {{ .Values.msgTopologyOperator.service.ports.webhook }} - targetPort: http-webhook - protocol: TCP - {{- if (and (or (eq .Values.msgTopologyOperator.service.type "NodePort") (eq .Values.msgTopologyOperator.service.type "LoadBalancer")) (not (empty .Values.msgTopologyOperator.service.nodePorts.http))) }} - nodePort: {{ .Values.msgTopologyOperator.service.nodePorts.http }} - {{- else if eq .Values.msgTopologyOperator.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.msgTopologyOperator.service.extraPorts }} - {{- 
include "common.tplvalues.render" (dict "value" .Values.msgTopologyOperator.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - selector: {{- include "common.labels.matchLabels" . | nindent 4 }} - app.kubernetes.io/component: messaging-topology-operator diff --git a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/values.yaml b/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/values.yaml deleted file mode 100644 index a5d989d8..00000000 --- a/components/charts/rabbitmq-cluster-operator/2.6.1/1.12.1/values.yaml +++ /dev/null @@ -1,919 +0,0 @@ -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass -## - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "" - -## @section Common parameters -## - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] -## Enable diagnostic mode in the deployment(s)/statefulset(s) -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled) - ## - enabled: false - -## @section RabbitMQ Cluster Operator Parameters -## - -## Bitnami RabbitMQ Image -## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ -## @param rabbitmqImage.registry RabbitMQ Image registry -## @param rabbitmqImage.repository RabbitMQ Image repository -## @param rabbitmqImage.tag RabbitMQ Image tag (immutable tags are recommended) -## @param rabbitmqImage.pullSecrets RabbitMQ Image pull secrets -## -rabbitmqImage: - registry: docker.io - repository: bitnami/rabbitmq - tag: 3.9.16-debian-10-r2 - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-rabbitmqImage-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - -## Bitnami RabbitMQ Default User Credential Updater Image -## ref: https://hub.docker.com/r/bitnami/rmq-default-credential-updater/tags/ -## @param credentialUpdaterImage.registry RabbitMQ Default User Credential Updater Image registry -## @param credentialUpdaterImage.repository RabbitMQ Default User Credential Updater Image repository -## @param credentialUpdaterImage.tag RabbitMQ Default User Credential Updater Image tag (immutable tags are recommended) -## @param credentialUpdaterImage.pullSecrets RabbitMQ Default User Credential Updater Image pull secrets -## -credentialUpdaterImage: - registry: docker.io - repository: bitnami/rmq-default-credential-updater - tag: 1.0.2-scratch-r2 - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-credentialUpdaterImage-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - -clusterOperator: - ## Bitnami RabbitMQ Cluster Operator image - ## ref: https://hub.docker.com/r/bitnami/rabbitmq-cluster-operator/tags/ - ## @param clusterOperator.image.registry RabbitMQ Cluster Operator image registry - ## @param clusterOperator.image.repository RabbitMQ Cluster Operator image repository - ## @param clusterOperator.image.tag RabbitMQ Cluster Operator image tag (immutable tags are recommended) - ## @param clusterOperator.image.pullPolicy RabbitMQ Cluster Operator image pull policy - ## @param clusterOperator.image.pullSecrets RabbitMQ Cluster Operator image pull secrets - ## - image: - registry: docker.io - repository: bitnami/rabbitmq-cluster-operator - tag: 1.12.1-scratch-r9 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - - ## @param clusterOperator.replicaCount Number of RabbitMQ Cluster Operator replicas to deploy - ## - replicaCount: 1 - ## @param clusterOperator.schedulerName Alternative scheduler - ## - schedulerName: "" - ## @param clusterOperator.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param clusterOperator.terminationGracePeriodSeconds In seconds, time the given to the %%MAIN_CONTAINER_NAME%% pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## Configure extra options for RabbitMQ Cluster Operator containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param clusterOperator.livenessProbe.enabled Enable livenessProbe on RabbitMQ Cluster Operator nodes - ## @param clusterOperator.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param clusterOperator.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param clusterOperator.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param clusterOperator.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param clusterOperator.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param clusterOperator.readinessProbe.enabled Enable readinessProbe on RabbitMQ Cluster Operator nodes - ## @param clusterOperator.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param clusterOperator.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param clusterOperator.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param clusterOperator.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param clusterOperator.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param clusterOperator.startupProbe.enabled Enable startupProbe on RabbitMQ Cluster Operator nodes - ## @param clusterOperator.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param clusterOperator.startupProbe.periodSeconds Period seconds for startupProbe - ## @param clusterOperator.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param clusterOperator.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param clusterOperator.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - - ## @param clusterOperator.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param clusterOperator.customReadinessProbe Custom readinessProbe that overrides the 
default one - ## - customReadinessProbe: {} - ## @param clusterOperator.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - - ## RabbitMQ Cluster Operator resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param clusterOperator.resources.limits The resources limits for the RabbitMQ Cluster Operator containers - ## @param clusterOperator.resources.requests The requested resources for the RabbitMQ Cluster Operator containers - ## - resources: - limits: {} - requests: {} - - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param clusterOperator.podSecurityContext.enabled Enabled RabbitMQ Cluster Operator pods' Security Context - ## @param clusterOperator.podSecurityContext.fsGroup Set RabbitMQ Cluster Operator pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param clusterOperator.containerSecurityContext.enabled Enabled RabbitMQ Cluster Operator containers' Security Context - ## @param clusterOperator.containerSecurityContext.runAsUser Set RabbitMQ Cluster Operator containers' Security Context runAsUser - ## @param clusterOperator.containerSecurityContext.runAsNonRoot Force running the container as non root - ## @param clusterOperator.containerSecurityContext.readOnlyRootFilesystem mount / (root) as a readonly filesystem on cluster operator containers - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - - ## @param clusterOperator.command Override default container command (useful when using custom images) - ## - command: [] - ## @param clusterOperator.args Override default container args (useful when using custom images) - ## - args: [] - ## @param clusterOperator.hostAliases RabbitMQ Cluster Operator pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param clusterOperator.podLabels Extra labels for RabbitMQ Cluster Operator pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param clusterOperator.podAnnotations Annotations for RabbitMQ Cluster Operator pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param clusterOperator.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param clusterOperator.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param clusterOperator.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` - ## - type: "" - ## @param clusterOperator.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set - ## - key: "" - ## @param clusterOperator.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param clusterOperator.affinity Affinity for RabbitMQ Cluster Operator pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param clusterOperator.nodeSelector Node labels for RabbitMQ Cluster Operator pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param clusterOperator.tolerations Tolerations for RabbitMQ Cluster Operator pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param clusterOperator.updateStrategy.type RabbitMQ Cluster Operator statefulset strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param clusterOperator.priorityClassName RabbitMQ Cluster Operator pods' priorityClassName - ## - priorityClassName: "" - ## @param clusterOperator.lifecycleHooks for the RabbitMQ Cluster Operator container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - - ## @param clusterOperator.containerPorts.metrics RabbitMQ Cluster Operator container port (used for metrics) - ## - containerPorts: - metrics: 9782 - - ## @param clusterOperator.extraEnvVars Array with extra environment variables to add to RabbitMQ Cluster Operator nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param clusterOperator.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for RabbitMQ Cluster Operator nodes - ## - extraEnvVarsCM: "" - ## @param clusterOperator.extraEnvVarsSecret Name of existing Secret containing extra env vars for RabbitMQ Cluster Operator nodes - ## - extraEnvVarsSecret: "" - ## @param clusterOperator.extraVolumes Optionally specify extra list of additional volumes for the RabbitMQ Cluster Operator pod(s) - ## - extraVolumes: [] - ## @param clusterOperator.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the RabbitMQ Cluster Operator container(s) - ## - extraVolumeMounts: [] - ## @param clusterOperator.sidecars Add additional sidecar containers to the RabbitMQ Cluster Operator pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param clusterOperator.initContainers Add additional init containers to the RabbitMQ Cluster Operator pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - - ## RBAC configuration - ## - rbac: - ## @param clusterOperator.rbac.create Specifies whether RBAC resources should be created - ## - create: true - - ## ServiceAccount configuration 
- ## - serviceAccount: - ## @param clusterOperator.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param clusterOperator.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param clusterOperator.serviceAccount.annotations Add annotations - ## - annotations: {} - ## @param clusterOperator.serviceAccount.automountServiceAccountToken Automount API credentials for a service account. - ## - automountServiceAccountToken: true - - ## @section RabbitMQ Cluster Operator Metrics parameters - ## - metrics: - ## @param clusterOperator.metrics.enabled Create a service for accessing the metrics endpoint - ## - enabled: false - ## Metrics service parameters - ## - service: - ## @param clusterOperator.metrics.service.type RabbitMQ Cluster Operator metrics service type - ## - type: ClusterIP - ## @param clusterOperator.metrics.service.ports.http RabbitMQ Cluster Operator metrics service HTTP port - ## - ports: - http: 80 - ## Node ports to expose - ## @param clusterOperator.metrics.service.nodePorts.http Node port for HTTP - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - ## @param clusterOperator.metrics.service.clusterIP RabbitMQ Cluster Operator metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param clusterOperator.metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param clusterOperator.metrics.service.loadBalancerIP RabbitMQ Cluster Operator metrics service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param clusterOperator.metrics.service.loadBalancerSourceRanges RabbitMQ Cluster Operator metrics service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param clusterOperator.metrics.service.externalTrafficPolicy RabbitMQ Cluster Operator metrics service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param clusterOperator.metrics.service.annotations [object] Additional custom annotations for RabbitMQ Cluster Operator metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.clusterOperator.metrics.service.ports.http }}" - ## @param clusterOperator.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param clusterOperator.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - serviceMonitor: - ## @param clusterOperator.metrics.serviceMonitor.enabled Specify if a servicemonitor will be deployed for prometheus-operator - ## - enabled: false - ## @param clusterOperator.metrics.serviceMonitor.namespace 
Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param clusterOperator.metrics.serviceMonitor.jobLabel Specify the jobLabel to use for the prometheus-operator - ## - jobLabel: app.kubernetes.io/name - ## @param clusterOperator.metrics.serviceMonitor.honorLabels Honor metrics labels - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - honorLabels: false - ## @param clusterOperator.metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## e.g: - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param clusterOperator.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## e.g: - ## scrapeTimeout: 10s - ## - scrapeTimeout: "" - ## @param clusterOperator.metrics.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used - ## - interval: "" - - ## DEPRECATED: Use clusterOperator.metrics.serviceMonitor.labels instead - ## This value will be removed in a future release - ## additionalLabels: {} - - ## @param clusterOperator.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics - ## - metricRelabelings: [] - ## @param clusterOperator.metrics.serviceMonitor.relabelings Specify general relabeling - ## - relabelings: [] - ## @param clusterOperator.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor - ## - labels: {} - -## @section RabbitMQ Messaging Topology Operator Parameters -## -msgTopologyOperator: - ## Bitnami RabbitMQ Messaging Topology Operator image - ## ref: https://hub.docker.com/r/bitnami/rmq-messaging-topology-operator/tags/ - ## @param msgTopologyOperator.image.registry RabbitMQ Messaging Topology Operator image registry - ## @param msgTopologyOperator.image.repository RabbitMQ Messaging Topology Operator image repository - ## @param msgTopologyOperator.image.tag RabbitMQ Messaging Topology Operator image tag (immutable tags are recommended) - ## @param msgTopologyOperator.image.pullPolicy RabbitMQ Messaging Topology Operator image pull policy - ## @param msgTopologyOperator.image.pullSecrets RabbitMQ Messaging Topology Operator image pull secrets - ## - image: - registry: docker.io - repository: bitnami/rmq-messaging-topology-operator - tag: 1.5.0-scratch-r2 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - - ## @param msgTopologyOperator.replicaCount Number of RabbitMQ Messaging Topology Operator replicas to deploy - ## - replicaCount: 1 - ## @param msgTopologyOperator.topologySpreadConstraints Topology Spread Constraints for pod assignment - ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ - ## The value is evaluated as a template - ## - topologySpreadConstraints: [] - ## @param msgTopologyOperator.schedulerName Alternative scheduler - ## - schedulerName: "" - ## @param msgTopologyOperator.terminationGracePeriodSeconds In seconds, time the given to the %%MAIN_CONTAINER_NAME%% pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## Configure extra options for RabbitMQ Messaging Topology Operator containers' liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes - ## @param msgTopologyOperator.livenessProbe.enabled Enable livenessProbe on RabbitMQ Messaging Topology Operator nodes - ## @param msgTopologyOperator.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param msgTopologyOperator.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param msgTopologyOperator.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param msgTopologyOperator.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param msgTopologyOperator.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param msgTopologyOperator.readinessProbe.enabled Enable readinessProbe on RabbitMQ Messaging Topology Operator nodes - ## @param msgTopologyOperator.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param msgTopologyOperator.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param msgTopologyOperator.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param msgTopologyOperator.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param msgTopologyOperator.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - ## @param msgTopologyOperator.startupProbe.enabled Enable startupProbe on RabbitMQ Messaging Topology Operator nodes - ## @param msgTopologyOperator.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param msgTopologyOperator.startupProbe.periodSeconds Period seconds for startupProbe - ## @param msgTopologyOperator.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param msgTopologyOperator.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param msgTopologyOperator.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 5 - periodSeconds: 30 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - - ## @param msgTopologyOperator.customLivenessProbe Custom livenessProbe that 
overrides the default one - ## - customLivenessProbe: {} - ## @param msgTopologyOperator.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param msgTopologyOperator.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - - ## @param msgTopologyOperator.existingWebhookCertSecret name of a secret containing the certificates (use it to avoid certManager creating one) - ## - existingWebhookCertSecret: "" - - ## @param msgTopologyOperator.existingWebhookCertCABundle PEM-encoded CA Bundle of the existing secret provided in existingWebhookCertSecret (only if useCertManager=false) - ## - existingWebhookCertCABundle: "" - - ## RabbitMQ Messaging Topology Operator resource requests and limits - ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## @param msgTopologyOperator.resources.limits The resources limits for the RabbitMQ Messaging Topology Operator containers - ## @param msgTopologyOperator.resources.requests The requested resources for the RabbitMQ Messaging Topology Operator containers - ## - resources: - limits: {} - requests: {} - - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param msgTopologyOperator.podSecurityContext.enabled Enabled RabbitMQ Messaging Topology Operator pods' Security Context - ## @param msgTopologyOperator.podSecurityContext.fsGroup Set RabbitMQ Messaging Topology Operator pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroup: 1001 - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param msgTopologyOperator.containerSecurityContext.enabled Enabled RabbitMQ Messaging Topology Operator containers' Security Context - ## @param msgTopologyOperator.containerSecurityContext.runAsUser Set RabbitMQ Messaging Topology Operator containers' Security Context runAsUser - ## @param msgTopologyOperator.containerSecurityContext.runAsNonRoot Force running the container as non root - ## @param msgTopologyOperator.containerSecurityContext.readOnlyRootFilesystem mount / (root) as a readonly filesystem on Messaging Topology Operator - ## - containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsNonRoot: true - readOnlyRootFilesystem: true - - ## @param msgTopologyOperator.fullnameOverride String to fully override rmqco.msgTopologyOperator.fullname template - ## - fullnameOverride: "" - - ## @param msgTopologyOperator.command Override default container command (useful when using custom images) - ## - command: [] - ## @param msgTopologyOperator.args Override default container args (useful when using custom images) - ## - args: [] - ## @param msgTopologyOperator.hostAliases RabbitMQ Messaging Topology Operator pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param msgTopologyOperator.podLabels Extra labels for RabbitMQ Messaging Topology Operator pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param msgTopologyOperator.podAnnotations Annotations for RabbitMQ Messaging Topology Operator pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param 
msgTopologyOperator.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param msgTopologyOperator.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## Node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param msgTopologyOperator.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param msgTopologyOperator.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set - ## - key: "" - ## @param msgTopologyOperator.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param msgTopologyOperator.affinity Affinity for RabbitMQ Messaging Topology Operator pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set - ## - affinity: {} - ## @param msgTopologyOperator.nodeSelector Node labels for RabbitMQ Messaging Topology Operator pods assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param msgTopologyOperator.tolerations Tolerations for RabbitMQ Messaging Topology Operator pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param msgTopologyOperator.updateStrategy.type RabbitMQ Messaging Topology Operator statefulset strategy type - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - ## @param msgTopologyOperator.priorityClassName RabbitMQ Messaging Topology Operator pods' priorityClassName - ## - priorityClassName: "" - ## @param msgTopologyOperator.lifecycleHooks for the RabbitMQ Messaging Topology Operator container(s) to automate configuration before or after startup - ## - lifecycleHooks: {} - - ## @param msgTopologyOperator.containerPorts.metrics RabbitMQ Messaging Topology Operator container port (used for metrics) - ## - containerPorts: - metrics: 8080 - - ## @param msgTopologyOperator.extraEnvVars Array with extra environment variables to add to RabbitMQ Messaging Topology Operator nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param msgTopologyOperator.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for RabbitMQ Messaging Topology Operator nodes - ## - extraEnvVarsCM: "" - ## @param msgTopologyOperator.extraEnvVarsSecret Name of existing Secret containing extra env vars for RabbitMQ Messaging Topology Operator nodes - ## - extraEnvVarsSecret: "" - ## @param msgTopologyOperator.extraVolumes Optionally specify extra list of additional volumes for the RabbitMQ Messaging Topology Operator pod(s) - ## - extraVolumes: [] - ## @param 
msgTopologyOperator.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the RabbitMQ Messaging Topology Operator container(s) - ## - extraVolumeMounts: [] - ## @param msgTopologyOperator.sidecars Add additional sidecar containers to the RabbitMQ Messaging Topology Operator pod(s) - ## e.g: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param msgTopologyOperator.initContainers Add additional init containers to the RabbitMQ Messaging Topology Operator pod(s) - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - ## e.g: - ## initContainers: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## command: ['sh', '-c', 'echo "hello world"'] - ## - initContainers: [] - - ## Webhook service parameters - ## - service: - ## @param msgTopologyOperator.service.type RabbitMQ Messaging Topology Operator webhook service type - ## - type: ClusterIP - ## @param msgTopologyOperator.service.ports.webhook RabbitMQ Messaging Topology Operator webhook service HTTP port - ## - ports: - webhook: 443 - ## Node ports to expose - ## @param msgTopologyOperator.service.nodePorts.http Node port for HTTP - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - ## @param msgTopologyOperator.service.clusterIP RabbitMQ Messaging Topology Operator webhook service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param msgTopologyOperator.service.loadBalancerIP RabbitMQ Messaging Topology Operator webhook service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param msgTopologyOperator.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param msgTopologyOperator.service.loadBalancerSourceRanges RabbitMQ Messaging Topology Operator webhook service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param msgTopologyOperator.service.externalTrafficPolicy RabbitMQ Messaging Topology Operator webhook service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param msgTopologyOperator.service.annotations Additional custom annotations for RabbitMQ Messaging Topology Operator webhook service - ## - annotations: {} - ## @param msgTopologyOperator.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param msgTopologyOperator.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - - ## RBAC configuration - ## - rbac: - ## @param msgTopologyOperator.rbac.create Specifies whether RBAC resources should be created - ## - create: true - - ## ServiceAccount configuration - ## - serviceAccount: - ## 
@param msgTopologyOperator.serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param msgTopologyOperator.serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param msgTopologyOperator.serviceAccount.annotations Add annotations - ## - annotations: {} - ## @param msgTopologyOperator.serviceAccount.automountServiceAccountToken Automount API credentials for a service account. - ## - automountServiceAccountToken: true - - ## @section RabbitMQ Messaging Topology Operator parameters - ## - metrics: - ## @param msgTopologyOperator.metrics.enabled Create a service for accessing the metrics endpoint - ## - enabled: false - ## Metrics service parameters - ## - service: - ## @param msgTopologyOperator.metrics.service.type RabbitMQ Cluster Operator metrics service type - ## - type: ClusterIP - ## @param msgTopologyOperator.metrics.service.ports.http RabbitMQ Cluster Operator metrics service HTTP port - ## - ports: - http: 80 - ## Node ports to expose - ## @param msgTopologyOperator.metrics.service.nodePorts.http Node port for HTTP - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - ## @param msgTopologyOperator.metrics.service.clusterIP RabbitMQ Cluster Operator metrics service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param msgTopologyOperator.metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param msgTopologyOperator.metrics.service.loadBalancerIP RabbitMQ Cluster Operator metrics service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param msgTopologyOperator.metrics.service.loadBalancerSourceRanges RabbitMQ Cluster Operator metrics service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param msgTopologyOperator.metrics.service.externalTrafficPolicy RabbitMQ Cluster Operator metrics service external traffic policy - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param msgTopologyOperator.metrics.service.annotations [object] Additional custom annotations for RabbitMQ Cluster Operator metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.msgTopologyOperator.metrics.service.ports.http }}" - ## @param msgTopologyOperator.metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param msgTopologyOperator.metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - serviceMonitor: - ## @param msgTopologyOperator.metrics.serviceMonitor.enabled Specify if a servicemonitor will be deployed for prometheus-operator - ## - enabled: false - ## @param 
msgTopologyOperator.metrics.serviceMonitor.namespace Namespace which Prometheus is running in - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param msgTopologyOperator.metrics.serviceMonitor.jobLabel Specify the jobLabel to use for the prometheus-operator - ## - jobLabel: app.kubernetes.io/name - - ## DEPRECATED: Use msgTopologyOperator.metrics.serviceMonitor.labels instead. - ## This value will be removed in a future release - ## additionalLabels: {} - - ## @param msgTopologyOperator.metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## e.g: - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param msgTopologyOperator.metrics.serviceMonitor.honorLabels Honor metrics labels - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - honorLabels: false - ## @param msgTopologyOperator.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## e.g: - ## scrapeTimeout: 10s - ## - scrapeTimeout: "" - ## @param msgTopologyOperator.metrics.serviceMonitor.interval Scrape interval. If not set, the Prometheus default scrape interval is used - ## - interval: "" - ## @param msgTopologyOperator.metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics - ## - metricRelabelings: [] - ## @param msgTopologyOperator.metrics.serviceMonitor.relabelings Specify general relabeling - ## - relabelings: [] - ## @param msgTopologyOperator.metrics.serviceMonitor.labels Extra labels for the ServiceMonitor - ## - labels: {} - -## @section cert-manager parameters -## - -## @param useCertManager Deploy cert-manager objects (Issuer and Certificate) for webhooks -## -useCertManager: false diff --git a/components/generators/argocd/__init__.py b/components/generators/argocd/__init__.py deleted file mode 100644 index 10abee23..00000000 --- a/components/generators/argocd/__init__.py +++ /dev/null @@ -1,349 +0,0 @@ -import base64 -import hashlib -import os - -from kapitan.cached import args -from kapitan.inputs.kadet import BaseObj, inventory -from kapitan.utils import render_jinja2_file - -search_paths = args.get("search_paths") - -from . import k8s - - -def j2(filename, ctx): - return render_jinja2_file(filename, ctx, search_paths=search_paths) - - -inv = inventory(lazy=True) - - -def merge(source, destination): - for key, value in source.items(): - if isinstance(value, dict): - node = destination.setdefault(key, value) - if node is None: - destination[key] = value - else: - merge(value, node) - else: - destination[key] = destination.setdefault(key, value) - - return destination - - -class ArgoCDAppProject(k8s.Base): - def new(self): - self.need("name") - self.kwargs.apiVersion = "argoproj.io/v1alpha1" - self.kwargs.kind = "AppProject" - - # Add a this finalizer ONLY if you want these to cascade delete - self.kwargs.finalizers = list("resources-finalizer.argocd.argoproj.io") - super().new() - - def body(self): - super().body() - - # You'll usually want to add your resources to the argocd namespace. 
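The `merge()` helper above performs a deep merge in which the destination mapping wins on conflicts and the source only fills in missing keys, which is easy to misread at a glance. A minimal standalone sketch of that behaviour; the `project`/`sync_policy` keys are illustrative, loosely modelled on ArgoCD Application fields, and are not taken from this repo's inventory:

```python
def merge(source, destination):
    """Deep merge: existing destination values win, source only supplies defaults."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, value)
            if node is None:
                destination[key] = value
            else:
                merge(value, node)
        else:
            destination[key] = destination.setdefault(key, value)
    return destination


# Illustrative values only.
defaults = {"project": "default", "sync_policy": {"automated": {"prune": True}}}
overrides = {"project": "sock-shop", "sync_policy": {"automated": {"selfHeal": True}}}

assert merge(defaults, overrides) == {
    "project": "sock-shop",  # destination value kept
    "sync_policy": {"automated": {"selfHeal": True, "prune": True}},  # gap filled from defaults
}
```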
- self.add_namespace(inv.parameters.argocd_namespace) - - argocd_project = self.kwargs.argocd_project - - self.add_annotations(argocd_project.get("annotations", {})) - self.add_labels(argocd_project.get("labels", {})) - - # Allow manifests to deploy from any Git repos - if argocd_project.source_repos: - self.root.spec.sourceRepos = argocd_project.source_repos - - # Only permit applications to deploy to the namespace in the same cluster - if argocd_project.destinations: - self.root.spec.destinations = argocd_project.destinations - - # Deny all cluster-scoped resources from being created, except for Namespace - if argocd_project.cluster_resource_whitelist: - self.root.spec.clusterResourceWhitelist = ( - argocd_project.cluster_resource_whitelist - ) - - # Allow all namespaced-scoped resources to be created, except for ResourceQuota, LimitRange, NetworkPolicy - if argocd_project.namespace_resource_blacklist: - self.root.spec.namespaceResourceBlacklist = ( - argocd_project.namespace_resource_blacklist - ) - - # Deny all namespaced-scoped resources from being created, except for Deployment and StatefulSet - if argocd_project.namespace_resource_whitelist: - self.root.spec.namespaceResourceWhitelist = ( - argocd_project.namespace_resource_whitelist - ) - - # Enables namespace orphaned resource monitoring. - if argocd_project.orphaned_resources: - self.root.spec.orphanedResources = argocd_project.orphaned_resources - - # Roles - if argocd_project.roles: - self.root.spec.roles = argocd_project.roles - - -class ArgoCDApplication(k8s.Base): - def new(self): - self.need("name") - self.kwargs.apiVersion = "argoproj.io/v1alpha1" - self.kwargs.kind = "Application" - - # Add a this finalizer ONLY if you want these to cascade delete - - # self.kwargs.finalizers = list('resources-finalizer.argocd.argoproj.io') - super().new() - - def body(self): - super().body() - - # You'll usually want to add your resources to the argocd namespace. - self.add_namespace(inv.parameters.argocd_namespace) - - argocd_application = self.kwargs.argocd_application - - self.add_annotations(argocd_application.get("annotations", {})) - self.add_labels(argocd_application.get("labels", {})) - - # The project the argocd_application belongs to. - self.root.spec.project = argocd_application.project - - # The destination in which Namespace the application should be deployed - self.root.spec.destination = argocd_application.destination - - # Source of the application manifests - if argocd_application.source: - self.root.spec.source = argocd_application.source - - # Sync policy - if argocd_application.sync_policy: - self.root.spec.syncPolicy = argocd_application.sync_policy - - # Ignore differences at the specified json pointers - if argocd_application.ignore_differences: - self.root.spec.ignoreDifferences = argocd_application.ignore_differences - - -# The following classes are required to generate Secrets + ConfigMaps -# TODO: Imported from k8s-generator -class SharedConfig: - """Shared class to use for both Secrets and ConfigMaps classes. - - contain anything needed by both classes, so that their behavious is basically the same. - Each subclass will then implement its own way of adding the data depending on their implementation. 
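The ConfigMap and Secret subclasses below differ mainly in which field the values land in and when they are base64-encoded, plus an optional content-hash suffix on the resource name. A rough sketch of those behaviours using plain dicts instead of kadet objects; the resource name and values are illustrative only:

```python
import base64
import hashlib


def encode_string(unencoded_string):
    return base64.b64encode(unencoded_string.encode("ascii")).decode("ascii")


def configmap_add_item(manifest, key, value, request_encode=False):
    # ConfigMaps: values go under `data`, base64-encoded only when explicitly requested.
    manifest.setdefault("data", {})[key] = encode_string(value) if request_encode else value


def secret_add_item(manifest, key, value, request_encode=False, stringdata=False):
    # Secrets: plain values go to `stringData` (e.g. when Tesoro is in use),
    # otherwise to `data`, base64-encoding only when requested.
    encode = not stringdata and request_encode
    field = "stringData" if stringdata else "data"
    manifest.setdefault(field, {})[key] = encode_string(value) if encode else value


def versioned_name(manifest):
    # Versioning: append the first 8 hex chars of a sha256 over the rendered object,
    # so changed content yields a new resource name.
    digest = hashlib.sha256(str(manifest).encode()).hexdigest()[:8]
    return f"{manifest['metadata']['name']}-{digest}"


secret = {"metadata": {"name": "echo-server"}}
secret_add_item(secret, "password", "hunter2", request_encode=True)
print(secret["data"]["password"])  # aHVudGVyMg==
print(versioned_name(secret))      # e.g. echo-server-1a2b3c4d
```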
- """ - - @staticmethod - def encode_string(unencoded_string): - return base64.b64encode(unencoded_string.encode("ascii")).decode("ascii") - - def setup_metadata(self): - self.add_namespace(inv.parameters.argocd_namespace) - self.add_annotations(self.config.annotations) - self.add_labels(self.config.labels) - - self.items = self.config["items"] - try: - if isinstance(self, ConfigMap): - globals = ( - inv.parameters.generators.manifest.default_config.globals.config_maps - ) - else: - globals = ( - inv.parameters.generators.manifest.default_config.globals.secrets - ) - self.add_annotations(globals.get("annotations", {})) - self.add_labels(globals.get("labels", {})) - except AttributeError: - pass - - self.versioning(self.config.get("versioned", False)) - - def add_directory(self, directory, encode=False): - stringdata = inv.parameters.get("use_tesoro", False) - if directory and os.path.isdir(directory): - for filename in os.listdir(directory): - with open(f"{directory}/{filename}", "r") as f: - file_content = f.read() - self.add_item( - filename, - file_content, - request_encode=encode, - stringdata=stringdata, - ) - - def add_data(self, data): - stringdata = inv.parameters.get("use_tesoro", False) - - for key, spec in data.items(): - encode = spec.get("b64_encode", False) - - if "value" in spec: - value = spec.get("value") - if "template" in spec: - value = j2(spec.template, spec.get("values", {})) - if "file" in spec: - with open(spec.file, "r") as f: - value = f.read() - - self.add_item(key, value, request_encode=encode, stringdata=stringdata) - - def add_string_data(self, string_data, encode=False): - stringdata = True - - for key, spec in string_data.items(): - - if "value" in spec: - value = spec.get("value") - if "template" in spec: - value = j2(spec.template, spec.get("values", {})) - if "file" in spec: - with open(spec.file, "r") as f: - value = f.read() - - self.add_item(key, value, request_encode=encode, stringdata=stringdata) - - def versioning(self, enabled=False): - if enabled: - self.hash = hashlib.sha256(str(self.root.to_dict()).encode()).hexdigest()[ - :8 - ] - self.root.metadata.name += f"-{self.hash}" - - -# TODO: Imported from k8s-generator -class ConfigMap(k8s.Base, SharedConfig): - def new(self): - self.kwargs.apiVersion = "v1" - self.kwargs.kind = "ConfigMap" - super().new() - - def body(self): - super().body() - - def add_item(self, key, value, request_encode=False, stringdata=False): - encode = request_encode - - self.root["data"][key] = self.encode_string(value) if encode else value - - -# TODO: Imported from k8s-generator -class ComponentConfig(ConfigMap, SharedConfig): - def new(self): - super().new() - self.need("config") - - def body(self): - super().body() - self.config = self.kwargs.config - - self.setup_metadata() - self.add_data(self.config.data) - self.add_directory(self.config.directory, encode=False) - - -class Secret(k8s.Base): - def new(self): - self.kwargs.apiVersion = "v1" - self.kwargs.kind = "Secret" - super().new() - - def body(self): - super().body() - - def add_item(self, key, value, request_encode=False, stringdata=False): - encode = not stringdata and request_encode - field = "stringData" if stringdata else "data" - self.root[field][key] = self.encode_string(value) if encode else value - - -class ComponentSecret(Secret, SharedConfig): - def new(self): - super().new() - self.need("config") - - def body(self): - super().body() - self.config = self.kwargs.config - self.root.type = self.config.get("type", "Opaque") - - self.setup_metadata() - if 
self.config.data: - self.add_data(self.config.data) - if self.config.string_data: - self.add_string_data(self.config.string_data) - self.add_directory(self.config.directory, encode=True) - - -# This function renderes an ArgoCD-AppProject -def generate_argocd_appproject(input_params): - obj = BaseObj() - bundle = list() - argocd_projects = inv.parameters.argocd_projects - for name in argocd_projects.keys(): - argocd_project = ArgoCDAppProject( - name=name, argocd_project=argocd_projects[name] - ) - - obj.root["{}-argo-appproject".format(name)] = argocd_project - - return obj - - -# This function renderes an ArgoCD-Application -def generate_argocd_application(input_params): - obj = BaseObj() - bundle = list() - argocd_applications = inv.parameters.argocd_applications - for name in argocd_applications.keys(): - argocd_application = ArgoCDApplication( - name=name, argocd_application=argocd_applications[name] - ) - - obj.root["{}-argo-application".format(name)] = argocd_application - - return obj - - -# This function renderes an Shared-ConfigMaps + Secrets -def generate_resource_manifests(input_params): - obj = BaseObj() - - for secret_name, secret_spec in inv.parameters.generators.argocd.secrets.items(): - name = secret_spec.get("name", secret_name) - secret = ComponentSecret(name=name, config=secret_spec) - obj.root[f"{name}"] = secret - - for config_name, config_spec in inv.parameters.generators.argocd.configs.items(): - name = config_spec.get("name", config_name) - config = ComponentConfig(name=name, config=config_spec) - obj.root[f"{name}"] = config - - return obj - - -# This function renderes all previous defined functions and returns -def generate_manifests(input_params): - all_manifests = BaseObj() - - argocd_project_manifests = generate_argocd_appproject(input_params) - argocd_application_manifests = generate_argocd_application(input_params) - resource_manifests = generate_resource_manifests(input_params) - - all_manifests.root.update(argocd_project_manifests.root) - all_manifests.root.update(argocd_application_manifests.root) - all_manifests.root.update(resource_manifests.root) - - return all_manifests - - -def main(input_params): - whitelisted_functions = ["generate_manifests"] - function = input_params.get("function", "generate_manifests") - if function in whitelisted_functions: - return globals()[function](input_params) diff --git a/components/generators/argocd/k8s.py b/components/generators/argocd/k8s.py deleted file mode 100644 index 5b86d244..00000000 --- a/components/generators/argocd/k8s.py +++ /dev/null @@ -1,33 +0,0 @@ -from kapitan.inputs.kadet import BaseObj - - -# TODO: Imported from k8s-generator -class Base(BaseObj): - def new(self): - self.need("apiVersion") - self.need("kind") - self.need("name") - - def body(self): - self.root.apiVersion = self.kwargs.apiVersion - self.root.kind = self.kwargs.kind - self.name = self.kwargs.name - self.root.metadata.name = self.kwargs.get("rendered_name", self.name) - self.add_label("name", self.root.metadata.name) - - def add_labels(self, labels): - for key, value in labels.items(): - self.add_label(key, value) - - def add_label(self, key, value): - self.root.metadata.labels[key] = value - - def add_namespace(self, namespace): - self.root.metadata.namespace = namespace - - def add_annotations(self, annotations): - for key, value in annotations.items(): - self.add_annotation(key, value) - - def add_annotation(self, key, value): - self.root.metadata.annotations[key] = value diff --git a/components/generators/ingresses/README.md 
b/components/generators/ingresses/README.md deleted file mode 100644 index 3c1fbe2b..00000000 --- a/components/generators/ingresses/README.md +++ /dev/null @@ -1,242 +0,0 @@ -# Ingresses generator - -The `ingresses` generator adds on the manifest generator by providing a quick way to expose paths to your application using ingresses resources. - -## Basic usage - -The generator is expecting ingresses to be defined under the `parameters.ingresses` path of the inventory. - -For convenience, you can add the configuration in the same files as your component. - -For instance, add the following to the component [echo-server](inventory/classes/components/echo-server.yml). - -```yaml -ingresses: - global: - annotations: - kubernetes.io/ingress.global-static-ip-name: my_static_ip - paths: - - backend: - serviceName: echo-server - servicePort: 80 - path: /echo/* -``` - -which will generate a file similar to: - -```yaml -apiVersion: networking.k8s.io/v1beta1 -kind: Ingress -metadata: - annotations: - kubernetes.io/ingress.global-static-ip-name: my_static_ip - labels: - name: global - name: global - namespace: tutorial -spec: - rules: - - http: - paths: - - backend: - serviceName: echo-server - servicePort: 80 - path: /echo/* -``` - -Injecting "rules" confirations is also supported: - -```yaml -ingresses: - global: - annotations: - kubernetes.io/ingress.global-static-ip-name: my_static_ip - rules: - - http: - paths: - - backend: - serviceName: echo-server - servicePort: 80 - path: /echo/* -``` - -### Create an ingress resource - -Each key under the `ingresses` parameters represent an ingress resource: - -```yaml -parameters: ---- -ingresses: - main: - default_backend: - name: frontend - port: 80 -``` - -Will generate the following `Ingress` resource - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - labels: - name: main - name: main - namespace: prod-sockshop -spec: - backend: - serviceName: frontend - servicePort: 80 -``` - -### Add annotations to an ingress - -Simply adding the `annotations` directive allows to configure an ingress: - -```yaml -ingresses: - main: - annotations: - kubernetes.io/ingress.global-static-ip-name: static-ip-name - default_backend: - name: frontend - port: 80 -``` - -The generator will add the annotations to the resource - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - kubernetes.io/ingress.global-static-ip-name: static-ip-name - labels: - name: main - name: main - namespace: prod-sockshop -spec: - backend: - serviceName: frontend - servicePort: 80 -``` - -## Adding TLS certificates - -You can define a TLS certificate to be used by the ingress with the following syntax - -```yaml -generators: - kubernetes: - secrets: - sockshop.kapicorp.com: - type: kubernetes.io/tls - data: - tls.crt: - value: ?{gkms:targets/${target_name}/sockshop.kapicorp.com.crt} - tls.key: - value: ?{gkms:targets/${target_name}/sockshop.kapicorp.com.key} -``` - -Both references need to be configured before hand with the correct PEM certificates. 
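What this configuration ultimately renders to is a standard `kubernetes.io/tls` Secret whose `data` carries base64-encoded `tls.crt` and `tls.key` entries. A hedged sketch of that shape, built from local PEM files purely for illustration (the file paths are hypothetical; in this repo the values come from the gkms refs above):

```python
import base64

import yaml  # PyYAML, assumed to be available


def tls_secret(name, namespace, crt_path, key_path):
    def b64_file(path):
        with open(path, "rb") as f:
            return base64.b64encode(f.read()).decode("ascii")

    return {
        "apiVersion": "v1",
        "kind": "Secret",
        "type": "kubernetes.io/tls",
        "metadata": {"name": name, "namespace": namespace},
        "data": {"tls.crt": b64_file(crt_path), "tls.key": b64_file(key_path)},
    }


# Hypothetical local files standing in for the configured refs.
print(yaml.safe_dump(tls_secret(
    "sockshop.kapicorp.com", "prod-sockshop",
    "sockshop.kapicorp.com.crt", "sockshop.kapicorp.com.key",
)))
```

The `secretName` under `tls` in the ingress configuration then only needs to match this Secret's `metadata.name`.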
- -You can then pass the TLS configuration to the ingress, with a reference to the secret just created: - -```yaml - ingresses: - global: - annotations: - kubernetes.io/ingress.global-static-ip-name: sock-shop-prod - default_backend: - name: frontend - port: 80 - tls: - - hosts: - - sockshop.kapicorp.com - secretName: sockshop.kapicorp.com -``` - -## Managed certificats (currently GKE only) - -### Add a managed certificate - -Set the `manage_certificate` directive to the domain you want to manage a certificate for. - -```yaml -ingresses: - main: - managed_certificate: sockshop.kapicorp.com - annotations: - kubernetes.io/ingress.global-static-ip-name: static-ip-name - default_backend: - name: frontend - port: 80 -``` - -Which will create a new `ManagedCertificate` resource for such domain - -```yaml -apiVersion: networking.gke.io/v1beta1 -kind: ManagedCertificate -metadata: - labels: - name: sockshop.kapicorp.com - name: sockshop.kapicorp.com - namespace: prod-sockshop -spec: - domains: - - sockshop.kapicorp.com -``` - -and injects the correct annotation into the ingress resource: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - kubernetes.io/ingress.global-static-ip-name: static-ip-name - networking.gke.io/managed-certificates: sockshop.kapicorp.com - labels: - name: main - name: main - namespace: prod-sockshop -spec: - backend: - serviceName: frontend - servicePort: 80 -``` - -### Multiple certificats - -The generator also supports multiple certificates with the `additional_domains` directive. - -```yaml -ingresses: - main: - annotations: - kubernetes.io/ingress.global-static-ip-name: static-ip-name - managed_certificate: sockshop.kapicorp.com - additional_domains: - - secure.kapicorp.com - default_backend: - name: frontend - port: 80 -``` - -Which will generate: - -```yaml -apiVersion: networking.gke.io/v1beta1 -kind: ManagedCertificate -metadata: - labels: - name: sockshop.kapicorp.com - name: sockshop.kapicorp.com - namespace: prod-sockshop -spec: - domains: - - sockshop.kapicorp.com - - secure.kapicorp.com -``` diff --git a/components/generators/kubernetes/networking.py b/components/generators/kubernetes/networking.py deleted file mode 100644 index 9cc1618f..00000000 --- a/components/generators/kubernetes/networking.py +++ /dev/null @@ -1,27 +0,0 @@ -from kapitan.inputs.kadet import load_from_search_paths - -from .common import KubernetesResource, ResourceType - -kgenlib = load_from_search_paths("generators") - - -class NetworkPolicy(KubernetesResource): - resource_type = ResourceType( - kind="NetworkPolicy", api_version="networking.k8s.io/v1", id="network_policy" - ) - - def new(self): - super().new() - - def body(self): - super().body() - policy = self.config - workload = self.workload - self.root.spec.podSelector.matchLabels = workload.root.metadata.labels - self.root.spec.ingress = policy.ingress - self.root.spec.egress = policy.egress - if self.root.spec.ingress: - self.root.spec.setdefault("policyTypes", []).append("Ingress") - - if self.root.spec.egress: - self.root.spec.setdefault("policyTypes", []).append("Egress") diff --git a/components/generators/kubernetes/rbac.py b/components/generators/kubernetes/rbac.py deleted file mode 100644 index 62654a0b..00000000 --- a/components/generators/kubernetes/rbac.py +++ /dev/null @@ -1,95 +0,0 @@ -from kapitan.inputs.kadet import load_from_search_paths - -from .common import KubernetesResource, ResourceType - -kgenlib = load_from_search_paths("generators") - - -class Role(KubernetesResource): - 
resource_type = ResourceType( - kind="Role", api_version="rbac.authorization.k8s.io/v1", id="role" - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - self.root.rules = config.role.rules - - -class RoleBinding(KubernetesResource): - resource_type = ResourceType( - kind="RoleBinding", - api_version="rbac.authorization.k8s.io/v1", - id="role_binding", - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - sa = self.sa - default_role_ref = { - "apiGroup": "rbac.authorization.k8s.io", - "kind": "Role", - "name": config.name, - } - default_subject = [ - { - "kind": "ServiceAccount", - "name": sa.name, - "namespace": sa.namespace, - } - ] - self.root.roleRef = config.get("roleRef", default_role_ref) - self.root.subjects = config.get("subject", default_subject) - - -class ClusterRole(KubernetesResource): - resource_type = ResourceType( - kind="ClusterRole", - api_version="rbac.authorization.k8s.io/v1", - id="cluster_role", - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - self.root.rules = config.cluster_role.rules - - -class ClusterRoleBinding(KubernetesResource): - resource_type = ResourceType( - kind="ClusterRoleBinding", - api_version="rbac.authorization.k8s.io/v1", - id="cluster_role_binding", - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - sa = self.sa - default_role_ref = { - "apiGroup": "rbac.authorization.k8s.io", - "kind": "ClusterRole", - "name": config.name, - } - default_subject = [ - { - "kind": "ServiceAccount", - "name": sa.name, - "namespace": sa.namespace, - } - ] - self.root.roleRef = config.get("roleRef", default_role_ref) - self.root.subjects = config.get("subject", default_subject) diff --git a/components/generators/rabbitmq/__init__.py b/components/generators/rabbitmq/__init__.py deleted file mode 100644 index 1c6bc3d3..00000000 --- a/components/generators/rabbitmq/__init__.py +++ /dev/null @@ -1,866 +0,0 @@ -import base64 -import hashlib -import os - -from kapitan.cached import args -from kapitan.inputs.kadet import BaseObj, inventory -from kapitan.utils import render_jinja2_file - -search_paths = args.get("search_paths") - -from . 
import k8s - - -def j2(filename, ctx): - return render_jinja2_file(filename, ctx, search_paths=search_paths) - - -inv = inventory(lazy=True) - - -def merge(source, destination): - for key, value in source.items(): - if isinstance(value, dict): - node = destination.setdefault(key, value) - if node is None: - destination[key] = value - else: - merge(value, node) - else: - destination[key] = destination.setdefault(key, value) - - return destination - - -class RabbitmqCluster(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "RabbitmqCluster" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmqcluster = self.kwargs.rabbitmqcluster - self.add_annotations(rabbitmqcluster.get("annotations", {})) - self.add_labels(rabbitmqcluster.get("labels", {})) - - if rabbitmqcluster.replicas: - self.root.spec.replicas = rabbitmqcluster.replicas - - if rabbitmqcluster.image: - self.root.spec.image = rabbitmqcluster.image - - if rabbitmqcluster.imagePullSecrets: - self.root.spec.imagePullSecrets = rabbitmqcluster.imagePullSecrets - - if rabbitmqcluster.service: - self.root.spec.service = rabbitmqcluster.service - - if rabbitmqcluster.persistence: - self.root.spec.persistence = rabbitmqcluster.persistence - - if rabbitmqcluster.resources: - self.root.spec.resources = rabbitmqcluster.resources - - if rabbitmqcluster.affinity: - self.root.spec.resources = rabbitmqcluster.affinity - - if rabbitmqcluster.tolerations: - self.root.spec.tolerations = rabbitmqcluster.tolerations - - if rabbitmqcluster.rabbitmq: - self.root.spec.rabbitmq = rabbitmqcluster.rabbitmq - - if rabbitmqcluster.tls: - self.root.spec.tls = rabbitmqcluster.tls - - if rabbitmqcluster.skipPostDeploySteps: - self.root.spec.skipPostDeploySteps = rabbitmqcluster.skipPostDeploySteps - - if rabbitmqcluster.terminationGracePeriodSeconds: - self.root.spec.terminationGracePeriodSeconds = ( - rabbitmqcluster.terminationGracePeriodSeconds - ) - - if rabbitmqcluster.secretBackend: - self.root.spec.secretBackend = rabbitmqcluster.secretBackend - - if rabbitmqcluster.override: - self.root.spec.override = rabbitmqcluster.override - - -class RabbitmqQueue(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Queue" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_queue = self.kwargs.rabbitmq_queue - self.add_annotations(rabbitmq_queue.get("annotations", {})) - self.add_labels(rabbitmq_queue.get("labels", {})) - - if rabbitmq_queue.name: - self.root.spec.name = rabbitmq_queue.name - - if type(rabbitmq_queue.autoDelete) is bool: - self.root.spec.autoDelete = rabbitmq_queue.autoDelete - - if type(rabbitmq_queue.durable) is bool: - self.root.spec.durable = rabbitmq_queue.durable - - if rabbitmq_queue.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_queue.rabbitmqClusterReference - ) - - if rabbitmq_queue.arguments: - self.root.spec.arguments = rabbitmq_queue.arguments - - -class RabbitmqPolicy(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Policy" - - self.kwargs.finalizers = list( - 
"deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_policy = self.kwargs.rabbitmq_policy - self.add_annotations(rabbitmq_policy.get("annotations", {})) - self.add_labels(rabbitmq_policy.get("labels", {})) - - if rabbitmq_policy.name: - self.root.spec.name = rabbitmq_policy.name - - if rabbitmq_policy.pattern: - self.root.spec.pattern = rabbitmq_policy.pattern - - if rabbitmq_policy.applyTo: - self.root.spec.applyTo = rabbitmq_policy.applyTo - - if rabbitmq_policy.definition: - self.root.spec.definition = rabbitmq_policy.definition - - if rabbitmq_policy.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_policy.rabbitmqClusterReference - ) - - if rabbitmq_policy.priority: - self.root.spec.priority = rabbitmq_policy.priority - - if rabbitmq_policy.vhost: - self.root.spec.vhost = rabbitmq_policy.vhost - - -class RabbitmqExchange(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Exchange" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_exchange = self.kwargs.rabbitmq_exchange - self.add_annotations(rabbitmq_exchange.get("annotations", {})) - self.add_labels(rabbitmq_exchange.get("labels", {})) - - if rabbitmq_exchange.name: - self.root.spec.name = rabbitmq_exchange.name - - if rabbitmq_exchange.type: - self.root.spec.type = rabbitmq_exchange.type - - if type(rabbitmq_exchange.autoDelete) is bool: - self.root.spec.autoDelete = rabbitmq_exchange.autoDelete - - if type(rabbitmq_exchange.durable) is bool: - self.root.spec.durable = rabbitmq_exchange.durable - - if rabbitmq_exchange.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_exchange.rabbitmqClusterReference - ) - - if rabbitmq_exchange.arguments: - self.root.spec.arguments = rabbitmq_exchange.arguments - - if rabbitmq_exchange.vhost: - self.root.spec.vhost = rabbitmq_exchange.vhost - - -class RabbitmqBinding(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Binding" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_binding = self.kwargs.rabbitmq_binding - self.add_annotations(rabbitmq_binding.get("annotations", {})) - self.add_labels(rabbitmq_binding.get("labels", {})) - - if rabbitmq_binding.source: - self.root.spec.source = rabbitmq_binding.source - - if rabbitmq_binding.destination: - self.root.spec.destination = rabbitmq_binding.destination - - if rabbitmq_binding.destinationType: - self.root.spec.destinationType = rabbitmq_binding.destinationType - - if rabbitmq_binding.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_binding.rabbitmqClusterReference - ) - - if rabbitmq_binding.routingKey: - self.root.spec.routingKey = rabbitmq_binding.routingKey - - if rabbitmq_binding.arguments: - self.root.spec.arguments = rabbitmq_binding.arguments - - if rabbitmq_binding.vhost: - self.root.spec.vhost = rabbitmq_binding.vhost - - -class RabbitmqUser(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - 
self.kwargs.kind = "User" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_user = self.kwargs.rabbitmq_user - self.add_annotations(rabbitmq_user.get("annotations", {})) - self.add_labels(rabbitmq_user.get("labels", {})) - - if rabbitmq_user.tags: - self.root.spec.tags = rabbitmq_user.tags - - if rabbitmq_user.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_user.rabbitmqClusterReference - ) - - if rabbitmq_user.importCredentialsSecret: - self.root.spec.importCredentialsSecret = ( - rabbitmq_user.importCredentialsSecret - ) - - -class RabbitmqPermission(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Permission" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_permission = self.kwargs.rabbitmq_permission - self.add_annotations(rabbitmq_permission.get("annotations", {})) - self.add_labels(rabbitmq_permission.get("labels", {})) - - if rabbitmq_permission.vhost: - self.root.spec.vhost = rabbitmq_permission.vhost - - if rabbitmq_permission.user: - self.root.spec.user = rabbitmq_permission.user - - if rabbitmq_permission.permissions: - self.root.spec.permissions = rabbitmq_permission.permissions - - if rabbitmq_permission.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_permission.rabbitmqClusterReference - ) - - if rabbitmq_permission.userReference: - self.root.spec.userReference = rabbitmq_permission.userReference - - -class RabbitmqVhost(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Vhost" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_vhost = self.kwargs.rabbitmq_vhost - self.add_annotations(rabbitmq_vhost.get("annotations", {})) - self.add_labels(rabbitmq_vhost.get("labels", {})) - - if rabbitmq_vhost.name: - self.root.spec.name = rabbitmq_vhost.name - - if rabbitmq_vhost.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_vhost.rabbitmqClusterReference - ) - - if rabbitmq_vhost.tags: - self.root.spec.tags = rabbitmq_vhost.tags - - if rabbitmq_vhost.tracing: - self.root.spec.tracing = rabbitmq_vhost.tracing - - -class RabbitmqFederation(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Federation" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_federation = self.kwargs.rabbitmq_federation - self.add_annotations(rabbitmq_federation.get("annotations", {})) - self.add_labels(rabbitmq_federation.get("labels", {})) - - if rabbitmq_federation.name: - self.root.spec.name = rabbitmq_federation.name - - if rabbitmq_federation.uriSecret: - self.root.spec.uriSecret = rabbitmq_federation.uriSecret - - if rabbitmq_federation.ackMode: - self.root.spec.ackMode = rabbitmq_federation.ackMode - - if 
rabbitmq_federation.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_federation.rabbitmqClusterReference - ) - - if rabbitmq_federation.exchange: - self.root.sec.exchange = rabbitmq_federation.exchange - - if rabbitmq_federation.expires: - self.root.spec.expires = rabbitmq_federation.expires - - if rabbitmq_federation.maxHops: - self.root.spec.maxHops = rabbitmq_federation.maxHops - - if rabbitmq_federation.messageTTL: - self.root.spec.messageTTL = rabbitmq_federation.messageTTL - - if rabbitmq_federation.prefetch_count: - self.root.spec.prefetch_count = rabbitmq_federation.prefetch_count - - if rabbitmq_federation.queue: - self.root.spec.queue = rabbitmq_federation.queue - - if rabbitmq_federation.reconnectDelay: - self.root.spec.reconnectDelay = rabbitmq_federation.reconnectDelay - - if rabbitmq_federation.trustUserId: - self.root.spec.trustUserId = rabbitmq_federation.trustUserId - - if rabbitmq_federation.vhost: - self.root.spec.vhost = rabbitmq_federation.vhost - - -class RabbitmqShovel(k8s.Base): - def new(self): - self.kwargs.apiVersion = "rabbitmq.com/v1beta1" - self.kwargs.kind = "Shovel" - - self.kwargs.finalizers = list( - "deletion.finalizers.rabbitmqclusters.rabbitmq.com" - ) - super().new() - self.need("name") - - def body(self): - super().body() - - self.add_namespace(inv.parameters.rabbitmq_namespace) - - rabbitmq_shovel = self.kwargs.rabbitmq_shovel - self.add_annotations(rabbitmq_shovel.get("annotations", {})) - self.add_labels(rabbitmq_shovel.get("labels", {})) - - if rabbitmq_shovel.name: - self.root.spec.name = rabbitmq_shovel.name - - if rabbitmq_shovel.uriSecret: - self.root.spec.uriSecret = rabbitmq_shovel.uriSecret - - if rabbitmq_shovel.srcQueue: - self.root.spec.srcQueue = rabbitmq_shovel.srcQueue - - if rabbitmq_shovel.destQueue: - self.root.spec.destQueue = rabbitmq_shovel.destQueue - - if rabbitmq_shovel.rabbitmqClusterReference: - self.root.spec.rabbitmqClusterReference = ( - rabbitmq_shovel.rabbitmqClusterReference - ) - - if rabbitmq_shovel.ackMode: - self.root.spec.ackMode = rabbitmq_shovel.ackMode - - if rabbitmq_shovel.addForwardHeaders: - self.root.spec.addForwardHeaders = rabbitmq_shovel.addForwardHeaders - - if rabbitmq_shovel.deleteAfter: - self.root.spec.deleteAfter = rabbitmq_shovel.deleteAfter - - if rabbitmq_shovel.destAddForwardHeaders: - self.root.spec.destAddForwardHeaders = rabbitmq_shovel.destAddForwardHeaders - - if rabbitmq_shovel.destAddTimestampHeader: - self.root.spec.destAddTimestampHeader = ( - rabbitmq_shovel.destAddTimestampHeader - ) - - if rabbitmq_shovel.destAddress: - self.root.spec.destAddress = rabbitmq_shovel.destAddress - - if rabbitmq_shovel.destApplicationProperties: - self.root.spec.destApplicationProperties = ( - rabbitmq_shovel.destApplicationProperties - ) - - if rabbitmq_shovel.destExchange: - self.root.spec.destExchange = rabbitmq_shovel.destExchange - - if rabbitmq_shovel.destExchangeKey: - self.root.spec.destExchangeKey = rabbitmq_shovel.destExchangeKey - - if rabbitmq_shovel.destProperties: - self.root.spec.destProperties = rabbitmq_shovel.destProperties - - if rabbitmq_shovel.destProtocol: - self.root.spec.destProtocol = rabbitmq_shovel.destProtocol - - if rabbitmq_shovel.destPublishProperties: - self.root.spec.destPublishProperties = rabbitmq_shovel.destPublishProperties - - if rabbitmq_shovel.prefetchCount: - self.root.spec.prefetchCount = rabbitmq_shovel.prefetchCount - - if rabbitmq_shovel.reconnectDelay: - self.root.spec.reconnectDelay = rabbitmq_shovel.reconnectDelay - - if 
rabbitmq_shovel.srcAddress: - self.root.spec.srcAddress = rabbitmq_shovel.srcAddress - - if rabbitmq_shovel.srcDeleteAfter: - self.root.spec.srcDeleteAfter = rabbitmq_shovel.srcDeleteAfter - - if rabbitmq_shovel.srcExchange: - self.root.spec.srcExchange = rabbitmq_shovel.srcExchange - - if rabbitmq_shovel.srcExchangeKey: - self.root.spec.srcExchangeKey = rabbitmq_shovel.srcExchangeKey - - if rabbitmq_shovel.srcPrefetchCount: - self.root.spec.srcPrefetchCount = rabbitmq_shovel.srcPrefetchCount - - if rabbitmq_shovel.srcProtocol: - self.root.spec.srcProtocol = rabbitmq_shovel.srcProtocol - - if rabbitmq_shovel.vhost: - self.root.spec.vhost = rabbitmq_shovel.vhost - - -# The following classes are required to generate Secrets + ConfigMaps -class SharedConfig: - """Shared class to use for both Secrets and ConfigMaps classes. - - containt anything needed by both classes, so that their behavious is basically the same. - Each subclass will then implement its own way of adding the data depending on their implementation. - """ - - @staticmethod - def encode_string(unencoded_string): - return base64.b64encode(unencoded_string.encode("ascii")).decode("ascii") - - def setup_metadata(self): - self.add_namespace(inv.parameters.rabbitmq_namespace) - self.add_annotations(self.config.annotations) - self.add_labels(self.config.labels) - - self.items = self.config["items"] - try: - if isinstance(self, ConfigMap): - globals = ( - inv.parameters.generators.manifest.default_config.globals.config_maps - ) - else: - globals = ( - inv.parameters.generators.manifest.default_config.globals.secrets - ) - self.add_annotations(globals.get("annotations", {})) - self.add_labels(globals.get("labels", {})) - except AttributeError: - pass - - self.versioning(self.config.get("versioned", False)) - - def add_directory(self, directory, encode=False): - stringdata = inv.parameters.get("use_tesoro", False) - if directory and os.path.isdir(directory): - for filename in os.listdir(directory): - with open(f"{directory}/{filename}", "r") as f: - file_content = f.read() - self.add_item( - filename, - file_content, - request_encode=encode, - stringdata=stringdata, - ) - - def add_data(self, data): - stringdata = inv.parameters.get("use_tesoro", False) - - for key, spec in data.items(): - encode = spec.get("b64_encode", False) - - if "value" in spec: - value = spec.get("value") - if "template" in spec: - value = j2(spec.template, spec.get("values", {})) - if "file" in spec: - with open(spec.file, "r") as f: - value = f.read() - - self.add_item(key, value, request_encode=encode, stringdata=stringdata) - - def add_string_data(self, string_data, encode=False): - stringdata = True - - for key, spec in string_data.items(): - - if "value" in spec: - value = spec.get("value") - if "template" in spec: - value = j2(spec.template, spec.get("values", {})) - if "file" in spec: - with open(spec.file, "r") as f: - value = f.read() - - self.add_item(key, value, request_encode=encode, stringdata=stringdata) - - def versioning(self, enabled=False): - if enabled: - self.hash = hashlib.sha256(str(self.root.to_dict()).encode()).hexdigest()[ - :8 - ] - self.root.metadata.name += f"-{self.hash}" - - -class ConfigMap(k8s.Base, SharedConfig): - def new(self): - self.kwargs.apiVersion = "v1" - self.kwargs.kind = "ConfigMap" - super().new() - - def body(self): - super().body() - - def add_item(self, key, value, request_encode=False, stringdata=False): - encode = request_encode - - self.root["data"][key] = self.encode_string(value) if encode else value - - -class 
ComponentConfig(ConfigMap, SharedConfig): - def new(self): - super().new() - self.need("config") - - def body(self): - super().body() - self.config = self.kwargs.config - - self.setup_metadata() - self.add_data(self.config.data) - self.add_directory(self.config.directory, encode=False) - - -class Secret(k8s.Base): - def new(self): - self.kwargs.apiVersion = "v1" - self.kwargs.kind = "Secret" - super().new() - - def body(self): - super().body() - - def add_item(self, key, value, request_encode=False, stringdata=False): - encode = not stringdata and request_encode - field = "stringData" if stringdata else "data" - self.root[field][key] = self.encode_string(value) if encode else value - - -class ComponentSecret(Secret, SharedConfig): - def new(self): - super().new() - self.need("config") - - def body(self): - super().body() - self.config = self.kwargs.config - self.root.type = self.config.get("type", "Opaque") - - self.setup_metadata() - if self.config.data: - self.add_data(self.config.data) - if self.config.string_data: - self.add_string_data(self.config.string_data) - self.add_directory(self.config.directory, encode=True) - - -def generate_rabbitmqcluster(input_params): - obj = BaseObj() - rabbitmqcluster_list = inv.parameters.rabbitmqcluster - for name in rabbitmqcluster_list.keys(): - rabbitmqcluster = RabbitmqCluster( - name=name, rabbitmqcluster=rabbitmqcluster_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmqcluster - - return obj - - -def generate_rabbitmq_queue(input_params): - obj = BaseObj() - rabbitmq_queue_list = inv.parameters.rabbitmq_queue - for name in rabbitmq_queue_list.keys(): - rabbitmq_queue = RabbitmqQueue( - name=name, rabbitmq_queue=rabbitmq_queue_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_queue - return obj - - -def generate_rabbitmq_policy(input_params): - obj = BaseObj() - rabbitmq_policy_list = inv.parameters.rabbitmq_policy - for name in rabbitmq_policy_list.keys(): - rabbitmq_policy = RabbitmqPolicy( - name=name, rabbitmq_policy=rabbitmq_policy_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_policy - return obj - - -def generate_rabbitmq_exchange(input_params): - obj = BaseObj() - rabbitmq_exchange_list = inv.parameters.rabbitmq_exchange - for name in rabbitmq_exchange_list.keys(): - rabbitmq_exchange = RabbitmqExchange( - name=name, rabbitmq_exchange=rabbitmq_exchange_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_exchange - return obj - - -def generate_rabbitmq_binding(input_params): - obj = BaseObj() - rabbitmq_binding_list = inv.parameters.rabbitmq_binding - for name in rabbitmq_binding_list.keys(): - rabbitmq_binding = RabbitmqBinding( - name=name, rabbitmq_binding=rabbitmq_binding_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_binding - return obj - - -def generate_rabbitmq_user(input_params): - obj = BaseObj() - rabbitmq_user_list = inv.parameters.rabbitmq_user - for name in rabbitmq_user_list.keys(): - rabbitmq_user = RabbitmqUser(name=name, rabbitmq_user=rabbitmq_user_list[name]) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_user - return obj - - -def generate_rabbitmq_permission(input_params): - obj = BaseObj() - rabbitmq_permission_list = inv.parameters.rabbitmq_permission - for name in rabbitmq_permission_list.keys(): - rabbitmq_permission = RabbitmqPermission( - name=name, rabbitmq_permission=rabbitmq_permission_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_permission - return obj - - -def 
generate_rabbitmq_vhost(input_params): - obj = BaseObj() - rabbitmq_vhost_list = inv.parameters.rabbitmq_vhost - for name in rabbitmq_vhost_list.keys(): - rabbitmq_vhost = RabbitmqVhost( - name=name, rabbitmq_vhost=rabbitmq_vhost_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_vhost - return obj - - -def generate_rabbitmq_federation(input_params): - obj = BaseObj() - rabbitmq_federation_list = inv.parameters.rabbitmq_federation - for name in rabbitmq_federation_list.keys(): - rabbitmq_federation = RabbitmqFederation( - name=name, rabbitmq_federation=rabbitmq_federation_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_federation - return obj - - -def generate_rabbitmq_shovel(input_params): - obj = BaseObj() - rabbitmq_shovel_list = inv.parameters.rabbitmq_shovel - for name in rabbitmq_shovel_list.keys(): - rabbitmq_shovel = RabbitmqShovel( - name=name, rabbitmq_shovel=rabbitmq_shovel_list[name] - ) - - obj.root["{}-rabbitmq".format(name)] = rabbitmq_shovel - return obj - - -# This function renderes an Shared-ConfigMaps + Secrets -def generate_resource_manifests(input_params): - obj = BaseObj() - - for secret_name, secret_spec in inv.parameters.generators.rabbitmq.secrets.items(): - name = secret_spec.get("name", secret_name) - secret = ComponentSecret(name=name, config=secret_spec) - obj.root[f"{name}"] = secret - - for config_name, config_spec in inv.parameters.generators.rabbitmq.configs.items(): - name = config_spec.get("name", config_name) - config = ComponentConfig(name=name, config=config_spec) - obj.root[f"{name}"] = config - return obj - - -# This function renderes all previous defined functions and returns - - -def generate_manifests(input_params): - all_manifests = BaseObj() - - rabbitmq_manifests = generate_rabbitmqcluster(input_params) - rabbitmq_queue_manifests = generate_rabbitmq_queue(input_params) - rabbitmq_policy_manifests = generate_rabbitmq_policy(input_params) - rabbitmq_exchange_manifests = generate_rabbitmq_exchange(input_params) - rabbitmq_binding_manifests = generate_rabbitmq_binding(input_params) - rabbitmq_user_manifests = generate_rabbitmq_user(input_params) - rabbitmq_permission_manifests = generate_rabbitmq_permission(input_params) - rabbitmq_vhost_manifests = generate_rabbitmq_vhost(input_params) - rabbitmq_federation_manifests = generate_rabbitmq_federation(input_params) - rabbitmq_shovel_manifests = generate_rabbitmq_shovel(input_params) - - resource_manifests = generate_resource_manifests(input_params) - - all_manifests.root.update(rabbitmq_manifests.root) - all_manifests.root.update(rabbitmq_queue_manifests.root) - all_manifests.root.update(rabbitmq_policy_manifests.root) - all_manifests.root.update(rabbitmq_exchange_manifests.root) - all_manifests.root.update(rabbitmq_binding_manifests.root) - all_manifests.root.update(rabbitmq_user_manifests.root) - all_manifests.root.update(rabbitmq_permission_manifests.root) - all_manifests.root.update(rabbitmq_vhost_manifests.root) - all_manifests.root.update(rabbitmq_federation_manifests.root) - all_manifests.root.update(rabbitmq_shovel_manifests.root) - - all_manifests.root.update(resource_manifests.root) - - return all_manifests - - -def main(input_params): - whitelisted_functions = ["generate_manifests"] - function = input_params.get("function", "generate_manifests") - if function in whitelisted_functions: - return globals()[function](input_params) diff --git a/components/generators/rabbitmq/k8s.py b/components/generators/rabbitmq/k8s.py deleted file mode 100644 index 
2516be6b..00000000 --- a/components/generators/rabbitmq/k8s.py +++ /dev/null @@ -1,32 +0,0 @@ -from kapitan.inputs.kadet import BaseObj - - -class Base(BaseObj): - def new(self): - self.need("apiVersion") - self.need("kind") - self.need("name") - - def body(self): - self.root.apiVersion = self.kwargs.apiVersion - self.root.kind = self.kwargs.kind - self.name = self.kwargs.name - self.root.metadata.name = self.kwargs.get("rendered_name", self.name) - self.add_label("name", self.root.metadata.name) - - def add_labels(self, labels): - for key, value in labels.items(): - self.add_label(key, value) - - def add_label(self, key, value): - self.root.metadata.labels[key] = value - - def add_namespace(self, namespace): - self.root.metadata.namespace = namespace - - def add_annotations(self, annotations): - for key, value in annotations.items(): - self.add_annotation(key, value) - - def add_annotation(self, key, value): - self.root.metadata.annotations[key] = value diff --git a/components/generators/terraform/__init__.py b/components/generators/terraform/__init__.py deleted file mode 100644 index 83e2bb72..00000000 --- a/components/generators/terraform/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from kapitan.inputs.kadet import BaseObj, inventory - -inv = inventory() - - -def main(input_params): - obj = BaseObj() - generator_root_paths = input_params.get("generator_root", "sections.tf").split(".") - root = inv.parameters - - for path in generator_root_paths: - root = root.get(path, {}) - - for section_name, content in root.items(): - if section_name in ["resource", "data"]: - for resource_name, content in content.items(): - obj.root["{}.tf".format(resource_name)][section_name][ - resource_name - ] = content - else: - obj.root["{}.tf".format(section_name)][section_name] = content - return obj diff --git a/components/helm/cert-manager/1.4.0/cert-manager.rendered.yml b/components/helm/cert-manager/1.4.0/cert-manager.rendered.yml deleted file mode 100644 index 5eda767c..00000000 --- a/components/helm/cert-manager/1.4.0/cert-manager.rendered.yml +++ /dev/null @@ -1,1096 +0,0 @@ ---- -# Source: cert-manager/templates/cainjector-serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: cert-manager-cainjector - namespace: "cert-manager" - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 ---- -# Source: cert-manager/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: cert-manager - namespace: "cert-manager" - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 ---- -# Source: cert-manager/templates/webhook-serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -automountServiceAccountToken: true -metadata: - name: cert-manager-webhook - namespace: "cert-manager" - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 ---- -# Source: cert-manager/templates/cainjector-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-cainjector - 
labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["certificates"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get", "create", "update", "patch"] - - apiGroups: ["admissionregistration.k8s.io"] - resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiregistration.k8s.io"] - resources: ["apiservices"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["auditregistration.k8s.io"] - resources: ["auditsinks"] - verbs: ["get", "list", "watch", "update"] ---- -# Source: cert-manager/templates/rbac.yaml -# Issuer controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-issuers - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["issuers", "issuers/status"] - verbs: ["update"] - - apiGroups: ["cert-manager.io"] - resources: ["issuers"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] ---- -# Source: cert-manager/templates/rbac.yaml -# ClusterIssuer controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-clusterissuers - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["clusterissuers", "clusterissuers/status"] - verbs: ["update"] - - apiGroups: ["cert-manager.io"] - resources: ["clusterissuers"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] ---- -# Source: cert-manager/templates/rbac.yaml -# Certificates controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-certificates - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"] - verbs: ["update"] - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"] - verbs: ["get", "list", "watch"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # 
https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["cert-manager.io"] - resources: ["certificates/finalizers", "certificaterequests/finalizers"] - verbs: ["update"] - - apiGroups: ["acme.cert-manager.io"] - resources: ["orders"] - verbs: ["create", "delete", "get", "list", "watch"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch", "create", "update", "delete"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] ---- -# Source: cert-manager/templates/rbac.yaml -# Orders controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-orders - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["acme.cert-manager.io"] - resources: ["orders", "orders/status"] - verbs: ["update"] - - apiGroups: ["acme.cert-manager.io"] - resources: ["orders", "challenges"] - verbs: ["get", "list", "watch"] - - apiGroups: ["cert-manager.io"] - resources: ["clusterissuers", "issuers"] - verbs: ["get", "list", "watch"] - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges"] - verbs: ["create", "delete"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["acme.cert-manager.io"] - resources: ["orders/finalizers"] - verbs: ["update"] - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] ---- -# Source: cert-manager/templates/rbac.yaml -# Challenges controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-challenges - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - # Use to update challenge resource status - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges", "challenges/status"] - verbs: ["update"] - # Used to watch challenge resources - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges"] - verbs: ["get", "list", "watch"] - # Used to watch challenges, issuer and clusterissuer resources - - apiGroups: ["cert-manager.io"] - resources: ["issuers", "clusterissuers"] - verbs: ["get", "list", "watch"] - # Need to be able to retrieve ACME account private key to complete challenges - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] - # Used to create events - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] - # HTTP01 rules - - apiGroups: [""] - resources: ["pods", "services"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: ["networking.k8s.io"] - resources: ["ingresses"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - # We require the ability to specify a custom hostname when we are creating - # new ingress resources. 
- # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 - - apiGroups: ["route.openshift.io"] - resources: ["routes/custom-host"] - verbs: ["create"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges/finalizers"] - verbs: ["update"] - # DNS01 rules (duplicated above) - - apiGroups: [""] - resources: ["secrets"] - verbs: ["get", "list", "watch"] ---- -# Source: cert-manager/templates/rbac.yaml -# ingress-shim controller role -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-ingress-shim - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificaterequests"] - verbs: ["create", "update", "delete"] - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"] - verbs: ["get", "list", "watch"] - - apiGroups: ["networking.k8s.io"] - resources: ["ingresses"] - verbs: ["get", "list", "watch"] - # We require these rules to support users with the OwnerReferencesPermissionEnforcement - # admission controller enabled: - # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement - - apiGroups: ["networking.k8s.io"] - resources: ["ingresses/finalizers"] - verbs: ["update"] - - apiGroups: [""] - resources: ["events"] - verbs: ["create", "patch"] ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-view - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificaterequests", "issuers"] - verbs: ["get", "list", "watch"] - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges", "orders"] - verbs: ["get", "list", "watch"] ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-edit - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 - rbac.authorization.k8s.io/aggregate-to-edit: "true" - rbac.authorization.k8s.io/aggregate-to-admin: "true" -rules: - - apiGroups: ["cert-manager.io"] - resources: ["certificates", "certificaterequests", "issuers"] - verbs: ["create", "delete", "deletecollection", "patch", "update"] - - apiGroups: ["acme.cert-manager.io"] - resources: ["challenges", "orders"] - verbs: ["create", "delete", "deletecollection", "patch", "update"] ---- -# Source: 
cert-manager/templates/rbac.yaml -# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-approve:cert-manager-io - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cert-manager" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["cert-manager.io"] - resources: ["signers"] - verbs: ["approve"] - resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] ---- -# Source: cert-manager/templates/rbac.yaml -# Permission to: -# - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers -# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-controller-certificatesigningrequests - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cert-manager" - helm.sh/chart: cert-manager-v1.4.0 -rules: - - apiGroups: ["certificates.k8s.io"] - resources: ["certificatesigningrequests"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["certificates.k8s.io"] - resources: ["certificatesigningrequests/status"] - verbs: ["update"] - - apiGroups: ["certificates.k8s.io"] - resources: ["signers"] - resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] - verbs: ["sign"] - - apiGroups: ["authorization.k8s.io"] - resources: ["subjectaccessreviews"] - verbs: ["create"] ---- -# Source: cert-manager/templates/webhook-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cert-manager-webhook:subjectaccessreviews - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 -rules: -- apiGroups: ["authorization.k8s.io"] - resources: ["subjectaccessreviews"] - verbs: ["create"] ---- -# Source: cert-manager/templates/cainjector-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-cainjector - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-cainjector -subjects: - - name: cert-manager-cainjector - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-issuers - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-issuers -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-clusterissuers - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-clusterissuers -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-certificates - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-certificates -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-orders - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-orders -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-challenges - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-challenges -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-ingress-shim - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-ingress-shim -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-approve:cert-manager-io - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cert-manager" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-approve:cert-manager-io -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: 
ServiceAccount ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-controller-certificatesigningrequests - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cert-manager" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-controller-certificatesigningrequests -subjects: - - name: cert-manager - namespace: "cert-manager" - kind: ServiceAccount ---- -# Source: cert-manager/templates/webhook-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cert-manager-webhook:subjectaccessreviews - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cert-manager-webhook:subjectaccessreviews -subjects: -- apiGroup: "" - kind: ServiceAccount - name: cert-manager-webhook - namespace: cert-manager ---- -# Source: cert-manager/templates/cainjector-rbac.yaml -# leader election rules -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cert-manager-cainjector:leaderelection - namespace: kube-system - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 -rules: - # Used for leader election by the controller - # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller - # see cmd/cainjector/start.go#L113 - # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller - # see cmd/cainjector/start.go#L137 - # See also: https://github.com/kubernetes-sigs/controller-runtime/pull/1144#discussion_r480173688 - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] - verbs: ["get", "update", "patch"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"] - verbs: ["get", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] ---- -# Source: cert-manager/templates/rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cert-manager:leaderelection - namespace: kube-system - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -rules: - # Used for leader election by the controller - # See also: https://github.com/kubernetes-sigs/controller-runtime/pull/1144#discussion_r480173688 - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cert-manager-controller"] - verbs: ["get", "update", "patch"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - resourceNames: 
["cert-manager-controller"] - verbs: ["get", "update", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] ---- -# Source: cert-manager/templates/webhook-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cert-manager-webhook:dynamic-serving - namespace: "cert-manager" - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 -rules: -- apiGroups: [""] - resources: ["secrets"] - resourceNames: - - 'cert-manager-webhook-ca' - verbs: ["get", "list", "watch", "update"] -# It's not possible to grant CREATE permission on a single resourceName. -- apiGroups: [""] - resources: ["secrets"] - verbs: ["create"] ---- -# Source: cert-manager/templates/cainjector-rbac.yaml -# grant cert-manager permission to manage the leaderelection configmap in the -# leader election namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cert-manager-cainjector:leaderelection - namespace: kube-system - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cert-manager-cainjector:leaderelection -subjects: - - kind: ServiceAccount - name: cert-manager-cainjector - namespace: cert-manager ---- -# Source: cert-manager/templates/rbac.yaml -# grant cert-manager permission to manage the leaderelection configmap in the -# leader election namespace -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cert-manager:leaderelection - namespace: kube-system - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cert-manager:leaderelection -subjects: - - apiGroup: "" - kind: ServiceAccount - name: cert-manager - namespace: cert-manager ---- -# Source: cert-manager/templates/webhook-rbac.yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cert-manager-webhook:dynamic-serving - namespace: "cert-manager" - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cert-manager-webhook:dynamic-serving -subjects: -- apiGroup: "" - kind: ServiceAccount - name: cert-manager-webhook - namespace: cert-manager ---- -# Source: cert-manager/templates/service.yaml -apiVersion: v1 -kind: Service -metadata: - name: cert-manager - namespace: "cert-manager" - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -spec: - type: ClusterIP - ports: - - protocol: TCP - port: 9402 - targetPort: 9402 - selector: - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "controller" ---- -# Source: 
cert-manager/templates/webhook-service.yaml -apiVersion: v1 -kind: Service -metadata: - name: cert-manager-webhook - namespace: "cert-manager" - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 -spec: - type: ClusterIP - ports: - - name: https - port: 443 - targetPort: 10250 - selector: - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "webhook" ---- -# Source: cert-manager/templates/cainjector-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cert-manager-cainjector - namespace: "cert-manager" - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "cainjector" - template: - metadata: - labels: - app: cainjector - app.kubernetes.io/name: cainjector - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "cainjector" - helm.sh/chart: cert-manager-v1.4.0 - spec: - serviceAccountName: cert-manager-cainjector - securityContext: - runAsNonRoot: true - containers: - - name: cert-manager - image: "quay.io/jetstack/cert-manager-cainjector:v1.4.0" - imagePullPolicy: IfNotPresent - args: - - --v=2 - - --leader-election-namespace=kube-system - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - {} ---- -# Source: cert-manager/templates/deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cert-manager - namespace: "cert-manager" - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "controller" - helm.sh/chart: cert-manager-v1.4.0 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "controller" - template: - metadata: - labels: - app: cert-manager - app.kubernetes.io/name: cert-manager - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "controller" - app.kubernetes.io/managed-by: Helm - helm.sh/chart: cert-manager-v1.4.0 - annotations: - prometheus.io/path: "/metrics" - prometheus.io/scrape: 'true' - prometheus.io/port: '9402' - spec: - serviceAccountName: cert-manager - securityContext: - runAsNonRoot: true - containers: - - name: cert-manager - image: "quay.io/jetstack/cert-manager-controller:v1.4.0" - imagePullPolicy: IfNotPresent - args: - - --v=2 - - --cluster-resource-namespace=$(POD_NAMESPACE) - - --leader-election-namespace=kube-system - ports: - - containerPort: 9402 - protocol: TCP - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - {} ---- -# Source: cert-manager/templates/webhook-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cert-manager-webhook - namespace: "cert-manager" - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: 
cert-manager-v1.4.0 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/component: "webhook" - template: - metadata: - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 - spec: - serviceAccountName: cert-manager-webhook - securityContext: - runAsNonRoot: true - containers: - - name: cert-manager - image: "quay.io/jetstack/cert-manager-webhook:v1.4.0" - imagePullPolicy: IfNotPresent - args: - - --v=2 - - --secure-port=10250 - - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) - - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca - - --dynamic-serving-dns-names=cert-manager-webhook,cert-manager-webhook.cert-manager,cert-manager-webhook.cert-manager.svc - ports: - - name: https - containerPort: 10250 - livenessProbe: - httpGet: - path: /livez - port: 6080 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /healthz - port: 6080 - scheme: HTTP - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 1 - successThreshold: 1 - failureThreshold: 3 - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - {} ---- -# Source: cert-manager/templates/webhook-mutating-webhook.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: cert-manager-webhook - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 - annotations: - cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" -webhooks: - - name: webhook.cert-manager.io - rules: - - apiGroups: - - "cert-manager.io" - - "acme.cert-manager.io" - apiVersions: - - "*" - operations: - - CREATE - - UPDATE - resources: - - "*/*" - admissionReviewVersions: ["v1", "v1beta1"] - timeoutSeconds: 10 - failurePolicy: Fail - # Only include 'sideEffects' field in Kubernetes 1.12+ - sideEffects: None - clientConfig: - service: - name: cert-manager-webhook - namespace: "cert-manager" - path: /mutate ---- -# Source: cert-manager/templates/webhook-validating-webhook.yaml -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: cert-manager-webhook - labels: - app: webhook - app.kubernetes.io/name: webhook - app.kubernetes.io/instance: cert-manager - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "webhook" - helm.sh/chart: cert-manager-v1.4.0 - annotations: - cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" -webhooks: - - name: webhook.cert-manager.io - namespaceSelector: - matchExpressions: - - key: "cert-manager.io/disable-validation" - operator: "NotIn" - values: - - "true" - - key: "name" - operator: "NotIn" - values: - - cert-manager - rules: - - apiGroups: - - "cert-manager.io" - - "acme.cert-manager.io" - apiVersions: - - "*" - operations: - - CREATE - - UPDATE - resources: - - "*/*" - admissionReviewVersions: ["v1", "v1beta1"] - timeoutSeconds: 10 - failurePolicy: Fail - sideEffects: None - clientConfig: - service: - name: cert-manager-webhook - namespace: "cert-manager" - path: /validate diff --git 
a/components/helm/cert-manager/1.4.0/values.yml b/components/helm/cert-manager/1.4.0/values.yml deleted file mode 100644 index 4f8465ac..00000000 --- a/components/helm/cert-manager/1.4.0/values.yml +++ /dev/null @@ -1 +0,0 @@ -installCRDs: true \ No newline at end of file diff --git a/components/helm/cert-manager/README.md b/components/helm/cert-manager/README.md deleted file mode 100644 index d85ddedf..00000000 --- a/components/helm/cert-manager/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Cert-Manager - -# 2021-07-16 -```shell -helm template cert-manager --values components/helm/cert-manager/1.4.0/values.yml --create-namespace --namespace cert-manager --version v1.4.0 jetstack/cert-manager > components/helm/cert-manager/1.4.0/cert-manager.rendered.yml -``` diff --git a/components/melm/__init__.py b/components/melm/__init__.py deleted file mode 100644 index 495536dd..00000000 --- a/components/melm/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -import os - -from kapitan.inputs.kadet import BaseModel, load_from_search_paths - -kgenlib = load_from_search_paths("generators") - - -def main(input_params): - """Component main.""" - - chart_dir = input_params["chart_dir"] - helm_params = input_params.get("helm_params", {}) - helm_values = input_params.get("helm_values", {}) - - mutations = input_params.get("mutations", {}) - - output_file = helm_params.get("output_file", None) - if output_file: - output_file = os.path.splitext(os.path.basename(output_file))[0] - - store = kgenlib.BaseStore() - helm_config = { - "chart_dir": chart_dir, - "helm_params": helm_params, - "helm_values": helm_values, - } - store.import_from_helm_chart(**helm_config) - store.process_mutations(mutations) - - return store.dump(output_filename=output_file) diff --git a/inventory/classes/common.yml b/inventory/classes/common.yml index b2df6f3d..4b6e7fc0 100644 --- a/inventory/classes/common.yml +++ b/inventory/classes/common.yml @@ -4,3 +4,8 @@ classes: parameters: namespace: ${target_name} target_name: ${_reclass_:name:short} + target_path: ${_reclass_:name:path} + target_secret: targets/${target_path} + target: ${_reclass_:name:full} + gcp_project_id: ${target_name} + terraform_version: 1.4 \ No newline at end of file diff --git a/inventory/classes/components/argoproj/cd/argocd.yml b/inventory/classes/components/argoproj/cd/argocd.yml index bcfc32d8..9031edc4 100644 --- a/inventory/classes/components/argoproj/cd/argocd.yml +++ b/inventory/classes/components/argoproj/cd/argocd.yml @@ -15,7 +15,7 @@ parameters: argocd: chart_name: argo-cd chart_version: "3.32.0" - chart_dir: components/charts/${argocd:chart_name}/${argocd:chart_name}/${argocd:chart_version}/${argocd:application_version} + chart_dir: system/sources/charts/${argocd:chart_name}/${argocd:chart_name}/${argocd:chart_version}/${argocd:application_version} application_version: "v2.2.3" namespace: ${namespace} external_url: ${external_url} @@ -58,14 +58,14 @@ parameters: kapitan: dependencies: - type: helm - output_path: components/charts/${argocd:chart_name}/${argocd:chart_version}/${argocd:application_version} + output_path: ${argocd:chart_dir} source: https://argoproj.github.io/argo-helm version: ${argocd:chart_version} chart_name: ${argocd:chart_name} compile: - input_type: helm input_paths: - - components/charts/${argocd:chart_name}/${argocd:chart_version}/${argocd:application_version} + - ${argocd:chart_dir} output_path: manifests helm_params: namespace: ${argocd:namespace} diff --git a/inventory/classes/components/echo-server.yml 
b/inventory/classes/components/echo-server.yml index 950ae1e8..770e17d0 100644 --- a/inventory/classes/components/echo-server.yml +++ b/inventory/classes/components/echo-server.yml @@ -86,7 +86,7 @@ parameters: value: my_secret b64_encode: true better_secret: - value: ?{base64:targets/${target_name}/password||randomstr|base64} + value: ?{base64:targets/${target_name}/password||random:str|base64} config_maps: config: @@ -96,7 +96,7 @@ parameters: versioned: true data: echo-service.conf: - template: "components/echo-server/echo-server.conf.j2" + template: "system/sources/templates/echo-server/echo-server.conf.j2" values: example: true nginx.conf: diff --git a/inventory/classes/components/filebeat.yml b/inventory/classes/components/filebeat.yml index 9c0fce70..fb4bb4b1 100644 --- a/inventory/classes/components/filebeat.yml +++ b/inventory/classes/components/filebeat.yml @@ -114,5 +114,5 @@ parameters: subPath: filebeat.yml data: filebeat.yml: - template: components/filebeat/templates/filebeat.yml.j2 + template: system/sources/templates/filebeat/templates/filebeat.yml.j2 values: {} \ No newline at end of file diff --git a/inventory/classes/components/github-runners/kubernetes.yml b/inventory/classes/components/github-runners/kubernetes.yml index 0532e9e3..27e1bff2 100644 --- a/inventory/classes/components/github-runners/kubernetes.yml +++ b/inventory/classes/components/github-runners/kubernetes.yml @@ -10,7 +10,7 @@ parameters: dependencies: - type: https source: https://github.com/actions/actions-runner-controller/releases/download/v0.25.2/actions-runner-controller.yaml - output_path: components/actions-runner-controller.yaml + output_path: system/sources/manifests/actions-runner-controller.yaml # Compiles and mutates some fields to setup service account compile: @@ -19,7 +19,7 @@ parameters: output_type: yml prune: false input_params: - files: [components/actions-runner-controller.yaml] + files: [system/sources/manifests/actions-runner-controller.yaml] mutations: bundle: - conditions: @@ -38,4 +38,4 @@ parameters: annotations: argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true,Replace=true input_paths: - - components/kstmz + - system/generators/kstmz diff --git a/inventory/classes/components/gke-pvm-killer.yml b/inventory/classes/components/gke-pvm-killer.yml index ae0fbaa4..19c376fb 100644 --- a/inventory/classes/components/gke-pvm-killer.yml +++ b/inventory/classes/components/gke-pvm-killer.yml @@ -2,11 +2,8 @@ parameters: service_accounts: gke-pvm-killer: ref: plain:targets/${target_name}/gke-pvm-killer-service-account - secret: ?{plain:targets/${target_name}/gke-pvm-killer-service-account||randomstr} - name: gke-pvm-killer@${google_project}.iam.gserviceaccount.com - - scripts: - - templates/scripts/generate_sa_secrets.sh + secret: ?{plain:targets/${target_name}/gke-pvm-killer-service-account||random:str} + name: gke-pvm-killer@${gcp_project_id}.iam.gserviceaccount.com components: gke-pvm-killer: diff --git a/inventory/classes/components/helm/cert-manager-helm.yml b/inventory/classes/components/helm/cert-manager-helm.yml index 2f98a921..3f9ecec6 100644 --- a/inventory/classes/components/helm/cert-manager-helm.yml +++ b/inventory/classes/components/helm/cert-manager-helm.yml @@ -5,6 +5,7 @@ parameters: cert-manager: chart_name: cert-manager chart_version: "v1.4.0" + chart_dir: system/sources/charts/${cert-manager:chart_name} application_version: "v1.4.0" namespace: ${namespace} @@ -39,14 +40,14 @@ parameters: kapitan: dependencies: - type: helm - output_path: 
components/charts/${cert-manager:chart_name} + output_path: ${cert-manager:chart_dir} source: https://charts.jetstack.io/ version: ${cert-manager:chart_version} chart_name: ${cert-manager:chart_name} compile: - input_type: helm input_paths: - - components/charts/${cert-manager:chart_name} + - ${cert-manager:chart_dir} output_path: . helm_params: namespace: ${cert-manager:namespace} diff --git a/inventory/classes/components/kapicorp/tesoro.yml b/inventory/classes/components/kapicorp/tesoro.yml index f7b881ba..c01fb35e 100644 --- a/inventory/classes/components/kapicorp/tesoro.yml +++ b/inventory/classes/components/kapicorp/tesoro.yml @@ -11,9 +11,9 @@ parameters: private_key: plain:targets/${target_name}/kapicorp-tesoro-cert-key cacert: plain:targets/${target_name}/kapicorp-tesoro-cacert-pem ssl: - certificate: ?{${kapicorp:tesoro:refs:certificate}||randomstr} - private_key: ?{${kapicorp:tesoro:refs:private_key}||randomstr} - cacert: ?{${kapicorp:tesoro:refs:cacert}||randomstr} + certificate: ?{${kapicorp:tesoro:refs:certificate}||random:str} + private_key: ?{${kapicorp:tesoro:refs:private_key}||random:str} + cacert: ?{${kapicorp:tesoro:refs:cacert}||random:str} components: tesoro: diff --git a/inventory/classes/components/logstash.yml b/inventory/classes/components/logstash.yml index b15bdea5..7ef904aa 100644 --- a/inventory/classes/components/logstash.yml +++ b/inventory/classes/components/logstash.yml @@ -20,14 +20,14 @@ parameters: mount: /usr/share/logstash/config/ data: logstash.yml: - template: components/logstash/templates/logstash.yml.j2 + template: system/sources/templates/logstash/templates/logstash.yml.j2 values: {} pipelines.yml: - template: components/logstash/templates/pipelines.yml.j2 + template: system/sources/templates/logstash/templates/pipelines.yml.j2 values: {} pipelines: mount: /usr/share/logstash/pipeline/ data: example.conf: - template: components/logstash/templates/example.conf.j2 + template: system/sources/templates/logstash/templates/example.conf.j2 values: {} diff --git a/inventory/classes/components/mysql.yml b/inventory/classes/components/mysql.yml index 7005a970..98702e5f 100644 --- a/inventory/classes/components/mysql.yml +++ b/inventory/classes/components/mysql.yml @@ -44,7 +44,7 @@ parameters: value: |- ignore-db-dir=lost+found mytemplate.cnf: - template: components/mysql/mytemplate.cnf.j2 + template: system/sources/templates/mysql/mytemplate.cnf.j2 values: mysql: ${mysql:settings} @@ -54,7 +54,7 @@ parameters: versioned: true data: mysql-root-password: - value: ?{plain:targets/${target_name}/mysql-root-password||randomstr:32|base64} + value: ?{plain:targets/${target_name}/mysql-root-password||random:str:32|base64} mysql-password: - value: ?{plain:targets/${target_name}/mysql-password||randomstr|base64} + value: ?{plain:targets/${target_name}/mysql-password||random:str|base64} diff --git a/inventory/classes/components/postgres-proxy.yml b/inventory/classes/components/postgres-proxy.yml index 30dad227..5f3a7350 100644 --- a/inventory/classes/components/postgres-proxy.yml +++ b/inventory/classes/components/postgres-proxy.yml @@ -5,11 +5,8 @@ parameters: service_accounts: postgres-proxy: ref: plain:targets/${target_name}/postgres-proxy-service-account - secret: ?{plain:targets/${target_name}/postgres-proxy-service-account||randomstr} - name: postgres-proxy@${google_project}.iam.gserviceaccount.com - - scripts: - - templates/scripts/generate_sa_secrets.sh + secret: ?{plain:targets/${target_name}/postgres-proxy-service-account||random:str} + name: 
postgres-proxy@${gcp_project_id}.iam.gserviceaccount.com components: postgres-proxy: @@ -23,7 +20,8 @@ parameters: GOOGLE_APPLICATION_CREDENTIALS: /opt/secrets/service_account_file replicas: 3 pdb_min_available: 2 - vpa: Auto + vpa: + update_mode: Auto healthcheck: liveness: type: tcp diff --git a/inventory/classes/components/pritunl/pritunl-mongo.yml b/inventory/classes/components/pritunl/pritunl-mongo.yml index 544643c8..223d65c5 100644 --- a/inventory/classes/components/pritunl/pritunl-mongo.yml +++ b/inventory/classes/components/pritunl/pritunl-mongo.yml @@ -57,4 +57,4 @@ parameters: secrets: data: mongodb-root-password: - value: ?{plain:targets/${target_name}/mongodb_password||randomstr|base64} + value: ?{plain:targets/${target_name}/mongodb_password||random:str|base64} diff --git a/inventory/classes/components/pritunl/pritunl.yml b/inventory/classes/components/pritunl/pritunl.yml index 337f7647..05c554bf 100644 --- a/inventory/classes/components/pritunl/pritunl.yml +++ b/inventory/classes/components/pritunl/pritunl.yml @@ -6,7 +6,7 @@ parameters: connection_string: mongodb://${pritunl:auth:username}:${pritunl:auth:password}@pritunl-mongo:27017/${pritunl:database:name} auth: username: pritunl - password: ?{plain:targets/${target_name}/pritunl_password||randomstr} + password: ?{plain:targets/${target_name}/pritunl_password||random:str} components: pritunl: diff --git a/inventory/classes/components/rabbitmq-operator/rabbitmq-operator.yml b/inventory/classes/components/rabbitmq-operator/rabbitmq-operator.yml index 395bfea7..089eec24 100644 --- a/inventory/classes/components/rabbitmq-operator/rabbitmq-operator.yml +++ b/inventory/classes/components/rabbitmq-operator/rabbitmq-operator.yml @@ -5,6 +5,7 @@ parameters: rabbitmq-operator: chart_name: rabbitmq-cluster-operator chart_version: 2.6.1 + chart_dir: system/sources/charts/${rabbitmq-operator:chart_name}/${rabbitmq-operator:chart_version}/${rabbitmq-operator:application_version} application_version: 1.12.1 namespace: ${namespace} @@ -26,14 +27,14 @@ parameters: kapitan: dependencies: - type: helm - output_path: components/charts/${rabbitmq-operator:chart_name}/${rabbitmq-operator:chart_version}/${rabbitmq-operator:application_version} + output_path: ${rabbitmq-operator:chart_dir} source: https://charts.bitnami.com/bitnami version: ${rabbitmq-operator:chart_version} chart_name: ${rabbitmq-operator:chart_name} compile: - input_type: helm input_paths: - - components/charts/${rabbitmq-operator:chart_name}/${rabbitmq-operator:chart_version}/${rabbitmq-operator:application_version} + - ${rabbitmq-operator:chart_dir} output_path: manifests helm_params: namespace: ${rabbitmq-operator:namespace} diff --git a/inventory/classes/components/shared-secret.yml b/inventory/classes/components/shared-secret.yml index 8ea05b17..7b6617dd 100644 --- a/inventory/classes/components/shared-secret.yml +++ b/inventory/classes/components/shared-secret.yml @@ -1,32 +1,38 @@ parameters: + + passwords: + shared-password-string-data: ?{base64:targets/${target_name}/shared-password-string-data||random:str:10|base64} + shared-password-base64-as-plain: ?{base64:targets/${target_name}/shared-password-base64-as-plain||random:str:15} + shared-password-plain-as-base64: ?{plain:targets/${target_name}/shared-password-plain-as-base64||random:str:20|base64} + shared-password-plain-as-plain-pass: ?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||random:str:35} generators: kubernetes: secrets: base64-as-base64: string_data: CONNECTION: - value: 
xyz://?{base64:targets/${target_name}/shared-password-string-data||randomstr:10|base64}-someotherstuff + value: xyz://${passwords:shared-password-string-data}-someotherstuff base64-as-plain: string_data: CONNECTION: - value: xyz://?{base64:targets/${target_name}/shared-password-base64-as-plain||randomstr:15}_someotherstuff + value: xyz://${passwords:shared-password-base64-as-plain}_someotherstuff plain-base64: string_data: CONNECTION: - value: xyz://?{plain:targets/${target_name}/shared-password-plain-as-base64||randomstr:20|base64}_xx_someotherstuff + value: xyz://${passwords:shared-password-plain-as-base64}_xx_someotherstuff plain-plain-connection: string_data: CONNECTION: - value: postgresql://?{plain:targets/${target_name}/shared-password-plain-as-plain-user||randomstr:35}:?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||randomstr:35}/database + value: postgresql://myUser:${passwords:shared-password-plain-as-plain-pass}/database plain-plain-connection-b64: string_data: CONNECTION: b64_encode: true versioned: true - value: postgresql://?{plain:targets/${target_name}/shared-password-plain-as-plain-user||randomstr:35}:?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||randomstr:35}/database + value: postgresql://myUser:${passwords:shared-password-plain-as-plain-pass}/database plain-plain-connection-non-b64: string_data: CONNECTION: versioned: true b64_encode: false - value: postgresql://?{plain:targets/${target_name}/shared-password-plain-as-plain-user||randomstr:35}:?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||randomstr:35}/database \ No newline at end of file + value: postgresql://myUser:${passwords:shared-password-plain-as-plain-pass}/database \ No newline at end of file diff --git a/inventory/classes/components/vault.yml b/inventory/classes/components/vault.yml index 086e48dc..a8bebf3a 100644 --- a/inventory/classes/components/vault.yml +++ b/inventory/classes/components/vault.yml @@ -319,5 +319,5 @@ parameters: subPath: extraconfig-from-values.hcl data: extraconfig-from-values.hcl: - template: components/vault/extraconfig-from-values.hcl.j2 + template: system/sources/templates/vault/extraconfig-from-values.hcl.j2 values: {} diff --git a/inventory/classes/components/weaveworks/catalogue-db.yml b/inventory/classes/components/weaveworks/catalogue-db.yml index 5185c413..42295db5 100644 --- a/inventory/classes/components/weaveworks/catalogue-db.yml +++ b/inventory/classes/components/weaveworks/catalogue-db.yml @@ -11,5 +11,5 @@ parameters: mysql: service_port: 3306 env: - MYSQL_ROOT_PASSWORD: ?{plain:targets/${target_name}/mysql_password||randomstr} + MYSQL_ROOT_PASSWORD: ?{plain:targets/${target_name}/mysql_password||random:str} MYSQL_DATABASE: socksdb diff --git a/inventory/classes/features/gkms-demo.yml b/inventory/classes/features/gkms-demo.yml index a841a7a9..c8c74505 100644 --- a/inventory/classes/features/gkms-demo.yml +++ b/inventory/classes/features/gkms-demo.yml @@ -1,5 +1,5 @@ # This class allows you to test the gkms backend. -# Once enabled, you should be able to use secrets like ?{gkms:my_secret_id||randomstr} +# Once enabled, you should be able to use secrets like ?{gkms:my_secret_id||random:str} ##################################################### # PLEASE NOTE THIS SHOULD ONLY BE USED FOR TESTING. 
# diff --git a/inventory/classes/kapitan/common.yml b/inventory/classes/kapitan/common.yml index 29cb5d15..34a0f7ef 100644 --- a/inventory/classes/kapitan/common.yml +++ b/inventory/classes/kapitan/common.yml @@ -1,27 +1,41 @@ classes: - - kapitan.kube + - kapitan.kgenlib - kapitan.generators.kubernetes - - kapitan.generators.argocd - kapitan.generators.terraform - - kapitan.generators.rabbitmq - parameters: - scripts: [] + init_scripts: [] + scripts: + - templates/scripts/includes/ + - templates/scripts/gcloud/ + - templates/scripts/kapitan/ + + scripts_params: + gcp_project_id: ${gcp_project_id} + + docs: - templates/docs/README.md manifests: [] kapitan: compile: - - output_path: docs + - output_path: . input_type: jinja2 input_paths: ${docs} + - output_path: scripts input_type: jinja2 input_paths: ${scripts} + input_params: ${scripts_params} + + - input_type: external + input_paths: ${init_scripts} + output_path: . + env_vars: + HOME: / - output_path: manifests input_type: jsonnet output_type: yml input_paths: ${manifests} vars: - target: ${target_name} + target: ${target} diff --git a/inventory/classes/kapitan/generators/argocd.yml b/inventory/classes/kapitan/generators/argocd.yml deleted file mode 100644 index 808d3d00..00000000 --- a/inventory/classes/kapitan/generators/argocd.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -parameters: - kapitan: - compile: - - output_path: argocd - input_type: kadet - output_type: yml - input_paths: - - components/generators/argocd diff --git a/inventory/classes/kapitan/generators/kubernetes.yml b/inventory/classes/kapitan/generators/kubernetes.yml index ffcaebf1..d9b6230d 100644 --- a/inventory/classes/kapitan/generators/kubernetes.yml +++ b/inventory/classes/kapitan/generators/kubernetes.yml @@ -2,16 +2,16 @@ parameters: kapitan: dependencies: - type: git - source: https://github.com/kapicorp/kapitan-reference.git - ref: master - subdir: components/generators/kubernetes - output_path: components/generators/kubernetes + source: https://github.com/kapicorp/generators.git + ref: main + subdir: kubernetes + output_path: system/generators/kubernetes compile: - output_path: manifests input_type: kadet output_type: yml input_paths: - - components/generators/kubernetes + - system/generators/kubernetes input_params: mutations: bundle: diff --git a/inventory/classes/kapitan/generators/rabbitmq.yml b/inventory/classes/kapitan/generators/rabbitmq.yml deleted file mode 100644 index d2431d50..00000000 --- a/inventory/classes/kapitan/generators/rabbitmq.yml +++ /dev/null @@ -1,8 +0,0 @@ -parameters: - kapitan: - compile: - - output_path: rabbitmq - input_type: kadet - output_type: yml - input_paths: - - components/generators/rabbitmq \ No newline at end of file diff --git a/inventory/classes/kapitan/generators/terraform.yml b/inventory/classes/kapitan/generators/terraform.yml index c0568280..ca1c2163 100644 --- a/inventory/classes/kapitan/generators/terraform.yml +++ b/inventory/classes/kapitan/generators/terraform.yml @@ -2,18 +2,16 @@ parameters: kapitan: dependencies: - type: git - source: https://github.com/kapicorp/kapitan-reference.git - ref: master - subdir: components/generators/terraform - output_path: components/generators/terraform + source: https://github.com/kapicorp/generators.git + ref: main + subdir: terraform + output_path: system/generators/terraform compile: - output_path: terraform input_type: kadet output_type: json - input_params: - generator_root: resources.tf input_paths: - - components/generators/terraform + - system/generators/terraform - 
input_type: copy ignore_missing: true input_paths: diff --git a/inventory/classes/kapitan/kgenlib.yml b/inventory/classes/kapitan/kgenlib.yml new file mode 100644 index 00000000..da2f4ee5 --- /dev/null +++ b/inventory/classes/kapitan/kgenlib.yml @@ -0,0 +1,8 @@ +parameters: + kapitan: + dependencies: + - type: git + source: https://github.com/kapicorp/generators.git + ref: main + subdir: lib/ + output_path: system/lib/ \ No newline at end of file diff --git a/inventory/classes/kapitan/kube.yml b/inventory/classes/kapitan/kube.yml deleted file mode 100644 index 16be9b4f..00000000 --- a/inventory/classes/kapitan/kube.yml +++ /dev/null @@ -1,9 +0,0 @@ -parameters: - kapitan: - dependencies: - - type: https - source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube.libsonnet - output_path: lib/kube.libsonnet - - type: https - source: https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/master/kube-platforms.libsonnet - output_path: lib/kube-platforms.libsonnet \ No newline at end of file diff --git a/inventory/classes/projects/kapitan-demo/kubernetes/demo.yml b/inventory/classes/projects/kapitan-demo/kubernetes/demo.yml index a131507a..9ed2b6ca 100644 --- a/inventory/classes/projects/kapitan-demo/kubernetes/demo.yml +++ b/inventory/classes/projects/kapitan-demo/kubernetes/demo.yml @@ -1,11 +1,11 @@ classes: - kapitan.templates.kubernetes parameters: - google_project: kapitan-demo + gcp_project_id: kapitan-demo cluster: ${demo} demo: name: demo - google_project: kapitan-demo + gcp_project_id: kapitan-demo zone: europe-west1-b type: gke cluster: demo diff --git a/inventory/classes/terraform/common.yml b/inventory/classes/terraform/common.yml index c6ea1440..513a5ba6 100644 --- a/inventory/classes/terraform/common.yml +++ b/inventory/classes/terraform/common.yml @@ -1,11 +1,22 @@ +--- classes: -- terraform.providers.gcp + - terraform.providers.google + - terraform.remote-state + - terraform.providers.gcp-impersonate parameters: - state_bucket_name: state-kapicorp-terraform-admin + terraform_templates: [] + name: ${gcp_project_id} + state_bucket_name: kapicorp-terraform-state scripts: - templates/scripts/terraform + kapitan: labels: - type: terraform \ No newline at end of file + type: terraform + + compile: + - output_path: terraform + input_type: jinja2 + input_paths: ${terraform_templates} diff --git a/inventory/classes/terraform/gcp/billing.yml b/inventory/classes/terraform/gcp/billing.yml deleted file mode 100644 index da7abc1e..00000000 --- a/inventory/classes/terraform/gcp/billing.yml +++ /dev/null @@ -1,11 +0,0 @@ -parameters: - gcp_billing_account: 017012-945270-0844F0 - resources: - tf: - resource: - google_project: - project: - name: ${name} - project_id: ${google:project} - billing_account: ${gcp_billing_account} - org_id: 163756623419 \ No newline at end of file diff --git a/inventory/classes/terraform/gcp/gcs-bucket.yml b/inventory/classes/terraform/gcp/gcs-bucket.yml index 86250c96..b391c160 100644 --- a/inventory/classes/terraform/gcp/gcs-bucket.yml +++ b/inventory/classes/terraform/gcp/gcs-bucket.yml @@ -1,29 +1,31 @@ +--- parameters: + terraform: + gen_google_storage_bucket: + terraform-state: + name: ${state_bucket_name} + description: Terraform Bucket + location: EU + storage_class: MULTI_REGIONAL resources: - tf: + generic: resource: - google_service_account: terraform: account_id: terraform - description: "Terraform Service Account" - google_storage_bucket: - terraform-state: - name: ${state_bucket_name} - location: EU - storage_class: 
MULTI_REGIONAL + description: Terraform Service Account google_storage_bucket_iam_binding: binding: bucket: \${google_storage_bucket.terraform-state.name} - role: "roles/storage.admin" + role: roles/storage.admin members: - - serviceAccount:\${google_service_account.terraform.email} + - serviceAccount:\${google_service_account.terraform.email} google_organization_iam_member: terraform_owner: - org_id: 163756623419 - role: "roles/owner" - member: serviceAccount:\${google_service_account.terraform.email} + org_id: 163756623419 + role: roles/owner + member: serviceAccount:\${google_service_account.terraform.email} terraform_billing: - org_id: 163756623419 - role: "roles/billing.user" - member: serviceAccount:\${google_service_account.terraform.email} \ No newline at end of file + org_id: 163756623419 + role: roles/billing.user + member: serviceAccount:\${google_service_account.terraform.email} diff --git a/inventory/classes/terraform/gcp/organization/kapicorp.yml b/inventory/classes/terraform/gcp/organization/kapicorp.yml new file mode 100644 index 00000000..bc926445 --- /dev/null +++ b/inventory/classes/terraform/gcp/organization/kapicorp.yml @@ -0,0 +1,28 @@ +--- +classes: + - common + - terraform.gcp.services.common + + +parameters: + gcp_project_id: ${target_name} + gcp_display_name: ${target_name} + gcp_organization_id: ?{plain:shared/gcp_organization_id||random:str} + gcp_billing_account: ?{plain:shared/gcp_billing_account||random:str} + + generators: + terraform: + defaults: + gcp_project_id: + org_id: ${gcp_organization_id} + auto_create_network: false + project_id: ${gcp_project_id} + billing_account: ${gcp_billing_account} + + terraform: + resources: + generic: + gcp_project_id: + main: + project: + name: ${gcp_display_name} diff --git a/inventory/classes/terraform/gcp/resources/gke.yml b/inventory/classes/terraform/gcp/resources/gke.yml deleted file mode 100644 index 1957d7fb..00000000 --- a/inventory/classes/terraform/gcp/resources/gke.yml +++ /dev/null @@ -1,32 +0,0 @@ -parameters: - resources: - tf: - resource: - google_project_service: - enable_container_service: - service: container.googleapis.com - project: \${google_project.project.project_id} - - google_service_account: - default: - account_id: "gke-sa" - display_name: Service Account for GKE - - google_container_cluster: - primary: - name: gke-cluster - location: europe-west1 - initial_node_count: 1 - node_config: - # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
- service_account: \${google_service_account.default.email} - oauth_scopes: [ - "https://www.googleapis.com/auth/cloud-platform" - ] - labels: - target: ${target_name} - depends_on: - - google_project_service.enable_container_service - timeouts: - create: "30m" - update: "40m" \ No newline at end of file diff --git a/inventory/classes/terraform/gcp/resources/network.yml b/inventory/classes/terraform/gcp/resources/network.yml new file mode 100644 index 00000000..19aeb286 --- /dev/null +++ b/inventory/classes/terraform/gcp/resources/network.yml @@ -0,0 +1,14 @@ +--- +parameters: + gcp_default_network_name: kapicorp + terraform: + resources: + generic: + google_compute_network: + default: + name: ${gcp_default_network_name} + description: Default network for the project maintained by terraform + auto_create_subnetworks: true + depends_on: + - gcp_project_id.main + - gcp_project_id_service.compute diff --git a/inventory/classes/terraform/gcp/services.yml b/inventory/classes/terraform/gcp/services.yml deleted file mode 100644 index 125928a1..00000000 --- a/inventory/classes/terraform/gcp/services.yml +++ /dev/null @@ -1,17 +0,0 @@ -parameters: - resources: - tf: - resource: - google_project_service: - enable_cloudbilling_service: - service: cloudbilling.googleapis.com - project: \${google_project.project.project_id} - enable_iam_service: - service: iam.googleapis.com - project: \${google_project.project.project_id} - enable_compute_service: - service: compute.googleapis.com - project: \${google_project.project.project_id} - enable_compute_service: - service: storage-component.googleapis.com - project: \${google_project.project.project_id} \ No newline at end of file diff --git a/inventory/classes/terraform/gcp/services/common.yml b/inventory/classes/terraform/gcp/services/common.yml new file mode 100644 index 00000000..822ccd4d --- /dev/null +++ b/inventory/classes/terraform/gcp/services/common.yml @@ -0,0 +1,10 @@ +--- +parameters: + terraform: + resources: + generic: + gcp_project_id_service: + cloudbilling: + service: cloudbilling.googleapis.com + iam: + service: iam.googleapis.com diff --git a/inventory/classes/terraform/gcp/services/compute.yml b/inventory/classes/terraform/gcp/services/compute.yml new file mode 100644 index 00000000..79eaf346 --- /dev/null +++ b/inventory/classes/terraform/gcp/services/compute.yml @@ -0,0 +1,17 @@ +--- +classes: + - .common + +parameters: + terraform: + resources: + generic: + gcp_project_id_service: + compute: + service: compute.googleapis.com + storage-component: + service: storage-component.googleapis.com + container: + service: container.googleapis.com + depends_on: + - gcp_project_id_service.compute diff --git a/inventory/classes/terraform/github/common.yml b/inventory/classes/terraform/github/common.yml new file mode 100644 index 00000000..9b5c8d11 --- /dev/null +++ b/inventory/classes/terraform/github/common.yml @@ -0,0 +1,9 @@ +--- +classes: + - common + - terraform.github.defaults + - terraform.providers.github + +parameters: + scripts: + - system/templates/scripts/github diff --git a/inventory/classes/terraform/github/defaults.yml b/inventory/classes/terraform/github/defaults.yml new file mode 100644 index 00000000..d79f8091 --- /dev/null +++ b/inventory/classes/terraform/github/defaults.yml @@ -0,0 +1,56 @@ +--- +parameters: + github: + deploy_key: + # ssh-keygen -t rsa -b 4096 -C "deploy_ke" + # cat deploy_key | ./compiled/github/.../scripts/set_reference gkms:shared/github/deploy_key_private + private: ?{gkms:shared/github/deploy_key_private} 
+ + # cat deploy_key.pub | ./compiled/github/.../scripts/set_reference plain:shared/github/deploy_key_public + public: ?{plain:shared/github/deploy_key_public} + + terraform: + gen_locals: + github_deploy_key_private: + value: ${github:deploy_key:private} + github_deploy_key_public: + value: ${github:deploy_key:public} + generators: + terraform: + defaults: + gen_github_repository: + branch_protection: + main: {} + + github_repository: + # Default settings for repositories + # Documentation: https://registry.terraform.io/providers/integrations/github/latest/docs/resources/repository + allow_auto_merge: true + allow_merge_commit: false + allow_rebase_merge: false + allow_squash_merge: true + allow_update_branch: true + delete_branch_on_merge: true + has_downloads: false + has_issues: true + has_projects: false + has_wiki: false + visibility: private + vulnerability_alerts: true + + github_branch_protection: + # Default settings for branch protection + # Documentation: https://registry.terraform.io/providers/integrations/github/latest/docs/resources/branch_protection + allows_deletions: false + enforce_admins: true + pattern: main + required_linear_history: true + required_status_checks: + strict: false + required_pull_request_reviews: + dismiss_stale_reviews: false + dismissal_restrictions: [] + require_code_owner_reviews: false + require_last_push_approval: false + required_approving_review_count: 1 + restrict_dismissals: false diff --git a/inventory/classes/terraform/local-state.yml b/inventory/classes/terraform/local-state.yml index a21dd01e..7ba259ad 100644 --- a/inventory/classes/terraform/local-state.yml +++ b/inventory/classes/terraform/local-state.yml @@ -1,7 +1,6 @@ +--- parameters: - resources: - tf: - terraform: - backend: - local: - path: "../../../state/terraform.tfstate" \ No newline at end of file + terraform: + gen_backend: + local: + path: ../../../state/terraform.tfstate diff --git a/inventory/classes/terraform/project-type/admin.yml b/inventory/classes/terraform/project-type/admin.yml deleted file mode 100644 index 199a8aef..00000000 --- a/inventory/classes/terraform/project-type/admin.yml +++ /dev/null @@ -1,20 +0,0 @@ -classes: - - terraform.gcp.billing - - terraform.gcp.services - - terraform.local-state - - terraform.gcp.gcs-bucket - -parameters: - resources: - tf: - resource: - google_project_service: - enable_container_service: - service: container.googleapis.com - project: \${google_project.project.project_id} - enable_cloudresourcemanager_service: - service: cloudresourcemanager.googleapis.com - project: \${google_project.project.project_id} - enable_serviceusage_service: - service: serviceusage.googleapis.com - project: \${google_project.project.project_id} \ No newline at end of file diff --git a/inventory/classes/terraform/project-type/generic.yml b/inventory/classes/terraform/project-type/generic.yml deleted file mode 100644 index ec78cc0e..00000000 --- a/inventory/classes/terraform/project-type/generic.yml +++ /dev/null @@ -1,5 +0,0 @@ -classes: - - terraform.gcp.billing - - terraform.gcp.services - - terraform.remote-state - - terraform.providers.gcp-impersonate \ No newline at end of file diff --git a/inventory/classes/terraform/providers/gcp-impersonate.yml b/inventory/classes/terraform/providers/gcp-impersonate.yml index 403de1ea..1e2522bf 100644 --- a/inventory/classes/terraform/providers/gcp-impersonate.yml +++ b/inventory/classes/terraform/providers/gcp-impersonate.yml @@ -1,3 +1,4 @@ +--- parameters: google: - impersonate_service_account: 
terraform@kapicorp-terraform-admin.iam.gserviceaccount.com \ No newline at end of file + impersonate_service_account: terraform@kapicorp-terraform-admin.iam.gserviceaccount.com diff --git a/inventory/classes/terraform/providers/gcp.yml b/inventory/classes/terraform/providers/gcp.yml deleted file mode 100644 index 735076e1..00000000 --- a/inventory/classes/terraform/providers/gcp.yml +++ /dev/null @@ -1,13 +0,0 @@ -parameters: - google: - project: ${target_name} - region: europe-west1 - zone: europe-west1-b - resources: - tf: - terraform: - required_providers: - google: - version: 3.46.0 - provider: - - google: ${google} \ No newline at end of file diff --git a/inventory/classes/terraform/providers/github.yml b/inventory/classes/terraform/providers/github.yml new file mode 100644 index 00000000..08a08f2a --- /dev/null +++ b/inventory/classes/terraform/providers/github.yml @@ -0,0 +1,24 @@ +--- +parameters: + github: + app: + pem_file: ?{gkms:shared/github_kapitan_app_pem} + id: 305325 + installation_id: 35247134 + + terraform: + gen_locals: + github_pem: + value: ${github:app:pem_file} + + gen_required_providers: + github: + source: integrations/github + version: ~> 5.0 + gen_provider: + github: + owner: kapicorp + app_auth: + pem_file: \${local.github_pem} + id: ${github:app:id} + installation_id: ${github:app:installation_id} diff --git a/inventory/classes/terraform/providers/google.yml b/inventory/classes/terraform/providers/google.yml new file mode 100644 index 00000000..23c99f8a --- /dev/null +++ b/inventory/classes/terraform/providers/google.yml @@ -0,0 +1,55 @@ +--- +parameters: + google: + project: ${gcp_project_id} + region: europe-west1 + zone: europe-west1-b + terraform: + gen_required_providers: + google: + version: 4.64.0 + gen_provider: + google: ${google} + + generators: + terraform: + defaults: + provider: + google: + resource: + project: ${gcp_project_id} + + google_dns_record_set: + managed_zone: platform + type: "A" + + google_container_node_pool: + node_count: 1 + node_config: + preemptible: true + machine_type: e2-medium + oauth_scopes: [https://www.googleapis.com/auth/cloud-platform] + google_container_cluster: + remove_default_node_pool: true + initial_node_count: 1 + enable_shielded_nodes: true + cost_management_config: + enabled: true + monitoring_config: + enable_components: + - SYSTEM_COMPONENTS + - APISERVER + - CONTROLLER_MANAGER + - SCHEDULER + managed_prometheus: + enabled: true + vertical_pod_autoscaling: + enabled: true + gateway_api_config: + channel: CHANNEL_STANDARD + workload_identity_config: + workload_pool: ${gcp_project_id}.svc.id.goog + private_cluster_config: + enable_private_nodes: true + enable_private_endpoint: false + master_ipv4_cidr_block: 172.16.200.16/28 diff --git a/inventory/classes/terraform/providers/postgres.yml b/inventory/classes/terraform/providers/postgres.yml new file mode 100644 index 00000000..c610f19a --- /dev/null +++ b/inventory/classes/terraform/providers/postgres.yml @@ -0,0 +1,29 @@ +--- +parameters: + database: + master: + admin_user: admin + terraform: + gen_locals: + admin_password: + value: ?{gkms:targets/${target_path}/psql-admin-password||random:str} + gen_required_providers: + postgresql: + source: cyrilgdn/postgresql + version: 1.20.0 + gen_provider: + postgresql: + scheme: gcppostgres + host: \${google_sql_database_instance.database.connection_name} + database: "postgres" + username: ${database:master:admin_user} + password: \${local.admin_password} + sslmode: "require" + connect_timeout: 15 + resources: + generic: 
+ google_sql_user: + admin: + name: ${database:master:admin_user} + instance: \${google_sql_database_instance.database.name} + password: \${local.admin_password} diff --git a/inventory/classes/terraform/remote-state.yml b/inventory/classes/terraform/remote-state.yml index a37893a2..e52e7ebd 100644 --- a/inventory/classes/terraform/remote-state.yml +++ b/inventory/classes/terraform/remote-state.yml @@ -1,8 +1,7 @@ +--- parameters: - resources: - tf: - terraform: - backend: - gcs: - bucket: ${state_bucket_name} - prefix: terraform/state/${target_name} \ No newline at end of file + terraform: + gen_backend: + gcs: + bucket: ${state_bucket_name} + prefix: terraform/state/${target} diff --git a/inventory/targets/examples/gke-pvm-killer.yml b/inventory/targets/examples/gke-pvm-killer.yml index 90b45c54..2a1aa169 100644 --- a/inventory/targets/examples/gke-pvm-killer.yml +++ b/inventory/targets/examples/gke-pvm-killer.yml @@ -4,4 +4,4 @@ classes: parameters: target_name: gke-pvm-killer - google_project: example-gce-project + gcp_project_id: example-gce-project diff --git a/inventory/targets/examples/postgres-proxy.yml b/inventory/targets/examples/postgres-proxy.yml index eb76520c..2691da0e 100644 --- a/inventory/targets/examples/postgres-proxy.yml +++ b/inventory/targets/examples/postgres-proxy.yml @@ -4,4 +4,4 @@ classes: parameters: target_name: postgres-proxy - google_project: example-project + gcp_project_id: example-project diff --git a/inventory/targets/examples/tutorial.yml b/inventory/targets/examples/tutorial.yml index 3e5ed055..9a25f266 100644 --- a/inventory/targets/examples/tutorial.yml +++ b/inventory/targets/examples/tutorial.yml @@ -3,8 +3,25 @@ classes: - projects.localhost.kubernetes.kind - applications.microservices - components.echo-server - - features.tesoro + # - features.tesoro parameters: target_name: tutorial + + kapicorp: + simple_fish_generator: + cod: + family: Gadidae + + blue_shark: + name: blue-shark + family: Carcharhinidae + + # Defaults + generators: + defaults: + simple_fish_generator: + habitat: water + water: + fins: yes \ No newline at end of file diff --git a/inventory/targets/kapicorp/prod-sockshop.yml b/inventory/targets/kapicorp/prod-sockshop.yml index cbe094c0..708d1601 100644 --- a/inventory/targets/kapicorp/prod-sockshop.yml +++ b/inventory/targets/kapicorp/prod-sockshop.yml @@ -16,10 +16,10 @@ parameters: data: tls.crt: # just an example, not a real certificate - value: ?{plain:targets/${target_name}/sockshop.kapicorp.com.crt||randomstr|base64} + value: ?{plain:targets/${target_name}/sockshop.kapicorp.com.crt||random:str|base64} tls.key: # just an example, not a real key - value: ?{plain:targets/${target_name}/sockshop.kapicorp.com.key||randomstr|base64} + value: ?{plain:targets/${target_name}/sockshop.kapicorp.com.key||random:str|base64} ingresses: tls-certificate: annotations: diff --git a/inventory/targets/terraform/kapicorp-demo-march.yml b/inventory/targets/terraform/kapicorp-demo-march.yml deleted file mode 100644 index 0197d92b..00000000 --- a/inventory/targets/terraform/kapicorp-demo-march.yml +++ /dev/null @@ -1,8 +0,0 @@ -classes: - - common - - terraform.common - - terraform.project-type.generic - - terraform.gcp.resources.gke - -parameters: - name: Example Terraform Project diff --git a/inventory/targets/terraform/kapicorp-project-123.yml b/inventory/targets/terraform/kapicorp-project-123.yml index 714be862..e2c9f412 100644 --- a/inventory/targets/terraform/kapicorp-project-123.yml +++ b/inventory/targets/terraform/kapicorp-project-123.yml 
@@ -1,7 +1,7 @@ classes: - common - terraform.common - - terraform.project-type.generic + - terraform.gcp.organization.kapicorp parameters: name: Example Terraform Project diff --git a/inventory/targets/terraform/kapicorp-terraform-admin.yml b/inventory/targets/terraform/kapicorp-terraform-admin.yml index 5573318a..65f3eaf1 100644 --- a/inventory/targets/terraform/kapicorp-terraform-admin.yml +++ b/inventory/targets/terraform/kapicorp-terraform-admin.yml @@ -1,7 +1,7 @@ classes: - common - terraform.common - - terraform.project-type.admin + - terraform.gcp.organization.kapicorp parameters: name: Terraform Admin Project diff --git a/kapitan b/kapitan index 0f1e687f..e46f8ecd 100755 --- a/kapitan +++ b/kapitan @@ -3,7 +3,7 @@ set -o nounset -o pipefail -o noclobber -o errexit DIR=$(dirname ${BASH_SOURCE[0]}) ABS_PATH=$(cd "${DIR}"; pwd) -KAPITAN_IMAGE=kapicorp/kapitan:v0.31.1-rc.3 +KAPITAN_IMAGE=kapicorp/kapitan:v0.32.0 KAPITAN_BINARY="docker run --rm -i -u $UID --network host -w $PWD \ -v $PWD:$PWD:delegated \ diff --git a/lib/kap.libsonnet b/lib/kap.libsonnet deleted file mode 100644 index 84f3b1fa..00000000 --- a/lib/kap.libsonnet +++ /dev/null @@ -1,323 +0,0 @@ -/* - * This class hides all the boilerplate needed for kapitan. - */ - -local kapitan = import 'lib/kapitan.libjsonnet'; -local kube = import 'lib/kube-platforms.libsonnet'; -local utils = import 'lib/utils.libsonnet'; - -local inventory = kapitan.inventory(); -local p = inventory.parameters; - -local isRefTag = function(potential_ref_tag) - std.startsWith(potential_ref_tag, '?{') && std.endsWith(potential_ref_tag, '}') -; - -local getFilePathFromRef = function(ref_tag) - // Remove the leading `?{` and the trailing `}` - local ref_content = std.substr(ref_tag, 2, std.length(ref_tag) - 3); - // Remove the scheme (e.g., `gkms:`, `plain:`) - local ref_content_without_scheme = std.splitLimit(ref_content, ':', 1)[1]; - // Remove any pipelines - std.splitLimit(ref_content_without_scheme, '|', 1)[0] -; - -local loadRefFile = function(potential_ref_tag) - local final_file_path = 'refs/' + getFilePathFromRef(potential_ref_tag); - kapitan.file_read(final_file_path) -; - -local version_secrets = utils.objectGet(p, 'version_secrets', false); - -local HealthCheck = function(healthcheck_config) if utils.objectGet(healthcheck_config, 'enabled', true) then { - failureThreshold: utils.objectGet(healthcheck_config, 'failure_threshold', 3), - periodSeconds: utils.objectGet(healthcheck_config, 'period_seconds', 10), - successThreshold: utils.objectGet(healthcheck_config, 'sucess_threshold', 1), - initialDelaySeconds: utils.objectGet(healthcheck_config, 'initial_delay_seconds', 0), - timeoutSeconds: utils.objectGet(healthcheck_config, 'timeout_seconds', 1), - [if healthcheck_config.type == 'command' then 'exec']: { - command: healthcheck_config.command, - }, - [if healthcheck_config.type == 'http' then 'httpGet']: { - path: healthcheck_config.path, - port: utils.objectGet(healthcheck_config, 'port', 80), - scheme: utils.objectGet(healthcheck_config, 'scheme', 'HTTP'), - httpHeaders: utils.objectGet(healthcheck_config, 'httpHeaders', []), - }, - [if healthcheck_config.type == 'tcp' then 'tcpSocket']: { - port: utils.objectGet(healthcheck_config, 'port', 80), - }, -}; - -local ClusterRole = function(cluster_role) if utils.objectGet(cluster_role, 'rbac_role', true) then { - rules: { - - }, -}; - - -kapitan + kube + { - inventory: inventory, - parameters: inventory.parameters, - utils: utils, - kapitan: kapitan, - - - AntiAffinityPreferred(name, 
topology_key): { - spec+: { - template+: { - spec+: { - affinity+: { - podAntiAffinity+: { - preferredDuringSchedulingIgnoredDuringExecution+: [{ - weight: 1, - podAffinityTerm: { - labelSelector: { - matchExpressions: [{ - key: 'app', - operator: 'In', - values: [name], - }], - }, - topologyKey: topology_key, - }, - }], - }, - }, - }, - }, - }, - }, - NodeAffinityPreferred(label, value, operator='In'): { - spec+: { - template+: { - spec+: { - affinity+: { - nodeAffinity+: { - preferredDuringSchedulingIgnoredDuringExecution+: [{ - weight: 1, - preference: { - matchExpressions: [{ - key: label, - operator: operator, - values: [value], - }], - }, - }], - }, - }, - }, - }, - }, - }, - - K8sCommon(name): { - WithAnnotations(annotations):: self + if annotations != null then { metadata+: { annotations+: annotations } } else {}, - WithLabels(labels):: self + { metadata+: { labels+: labels } }, - WithLabel(label):: self + { metadata+: { labels+: label } }, - WithTemplateLabel(label):: self + { spec+: { template+: { metadata+: { labels+: label } } } }, - WithMetadata(metadata):: self + { metadata+: metadata }, - WithNamespace(namespace=p.namespace):: self + { metadata+: { namespace: namespace } }, - }, - K8sService(name): $.K8sCommon(name) + kube.Service(name) { - WithExternalTrafficPolicy(policy):: self + { spec+: { externalTrafficPolicy: policy } }, - WithType(type):: self + { spec+: { type: type } }, - WithSessionAffinity(affinity):: self + { spec+: { sessionAffinity: affinity } }, - WithPorts(ports):: self + { spec+: { ports: [ - { - port_info:: ports[port_name], - name: port_name, - port: utils.objectGet(self.port_info, 'service_port'), - protocol: utils.objectGet(self.port_info, 'protocol', 'TCP'), - nodePort: utils.objectGet(self.port_info, 'node_port'), - targetPort: port_name, - } - for port_name in std.objectFields(ports) - if 'service_port' in ports[port_name] - ] } }, - }, - K8sDeployment(name): $.K8sCommon(name) + kube.Deployment(name) { - spec+: { - revisionHistoryLimit: utils.objectGet(p, 'revisionHistoryLimit', null), - }, - WithPodAntiAffinity(name=name, topology, enabled=true):: self + if enabled then $.AntiAffinityPreferred(name, topology) else {}, - WithNodeAffinity(label, value, operator='In', enabled=true):: self + if enabled then $.NodeAffinityPreferred(label, value, operator) else {}, - WithContainer(container):: self + { spec+: { template+: { spec+: { containers_+: container } } } }, - WithMinReadySeconds(seconds):: self + { spec+: { minReadySeconds: seconds } }, - WithNodeSelector(labels):: self + { spec+: { template+: { spec+: { nodeSelector+: labels } } } }, - WithSecurityContext(security_context):: self + { spec+: { template+: { spec+: { securityContext+: security_context } } } }, - WithProgressDeadlineSeconds(seconds):: self + { spec+: { progressDeadlineSeconds: seconds } }, - WithReplicas(replicas):: self + { spec+: { replicas: replicas } }, - WithUpdateStrategy(strategy):: self + { spec+: { strategy+: strategy } }, - WithPrometheusScrapeAnnotation(enabled=true, port=6060):: self + if enabled then { spec+: { template+: { metadata+: { annotations+: { - 'prometheus.io/port': std.toString(port), - 'prometheus.io/scrape': std.toString(enabled), - } } } } } else {}, - WithDNSPolicy(policy):: self + { spec+: { template+: { spec+: { dnsPolicy: policy } } } }, - WithRestartPolicy(policy):: self + { spec+: { template+: { spec+: { restartPolicy: policy } } } }, - WithServiceAccountName(sa):: self + if utils.objectGet(sa, 'enabled', false) then { spec+: { template+: { spec+: { 
serviceAccountName: utils.objectGet(sa, 'name', name) } } } } else {}, - WithVolume(volume, enabled=true):: self + if enabled then { spec+: { template+: { spec+: { volumes_+: volume } } } } else {}, - }, - - K8sStatefulSet(name): $.K8sCommon(name) + kube.StatefulSet(name) { - spec+: { - revisionHistoryLimit: utils.objectGet(p, 'revisionHistoryLimit', null), - }, - WithPodAntiAffinity(name=name, topology, enabled=true):: self + if enabled then $.AntiAffinityPreferred(name, topology) else {}, - WithContainer(container):: self + { spec+: { template+: { spec+: { containers_+: container } } } }, - WithNodeSelector(labels):: self + { spec+: { template+: { spec+: { nodeSelector+: labels } } } }, - WithSecurityContext(security_context):: self + { spec+: { template+: { spec+: { securityContext+: security_context } } } }, - WithMinReadySeconds(seconds):: self + { spec+: { minReadySeconds: seconds } }, - WithUpdateStrategy(strategy):: self + { spec+: { updateStrategy+: strategy } }, - WithProgressDeadlineSeconds(seconds):: self + { spec+: { progressDeadlineSeconds: seconds } }, - WithReplicas(replicas):: self + { spec+: { replicas: replicas } }, - WithPrometheusScrapeAnnotation(enabled=true, port=6060):: self + if enabled then { spec+: { template+: { metadata+: { annotations+: { - 'prometheus.io/port': std.toString(port), - 'prometheus.io/scrape': std.toString(enabled), - } } } } } else {}, - WithDNSPolicy(policy):: self + { spec+: { template+: { spec+: { dnsPolicy: policy } } } }, - WithRestartPolicy(policy):: self + { spec+: { template+: { spec+: { restartPolicy: policy } } } }, - WithServiceAccountName(sa):: self + if utils.objectGet(sa, 'enabled', false) then { spec+: { template+: { spec+: { serviceAccountName: utils.objectGet(sa, 'name', name) } } } } else {}, - WithVolume(volume, enabled=true):: self + if enabled then { spec+: { template+: { spec+: { volumes_+: volume } } } } else {}, - WithVolumeClaimTemplates(vct, enabled=true):: self + if enabled then { spec+: { volumeClaimTemplates_+: vct } } else {}, - }, - - K8sJob(name): $.K8sCommon(name) + kube.Job(name) { - WithContainer(container):: self + { spec+: { template+: { spec+: { containers_+: container } } } }, - WithSecurityContext(security_context):: self + { spec+: { template+: { spec+: { securityContext+: security_context } } } }, - WithBackoffLimit(limit):: self + { spec+: { backoffLimit: limit } }, - WithDNSPolicy(policy):: self + { spec+: { template+: { spec+: { dnsPolicy: policy } } } }, - WithImagePullSecrets(secret):: self + { spec+: { template+: { spec+: { imagePullSecrets+: [{ name: secret }] } } } }, - WithNodeSelector(labels):: self + { spec+: { template+: { spec+: { nodeSelector+: labels } } } }, - WithSelector(selector):: self + { spec+: { selector: selector } }, - WithRestartPolicy(policy):: self + { spec+: { template+: { spec+: { restartPolicy: policy } } } }, - WithServiceAccountName(sa, service_name):: self + if utils.objectGet(sa, 'enabled', false) then { spec+: { template+: { spec+: { serviceAccountName: utils.objectGet(sa, 'name', service_name) } } } } else {}, - WithVolume(volume, enabled=true):: self + if enabled then { spec+: { template+: { spec+: { volumes_+: volume } } } } else {}, - }, - - K8sServiceAccount(name): $.K8sCommon(name) + kube.ServiceAccount(name) + { - WithImagePullSecrets(secrets, enabled=true):: self + if enabled then { imagePullSecrets+: [secrets] } else {}, - }, - - K8sClusterRole(name): $.K8sCommon(name) + kube.ClusterRole(name) + { - WithRules(rules):: self + { rules+: rules }, - }, - - 
K8sClusterRoleBinding(name): $.K8sCommon(name) + kube.ClusterRoleBinding(name) + { - WithSubjects(subjects):: self + { subjects+: subjects }, - WithRoleRef(roleRef):: self + { roleRef+: roleRef }, - }, - - K8sContainer(name, service_component, secrets_configs): kube.Container(name) { - WithLivenessProbe(healthchecks, spec):: self + if utils.objectHas(healthchecks, 'liveness') then { livenessProbe: HealthCheck(spec.liveness) } else {}, - WithReadinessProbe(healthchecks, spec):: self + if utils.objectHas(healthchecks, 'readiness') then { readinessProbe: HealthCheck(spec.readiness) } else {}, - WithCommand(command):: self + { command: command }, - WithArgs(args):: self + { args: args }, - WithResources(resources):: self + { resources: resources }, - WithPullPolicy(policy):: self + { imagePullPolicy: policy }, - WithImage(image):: self + { image_:: image }, - WithEnvs(envs):: self + { env_: std.prune(envs) }, - WithSecurityContext(security_context):: self + { securityContext+: security_context }, - WithMount(mount, enabled=true):: self + if enabled then { volumeMounts_+: mount } else {}, - WithAllowPrivilegeEscalation(bool, enabled=true):: self + if enabled then { securityContext+: { allowPrivilegeEscalation: bool } } else {}, - WithRunAsUser(user, enabled=true):: self + if enabled then { securityContext+: { runAsUser: user } } else {}, - WithPorts(ports):: self + { ports: [ - { - containerPort: utils.objectGet(ports[port_name], 'container_port', ports[port_name].service_port), - name: port_name, - protocol: 'TCP', - } - for port_name in std.objectFields(ports) - ] }, - local container = self, - image: container.image_, - env_:: {}, - DetectSecretsInEnvs(env, secret_name, secrets_configs):: { - [if 'valueFrom' in env then 'valueFrom']+: { - [if 'secretKeyRef' in env.valueFrom then 'secretKeyRef']+: { - secret_manifest:: secrets_configs[secret_name].manifest, - assert secrets_configs != null : - 'Env var %s in component %s cannot use valueFrom.secretKeyRef because the component does not have a secret' % [ - env.name, - name, - ], - assert !('name' in env.valueFrom.secretKeyRef) : 'Env var %s in %s should not set secretKeyRef.name' % [ - env.name, - name, - ], - name: self.secret_manifest.metadata.name, - }, - }, - }, - - env: if std.length(secrets_configs) > 0 - then - [ - env + self.DetectSecretsInEnvs(env, secret_name, secrets_configs) - for secret_name in std.objectFields(secrets_configs) - for env in std.sort(self.envList(self.env_), keyF=function(x) x.name) - ] - else - std.sort(self.envList(self.env_), keyF=function(x) x.name), - }, - - - //Flag to makes use of "stringData" instead of "data" - local secret_data_type = if utils.objectGet(p, 'secrets_use_string_data', false) then 'stringData' else 'data', - K8sSecret(name, data): $.K8sCommon(name) + kube.Secret(name) { - local secret = self, - local data_resolved = { - [key]: if isRefTag(secret.data[key]) then loadRefFile(secret.data[key]) else secret.data[key] - for key in std.objectFields(secret.data) - }, - - data_digest:: std.md5(std.toString(data_resolved)), - metadata+: { - name: if version_secrets then '%s-%s' % [name, std.substr(secret.data_digest, 0, 8)] else name, - }, - short_name:: name, - [secret_data_type]: { - [key]: if utils.objectGet(data[key], 'b64_encode', false) && secret_data_type == 'data' then - std.base64(data[key].value) - else if utils.objectGet(data[key], 'template', false) != false then - if secret_data_type == 'data' then - std.base64(kapitan.jinja2_template(data[key].template, utils.objectGet(data[key], 
'values', {}))) - else - kapitan.jinja2_template(data[key].template, utils.objectGet(data[key], 'values', {})) - else - data[key].value - for key in std.objectFields(data) - }, - }, - K8sConfigMap(name, data): $.K8sCommon(name) + kube.ConfigMap(name) { - data: { - [key]: if utils.objectGet(data[key], 'template', false) != false then - kapitan.jinja2_template(data[key].template, utils.objectGet(data[key], 'values', {})) - else - data[key].value - for key in std.objectFields(data) - }, - }, - - //TODO(ademaria): Add to upstream kube.libjsonnet and make more generic - K8sMutatingWebhookConfiguration(name): $.K8sCommon(name) + kube._Object('admissionregistration.k8s.io/v1beta1', 'MutatingWebhookConfiguration', name) { - withWebHooks(webhooks):: self + { webhooks+: webhooks }, - }, - - K8sIngress(name): $.K8sCommon(name) + kube.Ingress(name) { - spec+: { rules+: [] }, - WithDefaultBackend(backend):: self + { spec+: { backend: backend } }, - WithRules(rules):: self + if std.length(rules) > 0 then { spec+: { rules+: rules } } else {}, - WithPaths(paths):: self + if std.length(paths) > 0 then { spec+: { rules+: [{ http+: { paths+: paths } }] } } else {}, - }, - K8sGKEManagedCertificate(name): $.K8sCommon(name) + kube.gke.ManagedCertificate(name) { - spec+: { rules+: [] }, - WithDomains(domains):: self + { spec+: { domains+: domains } }, - }, - - K8sNetworkPolicy(name): $.K8sCommon(name) + kube.NetworkPolicy(name) { - WithPodSelector(pod_selector):: self + { spec+: { podSelector: pod_selector}}, - WithIngress(ingress):: self + { spec+: { ingress: ingress}}, - WithEgress(egress):: self + { spec+: { egress: egress}}, - } -} diff --git a/lib/kube-platforms.libsonnet b/lib/kube-platforms.libsonnet deleted file mode 100644 index 28d4bb7e..00000000 --- a/lib/kube-platforms.libsonnet +++ /dev/null @@ -1,21 +0,0 @@ -// Extend kube.libsonnet for platform specific CRDs, drop-in usage as: -// -// local kube = import "kube-platforms.jsonnet"; -// { -// my_deploy: kube.Deployment(...) { ... } -// my_gke_cert: kube.gke.ManagedCertificate(...) { ... } -// } -(import 'kube.libsonnet') { - gke:: { - ManagedCertificate(name): $._Object('networking.gke.io/v1beta1', 'ManagedCertificate', name) { - spec: { - domains: error 'spec.domains array is required', - }, - assert std.length(self.spec.domains) > 0 : "ManagedCertificate '%s' spec.domains array must not be empty" % self.metadata.name, - }, - - BackendConfig(name): $._Object('cloud.google.com/v1beta1', 'BackendConfig', name) { - spec: {}, - }, - }, -} diff --git a/lib/kube.libsonnet b/lib/kube.libsonnet deleted file mode 100644 index 40b4a19d..00000000 --- a/lib/kube.libsonnet +++ /dev/null @@ -1,740 +0,0 @@ -// Generic library of Kubernetes objects (https://github.com/bitnami-labs/kube-libsonnet) -// -// Objects in this file follow the regular Kubernetes API object -// schema with two exceptions: -// -// ## Optional helpers -// -// A few objects have defaults or additional "helper" hidden -// (double-colon) fields that will help with common situations. For -// example, `Service.target_pod` generates suitable `selector` and -// `ports` blocks for the common case of a single-pod/single-port -// service. If for some reason you don't want the helper, just -// provide explicit values for the regular Kubernetes fields that the -// helper *would* have generated, and the helper logic will be -// ignored. -// -// ## The Underscore Convention: -// -// Various constructs in the Kubernetes API use JSON arrays to -// represent unordered sets or named key/value maps. 
This is -// particularly annoying with jsonnet since we want to use jsonnet's -// powerful object merge operation with these constructs. -// -// To combat this, this library attempts to provide more "jsonnet -// native" variants of these arrays in alternative hidden fields that -// end with an underscore. For example, the `env_` block in -// `Container`: -// ``` -// kube.Container("foo") { -// env_: { FOO: "bar" }, -// } -// ``` -// ... produces the expected `container.env` JSON array: -// ``` -// { -// "env": [ -// { "name": "FOO", "value": "bar" } -// ] -// } -// ``` -// -// If you are confused by the underscore versions, or don't want them -// in your situation then just ignore them and set the regular -// non-underscore field as usual. -// -// -// ## TODO -// -// TODO: Expand this to include all API objects. -// -// Should probably fill out all the defaults here too, so jsonnet can -// reference them. In addition, jsonnet validation is more useful -// (client-side, and gives better line information). - -{ - // resource contructors will use kinds/versions/fields compatible at least with version: - minKubeVersion: { - major: 1, - minor: 9, - version: '%s.%s' % [self.major, self.minor], - }, - - // Returns array of values from given object. Does not include hidden fields. - objectValues(o):: [o[field] for field in std.objectFields(o)], - - // Returns array of [key, value] pairs from given object. Does not include hidden fields. - objectItems(o):: [[k, o[k]] for k in std.objectFields(o)], - - // Replace all occurrences of `_` with `-`. - hyphenate(s):: std.join('-', std.split(s, '_')), - - // Convert an octal (as a string) to number, - parseOctal(s):: ( - local len = std.length(s); - local leading = std.substr(s, 0, len - 1); - local last = std.parseInt(std.substr(s, len - 1, 1)); - assert last < 8 : "found '%s' digit >= 8" % [last]; - last + (if len > 1 then 8 * $.parseOctal(leading) else 0) - ), - - // Convert {foo: {a: b}} to [{name: foo, a: b}] - mapToNamedList(o):: [{ name: $.hyphenate(n) } + o[n] for n in std.objectFields(o)], - - // Return object containing only these fields elements - filterMapByFields(o, fields): { [field]: o[field] for field in std.setInter(std.objectFields(o), fields) }, - - // Convert from SI unit suffixes to regular number - siToNum(n):: ( - local convert = - if std.endsWith(n, 'm') then [1, 0.001] - else if std.endsWith(n, 'K') then [1, 1e3] - else if std.endsWith(n, 'M') then [1, 1e6] - else if std.endsWith(n, 'G') then [1, 1e9] - else if std.endsWith(n, 'T') then [1, 1e12] - else if std.endsWith(n, 'P') then [1, 1e15] - else if std.endsWith(n, 'E') then [1, 1e18] - else if std.endsWith(n, 'Ki') then [2, std.pow(2, 10)] - else if std.endsWith(n, 'Mi') then [2, std.pow(2, 20)] - else if std.endsWith(n, 'Gi') then [2, std.pow(2, 30)] - else if std.endsWith(n, 'Ti') then [2, std.pow(2, 40)] - else if std.endsWith(n, 'Pi') then [2, std.pow(2, 50)] - else if std.endsWith(n, 'Ei') then [2, std.pow(2, 60)] - else error 'Unknown numerical suffix in ' + n; - local n_len = std.length(n); - std.parseInt(std.substr(n, 0, n_len - convert[0])) * convert[1] - ), - - local remap(v, start, end, newstart) = - if v >= start && v <= end then v - start + newstart else v, - local remapChar(c, start, end, newstart) = - std.char(remap( - std.codepoint(c), std.codepoint(start), std.codepoint(end), std.codepoint(newstart) - )), - toLower(s):: ( - std.join('', [remapChar(c, 'A', 'Z', 'a') for c in std.stringChars(s)]) - ), - toUpper(s):: ( - std.join('', [remapChar(c, 'a', 'z', 'A') 
for c in std.stringChars(s)]) - ), - - boolXor(x, y):: ((if x then 1 else 0) + (if y then 1 else 0) == 1), - - _Object(apiVersion, kind, name):: { - local this = self, - apiVersion: apiVersion, - kind: kind, - metadata: { - name: name, - labels: { name: std.join('-', std.split(this.metadata.name, ':')) }, - annotations: {}, - }, - }, - - List(): { - apiVersion: 'v1', - kind: 'List', - items_:: {}, - items: $.objectValues(self.items_), - }, - - Namespace(name): $._Object('v1', 'Namespace', name) { - }, - - Endpoints(name): $._Object('v1', 'Endpoints', name) { - Ip(addr):: { ip: addr }, - Port(p):: { port: p }, - - subsets: [], - }, - - Service(name): $._Object('v1', 'Service', name) { - local service = self, - - target_pod:: error 'service target_pod required', - port:: self.target_pod.spec.containers[0].ports[0].containerPort, - - // Helpers that format host:port in various ways - host:: '%s.%s.svc' % [self.metadata.name, self.metadata.namespace], - host_colon_port:: '%s:%s' % [self.host, self.spec.ports[0].port], - http_url:: 'http://%s/' % self.host_colon_port, - proxy_urlpath:: '/api/v1/proxy/namespaces/%s/services/%s/' % [ - self.metadata.namespace, - self.metadata.name, - ], - // Useful in Ingress rules - name_port:: { - serviceName: service.metadata.name, - servicePort: service.spec.ports[0].port, - }, - - spec: { - selector: service.target_pod.metadata.labels, - ports: [ - { - port: service.port, - name: service.target_pod.spec.containers[0].ports[0].name, - targetPort: service.target_pod.spec.containers[0].ports[0].containerPort, - }, - ], - type: 'ClusterIP', - }, - }, - - PersistentVolume(name): $._Object('v1', 'PersistentVolume', name) { - spec: {}, - }, - - // TODO: This is a terrible name - PersistentVolumeClaimVolume(pvc): { - persistentVolumeClaim: { claimName: pvc.metadata.name }, - }, - - StorageClass(name): $._Object('storage.k8s.io/v1beta1', 'StorageClass', name) { - provisioner: error 'provisioner required', - }, - - PersistentVolumeClaim(name): $._Object('v1', 'PersistentVolumeClaim', name) { - local pvc = self, - - storageClass:: null, - storage:: error 'storage required', - - metadata+: if pvc.storageClass != null then { - annotations+: { - 'volume.beta.kubernetes.io/storage-class': pvc.storageClass, - }, - } else {}, - - spec: { - resources: { - requests: { - storage: pvc.storage, - }, - }, - accessModes: ['ReadWriteOnce'], - [if pvc.storageClass != null then 'storageClassName']: pvc.storageClass, - }, - }, - - Container(name): { - name: name, - image: error 'container image value required', - imagePullPolicy: if std.endsWith(self.image, ':latest') then 'Always' else 'IfNotPresent', - - envList(map):: [ - if std.type(map[x]) == 'object' then { name: x, valueFrom: map[x] } else { name: x, value: std.toString(map[x]) } - for x in std.objectFields(map) - ], - - env_:: {}, - env: self.envList(self.env_), - - args_:: {}, - args: ['--%s=%s' % kv for kv in $.objectItems(self.args_)], - - ports_:: {}, - ports: $.mapToNamedList(self.ports_), - - volumeMounts_:: {}, - volumeMounts: $.mapToNamedList(self.volumeMounts_), - - stdin: false, - tty: false, - assert !self.tty || self.stdin : 'tty=true requires stdin=true', - }, - - PodDisruptionBudget(name): $._Object('policy/v1beta1', 'PodDisruptionBudget', name) { - local this = self, - target_pod:: error 'target_pod required', - spec: { - assert $.boolXor( - std.objectHas(self, 'minAvailable'), - std.objectHas(self, 'maxUnavailable') - ) : "PDB '%s': exactly one of minAvailable/maxUnavailable required" % name, - selector: { - 
matchLabels: this.target_pod.metadata.labels, - }, - }, - }, - - Pod(name): $._Object('v1', 'Pod', name) { - spec: $.PodSpec, - }, - - PodSpec: { - // The 'first' container is used in various defaults in k8s. - local container_names = std.objectFields(self.containers_), - default_container:: if std.length(container_names) > 1 then 'default' else container_names[0], - containers_:: {}, - - local container_names_ordered = [self.default_container] + [n for n in container_names if n != self.default_container], - containers: ( - assert std.length(self.containers_) > 0 : 'Pod must have at least one container (via containers_ map)'; - [{ name: $.hyphenate(name) } + self.containers_[name] for name in container_names_ordered if self.containers_[name] != null] - ), - - // Note initContainers are inherently ordered, and using this - // named object will lose that ordering. If order matters, then - // manipulate `initContainers` directly (perhaps - // appending/prepending to `super.initContainers` to mix+match - // both approaches) - initContainers_:: {}, - initContainers: [{ name: $.hyphenate(name) } + self.initContainers_[name] for name in std.objectFields(self.initContainers_) if self.initContainers_[name] != null], - - volumes_:: {}, - volumes: $.mapToNamedList(self.volumes_), - - imagePullSecrets: [], - - terminationGracePeriodSeconds: 30, - - assert std.length(self.containers) > 0 : 'Pod must have at least one container (via containers array)', - - // Return an array of pod's ports numbers - ports(proto):: [ - p.containerPort - for p in std.flattenArrays([ - c.ports - for c in self.containers - ]) - if ( - (!(std.objectHas(p, 'protocol')) && proto == 'TCP') - || - ((std.objectHas(p, 'protocol')) && p.protocol == proto) - ) - ], - - }, - - EmptyDirVolume(): { - emptyDir: {}, - }, - - HostPathVolume(path, type=''): { - hostPath: { path: path, type: type }, - }, - - GitRepoVolume(repository, revision): { - gitRepo: { - repository: repository, - - // "master" is possible, but should be avoided for production - revision: revision, - }, - }, - - SecretVolume(secret): { - secret: { secretName: secret.metadata.name }, - }, - - ConfigMapVolume(configmap): { - configMap: { name: configmap.metadata.name }, - }, - - ConfigMap(name): $._Object('v1', 'ConfigMap', name) { - data: {}, - - // I keep thinking data values can be any JSON type. 
This check - // will remind me that they must be strings :( - local nonstrings = [ - k - for k in std.objectFields(self.data) - if std.type(self.data[k]) != 'string' - ], - assert std.length(nonstrings) == 0 : 'data contains non-string values: %s' % [nonstrings], - }, - - // subtype of EnvVarSource - ConfigMapRef(configmap, key): { - assert std.objectHas(configmap.data, key) : "ConfigMap '%s' doesn't have '%s' field in configmap.data" % [configmap.metadata.name, key], - configMapKeyRef: { - name: configmap.metadata.name, - key: key, - }, - }, - - Secret(name): $._Object('v1', 'Secret', name) { - local secret = self, - - type: 'Opaque', - data_:: {}, - data: { [k]: std.base64(secret.data_[k]) for k in std.objectFields(secret.data_) }, - }, - - // subtype of EnvVarSource - SecretKeyRef(secret, key): { - assert std.objectHas(secret.data, key) : "Secret '%s' doesn't have '%s' field in secret.data" % [secret.metadata.name, key], - secretKeyRef: { - name: secret.metadata.name, - key: key, - }, - }, - - // subtype of EnvVarSource - FieldRef(key): { - fieldRef: { - apiVersion: 'v1', - fieldPath: key, - }, - }, - - // subtype of EnvVarSource - ResourceFieldRef(key, divisor='1'): { - resourceFieldRef: { - resource: key, - divisor: std.toString(divisor), - }, - }, - - Deployment(name): $._Object('apps/v1', 'Deployment', name) { - local deployment = self, - - spec: { - template: { - spec: $.PodSpec, - metadata: { - labels: deployment.metadata.labels, - annotations: {}, - }, - }, - - selector: { - matchLabels: deployment.spec.template.metadata.labels, - }, - - strategy: { - type: 'RollingUpdate', - - local pvcs = [ - v - for v in deployment.spec.template.spec.volumes - if std.objectHas(v, 'persistentVolumeClaim') - ], - local is_stateless = std.length(pvcs) == 0, - - // Apps trying to maintain a majority quorum or similar will - // want to tune these carefully. - // NB: Upstream default is surge=1 unavail=1 - rollingUpdate: if is_stateless then { - maxSurge: '25%', // rounds up - maxUnavailable: '25%', // rounds down - } else { - // Poor-man's StatelessSet. Useful mostly with replicas=1. - maxSurge: 0, - maxUnavailable: 1, - }, - }, - - // NB: Upstream default is 0 - minReadySeconds: 30, - - // NB: Regular k8s default is to keep all revisions - revisionHistoryLimit: 10, - - replicas: 1, - }, - }, - - CrossVersionObjectReference(target): { - apiVersion: target.apiVersion, - kind: target.kind, - name: target.metadata.name, - }, - - HorizontalPodAutoscaler(name): $._Object('autoscaling/v1', 'HorizontalPodAutoscaler', name) { - local hpa = self, - - target:: error 'target required', - - spec: { - scaleTargetRef: $.CrossVersionObjectReference(hpa.target), - - minReplicas: hpa.target.spec.replicas, - maxReplicas: error 'maxReplicas required', - - assert self.maxReplicas >= self.minReplicas, - }, - }, - - StatefulSet(name): $._Object('apps/v1', 'StatefulSet', name) { - local sset = self, - - spec: { - serviceName: name, - - updateStrategy: { - type: 'RollingUpdate', - rollingUpdate: { - partition: 0, - }, - }, - - template: { - spec: $.PodSpec, - metadata: { - labels: sset.metadata.labels, - annotations: {}, - }, - }, - - selector: { - matchLabels: sset.spec.template.metadata.labels, - }, - - volumeClaimTemplates_:: {}, - volumeClaimTemplates: [ - // StatefulSet is overly fussy about "changes" (even when - // they're no-ops). - // In particular annotations={} is apparently a "change", - // since the comparison is ignorant of defaults. 
- std.prune($.PersistentVolumeClaim($.hyphenate(kv[0])) + { apiVersion:: null, kind:: null } + kv[1]) - for kv in $.objectItems(self.volumeClaimTemplates_) - ], - - replicas: 1, - assert self.replicas >= 1, - }, - }, - - Job(name): $._Object('batch/v1', 'Job', name) { - local job = self, - - spec: $.JobSpec { - template+: { - metadata+: { - labels: job.metadata.labels, - }, - }, - }, - }, - - CronJob(name): $._Object('batch/v1beta1', 'CronJob', name) { - local cronjob = self, - - spec: { - jobTemplate: { - spec: $.JobSpec { - template+: { - metadata+: { - labels: cronjob.metadata.labels, - }, - }, - }, - }, - schedule: error 'Need to provide spec.schedule', - successfulJobsHistoryLimit: 10, - failedJobsHistoryLimit: 20, - // NB: upstream concurrencyPolicy default is "Allow" - concurrencyPolicy: 'Forbid', - }, - }, - - JobSpec: { - local this = self, - - template: { - spec: $.PodSpec { - restartPolicy: 'OnFailure', - }, - }, - completions: 1, - parallelism: 1, - }, - - DaemonSet(name): $._Object('apps/v1', 'DaemonSet', name) { - local ds = self, - spec: { - updateStrategy: { - type: 'RollingUpdate', - rollingUpdate: { - maxUnavailable: 1, - }, - }, - template: { - metadata: { - labels: ds.metadata.labels, - annotations: {}, - }, - spec: $.PodSpec, - }, - - selector: { - matchLabels: ds.spec.template.metadata.labels, - }, - }, - }, - - Ingress(name): $._Object('networking.k8s.io/v1', 'Ingress', name) { - spec: {}, - - local rel_paths = [ - p.path - for r in self.spec.rules - for p in r.http.paths - if !std.startsWith(p.path, '/') - ], - assert std.length(rel_paths) == 0 : 'paths must be absolute: ' + rel_paths, - }, - - ThirdPartyResource(name): $._Object('extensions/v1beta1', 'ThirdPartyResource', name) { - versions_:: [], - versions: [{ name: n } for n in self.versions_], - }, - - CustomResourceDefinition(group, version, kind): { - local this = self, - apiVersion: 'apiextensions.k8s.io/v1beta1', - kind: 'CustomResourceDefinition', - metadata+: { - name: this.spec.names.plural + '.' + this.spec.group, - }, - spec: { - scope: 'Namespaced', - group: group, - version: version, - names: { - kind: kind, - singular: $.toLower(self.kind), - plural: self.singular + 's', - listKind: self.kind + 'List', - }, - }, - }, - - ServiceAccount(name): $._Object('v1', 'ServiceAccount', name) { - }, - - Role(name): $._Object('rbac.authorization.k8s.io/v1beta1', 'Role', name) { - rules: [], - }, - - ClusterRole(name): $.Role(name) { - kind: 'ClusterRole', - }, - - Group(name): { - kind: 'Group', - name: name, - apiGroup: 'rbac.authorization.k8s.io', - }, - - User(name): { - kind: 'User', - name: name, - apiGroup: 'rbac.authorization.k8s.io', - }, - - RoleBinding(name): $._Object('rbac.authorization.k8s.io/v1beta1', 'RoleBinding', name) { - local rb = self, - - subjects_:: [], - subjects: [{ - kind: o.kind, - namespace: o.metadata.namespace, - name: o.metadata.name, - } for o in self.subjects_], - - roleRef_:: error 'roleRef is required', - roleRef: { - apiGroup: 'rbac.authorization.k8s.io', - kind: rb.roleRef_.kind, - name: rb.roleRef_.metadata.name, - }, - }, - - ClusterRoleBinding(name): $.RoleBinding(name) { - kind: 'ClusterRoleBinding', - }, - - // NB: encryptedData can be imported into a SealedSecret as follows: - // kubectl get secret ... 
-ojson mysec | kubeseal | jq -r .spec.encryptedData > sealedsecret.json - // encryptedData: std.parseJson(importstr "sealedsecret.json") - SealedSecret(name): $._Object('bitnami.com/v1alpha1', 'SealedSecret', name) { - spec: { - encryptedData: {}, - }, - assert std.length(std.objectFields(self.spec.encryptedData)) != 0 : "SealedSecret '%s' has empty encryptedData field" % name, - }, - - // NB: helper method to access several Kubernetes objects podRef, - // used below to extract its labels - podRef(obj):: ({ - Pod: obj, - Deployment: obj.spec.template, - StatefulSet: obj.spec.template, - DaemonSet: obj.spec.template, - Job: obj.spec.template, - CronJob: obj.spec.jobTemplate.spec.template, - }[obj.kind]), - - // NB: return a { podSelector: ... } ready to use for e.g. NSPs (see below) - // pod labels can be optionally filtered by their label name 2nd array arg - podLabelsSelector(obj, filter=null):: { - podSelector: std.prune({ - matchLabels: - if filter != null then $.filterMapByFields($.podRef(obj).metadata.labels, filter) - else $.podRef(obj).metadata.labels, - }), - }, - - // NB: Returns an array as [{ port: num, protocol: "PROTO" }, {...}, ... ] - // Need to split TCP, UDP logic to be able to dedup each set of protocol ports - podsPorts(obj_list):: std.flattenArrays([ - [ - { port: port, protocol: protocol } - for port in std.set( - std.flattenArrays([$.podRef(obj).spec.ports(protocol) for obj in obj_list]) - ) - ] - for protocol in ['TCP', 'UDP'] - ]), - - // NB: most of the "helper" stuff comes from above (podLabelsSelector, podsPorts), - // NetworkPolicy returned object will have "Ingress", "Egress" policyTypes auto-set - // based on populated spec.ingress or spec.egress - // See tests/test-simple-validate.jsonnet for example(s). - NetworkPolicy(name): $._Object('networking.k8s.io/v1', 'NetworkPolicy', name) { - local networkpolicy = self, - spec: { - policyTypes: std.prune([ - if networkpolicy.spec.ingress != [] then 'Ingress' else null, - if networkpolicy.spec.egress != [] then 'Egress' else null, - ]), - ingress: $.objectValues(self.ingress_), - ingress_:: {}, - egress: $.objectValues(self.egress_), - egress_:: {}, - podSelector: {}, - }, - }, - - VerticalPodAutoscaler(name):: $._Object('autoscaling.k8s.io/v1beta2', 'VerticalPodAutoscaler', name) { - local vpa = self, - - target:: error 'target required', - - spec: { - targetRef: $.CrossVersionObjectReference(vpa.target), - - updatePolicy: { - updateMode: 'Auto', - }, - }, - }, - // Helper function to ease VPA creation as e.g.: - // foo_vpa:: kube.createVPAFor($.foo_deploy) - createVPAFor(target, mode='Auto'):: $.VerticalPodAutoscaler(target.metadata.name) { - target:: target, - - metadata+: { - namespace: target.metadata.namespace, - labels+: target.metadata.labels, - }, - spec+: { - updatePolicy+: { - updateMode: mode, - }, - }, - }, -} diff --git a/lib/utils.libsonnet b/lib/utils.libsonnet deleted file mode 100644 index fd289e8c..00000000 --- a/lib/utils.libsonnet +++ /dev/null @@ -1,48 +0,0 @@ -// Generic Jsonnet utilities. We might want to propose most of these to the Jsonnet stdlib. 
- -local id = function(value) value; - -{ - objectGet(object, key, default=null):: if key in object then object[key] else default, - objectHas(object, key, default=false):: if key in object && object[key] != null then true else default, - - objectValues(object):: [object[k] for k in std.objectFields(object)], - - mergeObjects(objects):: std.foldl(function(a, b) a + b, objects, {}), - trace(object, text=''):: std.trace('Trace %s: %s' % [text, std.toString(object)], object), - - deepMerge(obj1, obj2):: - local isIterable = function(value) $.arrayHas(['object', 'array'], std.type(value)); - - assert isIterable(obj1) : 'obj1 must be iterable'; - assert isIterable(obj2) : 'obj2 must be iterable'; - assert std.type(obj1) == std.type(obj2) : 'obj1 and obj2 must be of the same type'; - - obj1 - + - obj2 - + ( - if std.type(obj1) == 'object' then { - [k]: $.deepMerge(obj1[k], obj2[k]) - for k in std.objectFields(obj2) - if k in obj1 && isIterable(obj1[k]) && isIterable(obj2[k]) - } else [] - ) - , - - arrayHas(array, item):: [i for i in array if i == item] != [], - - // Report whether of the elements in `array` is true - any(array, func=id):: [v for v in array if func(v)] != [], - - coalesce(values):: - local non_null_values = [v for v in values if v != null]; - if non_null_values != [] then non_null_values[0] else null - , - - jsonSchema:: { - nullable(objectSchema):: { - anyOf: [objectSchema, { type: 'null' }], - }, - }, -} diff --git a/resources/state/kapicorp-terraform-admin/.terraform.lock.hcl b/resources/state/kapicorp-terraform-admin/.terraform.lock.hcl deleted file mode 100644 index ebfdda6a..00000000 --- a/resources/state/kapicorp-terraform-admin/.terraform.lock.hcl +++ /dev/null @@ -1,20 +0,0 @@ -# This file is maintained automatically by "terraform init". -# Manual edits may be lost in future updates. - -provider "registry.terraform.io/hashicorp/google" { - version = "3.46.0" - constraints = "3.46.0" - hashes = [ - "h1:DRSfO9KO00maugker2DFDQ4eyrmFJKCnsc1o3qj/1Zc=", - "zh:0c3f811578709dd11010354da086b4dc81dcc8e387a87e4a3a954f6717bcc941", - "zh:2314d3caabfcfd653b26f6c84062003e0c692996e3158a404441788aec6b625d", - "zh:2867ad89ef146ee2ab76abe7d2b27e9b1265e8d5da6f7a2521ca8575278001b9", - "zh:4cb07dce9c77b0ef76afdd051c81bac481fa323341668d076029718bb529dcba", - "zh:70c9a72bcf3665fa511fef9c3b97585ba5e93447eefa08fae6ff19562236c292", - "zh:732026edc545b963938966429a50663f4784e6331140401c4627a36333bf8543", - "zh:b68238a5d118f4a776283d3b70086b9087d33ff7c08713653acf971d5046c713", - "zh:cbfa870beb4d1f7e7e76e9f7183253408e05a5b59cca066272ffec1e6669649e", - "zh:d39e163230715a4f6b85fe871322e68eea77cb3551431daecd4e19ce1ffe3c1c", - "zh:f6e3dff88beeb72eaf8052af20f7e7d21bf44754726bf1bd3a72fceb9357b5b9", - ] -} diff --git a/scripts/import_kubernetes_clusters b/scripts/import_kubernetes_clusters deleted file mode 100755 index ff0a183f..00000000 --- a/scripts/import_kubernetes_clusters +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -# Import kubernetes cluster configuration using gcloud -# Usage: ./import_kubernetes_clusters [ gcp_project [..] 
] - -set -e -GCLOUD="gcloud" -GCLOUD_CLUSTER_LIST="${GCLOUD} container clusters list" - -PROJECTS_LIST=() -if [[ ${#@} -gt 0 ]] -then - PROJECTS_LIST+=${@} -else - PROJECTS_LIST+=$(${GCLOUD} projects list --format="value(project_id)") -fi - -for PROJECT in ${PROJECTS_LIST} -do - -set +e -read -r -d '' VAR << EOM -{ - "classes": [ "kapitan.templates.kubernetes" ], - "parameters": { - "google_project": "${PROJECT}", - "cluster": "\${\(.name)}", - (.name) : { - "name": .name, - "google_project": "${PROJECT}", - "zone": .zone, - "type": "gke", - "cluster": .name, - "id": "gke_${PROJECT}_\(.zone)_\(.name)", - "user": "gke_${PROJECT}_\(.zone)_\(.name)", - } - } -} -EOM - -echo "Processing project ${PROJECT}" - for CLUSTER in $(${GCLOUD_CLUSTER_LIST} --project ${PROJECT} --format="value(name)") - do - echo "....discovered Kubernetes cluster $CLUSTER on project $PROJECT" - CLUSTER_DIR="$(dirname $0)/../inventory/classes/projects/$PROJECT/kubernetes" - CLUSTER_FILE="${CLUSTER_DIR}/${CLUSTER}.yml" - mkdir -p ${CLUSTER_DIR} - $GCLOUD_CLUSTER_LIST --project $PROJECT --format json --filter="name=$CLUSTER" | \ - yq -r ".[] | $VAR" --yaml-output > ${CLUSTER_FILE} - done -done diff --git a/state/terraform.tfstate b/state/terraform.tfstate deleted file mode 100644 index 44b6682b..00000000 --- a/state/terraform.tfstate +++ /dev/null @@ -1,318 +0,0 @@ -{ - "version": 4, - "terraform_version": "0.14.7", - "serial": 29, - "lineage": "8cce8e1d-c756-f2e1-0c58-b2aa1b20b0b0", - "outputs": {}, - "resources": [ - { - "mode": "managed", - "type": "google_organization_iam_member", - "name": "terraform_billing", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "condition": [], - "etag": "BwW80tZcbvY=", - "id": "163756623419/roles/billing.user/serviceaccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "member": "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "org_id": "163756623419", - "role": "roles/billing.user" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_organization_iam_member", - "name": "terraform_owner", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "condition": [], - "etag": "BwW80tZcbvY=", - "id": "163756623419/roles/owner/serviceaccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "member": "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "org_id": "163756623419", - "role": "roles/owner" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project", - "name": "project", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "auto_create_network": true, - "billing_account": "017012-945270-0844F0", - "folder_id": "", - "id": "projects/kapicorp-terraform-admin", - "labels": {}, - "name": "Terraform Admin Project", - "number": "701357227130", - "org_id": "163756623419", - "project_id": "kapicorp-terraform-admin", - "skip_delete": null, - "timeouts": { - "create": null, - "delete": null, - "read": null, - "update": null - } - }, - "sensitive_attributes": [], - "private": 
"eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwLCJyZWFkIjo2MDAwMDAwMDAwMDAsInVwZGF0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=" - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_cloudbilling_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/cloudbilling.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "cloudbilling.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_cloudresourcemanager_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/cloudresourcemanager.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "cloudresourcemanager.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_compute_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/storage-component.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "storage-component.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_container_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/container.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "container.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_iam_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - 
"disable_on_destroy": true, - "id": "kapicorp-terraform-admin/iam.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "iam.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_serviceusage_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/serviceusage.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "serviceusage.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_service_account", - "name": "terraform", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "account_id": "terraform", - "description": "Terraform Service Account", - "display_name": "", - "email": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "id": "projects/kapicorp-terraform-admin/serviceAccounts/terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "name": "projects/kapicorp-terraform-admin/serviceAccounts/terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "project": "kapicorp-terraform-admin", - "timeouts": null, - "unique_id": "110742429027280734673" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9fQ==" - } - ] - }, - { - "mode": "managed", - "type": "google_storage_bucket", - "name": "terraform-state", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket_policy_only": false, - "cors": [], - "default_event_based_hold": false, - "encryption": [], - "force_destroy": false, - "id": "state-kapicorp-terraform-admin", - "labels": {}, - "lifecycle_rule": [], - "location": "EU", - "logging": [], - "name": "state-kapicorp-terraform-admin", - "project": "kapicorp-terraform-admin", - "requester_pays": false, - "retention_policy": [], - "self_link": "https://www.googleapis.com/storage/v1/b/state-kapicorp-terraform-admin", - "storage_class": "MULTI_REGIONAL", - "uniform_bucket_level_access": false, - "url": "gs://state-kapicorp-terraform-admin", - "versioning": [], - "website": [] - }, - "sensitive_attributes": [], - "private": "bnVsbA==" - } - ] - }, - { - "mode": "managed", - "type": "google_storage_bucket_iam_binding", - "name": "binding", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket": "b/state-kapicorp-terraform-admin", - "condition": [], - "etag": "CAI=", - "id": "b/state-kapicorp-terraform-admin/roles/storage.admin", - "members": [ - "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" - ], - "role": "roles/storage.admin" - }, - 
"sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform", - "google_storage_bucket.terraform-state" - ] - } - ] - } - ] -} diff --git a/state/terraform.tfstate.backup b/state/terraform.tfstate.backup deleted file mode 100644 index aeeae3f8..00000000 --- a/state/terraform.tfstate.backup +++ /dev/null @@ -1,294 +0,0 @@ -{ - "version": 4, - "terraform_version": "0.14.7", - "serial": 27, - "lineage": "8cce8e1d-c756-f2e1-0c58-b2aa1b20b0b0", - "outputs": {}, - "resources": [ - { - "mode": "managed", - "type": "google_organization_iam_member", - "name": "terraform_billing", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "condition": [], - "etag": "BwW80tZcbvY=", - "id": "163756623419/roles/billing.user/serviceaccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "member": "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "org_id": "163756623419", - "role": "roles/billing.user" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_organization_iam_member", - "name": "terraform_owner", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "condition": [], - "etag": "BwW80tZcbvY=", - "id": "163756623419/roles/owner/serviceaccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "member": "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "org_id": "163756623419", - "role": "roles/owner" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project", - "name": "project", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 1, - "attributes": { - "auto_create_network": true, - "billing_account": "017012-945270-0844F0", - "folder_id": "", - "id": "projects/kapicorp-terraform-admin", - "labels": {}, - "name": "Terraform Admin Project", - "number": "701357227130", - "org_id": "163756623419", - "project_id": "kapicorp-terraform-admin", - "skip_delete": null, - "timeouts": { - "create": null, - "delete": null, - "read": null, - "update": null - } - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjo2MDAwMDAwMDAwMDAsImRlbGV0ZSI6NjAwMDAwMDAwMDAwLCJyZWFkIjo2MDAwMDAwMDAwMDAsInVwZGF0ZSI6NjAwMDAwMDAwMDAwfSwic2NoZW1hX3ZlcnNpb24iOiIxIn0=" - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_cloudbilling_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/cloudbilling.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "cloudbilling.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": 
"google_project_service", - "name": "enable_cloudresourcemanager_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/cloudresourcemanager.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "cloudresourcemanager.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_compute_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/storage-component.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "storage-component.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_iam_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/iam.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "iam.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_project_service", - "name": "enable_serviceusage_service", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "disable_dependent_services": null, - "disable_on_destroy": true, - "id": "kapicorp-terraform-admin/serviceusage.googleapis.com", - "project": "kapicorp-terraform-admin", - "service": "serviceusage.googleapis.com", - "timeouts": null - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjoxMjAwMDAwMDAwMDAwLCJkZWxldGUiOjEyMDAwMDAwMDAwMDAsInJlYWQiOjYwMDAwMDAwMDAwMCwidXBkYXRlIjoxMjAwMDAwMDAwMDAwfX0=", - "dependencies": [ - "google_project.project" - ] - } - ] - }, - { - "mode": "managed", - "type": "google_service_account", - "name": "terraform", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "account_id": "terraform", - "description": "Terraform Service Account", - "display_name": "", - "email": "terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "id": "projects/kapicorp-terraform-admin/serviceAccounts/terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "name": 
"projects/kapicorp-terraform-admin/serviceAccounts/terraform@kapicorp-terraform-admin.iam.gserviceaccount.com", - "project": "kapicorp-terraform-admin", - "timeouts": null, - "unique_id": "110742429027280734673" - }, - "sensitive_attributes": [], - "private": "eyJlMmJmYjczMC1lY2FhLTExZTYtOGY4OC0zNDM2M2JjN2M0YzAiOnsiY3JlYXRlIjozMDAwMDAwMDAwMDB9fQ==" - } - ] - }, - { - "mode": "managed", - "type": "google_storage_bucket", - "name": "terraform-state", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket_policy_only": false, - "cors": [], - "default_event_based_hold": false, - "encryption": [], - "force_destroy": false, - "id": "state-kapicorp-terraform-admin", - "labels": {}, - "lifecycle_rule": [], - "location": "EU", - "logging": [], - "name": "state-kapicorp-terraform-admin", - "project": "kapicorp-terraform-admin", - "requester_pays": false, - "retention_policy": [], - "self_link": "https://www.googleapis.com/storage/v1/b/state-kapicorp-terraform-admin", - "storage_class": "MULTI_REGIONAL", - "uniform_bucket_level_access": false, - "url": "gs://state-kapicorp-terraform-admin", - "versioning": [], - "website": [] - }, - "sensitive_attributes": [], - "private": "bnVsbA==" - } - ] - }, - { - "mode": "managed", - "type": "google_storage_bucket_iam_binding", - "name": "binding", - "provider": "provider[\"registry.terraform.io/hashicorp/google\"]", - "instances": [ - { - "schema_version": 0, - "attributes": { - "bucket": "b/state-kapicorp-terraform-admin", - "condition": [], - "etag": "CAI=", - "id": "b/state-kapicorp-terraform-admin/roles/storage.admin", - "members": [ - "serviceAccount:terraform@kapicorp-terraform-admin.iam.gserviceaccount.com" - ], - "role": "roles/storage.admin" - }, - "sensitive_attributes": [], - "private": "bnVsbA==", - "dependencies": [ - "google_service_account.terraform", - "google_storage_bucket.terraform-state" - ] - } - ] - } - ] -} diff --git a/Docs/tmpl_daemonset.yml b/system/docs/tmpl_daemonset.yml similarity index 99% rename from Docs/tmpl_daemonset.yml rename to system/docs/tmpl_daemonset.yml index 65244ef9..32dd5244 100644 --- a/Docs/tmpl_daemonset.yml +++ b/system/docs/tmpl_daemonset.yml @@ -284,7 +284,7 @@ parameters: : versioned: true b64_encode: false - value: ?{plain:targets/${target_name}//||randomstr:16|base64} + value: ?{plain:targets/${target_name}//||random:str:16|base64} # ------------------------------------------------- # ConfigMaps diff --git a/Docs/tmpl_deployment.yml b/system/docs/tmpl_deployment.yml similarity index 99% rename from Docs/tmpl_deployment.yml rename to system/docs/tmpl_deployment.yml index 1240a1c6..e6ec6e2a 100644 --- a/Docs/tmpl_deployment.yml +++ b/system/docs/tmpl_deployment.yml @@ -290,7 +290,7 @@ parameters: : versioned: true b64_encode: false - value: ?{plain:targets/${target_name}//||randomstr:16|base64} + value: ?{plain:targets/${target_name}//||random:str:16|base64} # ------------------------------------------------- # ConfigMaps diff --git a/Docs/tmpl_helm.yml b/system/docs/tmpl_helm.yml similarity index 100% rename from Docs/tmpl_helm.yml rename to system/docs/tmpl_helm.yml diff --git a/Docs/tmpl_job.yml b/system/docs/tmpl_job.yml similarity index 98% rename from Docs/tmpl_job.yml rename to system/docs/tmpl_job.yml index 33086a9f..5a51d699 100644 --- a/Docs/tmpl_job.yml +++ b/system/docs/tmpl_job.yml @@ -104,7 +104,7 @@ parameters: : versioned: true b64_encode: false - value: ?{plain:targets/${target_name}//||randomstr:16|base64} 
+ value: ?{plain:targets/${target_name}//||random:str:16|base64} # ------------------------------------------------- # ConfigMaps diff --git a/Docs/tmpl_statefulset.yml b/system/docs/tmpl_statefulset.yml similarity index 99% rename from Docs/tmpl_statefulset.yml rename to system/docs/tmpl_statefulset.yml index d45b08ac..5ca8aa4a 100644 --- a/Docs/tmpl_statefulset.yml +++ b/system/docs/tmpl_statefulset.yml @@ -296,7 +296,7 @@ parameters: : versioned: true b64_encode: false - value: ?{plain:targets/${target_name}//||randomstr:16|base64} + value: ?{plain:targets/${target_name}//||random:str:16|base64} # ------------------------------------------------- # ConfigMaps diff --git a/components/kstmz/__init__.py b/system/generators/kstmz/__init__.py similarity index 93% rename from components/kstmz/__init__.py rename to system/generators/kstmz/__init__.py index d94cfe68..9a0ba617 100644 --- a/components/kstmz/__init__.py +++ b/system/generators/kstmz/__init__.py @@ -2,7 +2,7 @@ from kapitan.inputs.kadet import BaseModel, load_from_search_paths -kgenlib = load_from_search_paths("generators") +kgenlib = load_from_search_paths("kgenlib") def main(input_params): diff --git a/components/generators/kubernetes/README.md b/system/generators/kubernetes/README.md similarity index 99% rename from components/generators/kubernetes/README.md rename to system/generators/kubernetes/README.md index 139a038b..9d6c75fc 100644 --- a/components/generators/kubernetes/README.md +++ b/system/generators/kubernetes/README.md @@ -453,7 +453,7 @@ secrets: value: my_secret b64_encode: true better_secret: - value: ?{gkms:targets/${target_name}/password||randomstr|base64} + value: ?{gkms:targets/${target_name}/password||random:str|base64} ``` which will generate an truly encrypted secret using Google KMS (other backends also available) @@ -503,7 +503,7 @@ parameters: plain-plain-connection: string_data: CONNECTION: - value: postgresql://?{plain:targets/${target_name}/shared-password-plain-as-plain-user||randomstr:35}:?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||randomstr:35}/database + value: postgresql://?{plain:targets/${target_name}/shared-password-plain-as-plain-user||random:str:35}:?{plain:targets/${target_name}/shared-password-plain-as-plain-pass||random:str:35}/database ``` ## Deployment diff --git a/system/generators/kubernetes/__init__.py b/system/generators/kubernetes/__init__.py new file mode 100644 index 00000000..abc0a985 --- /dev/null +++ b/system/generators/kubernetes/__init__.py @@ -0,0 +1,19 @@ +import logging + +logger = logging.getLogger(__name__) + +from kapitan.inputs.kadet import inventory + +from .common import kgenlib + +# Loads generators dynamically +kgenlib.load_generators(__name__, __file__) + + +def main(input_params): + target_inventory = inventory(lazy=True) + generator = kgenlib.BaseGenerator(inventory=target_inventory) + store = generator.generate() + store.process_mutations(input_params.get("mutations", {})) + + return store.dump() diff --git a/system/generators/kubernetes/argocd.py b/system/generators/kubernetes/argocd.py new file mode 100644 index 00000000..04f65f18 --- /dev/null +++ b/system/generators/kubernetes/argocd.py @@ -0,0 +1,105 @@ +import json +import logging + +logger = logging.getLogger(__name__) + +from .base import Namespace +from .common import KubernetesResource, kgenlib + + +class ArgoCDApplication(KubernetesResource): + source: dict = None + kind = "Application" + api_version = "argoproj.io/v1alpha1" + + def body(self): + super().body() + project = 
self.config.get("project", "default") + self.root.spec.project = project + self.root.spec.destination = self.config.get("destination") + self.root.spec.source = self.config.get("source") + if self.config.get("sync_policy"): + self.root.spec.syncPolicy = self.config.get("sync_policy") + + self.root.spec.ignoreDifferences = self.config.get("ignore_differences", None) + namespace = self.config.get("namespace", None) + + if namespace is None: + namespace = f"argocd-project-{project}" + + self.set_namespace(namespace) + + +@kgenlib.register_generator( + path="generators.argocd.applications", + global_generator=True, + activation_path="argocd.app_of_apps", + apply_patches=["generators.argocd.defaults.application"], +) +class GenArgoCDApplication(kgenlib.BaseStore): + def body(self): + config = self.config + namespace = config.get("namespace", "argocd") + name = config.get("name", self.name) + + argo_application = ArgoCDApplication( + name=name, namespace=namespace, config=config + ) + self.add(argo_application) + + +class ArgoCDProject(KubernetesResource): + kind = "AppProject" + api_version = "argoproj.io/v1alpha1" + + def body(self): + super().body() + self.root.spec.sourceRepos = self.config.get("source_repos") + self.root.spec.destinations = self.config.get("destinations") + if self.config.get("cluster_resource_whitelist"): + self.root.spec.clusterResourceWhitelist = self.config.get( + "cluster_resource_whitelist" + ) + self.root.spec.sourceNamespaces = self.config.setdefault( + "source_namespaces", [f"argocd-project-{self.name}"] + ) + + +@kgenlib.register_generator( + path="generators.argocd.projects", + apply_patches=["generators.argocd.defaults.project"], +) +class GenArgoCDProject(kgenlib.BaseStore): + def body(self): + config = self.config + namespace = config.get("namespace", "argocd") + name = config.get("name", self.name) + + self.add(ArgoCDProject(name=name, namespace=namespace, config=config)) + self.add(Namespace(name=f"argocd-project-{name}", config=config)) + + +@kgenlib.register_generator( + path="clusters", global_generator=True, activation_path="argocd.clusters" +) +class GenArgoCDCluster(kgenlib.BaseStore): + def body(self): + config = self.config + target = self.target + namespace = self.global_inventory[target]["parameters"]["namespace"] + name = config.get("name") + cluster = ArgoCDCluster(name=name, namespace=namespace, config=config) + + self.add(cluster) + + +class ArgoCDCluster(KubernetesResource): + kind = "Secret" + api_version = "v1" + + def body(self): + super().body() + self.add_label("argocd.argoproj.io/secret-type", "cluster") + self.root.stringData.name = self.config.argocd.name + self.root.stringData.server = self.config.endpoint_url + self.root.stringData.config = json.dumps(self.config.argocd.config, indent=4) diff --git a/system/generators/kubernetes/autoscaling.py b/system/generators/kubernetes/autoscaling.py new file mode 100644 index 00000000..0c305bd7 --- /dev/null +++ b/system/generators/kubernetes/autoscaling.py @@ -0,0 +1,69 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource + + +class KedaScaledObject(KubernetesResource): + kind = "ScaledObject" + api_version = "keda.sh/v1alpha1" + + def body(self): + super().body() + config = self.config + workload = self.workload + self.root.spec.scaleTargetRef.name = workload.root.metadata.name + self.root.spec.scaleTargetRef.kind = workload.root.kind + self.root.spec.scaleTargetRef.apiVersion = workload.root.apiVersion + 
self.root.spec.update(config.get("keda_scaled_object", {})) + + +class PodDisruptionBudget(KubernetesResource): + kind = "PodDisruptionBudget" + api_version = "policy/v1beta1" + + def body(self): + super().body() + config = self.config + workload = self.workload + if config.auto_pdb: + self.root.spec.maxUnavailable = 1 + else: + self.root.spec.minAvailable = config.pdb_min_available + self.root.spec.selector.matchLabels = ( + workload.root.spec.template.metadata.labels + ) + + +class VerticalPodAutoscaler(KubernetesResource): + kind = "VerticalPodAutoscaler" + api_version = "autoscaling.k8s.io/v1" + + def body(self): + super().body() + config = self.config + workload = self.workload + self.add_labels(workload.root.metadata.labels) + self.root.spec.targetRef.apiVersion = workload.api_version + self.root.spec.targetRef.kind = workload.kind + self.root.spec.targetRef.name = workload.name + self.root.spec.updatePolicy.updateMode = config.vpa.get("update_mode", "Auto") + self.root.spec.resourcePolicy = config.vpa.get("resource_policy", {}) + + +class HorizontalPodAutoscaler(KubernetesResource): + kind = "HorizontalPodAutoscaler" + api_version = "autoscaling.k8s.io/v2beta2" + + def body(self): + super().body() + config = self.config + workload = self.workload + self.add_labels(workload.root.metadata.labels) + self.root.spec.scaleTargetRef.apiVersion = workload.api_version + self.root.spec.scaleTargetRef.kind = workload.kind + self.root.spec.scaleTargetRef.name = workload.name + self.root.spec.minReplicas = config.hpa.min_replicas + self.root.spec.maxReplicas = config.hpa.max_replicas + self.root.spec.metrics = config.hpa.metrics diff --git a/system/generators/kubernetes/base.py b/system/generators/kubernetes/base.py new file mode 100644 index 00000000..63ca4743 --- /dev/null +++ b/system/generators/kubernetes/base.py @@ -0,0 +1,51 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource, kgenlib + + +class MutatingWebhookConfiguration(KubernetesResource): + kind = "MutatingWebhookConfiguration" + api_version = "admissionregistration.k8s.io/v1" + + def new(self): + super().new() + + def body(self): + super().body() + name = self.name + config = self.config + self.root.webhooks = config.webhooks + + +class PriorityClass(KubernetesResource): + kind = "PriorityClass" + api_version = "scheduling.k8s.io/v1" + priority: int + + def body(self): + super().body() + config = self.config + self.root.value = self.priority + self.root.globalDefault = False + + +class Namespace(KubernetesResource): + kind = "Namespace" + api_version = "v1" + + def body(self): + super().body() + config = self.config + labels = config.get("labels", {}) + annotations = config.get("annotations", {}) + self.add_labels(labels) + self.add_annotations(annotations) + + +@kgenlib.register_generator(path="generators.kubernetes.namespace") +class NamespaceGenerator(kgenlib.BaseStore): + def body(self): + name = self.config.get("name", self.name) + self.add(Namespace(name=name, config=self.config)) diff --git a/system/generators/kubernetes/certmanager.py b/system/generators/kubernetes/certmanager.py new file mode 100644 index 00000000..2a95a7d9 --- /dev/null +++ b/system/generators/kubernetes/certmanager.py @@ -0,0 +1,38 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource, kgenlib + + +@kgenlib.register_generator(path="certmanager.issuer") +class CertManagerIssuer(KubernetesResource): + kind = "Issuer" + api_version = "cert-manager.io/v1" + + def 
body(self): + config = self.config + super().body() + self.root.spec = config.get("spec") + + +@kgenlib.register_generator(path="certmanager.cluster_issuer") +class CertManagerClusterIssuer(KubernetesResource): + kind = "ClusterIssuer" + api_version = "cert-manager.io/v1" + + def body(self): + config = self.config + super().body() + self.root.spec = config.get("spec") + + +@kgenlib.register_generator(path="certmanager.certificate") +class CertManagerCertificate(KubernetesResource): + kind = "Certificate" + api_version = "cert-manager.io/v1" + + def body(self): + config = self.config + super().body() + self.root.spec = config.get("spec") diff --git a/components/generators/kubernetes/common.py b/system/generators/kubernetes/common.py similarity index 79% rename from components/generators/kubernetes/common.py rename to system/generators/kubernetes/common.py index 655b5784..2813eba3 100644 --- a/components/generators/kubernetes/common.py +++ b/system/generators/kubernetes/common.py @@ -1,27 +1,27 @@ import logging -from kapitan.inputs.kadet import BaseModel, BaseObj, load_from_search_paths - -kgenlib = load_from_search_paths("generators") - logger = logging.getLogger(__name__) +from kapitan.inputs.kadet import BaseObj, load_from_search_paths -class ResourceType(BaseModel): - kind: str - id: str - api_version: str +kgenlib = load_from_search_paths("kgenlib") class KubernetesResource(kgenlib.BaseContent): - resource_type: ResourceType name: str + api_version: str + kind: str namespace: str = None config: dict = None - api_version: str = None - kind: str = None rendered_name: str = None - id: str = None + + def __eq__(self, other): + return ( + self.root.metadata.name == other.root.metadata.name + and self.root.kind == other.root.kind + and self.root.apiVersion == other.root.apiVersion + and self.root.metadata.namespace == other.root.metadata.namespace + ) @classmethod def from_baseobj(cls, baseobj: BaseObj): @@ -29,10 +29,9 @@ def from_baseobj(cls, baseobj: BaseObj): kind = baseobj.root.kind api_version = baseobj.root.apiVersion - id = kind.lower() + name = baseobj.root.metadata.name - resource_type = ResourceType(kind=kind, api_version=api_version, id=id) - resource = cls(resource_type=resource_type, name=baseobj.root.metadata.name) + resource = cls(name=name, api_version=api_version, kind=kind) resource.root = baseobj.root return resource @@ -41,9 +40,6 @@ def component_name(self): return self.get_label("app.kapicorp.dev/component") or self.name def new(self): - self.kind = self.resource_type.kind - self.api_version = self.resource_type.api_version - self.id = self.resource_type.id if self.config: if not self.namespace: self.namespace = self.config.get("namespace", None) @@ -80,7 +76,7 @@ def add_annotations(self, annotations: dict): for key, value in annotations.items(): self.add_annotation(key, value) - def add_namespace(self, namespace: str): + def set_namespace(self, namespace: str): self.root.metadata.namespace = namespace def set_labels(self, labels: dict): diff --git a/system/generators/kubernetes/gke.py b/system/generators/kubernetes/gke.py new file mode 100644 index 00000000..ca436b27 --- /dev/null +++ b/system/generators/kubernetes/gke.py @@ -0,0 +1,14 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource + + +class BackendConfig(KubernetesResource): + kind = "BackendConfig" + api_version = "cloud.google.com/v1" + + def body(self): + super().body() + self.root.spec = self.config.backend_config diff --git 
a/system/generators/kubernetes/helm.py b/system/generators/kubernetes/helm.py new file mode 100644 index 00000000..4c05c4e2 --- /dev/null +++ b/system/generators/kubernetes/helm.py @@ -0,0 +1,36 @@ +import logging + +logger = logging.getLogger(__name__) + +from typing import Any + +from kapitan.inputs.helm import HelmChart +from kapitan.inputs.kadet import BaseObj + +from .common import KubernetesResource, kgenlib + + +class MyHelmChart(HelmChart): + def new(self): + for obj in self.load_chart(): + if obj: + self.root[ + f"{obj['metadata']['name'].lower()}-{obj['kind'].lower().replace(':','-')}" + ] = BaseObj.from_dict(obj) + + +@kgenlib.register_generator(path="charts") +class HelmChartGenerator(kgenlib.BaseStore): + name: str + config: Any + + def body(self): + helm_config = self.config.to_dict() + chart_name = self.config.helm_params.name + + rendered_chart = MyHelmChart(**helm_config) + + for helm_resource in rendered_chart.root.values(): + resource = KubernetesResource.from_baseobj(helm_resource) + resource.add_label("app.kapicorp.dev/component", chart_name) + self.add(resource) diff --git a/system/generators/kubernetes/istio.py b/system/generators/kubernetes/istio.py new file mode 100644 index 00000000..df607683 --- /dev/null +++ b/system/generators/kubernetes/istio.py @@ -0,0 +1,18 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource + + +class IstioPolicy(KubernetesResource): + kind = "IstioPolicy" + api_version = "authentication.istio.io/v1alpha1" + + def body(self): + super().body() + config = self.config + name = self.name + self.root.spec.origins = config.istio_policy.policies.origins + self.root.spec.principalBinding = "USE_ORIGIN" + self.root.spec.targets = [{"name": name}] diff --git a/system/generators/kubernetes/networking.py b/system/generators/kubernetes/networking.py new file mode 100644 index 00000000..37f5575d --- /dev/null +++ b/system/generators/kubernetes/networking.py @@ -0,0 +1,282 @@ +import logging + +logger = logging.getLogger(__name__) + +from typing import Any + +from .common import KubernetesResource, kgenlib + + +class Ingress(KubernetesResource): + kind = "Ingress" + api_version = "networking.k8s.io/v1" + + def new(self): + super().new() + + def body(self): + super().body() + config = self.config + + self.add_annotations(config.get("annotations", {})) + self.add_labels(config.get("labels", {})) + if "default_backend" in config: + self.root.spec.backend.service.name = config.default_backend.get("name") + self.root.spec.backend.service.port = config.default_backend.get("port", 80) + if "paths" in config: + host = config.host + paths = config.paths + self.root.spec.setdefault("rules", []).extend( + [{"host": host, "http": {"paths": paths}}] + ) + if "rules" in config: + self.root.spec.setdefault("rules", []).extend(config.rules) + if config.tls: + self.root.spec.tls = config.tls + + +class GoogleManagedCertificate(KubernetesResource): + kind = "ManagedCertificate" + api_version = "networking.gke.io/v1beta1" + + def body(self): + super().body() + config = self.config + self.root.spec.domains = config.get("domains", []) + + +class NetworkPolicy(KubernetesResource): + kind = "NetworkPolicy" + api_version = "networking.k8s.io/v1" + + def body(self): + super().body() + policy = self.config + workload = self.workload + self.root.spec.podSelector.matchLabels = workload.root.metadata.labels + self.root.spec.ingress = policy.ingress + self.root.spec.egress = policy.egress + if self.root.spec.ingress: + 
self.root.spec.setdefault("policyTypes", []).append("Ingress") + + if self.root.spec.egress: + self.root.spec.setdefault("policyTypes", []).append("Egress") + + +class HealthCheckPolicy(KubernetesResource): + kind = "HealthCheckPolicy" + api_version = "networking.gke.io/v1" + + def body(self): + super().body() + config = self.config + + self.root.spec.default.logConfig.enabled = config.healthcheck.get("log", False) + + config_spec = self.root.spec.default.config + container_port = config.healthcheck.get("container_port", self.name) + config_spec.type = config.healthcheck.get("type", "HTTP").upper() + if config_spec.type == "HTTP": + config_spec.httpHealthCheck.portSpecification = "USE_FIXED_PORT" + config_spec.httpHealthCheck.port = container_port + config_spec.httpHealthCheck.requestPath = config.healthcheck.get( + "path", config.get("path", "/") + ) + + self.root.spec.targetRef = { + "group": "", + "kind": "Service", + "name": config.get("service"), + } + + +class Gateway(KubernetesResource): + kind = "Gateway" + api_version = "gateway.networking.k8s.io/v1beta1" + + def body(self): + super().body() + self.root.spec.gatewayClassName = self.config.type + default_listener = {"name": "http", "protocol": "HTTP", "port": 80} + + certificate = self.config.get("certificate", None) + if certificate: + default_listener = { + "name": "https", + "protocol": "HTTPS", + "port": 443, + "tls": { + "mode": "Terminate", + "certificateRefs": [{"name": certificate}], + }, + } + + self.root.spec.listeners = self.config.listeners or [default_listener] + + if self.config.get("named_address"): + self.root.spec.setdefault("addresses", []).append( + {"type": "NamedAddress", "value": self.config.get("named_address")} + ) + + +class GCPGatewayPolicy(KubernetesResource): + kind = "GCPGatewayPolicy" + api_version = "networking.gke.io/v1" + gateway: Gateway = None + + def body(self): + super().body() + self.root.spec.default.allowGlobalAccess = self.config.get( + "allow_global_access", False + ) + self.root.spec.targetRef = { + "group": "gateway.networking.k8s.io", + "kind": "Gateway", + "name": self.gateway.name, + } + + +class HTTPRoute(KubernetesResource): + kind = "HTTPRoute" + api_version = "gateway.networking.k8s.io/v1beta1" + gateway: Gateway = None + + def body(self): + super().body() + self.root.spec.setdefault("parentRefs", []).append( + { + "kind": "Gateway", + "name": self.gateway.name, + } + ) + + self.root.spec.hostnames = self.config.get("hostnames", []) + + for service_name, service_config in self.config.get("services", {}).items(): + match = {"path": {"value": service_config.get("path", "/")}} + rule = { + "backendRefs": [ + { + "name": service_config.get("service", service_name), + "port": service_config.get("port", 80), + } + ], + "matches": [match], + } + self.root.spec.setdefault("rules", []).append(rule) + + +@kgenlib.register_generator( + path="generators.kubernetes.gateway", +) +class GatewayGenerator(kgenlib.BaseStore): + def body(self): + filename = f"{self.name}-gateway.yaml" + gateway = Gateway(name=self.name, config=self.config) + gateway.filename = filename + self.add(gateway) + + policy = GCPGatewayPolicy(name=self.name, config=self.config, gateway=gateway) + policy.filename = filename + self.add(policy) + + for route_id, route_config in self.config.get("routes", {}).items(): + route_name = f"{self.name}-{route_id}" + route = HTTPRoute(name=route_name, config=route_config, gateway=gateway) + route.filename = filename + self.add(route) + + for service_id, service_config in 
route_config.get("services", {}).items(): + healthcheck = HealthCheckPolicy( + name=f"{route_name}-{service_id}", + config=service_config, + gateway=gateway, + ) + self.add(healthcheck) + + +class Service(KubernetesResource): + kind = "Service" + api_version = "v1" + + service_spec: dict + + def new(self): + super().new() + + def body(self): + config = self.config + workload = self.workload.root + service_spec = self.service_spec + + self.name = service_spec.get("service_name", self.name) + super().body() + + self.add_labels(config.get("labels", {})) + self.add_annotations(service_spec.annotations) + self.root.spec.setdefault("selector", {}).update( + workload.spec.template.metadata.labels + ) + self.root.spec.setdefault("selector", {}).update(service_spec.selectors) + self.root.spec.type = service_spec.type + if service_spec.get("publish_not_ready_address", False): + self.root.spec.publishNotReadyAddresses = True + if service_spec.get("headless", False): + self.root.spec.clusterIP = "None" + self.root.spec.clusterIP + self.root.spec.sessionAffinity = service_spec.get("session_affinity", "None") + all_ports = [config.ports] + [ + container.ports + for container in config.additional_containers.values() + if "ports" in container + ] + + self.exposed_ports = {} + + for port in all_ports: + for port_name in port.keys(): + if ( + not service_spec.expose_ports + or port_name in service_spec.expose_ports + ): + self.exposed_ports.update(port) + + for port_name in sorted(self.exposed_ports): + self.root.spec.setdefault("ports", []) + port_spec = self.exposed_ports[port_name] + port_spec["name"] = port_name + service_port = port_spec.get("service_port", None) + if service_port: + self.root.spec.setdefault("ports", []).append( + { + "name": port_name, + "port": service_port, + "targetPort": port_name, + "protocol": port_spec.get("protocol", "TCP"), + } + ) + + +@kgenlib.register_generator(path="ingresses") +class IngressComponent(kgenlib.BaseStore): + name: str + config: Any + + def body(self): + name = self.name + config = self.config + ingress = Ingress(name=name, config=config) + self.add(ingress) + + if "managed_certificate" in config: + certificate_name = config.managed_certificate + additional_domains = config.get("additional_domains", []) + domains = [certificate_name] + additional_domains + ingress.add_annotations( + {"networking.gke.io/managed-certificates": certificate_name} + ) + self.add( + GoogleManagedCertificate( + name=certificate_name, config={"domains": domains} + ) + ) diff --git a/system/generators/kubernetes/prometheus.py b/system/generators/kubernetes/prometheus.py new file mode 100644 index 00000000..f65d6554 --- /dev/null +++ b/system/generators/kubernetes/prometheus.py @@ -0,0 +1,54 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource, kgenlib + + +@kgenlib.register_generator( + path="generators.prometheus.gen_pod_monitoring", + apply_patches=["generators.prometheus.defaults.gen_pod_monitoring"], +) +class PodMonitoring(KubernetesResource): + kind = "PodMonitoring" + api_version = "monitoring.googleapis.com/v1" + + def body(self): + super().body() + self.root.spec = self.config + + +class PrometheusRule(KubernetesResource): + kind = "PrometheusRule" + api_version = "monitoring.coreos.com/v1" + + def body(self): + super().body() + name = self.name + config = self.config + self.root.spec.setdefault("groups", []).append( + {"name": name, "rules": config.prometheus_rules.rules} + ) + + +class ServiceMonitor(KubernetesResource): + 
kind = "ServiceMonitor" + api_version = "monitoring.coreos.com/v1" + + def new(self): + super().new() + + def body(self): + name = self.name + workload = self.workload + self.name = "{}-metrics".format(name) + + super().body() + name = self.name + config = self.config + self.root.spec.endpoints = config.service_monitors.endpoints + self.root.spec.jobLabel = name + self.root.spec.namespaceSelector.matchNames = [self.namespace] + self.root.spec.selector.matchLabels = ( + workload.root.spec.template.metadata.labels + ) diff --git a/system/generators/kubernetes/rbac.py b/system/generators/kubernetes/rbac.py new file mode 100644 index 00000000..de191627 --- /dev/null +++ b/system/generators/kubernetes/rbac.py @@ -0,0 +1,115 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import KubernetesResource, kgenlib + + +class Role(KubernetesResource): + kind = "Role" + api_version = "rbac.authorization.k8s.io/v1" + + def body(self): + super().body() + config = self.config + self.root.rules = config["role"]["rules"] + + +class RoleBinding(KubernetesResource): + kind = "RoleBinding" + api_version = "rbac.authorization.k8s.io/v1" + + def body(self): + super().body() + config = self.config + sa = self.sa + name = config.get("name", self.name) + default_role_ref = { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "Role", + "name": name, + } + default_subject = [ + { + "kind": "ServiceAccount", + "name": sa.name, + "namespace": sa.namespace, + } + ] + self.root.roleRef = config.get("roleRef", default_role_ref) + self.root.subjects = config.get("subject", default_subject) + + +class ClusterRole(KubernetesResource): + kind = "ClusterRole" + api_version = "rbac.authorization.k8s.io/v1" + + def new(self): + super().new() + + def body(self): + super().body() + config = self.config + self.root.rules = config.cluster_role.rules + + +class ClusterRoleBinding(KubernetesResource): + kind = "ClusterRoleBinding" + api_version = "rbac.authorization.k8s.io/v1" + + def body(self): + super().body() + config = self.config + sa = self.sa + default_role_ref = { + "apiGroup": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "name": config.name, + } + default_subject = [ + { + "kind": "ServiceAccount", + "name": sa.name, + "namespace": sa.namespace, + } + ] + self.root.roleRef = config.get("roleRef", default_role_ref) + self.root.subjects = config.get("subject", default_subject) + + +@kgenlib.register_generator(path="generators.kubernetes.service_accounts") +class ServiceAccountGenerator(kgenlib.BaseStore): + def body(self): + config = self.config + name = config.get("name", self.name) + namespace = config["namespace"] + sa = ServiceAccount(name=name, config=config) + sa.add_annotations(config.annotations) + sa.add_labels(config.labels) + + roles = config.get("roles") + objs = [sa] + if roles is not None: + role_cfg = {"role": {"rules": roles}} + r = Role(name=f"{name}-role", namespace=namespace, config=role_cfg) + rb_cfg = {"name": r.name} + rb = RoleBinding( + name=f"{name}-role-binding", namespace=namespace, config=rb_cfg, sa=sa + ) + + objs += [r, rb] + + self.add_list(objs) + + +class ServiceAccount(KubernetesResource): + kind = "ServiceAccount" + api_version = "v1" + + def new(self): + super().new() + + def body(self): + super().body() + config = self.config + self.add_annotations(config.service_account.annotations) diff --git a/components/generators/kubernetes/storage.py b/system/generators/kubernetes/storage.py similarity index 62% rename from 
components/generators/kubernetes/storage.py rename to system/generators/kubernetes/storage.py index 2eec677b..c02b9c70 100644 --- a/components/generators/kubernetes/storage.py +++ b/system/generators/kubernetes/storage.py @@ -1,14 +1,15 @@ +import logging + +logger = logging.getLogger(__name__) + import base64 import hashlib import logging import os -from kapitan.inputs.kadet import Dict, load_from_search_paths +from kapitan.inputs.kadet import Dict -from .common import KubernetesResource, ResourceType - -logger = logging.getLogger(__name__) -kgenlib = load_from_search_paths("generators") +from .common import KubernetesResource, kgenlib class SharedConfig(KubernetesResource): @@ -25,20 +26,18 @@ class SharedConfig(KubernetesResource): def encode_string(unencoded_string): return base64.b64encode(unencoded_string.encode("ascii")).decode("ascii") - def setup_metadata(self, inventory): - namespace = inventory.parameters.get("namespace", None) - + def setup_metadata(self): + namespace = None if self.component: namespace = self.component.get("namespace", namespace) namespace = self.config.get("namespace", namespace) if namespace: - self.add_namespace(namespace) + self.set_namespace(namespace) self.add_annotations(self.config.get("annotations", {}).copy()) self.add_labels(self.config.get("labels", {}).copy()) - self.setup_global_defaults(inventory=inventory) self.items = self.config["items"] @@ -77,9 +76,7 @@ def add_item(self, key, value, request_encode=False, stringdata=False): self.root[field][key] = self.encode_string(value) if encode else value def add_string_data(self, string_data, encode=False, stringdata=True): - for key, spec in string_data.items(): - if "value" in spec: value = spec.get("value") if "template" in spec: @@ -106,8 +103,65 @@ def versioning(self, enabled=False): class ConfigMap(SharedConfig): - resource_type = ResourceType(kind="ConfigMap", api_version="v1", id="config_map") + kind = "ConfigMap" + api_version = "v1" class Secret(SharedConfig): - resource_type = ResourceType(kind="Secret", api_version="v1", id="secret") + kind = "Secret" + api_version = "v1" + + +class ComponentConfig(ConfigMap): + config: Dict + + def body(self): + super().body() + self.setup_metadata() + self.versioning_enabled = self.config.get("versioned", False) + if getattr(self, "workload", None) and self.workload.root.metadata.name: + self.add_label("name", self.workload.root.metadata.name) + self.add_data(self.config.data) + self.add_directory(self.config.directory, encode=False) + if getattr(self, "workload", None): + self.workload.add_volumes_for_object(self) + + +class ComponentSecret(Secret): + config: Dict + + def new(self): + super().new() + + def body(self): + super().body() + self.root.type = self.config.get("type", "Opaque") + self.versioning_enabled = self.config.get("versioned", False) + if getattr(self, "workload", None) and self.workload.root.metadata.name: + self.add_label("name", self.workload.root.metadata.name) + self.setup_metadata() + if self.config.data: + self.add_data(self.config.data) + if self.config.string_data: + self.add_string_data(self.config.string_data) + self.add_directory(self.config.directory, encode=True) + if getattr(self, "workload", None): + self.workload.add_volumes_for_object(self) + + +@kgenlib.register_generator( + path="generators.kubernetes.secrets", + apply_patches=["generators.manifest.default_resource"], +) +class SecretGenerator(kgenlib.BaseStore): + def body(self): + self.add(ComponentSecret(name=self.name, config=self.config)) + + 
+@kgenlib.register_generator( + path="generators.kubernetes.config_maps", + apply_patches=["generators.manifest.default_resource"], +) +class ConfigGenerator(kgenlib.BaseStore): + def body(self): + self.add(ComponentConfig(name=self.name, config=self.config)) diff --git a/components/generators/kubernetes/__init__.py b/system/generators/kubernetes/workloads.py similarity index 60% rename from components/generators/kubernetes/__init__.py rename to system/generators/kubernetes/workloads.py index 6b00489a..bfe5d62d 100644 --- a/components/generators/kubernetes/__init__.py +++ b/system/generators/kubernetes/workloads.py @@ -1,27 +1,67 @@ import logging + +logger = logging.getLogger(__name__) + from typing import Any -from kapitan.inputs.helm import HelmChart -from kapitan.inputs.kadet import ( - BaseModel, - BaseObj, - CompileError, - Dict, - inventory, - load_from_search_paths, +from kapitan.inputs.kadet import BaseModel, BaseObj, CompileError + +from .autoscaling import ( + HorizontalPodAutoscaler, + KedaScaledObject, + PodDisruptionBudget, + VerticalPodAutoscaler, ) +from .base import MutatingWebhookConfiguration +from .common import KubernetesResource, kgenlib +from .networking import HealthCheckPolicy, NetworkPolicy, Service +from .prometheus import PrometheusRule, ServiceMonitor +from .rbac import ClusterRole, ClusterRoleBinding, Role, RoleBinding, ServiceAccount +from .storage import ComponentConfig, ComponentSecret + -kgenlib = load_from_search_paths("generators") +class GenerateMultipleObjectsForClass(kgenlib.BaseStore): + """Helper to generate multiple classes -from .common import KubernetesResource, ResourceType -from .networking import NetworkPolicy -from .rbac import ClusterRole, ClusterRoleBinding, Role, RoleBinding -from .storage import ConfigMap, Secret + As a convention for generators we have that if you define only one policy/config/secret configuration + for your component, then the name of that resource will be the component {name} itself. -logger = logging.getLogger(__name__) + However if there are multiple objects being defined, then we call them: {name}-{object_name} + + This class helps achieve that for policies/config/secrets to avoid duplication. + """ + + component_config: dict + generating_class: Any + workload: Any + + def body(self): + component_config = self.component_config + name = self.name + objects_configs = self.config + generating_class = self.generating_class + workload = self.workload + for object_name, object_config in objects_configs.items(): + if object_config == None: + raise CompileError( + f"error with '{object_name}' for component {name}: configuration cannot be empty!" 
+ ) + + if len(objects_configs.items()) == 1: + name = f"{self.name}" + else: + name = f"{self.name}-{object_name}" + + generated_object = generating_class( + name=name, + object_name=object_name, + config=object_config, + component=component_config, + workload=workload, + ) -inv = inventory(lazy=True) + self.add(generated_object) class Workload(KubernetesResource): @@ -40,10 +80,6 @@ def create_workflow(cls, name, config): else: raise () - if config.get("namespace") or inv.parameters.get("namespace"): - workload.root.metadata.namespace = config.setdefault( - "namespace", inv.parameters.namespace - ) workload.add_annotations(config.setdefault("annotations", {})) workload.root.spec.template.metadata.annotations = config.get( "pod_annotations", {} @@ -73,9 +109,9 @@ def create_workflow(cls, name, config): ] workload.add_init_containers(init_containers) - if config.image_pull_secrets or inv.parameters.image_pull_secrets: + if config.image_pull_secrets: workload.root.spec.template.spec.imagePullSecrets = config.get( - "image_pull_secrets", inv.parameters.image_pull_secrets + "image_pull_secrets" ) workload.root.spec.template.spec.dnsPolicy = config.dns_policy workload.root.spec.template.spec.terminationGracePeriodSeconds = config.get( @@ -185,279 +221,18 @@ def add_volumes_for_object(self, object): key: { "defaultMode": object.config.get("default_mode", 420), name_key: rendered_name, - "items": [{"key": value, "path": value} for value in object.items], + "items": [ + {"key": value, "path": value} + for value in object.config.get("items", []) + ], }, } ) -@kgenlib.register_generator(path="generators.kubernetes.service_accounts") -class ServiceAccountGenerator(kgenlib.BaseStore): - def body(self): - config = self.config - name = config.get("name", self.name) - sa = ServiceAccount(name=name, config=config) - sa.add_annotations(config.annotations) - sa.add_labels(config.labels) - self.add(sa) - - -class ServiceAccount(KubernetesResource): - resource_type = ResourceType( - kind="ServiceAccount", api_version="v1", id="service_account" - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - self.add_annotations(config.service_account.annotations) - if config.image_pull_secrets or inv.parameters.pull_secret.name: - self.root.imagePullSecrets = [ - { - "name": config.get( - "image_pull_secrets", inv.parameters.pull_secret.name - ) - } - ] - - -class ComponentConfig(ConfigMap): - config: Dict - - def new(self): - super().new() - - def body(self): - super().body() - self.versioning_enabled = self.config.get("versioned", False) - self.setup_metadata(inventory=inv) - if getattr(self, "workload", None) and self.workload.root.metadata.name: - self.add_label("name", self.workload.root.metadata.name) - self.add_data(self.config.data) - self.add_directory(self.config.directory, encode=False) - if getattr(self, "workload", None): - self.workload.add_volumes_for_object(self) - - -@kgenlib.register_generator(path="generators.kubernetes.config_maps") -class ConfigGenerator(kgenlib.BaseStore): - def body(self): - self.add(ComponentConfig(name=self.name, config=self.config)) - - -class ComponentSecret(Secret): - config: Dict - - def new(self): - super().new() - - def body(self): - super().body() - self.root.type = self.config.get("type", "Opaque") - self.versioning_enabled = self.config.get("versioned", False) - if getattr(self, "workload", None) and self.workload.root.metadata.name: - self.add_label("name", self.workload.root.metadata.name) - 
self.setup_metadata(inventory=inv) - if self.config.data: - self.add_data(self.config.data) - if self.config.string_data: - self.add_string_data(self.config.string_data) - self.add_directory(self.config.directory, encode=True) - if getattr(self, "workload", None): - self.workload.add_volumes_for_object(self) - - -@kgenlib.register_generator(path="generators.kubernetes.secrets") -class SecretGenerator(kgenlib.BaseStore): - def body(self): - self.add(ComponentSecret(name=self.name, config=self.config)) - - -class Service(KubernetesResource): - - resource_type = ResourceType(kind="Service", api_version="v1", id="service") - workload: Workload - service_spec: dict - - def new(self): - super().new() - - def body(self): - config = self.config - workload = self.workload.root - service_spec = self.service_spec - - self.name = service_spec.get("service_name", self.name) - super().body() - - self.add_labels(config.get("labels", {})) - self.add_annotations(service_spec.annotations) - self.root.spec.setdefault("selector", {}).update( - workload.spec.template.metadata.labels - ) - self.root.spec.setdefault("selector", {}).update(service_spec.selectors) - self.root.spec.type = service_spec.type - if service_spec.get("publish_not_ready_address", False): - self.root.spec.publishNotReadyAddresses = True - if service_spec.get("headless", False): - self.root.spec.clusterIP = "None" - self.root.spec.clusterIP - self.root.spec.sessionAffinity = service_spec.get("session_affinity", "None") - all_ports = [config.ports] + [ - container.ports - for container in config.additional_containers.values() - if "ports" in container - ] - - exposed_ports = {} - - for port in all_ports: - for port_name in port.keys(): - if ( - not service_spec.expose_ports - or port_name in service_spec.expose_ports - ): - exposed_ports.update(port) - - for port_name in sorted(exposed_ports): - self.root.spec.setdefault("ports", []) - port_spec = exposed_ports[port_name] - service_port = port_spec.get("service_port", None) - if service_port: - self.root.spec.setdefault("ports", []).append( - { - "name": port_name, - "port": service_port, - "targetPort": port_name, - "protocol": port_spec.get("protocol", "TCP"), - } - ) - - -class Ingress(KubernetesResource): - resource_type = ResourceType( - kind="Ingress", api_version="networking.k8s.io/v1", id="ingress" - ) - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - - self.add_annotations(config.get("annotations", {})) - self.add_labels(config.get("labels", {})) - if "default_backend" in config: - self.root.spec.backend.service.name = config.default_backend.get("name") - self.root.spec.backend.service.port = config.default_backend.get("port", 80) - if "paths" in config: - host = config.host - paths = config.paths - self.root.spec.setdefault("rules", []).extend( - [{"host": host, "http": {"paths": paths}}] - ) - if "rules" in config: - self.root.spec.setdefault("rules", []).extend(config.rules) - if config.tls: - self.root.spec.tls = config.tls - - -class GoogleManagedCertificate(KubernetesResource): - resource_type = ResourceType( - kind="ManagedCertificate", - api_version="networking.gke.io/v1beta1", - id="google_managed_certificate", - ) - - def body(self): - super().body() - config = self.config - self.root.spec.domains = config.get("domains", []) - - -@kgenlib.register_generator(path="certmanager.issuer") -class CertManagerIssuer(KubernetesResource): - resource_type = ResourceType( - kind="Issuer", api_version="cert-manager.io/v1", 
id="cert_manager_issuer" - ) - - def body(self): - config = self.config - super().body() - self.root.spec = config.get("spec") - - -@kgenlib.register_generator(path="certmanager.cluster_issuer") -class CertManagerClusterIssuer(KubernetesResource): - resource_type = ResourceType( - kind="ClusterIssuer", - api_version="cert-manager.io/v1", - id="cert_manager_cluster_issuer", - ) - - def body(self): - config = self.config - super().body() - self.root.spec = config.get("spec") - - -@kgenlib.register_generator(path="certmanager.certificate") -class CertManagerCertificate(KubernetesResource): - resource_type = ResourceType( - kind="Certificate", - api_version="cert-manager.io/v1", - id="cert_manager_certificate", - ) - - def body(self): - config = self.config - super().body() - self.root.spec = config.get("spec") - - -class IstioPolicy(KubernetesResource): - resource_type = ResourceType( - kind="IstioPolicy", - api_version="authentication.istio.io/v1alpha1", - id="istio_policy", - ) - - def body(self): - super().body() - config = self.config - name = self.name - self.root.spec.origins = config.istio_policy.policies.origins - self.root.spec.principalBinding = "USE_ORIGIN" - self.root.spec.targets = [{"name": name}] - - -@kgenlib.register_generator(path="generators.kubernetes.namespace") -class NamespaceGenerator(kgenlib.BaseStore): - def body(self): - name = self.config.get("name", self.name) - self.add(Namespace(name=name, config=self.config)) - - -class Namespace(KubernetesResource): - resource_type = ResourceType(kind="Namespace", api_version="v1", id="namespace") - - def body(self): - super().body() - config = self.config - labels = config.get("labels", {}) - annotations = config.get("annotations", {}) - self.add_labels(labels) - self.add_annotations(annotations) - - class Deployment(Workload): - resource_type = ResourceType( - kind="Deployment", api_version="apps/v1", id="deployment" - ) + kind = "Deployment" + api_version = "apps/v1" def body(self): default_strategy = { @@ -488,9 +263,8 @@ def body(self): class StatefulSet(Workload): - resource_type = ResourceType( - kind="StatefulSet", api_version="apps/v1", id="stateful_set" - ) + kind = "StatefulSet" + api_version = "apps/v1" def body(self): default_strategy = {} @@ -520,9 +294,8 @@ def body(self): class DaemonSet(Workload): - resource_type = ResourceType( - kind="DaemonSet", api_version="apps/v1", id="daemon_set" - ) + kind = "DaemonSet" + api_version = "apps/v1" def body(self): super().body() @@ -547,7 +320,8 @@ def body(self): class Job(Workload): - resource_type = ResourceType(kind="Job", api_version="batch/v1", id="job") + kind = "Job" + api_version = "batch/v1" def body(self): super().body() @@ -568,7 +342,8 @@ def body(self): class CronJob(Workload): - resource_type = ResourceType(kind="Job", api_version="batch/v1beta1", id="cronjob") + kind = "CronJob" + api_version = "batch/v1" job: Job def body(self): @@ -597,7 +372,6 @@ def find_key_in_config(key, configs): ) def process_envs(self, config): - name = self.name for env_name, value in sorted(config.env.items()): @@ -747,203 +521,9 @@ def body(self): self.process_envs(config) -class GenerateMultipleObjectsForClass(kgenlib.BaseStore): - """Helper to generate multiple classes - - As a convention for generators we have that if you define only one policy/config/secret configuration - for your component, then the name of that resource will be the component {name} itself. 
- - However if there are multiple objects being defined, then we call them: {name}-{object_name} - - This class helps achieve that for policies/config/secrets to avoid duplication. - """ - - component_config: dict - generating_class: Any - workload: Any - - def body(self): - component_config = self.component_config - name = self.name - objects_configs = self.config - generating_class = self.generating_class - workload = self.workload - - for object_name, object_config in objects_configs.items(): - if object_config == None: - raise CompileError( - f"error with '{object_name}' for component {name}: configuration cannot be empty!" - ) - - if len(objects_configs.items()) == 1: - name = f"{self.name}" - else: - name = f"{self.name}-{object_name}" - - generated_object = generating_class( - name=name, - object_name=object_name, - config=object_config, - component=component_config, - workload=workload, - ) - - self.add(generated_object) - - -class PrometheusRule(KubernetesResource): - resource_type = ResourceType( - kind="PrometheusRule", - api_version="monitoring.coreos.com/v1", - id="prometheus_rule", - ) - - def body(self): - super().body() - name = self.name - config = self.config - self.root.spec.setdefault("groups", []).append( - {"name": name, "rules": config.prometheus_rules.rules} - ) - - -class BackendConfig(KubernetesResource): - resource_type = ResourceType( - kind="BackendConfig", api_version="cloud.google.com/v1", id="backend_config" - ) - - def body(self): - super().body() - self.root.spec = self.config.backend_config - - -class ServiceMonitor(KubernetesResource): - resource_type = ResourceType( - kind="ServiceMonitor", - api_version="monitoring.coreos.com/v1", - id="service_monitor", - ) - workload: Workload - - def new(self): - super().new() - - def body(self): - # TODO(ademaria) This name mangling is here just to simplify diff. 
- # Change it once done - name = self.name - workload = self.workload - self.name = "{}-metrics".format(name) - - super().body() - name = self.name - config = self.config - self.root.spec.endpoints = config.service_monitors.endpoints - self.root.spec.jobLabel = name - self.root.spec.namespaceSelector.matchNames = [self.namespace] - self.root.spec.selector.matchLabels = ( - workload.root.spec.template.metadata.labels - ) - - -class MutatingWebhookConfiguration(KubernetesResource): - resource_type = ResourceType( - kind="MutatingWebhookConfiguration", - api_version="admissionregistration.k8s.io/v1", - id="mutating_webhook_configuration", - ) - - def new(self): - super().new() - - def body(self): - super().body() - name = self.name - config = self.config - self.root.webhooks = config.webhooks - - -class PodDisruptionBudget(KubernetesResource): - resource_type = ResourceType( - kind="PodDisruptionBudget", - api_version="policy/v1beta1", - id="pod_disruption_budget", - ) - workload: Workload - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - workload = self.workload - self.add_namespace(config.get("namespace", inv.parameters.namespace)) - if config.auto_pdb: - self.root.spec.maxUnavailable = 1 - else: - self.root.spec.minAvailable = config.pdb_min_available - self.root.spec.selector.matchLabels = ( - workload.root.spec.template.metadata.labels - ) - - -class VerticalPodAutoscaler(KubernetesResource): - resource_type = ResourceType( - kind="VerticalPodAutoscaler", - api_version="autoscaling.k8s.io/v1beta2", - id="vertical_pod_autoscaler", - ) - workload: Workload - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - workload = self.workload - self.add_labels(workload.root.metadata.labels) - self.root.spec.targetRef.apiVersion = workload.api_version - self.root.spec.targetRef.kind = workload.kind - self.root.spec.targetRef.name = workload.name - self.root.spec.updatePolicy.updateMode = config.vpa - - # TODO(ademaria) Istio blacklist is always desirable but add way to make it configurable. 
- self.root.spec.resourcePolicy.containerPolicies = [ - {"containerName": "istio-proxy", "mode": "Off"} - ] - - -class HorizontalPodAutoscaler(KubernetesResource): - resource_type = ResourceType( - kind="HorizontalPodAutoscaler", - api_version="autoscaling.k8s.io/v2beta2", - id="horizontal_pod_autoscaler", - ) - workload: Workload - - def new(self): - super().new() - - def body(self): - super().body() - config = self.config - workload = self.workload - self.add_namespace(inv.parameters.namespace) - self.add_labels(workload.root.metadata.labels) - self.root.spec.scaleTargetRef.apiVersion = workload.api_version - self.root.spec.scaleTargetRef.kind = workload.kind - self.root.spec.scaleTargetRef.name = workload.name - self.root.spec.minReplicas = config.hpa.min_replicas - self.root.spec.maxReplicas = config.hpa.max_replicas - self.root.spec.metrics = config.hpa.metrics - - class PodSecurityPolicy(KubernetesResource): - resource_type = ResourceType( - kind="PodSecurityPolicy", api_version="policy/v1beta1", id="pod_security_policy" - ) + kind = "PodSecurityPolicy" + api_version = "policy/v1beta1" workload: Workload def new(self): @@ -965,31 +545,6 @@ def body(self): } -@kgenlib.register_generator(path="ingresses") -class IngressComponent(kgenlib.BaseStore): - name: str - config: Any - - def body(self): - name = self.name - config = self.config - ingress = Ingress(name=name, config=config) - self.add(ingress) - - if "managed_certificate" in config: - certificate_name = config.managed_certificate - additional_domains = config.get("additional_domains", []) - domains = [certificate_name] + additional_domains - ingress.add_annotations( - {"networking.gke.io/managed-certificates": certificate_name} - ) - self.add( - GoogleManagedCertificate( - name=certificate_name, config={"domains": domains} - ) - ) - - @kgenlib.register_generator( path="components", apply_patches=[ @@ -1033,15 +588,6 @@ def body(self): self.add(configs) self.add(secrets) - if ( - config.vpa - and inv.parameters.get("enable_vpa", True) - and config.type != "job" - ): - vpa = VerticalPodAutoscaler(name=name, config=config, workload=workload) - vpa.add_label("app.kapicorp.dev/component", name) - self.add(vpa) - if config.pdb_min_available: pdb = PodDisruptionBudget(name=name, config=config, workload=workload) pdb.add_label("app.kapicorp.dev/component", name) @@ -1052,6 +598,18 @@ def body(self): hpa.add_label("app.kapicorp.dev/component", name) self.add(hpa) + if config.get("vpa", False): + vpa = VerticalPodAutoscaler(name=name, config=config, workload=workload) + vpa.add_label("app.kapicorp.dev/component", name) + self.add(vpa) + + if config.keda_scaled_object: + scaled_object = KedaScaledObject( + name=name, config=config, workload=workload + ) + scaled_object.add_label("app.kapicorp.dev/component", name) + self.add(scaled_object) + if config.type != "job": if config.pdb_min_available or config.auto_pdb: pdb = PodDisruptionBudget(name=name, config=config, workload=workload) @@ -1076,6 +634,7 @@ def body(self): service_spec=config.service, ) service.add_label("app.kapicorp.dev/component", name) + self.add(service) if config.additional_services: @@ -1143,37 +702,3 @@ def body(self): backend_config = BackendConfig(name=name, config=config) backend_config.add_label("app.kapicorp.dev/component", name) self.add(backend_config) - - -class MyHelmChart(HelmChart): - def new(self): - for obj in self.load_chart(): - if obj: - self.root[ - f"{obj['metadata']['name'].lower()}-{obj['kind'].lower().replace(':','-')}" - ] = BaseObj.from_dict(obj) 
- - -@kgenlib.register_generator(path="charts") -class HelmChartGenerator(kgenlib.BaseStore): - name: str - config: Any - - def body(self): - helm_config = self.config.to_dict() - chart_name = self.config.helm_params.name - - rendered_chart = MyHelmChart(**helm_config) - - for helm_resource in rendered_chart.root.values(): - resource = KubernetesResource.from_baseobj(helm_resource) - resource.add_label("app.kapicorp.dev/component", chart_name) - self.add(resource) - - -def main(input_params): - generator = kgenlib.BaseGenerator(inventory=inv) - store = generator.generate() - store.process_mutations(input_params.get("mutations", {})) - - return store.dump() diff --git a/system/generators/terraform/__init__.py b/system/generators/terraform/__init__.py new file mode 100644 index 00000000..e13f4ed0 --- /dev/null +++ b/system/generators/terraform/__init__.py @@ -0,0 +1,26 @@ +import logging + +from kapitan.inputs.kadet import inventory + +from .common import TerraformStore, kgenlib + +kgenlib.load_generators(__name__, __file__) + +logger = logging.getLogger(__name__) + + +def main(input_params): + target_inventory = inventory() + + defaults_path = "parameters.generators.terraform.defaults" + generator = kgenlib.BaseGenerator( + inventory=target_inventory, store=TerraformStore, defaults_path=defaults_path + ) + + store = generator.generate() + + # mutations are currently not supported for terraform + mutations = input_params.get("mutations", {}) + store.process_mutations(mutations) + + return store.dump() diff --git a/system/generators/terraform/common.py b/system/generators/terraform/common.py new file mode 100644 index 00000000..fb56cfd7 --- /dev/null +++ b/system/generators/terraform/common.py @@ -0,0 +1,142 @@ +import logging + +from kapitan.inputs.kadet import load_from_search_paths + +kgenlib = load_from_search_paths("kgenlib") +logger = logging.getLogger(__name__) + + +class TerraformStore(kgenlib.BaseStore): + def dump(self, output_filename=None): + """Return object dict/list.""" + for content in self.get_content_list(): + if output_filename: + output_format = output_filename + else: + output_format = getattr(content, "filename", "output") + + filename = output_format.format(content=content) + logging.debug(f"Adding {content.root} to {filename}") + kgenlib.merge(content.root, self.root.setdefault(filename, {})) + + return super().dump(already_processed=True) + + +class TerraformBlock(kgenlib.BaseContent): + block_type: str + type: str = None + id: str + defaults: dict = {} + config: dict = {} + + def new(self): + if self.type: + self.filename = f"{self.type}.tf" + self.provider = self.type.split("_")[0] + self.patch_config(f"provider.{self.provider}.{self.block_type}") + else: + self.filename = f"{self.block_type}.tf" + + self.patch_config(f"{self.type}") + + def patch_config(self, inventory_path: str) -> None: + """Apply patch to config""" + patch = kgenlib.findpath(self.defaults, inventory_path, {}) + logging.debug(f"Applying patch {inventory_path} for {self.id}: {patch}") + kgenlib.merge(patch, self.config) + + @property + def resource(self): + if self.type: + return self.root[self.block_type][self.type].setdefault(self.id, {}) + else: + return self.root[self.block_type].setdefault(self.id, {}) + + @resource.setter + def resource(self, value): + self.add(value) + + def set(self, config=None): + if config is None: + config = self.config + self.root[self.block_type][self.type].setdefault(self.id, config).update(config) + + def add(self, name, value): + 
self.root[self.block_type][self.type].setdefault(self.id, {})[name] = value + + def get_reference( + self, attr: str = None, wrap: bool = True, prefix: str = "" + ) -> str: + """Get reference or attribute reference for terraform resource + + Args: + attr (str, optional): The attribute to get. Defaults to None. + wrap (bool, optional): Whether to wrap the result. Defaults to True. + prefix (str, optional): Whether to prefix the result. Defaults to "". + + Raises: + TypeError: Unknown block_type + + Returns: + str: a reference or attribute reference for terraform, e.g. "${var.foo}" + """ + + if self.block_type in ("data", "resource"): + reference = f"{prefix}{self.type}.{self.id}" + elif self.block_type in ("local", "output", "variable"): + reference = f"{prefix}{self.id}" + else: + raise TypeError( + f"Cannot produced wrapped reference for block_type={self.block_type}" + ) + + if attr: + reference = f"{reference}.{attr}" + + if wrap: + return f"${{{reference}}}" + else: + return reference + + +class TerraformResource(TerraformBlock): + block_type = "resource" + + +class TerraformLocal(TerraformBlock): + block_type = "locals" + + def set_local(self, name, value): + self.root.locals.setdefault(name, value) + + def body(self): + if self.config: + config = self.config + name = config.get("name", self.id) + value = config.get("value", None) + if value: + self.set_local(name, value) + + +class TerraformData(TerraformBlock): + block_type = "data" + + def body(self): + config = self.config + name = config.get("name", self.id) + value = config.get("value") + self.root.data.setdefault(name, value) + + +class TerraformProvider(TerraformBlock): + block_type = "provider" + + def add(self, name, value): + self.root.setdefault(self.block_type, {}).setdefault(name, value)[name] = value + + def set(self, config=None): + if config is None: + config = self.config + self.root.setdefault(self.block_type, {}).setdefault(self.id, config).update( + config + ) diff --git a/system/generators/terraform/github.py b/system/generators/terraform/github.py new file mode 100644 index 00000000..6db235f3 --- /dev/null +++ b/system/generators/terraform/github.py @@ -0,0 +1,57 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import TerraformResource, TerraformStore, kgenlib + + +@kgenlib.register_generator( + path="terraform.gen_github_repository", + apply_patches=["generators.terraform.defaults.gen_github_repository"], +) +class GenGitHubRepository(TerraformStore): + def body(self): + resource_id = self.id + config = self.config + + branch_protection_config = config.pop("branch_protection", {}) + deploy_keys_config = config.pop("deploy_keys", {}) + + resource_name = self.name + logging.debug(f"Processing github_repository {resource_name}") + repository = TerraformResource( + id=resource_id, + type="github_repository", + config=config, + defaults=self.defaults, + ) + repository.set(config) + repository.filename = "github_repository.tf" + + self.add(repository) + + for branches_name, branches_config in branch_protection_config.items(): + logger.debug(f"Processing branch protection for {branches_name}") + branch_protection = TerraformResource( + id=f"{resource_id}_{branches_name}", + type="github_branch_protection", + config=branches_config, + defaults=self.defaults, + ) + branch_protection.filename = "github_branch_protection.tf" + branch_protection.set(branch_protection.config) + branch_protection.add("repository_id", repository.get_reference("node_id")) + self.add(branch_protection) + + for 
deploy_key_name, deploy_key_branches in deploy_keys_config.items(): + logger.debug(f"Processing deploy keys for {deploy_key_name}") + deploy_key = TerraformResource( + id=f"{resource_id}_{deploy_key_name}", + type="github_repository_deploy_key", + config=deploy_key_branches, + defaults=self.defaults, + ) + deploy_key.filename = "github_deploy_key.tf" + deploy_key.set(deploy_key.config) + deploy_key.add("repository", repository.get_reference("name")) + self.add(deploy_key) diff --git a/system/generators/terraform/google.py b/system/generators/terraform/google.py new file mode 100644 index 00000000..d738c23d --- /dev/null +++ b/system/generators/terraform/google.py @@ -0,0 +1,504 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import TerraformResource, TerraformStore, kgenlib + + +class GoogleResource(TerraformResource): + def body(self): + self.resource.project = self.config.get("project") + super().body() + + +@kgenlib.register_generator(path="ingresses") +class GenIngressResources(TerraformStore): + def body(self): + config = self.config + id = self.id + self.filename = "ingresses_resources.tf" + + if config.get("dns", False): + dns_config = config["dns"] + for rule in config.get("rules", []): + if rule.get("host", False): + dns_record = GoogleResource( + id=id, + type="google_dns_record_set", + defaults=self.defaults, + config=config.get("google_dns_record_set", {}), + ) + + dns_record.set() + + dns_name = rule["host"].strip(".") + tf_ip_ref = dns_config["tf_ip_ref"] + dns_record.resource.rrdatas = [f"${{{tf_ip_ref}.address}}"] + + dns_record.resource.name = f"{dns_name}." + dns_record.filename = self.filename + self.add(dns_record) + + +@kgenlib.register_generator(path="terraform.gen_google_redis_instance") +class GenRedisInstance(TerraformStore): + def body(self): + config = self.config + defaults = config.pop("default_config", {}) + config = {**defaults, **config} + name = self.name + resource_id = self.id + + instance = GoogleResource( + id=resource_id, + type="google_redis_instance", + defaults=self.defaults, + config=config, + ) + + instance.resource.name = name + instance.resource.tier = config["tier"] + instance.resource.memory_size_gb = config["memory_size_gb"] + instance.resource.region = config["region"] + + record = GoogleResource( + id=resource_id, + type="google_dns_record_set", + defaults=self.defaults, + config=config, + ) + + dns_cfg = config["dns"] + record.resource.name = f'{config["endpoint"]}.' 
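+        # The remaining google_dns_record_set attributes below come from the
+        # `dns` block of the config; rrdatas interpolates the host attribute
+        # exported by the google_redis_instance resource created above.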
+ record.resource.managed_zone = dns_cfg["zone_name"] + record.resource.type = dns_cfg["type"] + record.resource.ttl = dns_cfg["ttl"] + record.resource.rrdatas = [f"${{google_redis_instance.{resource_id}.host}}"] + + resources = [instance, record] + for r in resources: + r.filename = f"{resource_id}_cluster.tf" + + self.add_list(resources) + + +@kgenlib.register_generator(path="terraform.gen_google_compute_global_address") +class GenGoogleGlobalComputeAddress(TerraformStore): + def body(self): + self.filename = "gen_google_compute_global_address.tf" + + config = self.config + name = self.name + id = self.id + + ip_address = GoogleResource( + id=id, + type="google_compute_global_address", + defaults=self.defaults, + config=config, + ) + + ip_address.set() + ip_address.filename = self.filename + + self.add(ip_address) + + +@kgenlib.register_generator(path="terraform.gen_google_compute_address") +class GenGoogleComputeAddress(TerraformStore): + def body(self): + self.filename = "gen_google_compute_address.tf" + + config = self.config + id = self.id + + ip_address = GoogleResource( + id=id, + type="google_compute_address", + defaults=self.defaults, + config=config, + ) + + ip_address.set() + ip_address.filename = self.filename + + self.add(ip_address) + + +@kgenlib.register_generator(path="terraform.gen_google_service_account") +class GenGoogleServiceAccount(TerraformStore): + def body(self): + self.filename = "gen_google_service_account.tf" + resource_id = self.id + config = self.config + resource_name = self.name + + sa = GoogleResource( + id=resource_name, + type="google_service_account", + config=config, + defaults=self.defaults, + ) + sa_account_id = config.get("account_id", resource_name) + sa.resource.account_id = sa_account_id + sa.resource.display_name = config.get("display_name", resource_name) + sa.resource.description = config.get("description") + sa.filename = self.filename + + self.add(sa) + + if config.get("bindings", {}): + for binding_role, binding_config in config.bindings.items(): + binding_id = binding_role.split("/")[1].replace(".", "_") + sa_binding = GoogleResource( + id=f"{resource_id}_{binding_id}", + type="google_service_account_iam_binding", + config=config, + defaults=self.defaults, + ) + sa_binding.resource.service_account_id = sa.get_reference( + attr="name", wrap=True + ) + sa_binding.resource.role = binding_role + sa_binding.resource.members = binding_config.members + sa_binding.filename = self.filename + sa_binding.resource.pop( + "project" + ) # `project` is not supported for `service_account_iam_binding` + + self.add(sa_binding) + + for iam_member_config in config.get("service_account_iam", []): + role = iam_member_config.role + member = iam_member_config.member + iam_id = role.split("/")[1].replace(".", "_") + sa_name = member.split("/")[-1][:-1] + iam_id = f"{iam_id}_{sa_name}" + iam_member = GoogleResource( + id=f"{resource_id}_{iam_id}", + type="google_service_account_iam_member", + config=config, + defaults=self.defaults, + ) + iam_member.resource.service_account_id = sa.get_reference( + attr="name", wrap=True + ) + iam_member.resource.role = role + iam_member.resource.member = member + iam_member.resource.pop( + "project" + ) # `project` is not supported for `service_account_iam_binding` + + self.add(iam_member) + + if config.get("roles", {}): + for role_item in config.roles: + role_id = role_item.split("/")[1].replace(".", "_") + role_name = f"{resource_name}_{role_id}" + sa_role = GoogleResource( + id=role_name, + type="gcp_project_id_iam_member", + 
config=config, + defaults=self.defaults, + ) + sa_role.resource.role = role_item + sa_role.filename = self.filename + sa_role.resource.member = ( + f"serviceAccount:{sa.get_reference(attr='email', wrap=True)}" + ) + self.add(sa_role) + + if config.get("bucket_iam"): + for bucket_name, bucket_config in config.bucket_iam.items(): + bucket_iam_name = f"{resource_name}_{bucket_name}" + + for role in bucket_config.roles: + role_id = role.split("/")[1].replace(".", "_") + bucket_role_name = f"{bucket_iam_name}_{role_id}" + bucket_role = GoogleResource( + id=bucket_role_name, + type="google_storage_bucket_iam_member", + config=config, + defaults=self.defaults, + ) + bucket_role.resource.bucket = bucket_name + bucket_role.resource.role = role + bucket_role.resource.pop("project") + bucket_role.resource.member = ( + f"serviceAccount:{sa.get_reference(attr='email', wrap=True)}" + ) + bucket_role.filename = self.filename + self.add(bucket_role) + + if config.get("pubsub_topic_iam"): + for topic_name, topic_config in config.pubsub_topic_iam.items(): + if "topic" in topic_config: + topic_name = topic_config.topic + topic_iam_name = f"{resource_name}_{topic_name}" + project_name = topic_config.project + + for role in topic_config.roles: + role_id = role.split("/")[1].replace(".", "_") + topic_role_name = f"{topic_iam_name}_{role_id}" + topic_role = GoogleResource( + id=topic_role_name, + type="google_pubsub_topic_iam_member", + config=config, + defaults=self.defaults, + ) + topic_role.resource.project = project_name + topic_role.resource.topic = topic_name + topic_role.resource.role = role + topic_role.resource.member = ( + f"serviceAccount:{sa.get_reference(attr='email', wrap=True)}" + ) + topic_role.filename = self.filename + self.add(topic_role) + + if config.get("pubsub_subscription_iam"): + for ( + subscription_name, + subscription_config, + ) in config.pubsub_subscription_iam.items(): + if "subscription" in subscription_config: + subscription_name = subscription_config.subscription + subscription_iam_name = f"{resource_name}_{subscription_name}" + project_name = subscription_config.project + + for role in subscription_config.roles: + role_id = role.split("/")[1].replace(".", "_") + subscription_role_name = f"{subscription_iam_name}_{role_id}" + subscription_role = GoogleResource( + id=subscription_role_name, + type="google_pubsub_subscription_iam_member", + config=config, + defaults=self.defaults, + ) + subscription_role.resource.project = project_name + subscription_role.resource.subscription = subscription_name + subscription_role.resource.role = role + subscription_role.resource.member = ( + f"serviceAccount:{sa.get_reference(attr='email', wrap=True)}" + ) + subscription_role.filename = self.filename + self.add(subscription_role) + + if config.get("project_iam"): + for project_name, iam_config in config.project_iam.items(): + if "project" in iam_config: + project_name = iam_config.project + project_iam_name = f"{resource_name}_{project_name}" + + for role in iam_config.roles: + role_id = role.split("/")[1].replace(".", "_") + iam_member_resource_name = f"{project_iam_name}_{role_id}" + iam_member = GoogleResource( + id=iam_member_resource_name, + type="gcp_project_id_iam_member", + config=config, + defaults=self.defaults, + ) + iam_member.resource.project = project_name + iam_member.resource.role = role + iam_member.resource.member = ( + f"serviceAccount:{sa.get_reference(attr='email', wrap=True)}" + ) + iam_member.filename = self.filename + self.add(iam_member) + + for repo_id, roles in 
config.get("artifact_registry_iam", {}).items(): + for role in roles: + repo_iam_member_cfg = { + "repo_id": repo_id, + "role": role, + "member": f"serviceAccount:{sa.get_reference('email')}", + "member_name": sa_account_id, + } + repo_iam_member = gen_artifact_registry_repository_iam_member( + repo_iam_member_cfg, self.defaults + ) + self.add(repo_iam_member) + + +@kgenlib.register_generator( + path="terraform.gen_google_container_cluster", + apply_patches=["generators.terraform.defaults.gen_google_container_cluster"], +) +class GenGoogleContainerCluster(TerraformStore): + def body(self): + self.filename = "gen_google_container_cluster.tf" + id = self.id + config = self.config + name = self.name + + pools = config.pop("pools", {}) + + cluster = GoogleResource( + id=id, + type="google_container_cluster", + defaults=self.defaults, + config=config, + ) + cluster.resource.name = name + cluster.set(config) + cluster.filename = self.filename + cluster.resource.setdefault("depends_on", []).append( + "gcp_project_id_service.container" + ) + + self.add(cluster) + + for pool_name, pool_config in pools.items(): + pool = GoogleResource( + id=pool_name, + type="google_container_node_pool", + config=pool_config, + defaults=self.defaults, + ) + pool.resource.update(pool_config) + pool.resource.cluster = cluster.get_reference(attr="id", wrap=True) + pool.filename = self.filename + + if not pool_config.get("autoscaling", {}): + # If autoscaling config is not defined or empty, make sure to remove it from the pool config + pool.resource.pop("autoscaling", {}) + else: + # If autoscaling is configured, remove static node count + # and set initial node count to the lowest allowed in autoscaling + pool.resource.pop("node_count", None) + if "initial_node_count" not in pool.resource: + pool.resource["initial_node_count"] = pool_config["autoscaling"][ + "total_min_node_count" + ] + + self.add(pool) + + +@kgenlib.register_generator( + path="terraform.gen_google_storage_bucket", + apply_patches=["generators.terraform.defaults.gen_google_storage_bucket"], +) +class GoogleStorageBucketGenerator(TerraformStore): + location: str = "EU" + + def body(self): + self.filename = "gen_google_storage_bucket.tf" + resource_id = self.id + config = self.config + resource_name = self.name + bucket = GoogleResource( + type="google_storage_bucket", + id=resource_id, + config=config, + defaults=self.defaults, + ) + bucket.add("name", resource_name) + bucket.filename = self.filename + bucket.add("location", config.get("location", self.location)) + bucket.add("versioning", config.get("versioning", {})) + bucket.add("lifecycle_rule", config.get("lifecycle_rule", [])) + bucket.add("cors", config.get("cors", [])) + bucket.add("labels", config.get("labels", {})) + bucket.add( + "uniform_bucket_level_access", + config.get("uniform_bucket_level_access", True), + ) + + self.add(bucket) + + if config.get("bindings", {}): + for binding_role, binding_config in config.bindings.items(): + for member in binding_config.members: + binding_id = binding_role.split("/")[1].replace(".", "_") + binding_id = f"{resource_id}_{binding_id}_{member}" + binding_id = ( + binding_id.replace("@", "_") + .replace(".", "_") + .replace(":", "_") + .lower() + ) + bucket_binding = GoogleResource( + type="google_storage_bucket_iam_member", + id=binding_id, + config=config, + defaults=self.defaults, + ) + bucket_binding.filename = self.filename + bucket_binding.add( + "bucket", bucket.get_reference(attr="name", wrap=True) + ) + bucket_binding.add("role", binding_role) + 
bucket_binding.add("member", member) + bucket_binding.resource.pop( + "project" + ) # `project` is not supported for `google_storage_bucket_iam_binding` + + self.add(bucket_binding) + + +@kgenlib.register_generator(path="terraform.gen_google_artifact_registry_repository") +class GoogleArtifactRegistryGenerator(TerraformStore): + def body(self): + self.filename = "gen_google_artifact_registry_repository.tf" + resource_id = self.id + config = self.config + config.setdefault("repository_id", self.name) + repo = GoogleResource( + type="google_artifact_registry_repository", + id=resource_id, + config=config, + defaults=self.defaults, + ) + iam_members = config.pop("iam_members", []) + repo.set(config) + + for member_cfg in iam_members: + for role in member_cfg.get("roles", []): + repo_iam_member_cfg = { + "repo_id": f"{config.project}/{config.location}/{config.repository_id}", + "role": role, + "member": member_cfg["member"], + } + repo_iam_member = gen_artifact_registry_repository_iam_member( + repo_iam_member_cfg, self.defaults + ) + + self.add(repo_iam_member) + + self.add(repo) + + +def gen_artifact_registry_repository_iam_member(config, defaults): + role = config.get("role") + + gcp_project, location, repo_name = config.get("repo_id").split("/") + repo_id = f"projects/{gcp_project}/locations/{location}/repositories/{repo_name}" + member = config.get("member") + + member_name = config.get("member_name") + if member_name is None: + # turn serviceAccount:service-695xxxxx@gcp-sa-aiplatform.iam.gserviceaccount.com + # into service-695xxxx + member_name = config.get("member").split("@")[0] + member_name = member_name.split(":")[1] + + role_id = role.split("/")[-1].replace(".", "-") + name = config.get("name", f"{member_name}-{repo_name}-{role_id}") + if name[0].isdigit(): + name = f"_{name}" + iam_policy_config = { + "project": gcp_project, + "location": location, + "repository": repo_id, + "role": role, + "member": member, + } + repo_iam_member = GoogleResource( + type="google_artifact_registry_repository_iam_member", + id=name, + config=iam_policy_config, + defaults=defaults, + ) + repo_iam_member.set(iam_policy_config) + + return repo_iam_member diff --git a/system/generators/terraform/terraform.py b/system/generators/terraform/terraform.py new file mode 100644 index 00000000..de3011c2 --- /dev/null +++ b/system/generators/terraform/terraform.py @@ -0,0 +1,129 @@ +import logging + +logger = logging.getLogger(__name__) + +from .common import ( + TerraformBlock, + TerraformData, + TerraformLocal, + TerraformProvider, + TerraformResource, + TerraformStore, + kgenlib, +) + + +@kgenlib.register_generator(path="terraform.gen_backend") +class Backend(TerraformBlock): + block_type = "terraform" + type = "backend" + + def body(self): + config = self.config + + self.resource.bucket = config.get("bucket") + self.resource.prefix = config.get("prefix") + self.filename = "terraform.tf" + + +@kgenlib.register_generator(path="terraform.gen_required_providers") +class RequiredProvider(TerraformBlock): + block_type = "terraform" + type = "required_providers" + + def body(self): + config = self.config + + self.set(config) + self.filename = "terraform.tf" + + +@kgenlib.register_generator(path="terraform.gen_provider") +class Provider(TerraformStore): + def body(self): + id = self.id + config = self.config + + provider = TerraformProvider(id=id, config=config) + provider.set(config) + + self.add(provider) + + +@kgenlib.register_generator(path="terraform.gen_locals") +class Local(TerraformStore): + def body(self): 
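+        # One Terraform local is emitted per entry under terraform.gen_locals.
+        # Plain values become simple locals; values that look like Kapitan gkms
+        # references ("?{gkms:...}") additionally get a google_kms_secret data
+        # source so the ciphertext can be decrypted through Cloud KMS.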
+ import base64 + + id = self.id + config = self.config + logger.debug(f"Adding local {id} with config {config}") + + value = config.get("value") + + # Handle support for Kapitan gkms secrets + if value.startswith("?{gkms:"): + local = TerraformLocal(id=id) + reference = f"{id}_reference" + local.set_local(name=reference, value=value) + + data = f"{id}_data" + # Split the reference on the : and take the second element (the base64 encoded data) + local.set_local( + name=data, + value=f'${{yamldecode(base64decode(element(split(":", local.{reference}), 1)))}}', + ) + + # Create the google_kms_secret data source + gkms = TerraformData(id=id, type="google_kms_secret") + gkms.add("ciphertext", f"${{local.{data}.data}}") + gkms.add("crypto_key", f"${{local.{data}.key}}") + self.add(gkms) + + # Create the local conditional on the data being base64 encoded or not + local.set_local( + name=id, + value=f'${{local.{data}.data == "base64" ? base64decode(data.google_kms_secret.{id}.plaintext) : data.google_kms_secret.{id}.plaintext}}', + ) + + self.add(local) + + else: + local = TerraformLocal(id=id, config=config) + self.add(local) + + +@kgenlib.register_generator(path="terraform.data_sources") +class TerraformDataSource(TerraformStore): + def body(self): + data_source_type = self.name + data_sources_sets = self.config + + for data_source_id, data_source_config in data_sources_sets.items(): + data_block = TerraformData( + id=data_source_id, + type=data_source_type, + config=data_source_config, + defaults=self.defaults, + ) + data_block.set(data_source_config) + + self.add(data_block) + + +@kgenlib.register_generator(path="terraform.resources.generic") +class TerraformGenericResource(TerraformStore): + def body(self): + resource_type = self.name + resource_sets = self.config + + for resource_id, resource_config in resource_sets.items(): + resource = TerraformResource( + id=resource_id, + type=resource_type, + config=resource_config, + defaults=self.defaults, + ) + resource.set(resource_config) + + self.add(resource) diff --git a/system/lib/jinja2_filters.py b/system/lib/jinja2_filters.py new file mode 100644 index 00000000..f63dcd86 --- /dev/null +++ b/system/lib/jinja2_filters.py @@ -0,0 +1,14 @@ +import json +import os + + +def to_json(obj): + return json.dumps(obj, ensure_ascii=False, indent=4) + + +def basename(path): + return os.path.basename(path) + + +def dirname(path): + return os.path.dirname(path) diff --git a/lib/generators/__init__.py b/system/lib/kgenlib/__init__.py similarity index 56% rename from lib/generators/__init__.py rename to system/lib/kgenlib/__init__.py index ec8915ce..2322ab46 100644 --- a/lib/generators/__init__.py +++ b/system/lib/kgenlib/__init__.py @@ -2,42 +2,92 @@ import functools import logging from enum import Enum -from types import FunctionType from typing import List import yaml from box.exceptions import BoxValueError from kapitan.cached import args from kapitan.inputs.helm import HelmChart -from kapitan.inputs.kadet import BaseModel, BaseObj, CompileError, Dict, current_target +from kapitan.inputs.kadet import ( + BaseModel, + BaseObj, + CompileError, + Dict, + current_target, + inventory_global, +) from kapitan.utils import render_jinja2_file logger = logging.getLogger(__name__) search_paths = args.get("search_paths") registered_generators = contextvars.ContextVar( - "current registered_generators in thread" + "current registered_generators in thread", default={} ) target = current_target.get() -registered_generators.set({}) + + +@functools.lru_cache +def 
load_generators(name, path): + from importlib import import_module + from inspect import isclass + from pathlib import Path + from pkgutil import iter_modules + + # iterate through the modules in the current package + package_dir = Path(path).resolve().parent + for _, module_name, _ in iter_modules([package_dir]): + # import the module and iterate through its attributes + module = import_module(f"{name}.{module_name}") + for attribute_name in dir(module): + attribute = getattr(module, attribute_name) + + if isclass(attribute): + # Add the class to this package's variables + globals()[attribute_name] = attribute + + +class DeleteContent(Exception): + # Raised when a content should be deleted + pass + + +def patch_config(config: Dict, inventory: Dict, inventory_path: str) -> None: + """Apply patch to config""" + patch = findpath(inventory, inventory_path, {}) + logger.debug(f"Applying patch {inventory_path} : {patch}") + merge(patch, config) def register_function(func, params): - logging.debug( + target = current_target.get() + logger.debug( f"Registering generator {func.__name__} with params {params} for target {target}" ) + my_dict = registered_generators.get() - my_dict.setdefault(target, []).append((func, params)) + generator_list = my_dict.get(target, []) + generator_list.append((func, params)) + + logger.debug( + f"Currently registered {len(generator_list)} generators for target {target}" + ) + + my_dict[target] = generator_list + registered_generators.set(my_dict) def merge(source, destination): for key, value in source.items(): if isinstance(value, dict): - node = destination.setdefault(key, value) + node = destination.get(key, None) if node is None: destination[key] = value + elif len(node) == 0: + # node is set to an empty dict on purpose as a way to override the value + pass else: merge(value, node) else: @@ -50,6 +100,27 @@ def render_jinja(filename, ctx): return render_jinja2_file(filename, ctx, search_paths=search_paths) +def findpaths_by_property(obj: dict, property: str) -> dict: + """ + Traverses the whole dictionary looking of objects containing a given property. + + Args: + obj: the dictionary to scan for a given property + property: the key to look for in a dictionary + + Returns: + A dictionary with found objects. Keys in the dictionary are the "name" properties of these objects. 
+ """ + res = {} + for k, v in obj.items(): + if k == property: + res[obj["name"]] = obj + if isinstance(v, dict): + sub_results = findpaths_by_property(v, property) + res = {**res, **sub_results} + return res + + def findpath(obj, path, default={}): value = default if path: @@ -63,6 +134,10 @@ def findpath(obj, path, default={}): if value is not None: return value logging.info(f"Key {e} not found in {obj}: ignoring") + except AttributeError as e: + if value is not None: + return value + logging.info(f"Attribute {e} not found in {obj}: ignoring") if len(path_parts) == 1: return value @@ -92,6 +167,9 @@ class BaseContent(BaseModel): content_type: ContentType = ContentType.YAML filename: str = "output" + def body(self): + pass + @classmethod def from_baseobj(cls, baseobj: BaseObj): """Return a BaseContent initialised with baseobj.""" @@ -145,11 +223,14 @@ def mutate(self, mutations: List): if action == "delete": for condition in conditions: if self.match(condition["conditions"]): - self = None + raise DeleteContent(f"Deleting {self} because of {condition}") if action == "bundle": for condition in conditions: if self.match(condition["conditions"]): - self.filename = condition["filename"].format(content=self) + try: + self.filename = condition["filename"].format(content=self) + except (AttributeError, KeyError): + pass if condition.get("break", True): break @@ -165,7 +246,7 @@ def match(self, match_conditions): return True def patch(self, patch): - self.root.merge_update(Dict(patch)) + self.root.merge_update(Dict(patch), box_merge_lists="extend") class BaseStore(BaseModel): @@ -182,7 +263,7 @@ def from_yaml_file(cls, file_path): return store def add(self, object): - logging.debug(f"Adding {type(object)} to store") + logger.debug(f"Adding {type(object)} to store") if isinstance(object, BaseContent): self.content_list.append(object) elif isinstance(object, BaseStore): @@ -221,23 +302,32 @@ def process_mutations(self, mutations: Dict): for content in self.get_content_list(): try: content.mutate(mutations) + except DeleteContent as e: + logger.debug(e) + self.content_list.remove(content) except: raise CompileError(f"Error when processing mutations on {content}") def get_content_list(self): - return self.content_list + return getattr(self, "content_list", []) def dump(self, output_filename=None, already_processed=False): """Return object dict/list.""" - logging.debug(f"Dumping {len(self.get_content_list())} items") + logger.debug(f"Dumping {len(self.get_content_list())} items") if not already_processed: for content in self.get_content_list(): if output_filename: output_format = output_filename else: output_format = getattr(content, "filename", "output") - filename = output_format.format(content=content) + file_content_list = self.root.get(filename, []) + if content in file_content_list: + logger.debug( + f"Skipping duplicated content content for reason 'Duplicate name {content.name} for {filename}'" + ) + continue + self.root.setdefault(filename, []).append(content) return super().dump() @@ -245,34 +335,48 @@ def dump(self, output_filename=None, already_processed=False): class BaseGenerator: def __init__( - self, inventory: Dict, store: BaseStore = None, defaults_path: str = None + self, + inventory: Dict, + store: BaseStore = None, + defaults_path: str = None, ) -> None: self.inventory = inventory + self.global_inventory = inventory_global() self.generator_defaults = findpath(self.inventory, defaults_path) - logging.debug(f"Setting {self.generator_defaults} as generator defaults") + 
logger.debug(f"Setting {self.generator_defaults} as generator defaults") if store == None: self.store = BaseStore() else: self.store = store() - def expand_and_run(self, func, params): - inventory = self.inventory + def expand_and_run(self, func, params, inventory=None): + if inventory == None: + inventory = self.inventory + path = params.get("path") + activation_property = params.get("activation_property") patches = params.get("apply_patches", []) - configs = findpath(inventory.parameters, path) + if path is not None: + configs = findpath(inventory.parameters, path) + elif activation_property is not None: + configs = findpaths_by_property(inventory.parameters, activation_property) + else: + raise CompileError( + f"generator need to provide either 'path' or 'activation_property'" + ) + if configs: - logging.debug( + logger.debug( f"Found {len(configs)} configs to generate at {path} for target {target}" ) - - for name, config in configs.items(): + for config_id, config in configs.items(): patched_config = Dict(config) patch_paths_to_apply = patches patches_applied = [] for path in patch_paths_to_apply: try: - path = path.format(**config) + path = path.format(**patched_config) except KeyError: # Silently ignore missing keys continue @@ -282,24 +386,52 @@ def expand_and_run(self, func, params): patched_config = merge(patch, patched_config) local_params = { - "name": name, + "id": config_id, + "name": patched_config.get("name", config_id), "config": patched_config, "patches_applied": patches_applied, "original_config": config, "defaults": self.generator_defaults, + "inventory": inventory, + "global_inventory": self.global_inventory, + "target": current_target.get(), } - logging.debug( - f"Running class {func.__name__} with params {local_params.keys()} and name {name}" + logger.debug( + f"Running class {func.__name__} for {config_id} with params {local_params.keys()}" ) self.store.add(func(**local_params)) def generate(self): generators = registered_generators.get().get(target, []) - logging.debug( + logger.debug( f"{len(generators)} classes registered as generators for target {target}" ) for func, params in generators: - - logging.debug(f"Expanding {func.__name__} with params {params}") - self.expand_and_run(func=func, params=params) + activation_path = params.get("activation_path", False) + global_generator = params.get("global_generator", False) + if activation_path and global_generator: + logger.debug( + f"Running global generator {func.__name__} with activation path {activation_path}" + ) + if not findpath(self.inventory.parameters, activation_path): + logger.debug( + f"Skipping global generator {func.__name__} with params {params}" + ) + continue + else: + logger.debug( + f"Running global generator {func.__name__} with params {params}" + ) + + for _, inventory in self.global_inventory.items(): + self.expand_and_run( + func=func, params=params, inventory=inventory + ) + elif not global_generator: + logger.debug(f"Expanding {func.__name__} with params {params}") + self.expand_and_run(func=func, params=params) + else: + logger.debug( + f"Skipping generator {func.__name__} with params {params} because not global and no activation path" + ) return self.store diff --git a/system/refs/shared/gcp_billing_account b/system/refs/shared/gcp_billing_account new file mode 100644 index 00000000..6b47c9db --- /dev/null +++ b/system/refs/shared/gcp_billing_account @@ -0,0 +1,3 @@ +data: jpzaR_ArxEkpIIljqRpFstsP_yw34RR07D6lAynfwIw +encoding: original +type: plain diff --git 
a/system/refs/shared/gcp_organization_id b/system/refs/shared/gcp_organization_id new file mode 100644 index 00000000..473b29b6 --- /dev/null +++ b/system/refs/shared/gcp_organization_id @@ -0,0 +1,3 @@ +data: az1oDhA50eU5d2ToHhNFrSaWNqAa1iaosXyZfd6SZQ2 +encoding: original +type: plain diff --git a/refs/targets/dev-sockshop/mysql_password b/system/refs/targets/dev-sockshop/mysql_password similarity index 100% rename from refs/targets/dev-sockshop/mysql_password rename to system/refs/targets/dev-sockshop/mysql_password diff --git a/refs/targets/dev/mysql_password b/system/refs/targets/dev/mysql_password similarity index 100% rename from refs/targets/dev/mysql_password rename to system/refs/targets/dev/mysql_password diff --git a/refs/targets/echo-server/password b/system/refs/targets/echo-server/password similarity index 100% rename from refs/targets/echo-server/password rename to system/refs/targets/echo-server/password diff --git a/refs/targets/examples/mysql-password b/system/refs/targets/examples/mysql-password similarity index 100% rename from refs/targets/examples/mysql-password rename to system/refs/targets/examples/mysql-password diff --git a/refs/targets/examples/mysql-root-password b/system/refs/targets/examples/mysql-root-password similarity index 100% rename from refs/targets/examples/mysql-root-password rename to system/refs/targets/examples/mysql-root-password diff --git a/refs/targets/examples/shared-password-base64-as-plain b/system/refs/targets/examples/shared-password-base64-as-plain similarity index 100% rename from refs/targets/examples/shared-password-base64-as-plain rename to system/refs/targets/examples/shared-password-base64-as-plain diff --git a/refs/targets/examples/shared-password-plain-as-base64 b/system/refs/targets/examples/shared-password-plain-as-base64 similarity index 100% rename from refs/targets/examples/shared-password-plain-as-base64 rename to system/refs/targets/examples/shared-password-plain-as-base64 diff --git a/refs/targets/examples/shared-password-plain-as-plain-pass b/system/refs/targets/examples/shared-password-plain-as-plain-pass similarity index 100% rename from refs/targets/examples/shared-password-plain-as-plain-pass rename to system/refs/targets/examples/shared-password-plain-as-plain-pass diff --git a/refs/targets/examples/shared-password-plain-as-plain-user b/system/refs/targets/examples/shared-password-plain-as-plain-user similarity index 100% rename from refs/targets/examples/shared-password-plain-as-plain-user rename to system/refs/targets/examples/shared-password-plain-as-plain-user diff --git a/refs/targets/examples/shared-password-string-data b/system/refs/targets/examples/shared-password-string-data similarity index 100% rename from refs/targets/examples/shared-password-string-data rename to system/refs/targets/examples/shared-password-string-data diff --git a/refs/targets/gke-pvm-killer/gke-pvm-killer-service-account b/system/refs/targets/gke-pvm-killer/gke-pvm-killer-service-account similarity index 100% rename from refs/targets/gke-pvm-killer/gke-pvm-killer-service-account rename to system/refs/targets/gke-pvm-killer/gke-pvm-killer-service-account diff --git a/refs/targets/global/mysql-password b/system/refs/targets/global/mysql-password similarity index 100% rename from refs/targets/global/mysql-password rename to system/refs/targets/global/mysql-password diff --git a/refs/targets/global/mysql-root-password b/system/refs/targets/global/mysql-root-password similarity index 100% rename from refs/targets/global/mysql-root-password 
rename to system/refs/targets/global/mysql-root-password diff --git a/refs/targets/mysql/mysql-password b/system/refs/targets/mysql/mysql-password similarity index 100% rename from refs/targets/mysql/mysql-password rename to system/refs/targets/mysql/mysql-password diff --git a/refs/targets/mysql/mysql-root-password b/system/refs/targets/mysql/mysql-root-password similarity index 100% rename from refs/targets/mysql/mysql-root-password rename to system/refs/targets/mysql/mysql-root-password diff --git a/refs/targets/postgres-proxy/postgres-proxy-service-account b/system/refs/targets/postgres-proxy/postgres-proxy-service-account similarity index 100% rename from refs/targets/postgres-proxy/postgres-proxy-service-account rename to system/refs/targets/postgres-proxy/postgres-proxy-service-account diff --git a/refs/targets/pritunl/mongodb_password b/system/refs/targets/pritunl/mongodb_password similarity index 100% rename from refs/targets/pritunl/mongodb_password rename to system/refs/targets/pritunl/mongodb_password diff --git a/refs/targets/pritunl/pritunl_password b/system/refs/targets/pritunl/pritunl_password similarity index 100% rename from refs/targets/pritunl/pritunl_password rename to system/refs/targets/pritunl/pritunl_password diff --git a/refs/targets/prod-sockshop/mysql_password b/system/refs/targets/prod-sockshop/mysql_password similarity index 100% rename from refs/targets/prod-sockshop/mysql_password rename to system/refs/targets/prod-sockshop/mysql_password diff --git a/refs/targets/prod-sockshop/sockshop.kapicorp.com.crt b/system/refs/targets/prod-sockshop/sockshop.kapicorp.com.crt similarity index 100% rename from refs/targets/prod-sockshop/sockshop.kapicorp.com.crt rename to system/refs/targets/prod-sockshop/sockshop.kapicorp.com.crt diff --git a/refs/targets/prod-sockshop/sockshop.kapicorp.com.key b/system/refs/targets/prod-sockshop/sockshop.kapicorp.com.key similarity index 100% rename from refs/targets/prod-sockshop/sockshop.kapicorp.com.key rename to system/refs/targets/prod-sockshop/sockshop.kapicorp.com.key diff --git a/refs/targets/prod/mysql_password b/system/refs/targets/prod/mysql_password similarity index 100% rename from refs/targets/prod/mysql_password rename to system/refs/targets/prod/mysql_password diff --git a/refs/targets/sock-shop/mysql_password b/system/refs/targets/sock-shop/mysql_password similarity index 100% rename from refs/targets/sock-shop/mysql_password rename to system/refs/targets/sock-shop/mysql_password diff --git a/refs/targets/tesoro/kapicorp-tesoro-cacert-pem b/system/refs/targets/tesoro/kapicorp-tesoro-cacert-pem similarity index 100% rename from refs/targets/tesoro/kapicorp-tesoro-cacert-pem rename to system/refs/targets/tesoro/kapicorp-tesoro-cacert-pem diff --git a/refs/targets/tesoro/kapicorp-tesoro-cert-key b/system/refs/targets/tesoro/kapicorp-tesoro-cert-key similarity index 100% rename from refs/targets/tesoro/kapicorp-tesoro-cert-key rename to system/refs/targets/tesoro/kapicorp-tesoro-cert-key diff --git a/refs/targets/tesoro/kapicorp-tesoro-cert-pem b/system/refs/targets/tesoro/kapicorp-tesoro-cert-pem similarity index 100% rename from refs/targets/tesoro/kapicorp-tesoro-cert-pem rename to system/refs/targets/tesoro/kapicorp-tesoro-cert-pem diff --git a/refs/targets/tutorial/password b/system/refs/targets/tutorial/password similarity index 100% rename from refs/targets/tutorial/password rename to system/refs/targets/tutorial/password diff --git a/resources/kapicorp-gmks-demo-service-account.json 
b/system/resources/kapicorp-gmks-demo-service-account.json similarity index 100% rename from resources/kapicorp-gmks-demo-service-account.json rename to system/resources/kapicorp-gmks-demo-service-account.json diff --git a/compiled/kapicorp-demo-march/terraform/.terraform.lock.hcl b/system/resources/state/kapicorp-demo-march/.terraform.lock.hcl similarity index 100% rename from compiled/kapicorp-demo-march/terraform/.terraform.lock.hcl rename to system/resources/state/kapicorp-demo-march/.terraform.lock.hcl diff --git a/resources/state/kapicorp-demo-march/.terraform.lock.hcl b/system/resources/state/kapicorp-project-123/.terraform.lock.hcl similarity index 100% rename from resources/state/kapicorp-demo-march/.terraform.lock.hcl rename to system/resources/state/kapicorp-project-123/.terraform.lock.hcl diff --git a/resources/state/kapicorp-project-123/.terraform.lock.hcl b/system/resources/state/kapicorp-terraform-admin/.terraform.lock.hcl similarity index 100% rename from resources/state/kapicorp-project-123/.terraform.lock.hcl rename to system/resources/state/kapicorp-terraform-admin/.terraform.lock.hcl diff --git a/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/.helmignore b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/.helmignore new file mode 100644 index 00000000..3a063296 --- /dev/null +++ b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/.helmignore @@ -0,0 +1,4 @@ +/*.tgz +output +ci/ +*.gotmpl diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/Chart.lock b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/Chart.lock similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/Chart.lock rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/Chart.lock diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/Chart.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/Chart.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/Chart.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/Chart.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/README.md b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/README.md similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/README.md rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/README.md diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/Chart.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/Chart.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/Chart.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/Chart.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/README.md b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/README.md similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/README.md rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/README.md diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/ci/haproxy-enabled-values.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/ci/haproxy-enabled-values.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/ci/haproxy-enabled-values.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/ci/haproxy-enabled-values.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/NOTES.txt 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/NOTES.txt similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/NOTES.txt rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/NOTES.txt diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_configs.tpl b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_configs.tpl similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_configs.tpl rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_configs.tpl diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_helpers.tpl b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_helpers.tpl similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_helpers.tpl rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/_helpers.tpl diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-auth-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-auth-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-auth-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-auth-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-announce-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-announce-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-announce-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-announce-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-configmap.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-configmap.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-configmap.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-configmap.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-exporter-script-configmap.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-exporter-script-configmap.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-exporter-script-configmap.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-exporter-script-configmap.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-health-configmap.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-health-configmap.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-health-configmap.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-health-configmap.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-pdb.yaml 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-pdb.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-pdb.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-pdb.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-psp.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-psp.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-psp.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-psp.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-statefulset.yaml 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-statefulset.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-statefulset.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-ha-statefulset.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-psp.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-psp.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-psp.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-psp.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-servicemonitor.yaml rename to 
system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-haproxy-servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-tls-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-tls-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-tls-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/redis-tls-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/sentinel-auth-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/sentinel-auth-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/sentinel-auth-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/sentinel-auth-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-configmap.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-configmap.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-configmap.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-configmap.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-pod.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-pod.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-pod.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/templates/tests/test-redis-ha-pod.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/values.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/values.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/charts/redis-ha/values.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/charts/redis-ha/values.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-application.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-application.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-application.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-application.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-extension.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-extension.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-extension.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-extension.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-project.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-project.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/crds/crd-project.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/crds/crd-project.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/NOTES.txt b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/NOTES.txt similarity index 100% rename from 
components/charts/argo-cd/3.32.0/v2.2.3/templates/NOTES.txt rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/NOTES.txt diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/_helpers.tpl b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/_helpers.tpl similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/_helpers.tpl rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/_helpers.tpl diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-aggregate-roles.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-aggregate-roles.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-aggregate-roles.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-aggregate-roles.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrole.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrole.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrole.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrole.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/clusterrolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/metrics-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/metrics-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/metrics-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/metrics-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/networkpolicy.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/networkpolicy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/networkpolicy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/networkpolicy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/prometheusrule.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/prometheusrule.yaml similarity index 100% rename from 
components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/prometheusrule.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/prometheusrule.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-application-controller/servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/applications.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/applications.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/applications.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/applications.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-cm.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-gpg-keys-cm.yaml 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-gpg-keys-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-gpg-keys-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-gpg-keys-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-rbac-cm.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-rbac-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-rbac-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-rbac-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-ssh-known-hosts-cm.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-ssh-known-hosts-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-ssh-known-hosts-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-ssh-known-hosts-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-styles-cm.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-styles-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-styles-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-styles-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-tls-certs-cm.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-tls-certs-cm.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-tls-certs-cm.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/argocd-tls-certs-cm.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/cluster-secrets.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/cluster-secrets.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/cluster-secrets.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/cluster-secrets.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/projects.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/projects.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/projects.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/projects.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret-legacy.yaml 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret-legacy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret-legacy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret-legacy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-credentials-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-secret.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-secret.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-secret.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-configs/repository-secret.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrole.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrole.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrole.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrole.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/clusterrolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/hpa.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/hpa.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/hpa.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/hpa.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/metrics-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/metrics-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/metrics-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/metrics-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/networkpolicy.yaml 
b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/networkpolicy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/networkpolicy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/networkpolicy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-repo-server/servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/alb-grpc-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/alb-grpc-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/alb-grpc-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/alb-grpc-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/backendconfig.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/backendconfig.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/backendconfig.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/backendconfig.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/certificate.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/certificate.yaml similarity index 100% rename from 
components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/certificate.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/certificate.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrole.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrole.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrole.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrole.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/clusterrolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions-rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions-rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions-rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions-rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extensions.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extentions-role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extentions-role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extentions-role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/extentions-role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/frontendconfig.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/frontendconfig.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/frontendconfig.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/frontendconfig.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/hpa.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/hpa.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/hpa.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/hpa.yaml diff --git 
a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress-grpc.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress-grpc.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress-grpc.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress-grpc.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/ingress.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/managedCertificate.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/managedCertificate.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/managedCertificate.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/managedCertificate.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/metrics-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/metrics-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/metrics-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/metrics-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/networkpolicy.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/networkpolicy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/networkpolicy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/networkpolicy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/route.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/route.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/route.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/route.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/service.yaml rename to 
system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/argocd-server/servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/argocd-server/servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/networkpolicy.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/networkpolicy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/networkpolicy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/networkpolicy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/role.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/role.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/role.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/role.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/rolebinding.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/rolebinding.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/rolebinding.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/rolebinding.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/dex/servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/dex/servicemonitor.yaml diff --git 
a/components/charts/argo-cd/3.32.0/v2.2.3/templates/extra-manifests.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/extra-manifests.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/extra-manifests.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/extra-manifests.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/networkpolicy-default-deny.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/networkpolicy-default-deny.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/networkpolicy-default-deny.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/networkpolicy-default-deny.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/deployment.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/deployment.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/deployment.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/deployment.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/metrics-service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/metrics-service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/metrics-service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/metrics-service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/networkpolicy.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/networkpolicy.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/networkpolicy.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/networkpolicy.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/service.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/service.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/service.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/service.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/serviceaccount.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/serviceaccount.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/serviceaccount.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/serviceaccount.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/servicemonitor.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/servicemonitor.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/templates/redis/servicemonitor.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/templates/redis/servicemonitor.yaml diff --git a/components/charts/argo-cd/3.32.0/v2.2.3/values.yaml b/system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/values.yaml similarity index 100% rename from components/charts/argo-cd/3.32.0/v2.2.3/values.yaml rename to system/sources/charts/argo-cd/argo-cd/3.32.0/v2.2.3/values.yaml diff --git a/components/actions-runner-controller.yaml b/system/sources/manifests/actions-runner-controller.yaml similarity index 100% rename from components/actions-runner-controller.yaml rename to 
system/sources/manifests/actions-runner-controller.yaml diff --git a/components/echo-server/echo-server.conf.j2 b/system/sources/templates/echo-server/echo-server.conf.j2 similarity index 100% rename from components/echo-server/echo-server.conf.j2 rename to system/sources/templates/echo-server/echo-server.conf.j2 diff --git a/components/filebeat/templates/filebeat.yml.j2 b/system/sources/templates/filebeat/templates/filebeat.yml.j2 similarity index 100% rename from components/filebeat/templates/filebeat.yml.j2 rename to system/sources/templates/filebeat/templates/filebeat.yml.j2 diff --git a/components/logstash/templates/example.conf.j2 b/system/sources/templates/logstash/templates/example.conf.j2 similarity index 100% rename from components/logstash/templates/example.conf.j2 rename to system/sources/templates/logstash/templates/example.conf.j2 diff --git a/components/logstash/templates/logstash.yml.j2 b/system/sources/templates/logstash/templates/logstash.yml.j2 similarity index 100% rename from components/logstash/templates/logstash.yml.j2 rename to system/sources/templates/logstash/templates/logstash.yml.j2 diff --git a/components/logstash/templates/pipelines.yml.j2 b/system/sources/templates/logstash/templates/pipelines.yml.j2 similarity index 100% rename from components/logstash/templates/pipelines.yml.j2 rename to system/sources/templates/logstash/templates/pipelines.yml.j2 diff --git a/components/mysql/mytemplate.cnf.j2 b/system/sources/templates/mysql/mytemplate.cnf.j2 similarity index 100% rename from components/mysql/mytemplate.cnf.j2 rename to system/sources/templates/mysql/mytemplate.cnf.j2 diff --git a/components/vault/extraconfig-from-values.hcl.j2 b/system/sources/templates/vault/extraconfig-from-values.hcl.j2 similarity index 100% rename from components/vault/extraconfig-from-values.hcl.j2 rename to system/sources/templates/vault/extraconfig-from-values.hcl.j2 diff --git a/system/templates/CODEOWNERS b/system/templates/CODEOWNERS new file mode 100644 index 00000000..52b409f4 --- /dev/null +++ b/system/templates/CODEOWNERS @@ -0,0 +1,20 @@ +# GENERATED BY KAPITAN. DO NOT EDIT. +# To update this file, add or remove to the "parameters.codeowners" list +# in the inventory in the desired target/class and run `kapitan compile` +# The current default configurations for codeowners are: + +# ./kapitan searchvar codeowners +# ./inventory/classes/common.yml ['kapicorp/devops'] + +* @kapicorp/devops # Catch-all rule + +inventory/ @kapicorp/everyone +system/refs/ @kapicorp/everyone + +{% set params = inventory.parameters %} +{% for target in inventory_global | sort() %} +{% set p = inventory_global[target].parameters %} +{% if p.codeowners is defined %} +compiled/{{p.target_path}} @{{p.codeowners | reverse | join(' @')}} +{% endif %} +{% endfor %} diff --git a/system/templates/atlantis.yaml b/system/templates/atlantis.yaml new file mode 100644 index 00000000..76484014 --- /dev/null +++ b/system/templates/atlantis.yaml @@ -0,0 +1,24 @@ +version: 3 +automerge: false + +# Disabling because it seems to create some issues: +# The default workspace at path compiled/gcp/X is currently locked by another command that is running for this pull request. +# Wait until the previous command is complete and try again. 
+# parallel_plan: true +# parallel_apply: true + +projects: +{% set params = inventory.parameters %} + +{% for terraform_target in inventory_global | sort() %} +{% set p = inventory_global[terraform_target].parameters %} +{% if p.terraform is defined %} +- name: {{terraform_target}} + dir: compiled/{{p.target_path}}/terraform + workspace: default + terraform_version: {{params.args.atlantis.terraform_version}} + autoplan: + when_modified: ["*.tf.json"] + enabled: true +{% endif %} +{% endfor %} diff --git a/templates/docs/README.md b/system/templates/docs/README.md similarity index 89% rename from templates/docs/README.md rename to system/templates/docs/README.md index 9241777d..3818f104 100644 --- a/templates/docs/README.md +++ b/system/templates/docs/README.md @@ -4,7 +4,7 @@ ||| | --- | --- | | **Target** | {{ p.target_name }} | -| **Project** | `{{p.google_project | default('not defined')}}`| +| **Project** | `{{p.gcp_project_id | default('not defined')}}`| | **Cluster** | {% if p.cluster is defined %} {{p.cluster.name }} {% else %} 'Not defined' {% endif %} | | **Namespace** | `{{p.namespace}}` | diff --git a/templates/docs/global/README.md b/system/templates/docs/global/README.md similarity index 100% rename from templates/docs/global/README.md rename to system/templates/docs/global/README.md diff --git a/templates/docs/inventory.md b/system/templates/docs/inventory.md similarity index 100% rename from templates/docs/inventory.md rename to system/templates/docs/inventory.md diff --git a/templates/docs/service_component.md.j2 b/system/templates/docs/service_component.md.j2 similarity index 100% rename from templates/docs/service_component.md.j2 rename to system/templates/docs/service_component.md.j2 diff --git a/system/templates/scripts/gcloud/gcloud b/system/templates/scripts/gcloud/gcloud new file mode 100755 index 00000000..7969a983 --- /dev/null +++ b/system/templates/scripts/gcloud/gcloud @@ -0,0 +1,12 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +{% set project = input_params.gcp_project_id | default(inventory.parameters.gcp_project_id) %} +{% if project != "undefined" %} + ${GCLOUD_BINARY} --project {{project}} "$@" +{% else %} + >&2 echo "gcp_project_id not defined in kapitan. 
Using system default" + ${GCLOUD_BINARY} "$@" +{% endif %} diff --git a/system/templates/scripts/gcloud/get_project_number b/system/templates/scripts/gcloud/get_project_number new file mode 100755 index 00000000..e031923a --- /dev/null +++ b/system/templates/scripts/gcloud/get_project_number @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +get_project_number ${TARGET_NAME} | set_reference_name project_number diff --git a/system/templates/scripts/generate_tesoro_certs.sh b/system/templates/scripts/generate_tesoro_certs.sh new file mode 100755 index 00000000..2d1a01bb --- /dev/null +++ b/system/templates/scripts/generate_tesoro_certs.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +{% set p = inventory.parameters %} +NAMESPACE={{p.namespace}} + +# Generates new certificates +CACERT_KEY=rootCA.key +CACERT_PEM=rootCA.crt +CERT_KEY=priv.key +CERT_PEM=cert.pem +CN=tesoro.${NAMESPACE}.svc + +pushd ${SCRIPT_TMP_DIR} + openssl genrsa -out ${CACERT_KEY} 4096 > /dev/null + openssl req -x509 -new -nodes -key ${CACERT_KEY} -subj "/CN=CA-${CN}" -sha256 -days 1024 -out ${CACERT_PEM} > /dev/null + + + openssl genrsa -out ${CERT_KEY} 2048 > /dev/null + openssl req -new -sha256 -key ${CERT_KEY} -subj "/CN=${CN}" -out csr.csr >/dev/null + openssl x509 -req -in csr.csr -CA ${CACERT_PEM} -extfile <(printf "subjectAltName=DNS:${CN}") -CAkey ${CACERT_KEY} -CAcreateserial -out ${CERT_PEM} -days 500 -sha256 > /dev/null + openssl x509 -in ${CERT_PEM} -noout +popd + +cat ${SCRIPT_TMP_DIR}/${CERT_PEM} | set_reference {{p.kapicorp.tesoro.refs.certificate}} --base64 +cat ${SCRIPT_TMP_DIR}/${CERT_KEY} | set_reference {{p.kapicorp.tesoro.refs.private_key}} --base64 +cat ${SCRIPT_TMP_DIR}/${CACERT_PEM} | set_reference {{p.kapicorp.tesoro.refs.cacert}} --base64 diff --git a/system/templates/scripts/github/import_repository b/system/templates/scripts/github/import_repository new file mode 100755 index 00000000..bd1e7531 --- /dev/null +++ b/system/templates/scripts/github/import_repository @@ -0,0 +1,10 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +TERRAFORM_COMMAND=${SCRIPT_RELATIVE_DIR}/terraform + +${TERRAFORM_COMMAND} init +${TERRAFORM_COMMAND} import github_repository.repository ${TARGET_NAME} +${TERRAFORM_COMMAND} import github_branch_protection.repository_main ${TARGET_NAME}:main diff --git a/system/templates/scripts/gke/gke_config_import b/system/templates/scripts/gke/gke_config_import new file mode 100755 index 00000000..da4abedd --- /dev/null +++ b/system/templates/scripts/gke/gke_config_import @@ -0,0 +1,40 @@ +#!/bin/bash +# generated with Kapitan + +# Imports cluster config from GKE clusters into Kapitan inventory +# Usage: ./gke_import [project1] [project2] ... 
+# If no projects are specified, all projects will be discovered and imported + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +{% set p = inventory.parameters %} + +{% raw %} +PROJECTS_LIST=() +if [[ ${#@} -gt 0 ]] +then + PROJECTS_LIST+=${@} +else + echo "No projects specified, discovering all projects" + PROJECTS_LIST+=$(get_all_projects) +fi + + +for PROJECT in ${PROJECTS_LIST} +do + echo "Processing project ${PROJECT}" + for CLUSTER_NAME_ZONE_PAIR in $(get_clusters_for_project ${PROJECT}) + do + CLUSTER_NAME=$(echo ${CLUSTER_NAME_ZONE_PAIR} | cut -d':' -f1) + ZONE=$(echo ${CLUSTER_NAME_ZONE_PAIR} | cut -d':' -f2) + ID="gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" + + echo "..Discovered Kubernetes cluster $CLUSTER_NAME on project $PROJECT..." + echo "....Fetching config" + + get_cluster_config ${CLUSTER_NAME} --zone ${ZONE} --project ${PROJECT} | set_reference plain:resources/${ID}/config + echo "....Config saved to plain:resources/${ID}/config" + echo + done +done +{% endraw %} diff --git a/system/templates/scripts/includes/bash.include b/system/templates/scripts/includes/bash.include new file mode 100644 index 00000000..cd080f6b --- /dev/null +++ b/system/templates/scripts/includes/bash.include @@ -0,0 +1,58 @@ +set -o nounset -o pipefail -o noclobber -o errexit + +{% set p = inventory.parameters %} +TARGET_NAME="{{p.target_name}}" +TARGET_PATH="{{p.target_path}}" +GCP_PROJECT_ID="{{p.gcp_project_id}}" +TARGET="{{p.target}}" +TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR=compiled/${TARGET_PATH} + +SCRIPT_RELATIVE_PATH=${BASH_SOURCE[1]} +SCRIPT_RELATIVE_DIR=$(dirname ${SCRIPT_RELATIVE_PATH}) +SCRIPT_ABS_PATH=$(cd "${SCRIPT_RELATIVE_DIR}"; pwd) + +KAPITAN_ABSOLUTE_BASEDIR=${SCRIPT_ABS_PATH%${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/*} +SCRIPT_RELATIVE_DIR_FROM_KAPITAN_BASEDIR=$(dirname ${SCRIPT_ABS_PATH#${KAPITAN_ABSOLUTE_BASEDIR}/}) + +GIT_ABSOLUTE_BASEDIR=$(cd ${KAPITAN_ABSOLUTE_BASEDIR}; git rev-parse --show-toplevel) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${GIT_ABSOLUTE_BASEDIR}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_SCRIPT_DIR=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=${SCRIPT_ABS_PATH}) +KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD=$(realpath ${KAPITAN_ABSOLUTE_BASEDIR} --relative-to=$(pwd)) + +BASH_INCLUDE_RELATIVE_PATH=${BASH_SOURCE[0]} +BASH_INCLUDE_RELATIVE_DIR=$(dirname ${BASH_INCLUDE_RELATIVE_PATH}) + +KAPITAN_TEMPLATES_DIRNAME=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_GIT_BASEDIR%%/*} + +# Legacy +ROOT=${KAPITAN_ABSOLUTE_BASEDIR} +DIR=${SCRIPT_RELATIVE_DIR} + +source ${SCRIPT_RELATIVE_DIR}/gcloud.include +source ${SCRIPT_RELATIVE_DIR}/kapitan.include + +KUBECTL_CONTEXT="${TARGET}" +KUBECTL_SCRIPT="${SCRIPT_RELATIVE_DIR}/kubectl" +KUBECTL_COMMAND="${KUBECTL_BINARY} --context ${KUBECTL_CONTEXT}" + +function in_docker() { + grep -sq 'docker\|lxc' /proc/1/cgroup +} + +function check_installed() { + CMD=$1 + if ! $(which ${CMD} > /dev/null); then + error "${CMD} not installed. Exiting..." + fi +} + +# Only GNU xargs supports --no-run-if-empty +XARGS="xargs --no-run-if-empty" +if ! 
echo | $XARGS 2>/dev/null; then + # Looks like we have BSD xargs, use -x instead + XARGS="xargs" +fi + + +SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)" +trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT diff --git a/system/templates/scripts/includes/bash.include-test.sh b/system/templates/scripts/includes/bash.include-test.sh new file mode 100755 index 00000000..d1de40b2 --- /dev/null +++ b/system/templates/scripts/includes/bash.include-test.sh @@ -0,0 +1,36 @@ +source $(dirname ${BASH_SOURCE[0]})/bash.include +set -o nounset +o pipefail +o noclobber +o errexit + +{% set p = inventory.parameters %} + +testTargetName() { + assertEquals ${TARGET_NAME} "{{p.target_name}}" +} + +testTargetPath() { + assertEquals ${TARGET_PATH} "{{p.target_path}}" +} + +testKapitanFound() { + assertTrue "kapitan found at ${KAPITAN_COMMAND}" "[ -r ${KAPITAN_COMMAND} ]" +} + +testKapitanBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD} ]" +} + +testTargetBaseDir() { + assertTrue "[ -r ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/compiled/${TARGET_PATH} ]" +} + +# TODO(ademaria) understand why this doesn' +# testCreateRef() { +# NAME=$(echo $RANDOM | md5sum | head -c 20) +# EXPECTED_REF=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/refs/targets/${TARGET_PATH}/${NAME} +# echo "TEST" | set_reference_name ${NAME} +# assertTrue "[ -r ${EXPECTED_REF} ]" +# } + + +# Load shUnit2. +. ${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/scripts/shunit2 diff --git a/system/templates/scripts/includes/gcloud.include b/system/templates/scripts/includes/gcloud.include new file mode 100644 index 00000000..8a37b70b --- /dev/null +++ b/system/templates/scripts/includes/gcloud.include @@ -0,0 +1,41 @@ +GCLOUD_COMMAND=${SCRIPT_RELATIVE_DIR}/gcloud + +function get_project_number() { + PROJECT_NUMBER=${1:-${TARGET_NAME}} + ${GCLOUD_COMMAND} projects list --filter="${PROJECT_NUMBER}" --format='value[terminator=""](PROJECT_NUMBER)' 2>/dev/null +} + +function get_all_projects() { + PROJECT_FILTER=${1:-"kapicorp-*"} + ${GCLOUD_COMMAND} projects list --format='value[terminator=" "](project_id)' --filter="project_id:${PROJECT_FILTER}" 2>/dev/null +} + +function get_clusters_for_project() { + GCP_PROJECT_ID=${1:-${GCP_PROJECT_ID}} + ${GCLOUD_COMMAND} --format='value[terminator=" ",separator=":"](name, zone)' container clusters list --project="${GCP_PROJECT_ID}" 2>/dev/null +} + +function get_cluster_config() { + ${GCLOUD_COMMAND} --format=yaml container clusters describe "$@" 2>/dev/null +} + +# if running in a tty +TTY_FLAG="" + +if [ -t 0 ]; then TTY_FLAG="-t"; fi + +GCLOUD_IMAGE="google/cloud-sdk:latest" +GCLOUD_DOCKER_ROOT=/src +GCLOUD_CONTAINER="docker \ + run --rm -i -u $UID \ + ${TTY_FLAG} \ + --network host \ + -w ${GCLOUD_DOCKER_ROOT} \ + -v ${KAPITAN_ABSOLUTE_BASEDIR}:${GCLOUD_DOCKER_ROOT}:delegated \ + -v $HOME/.config/gcloud:/.config/gcloud:delegated \ + -v $HOME/.kube:/.kube:delegated \ + -v $HOME/.docker:/.docker:delegated \ + ${GCLOUD_IMAGE}" + +GCLOUD_BINARY="${GCLOUD_CONTAINER} gcloud" +KUBECTL_BINARY="${GCLOUD_CONTAINER} kubectl" diff --git a/system/templates/scripts/includes/kapitan.include b/system/templates/scripts/includes/kapitan.include new file mode 100644 index 00000000..fda9ab55 --- /dev/null +++ b/system/templates/scripts/includes/kapitan.include @@ -0,0 +1,21 @@ + +KAPITAN_COMMAND=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/kapitan + +function set_reference() { + REFERENCE=${1? 
please pass full reference name} + shift + + ${KAPITAN_COMMAND} refs -f - -t ${TARGET} --write "${REFERENCE}" "$@" +} + +function set_reference_name() { + NAME=${1? please pass reference name} + shift + REFERENCE="plain:targets/${TARGET_PATH}/${NAME}" + set_reference ${REFERENCE} "$@" +} + +function reveal_reference_tag() { + REFERENCE_TAG=${1? please reference tag } + ${KAPITAN_COMMAND} refs --reveal --tag ${REFERENCE_TAG} -t ${TARGET} +} diff --git a/system/templates/scripts/kapitan/set_reference b/system/templates/scripts/kapitan/set_reference new file mode 100755 index 00000000..bba07c8f --- /dev/null +++ b/system/templates/scripts/kapitan/set_reference @@ -0,0 +1,10 @@ +#!/bin/bash +# generated with Kapitan +source $(dirname ${BASH_SOURCE[0]})/bash.include + +{% set p = inventory.parameters %} + +REFERENCE=${1? "Pass a reference as first argument: e.g. gkms:targets/{{p.target_path}}/reference_name"} +shift + +set_reference ${REFERENCE} "$@" diff --git a/templates/scripts/kubernetes/apply.sh b/system/templates/scripts/kubernetes/apply similarity index 64% rename from templates/scripts/kubernetes/apply.sh rename to system/templates/scripts/kubernetes/apply index bd8bbf9a..b2e15abc 100755 --- a/templates/scripts/kubernetes/apply.sh +++ b/system/templates/scripts/kubernetes/apply @@ -1,34 +1,24 @@ #!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include {% set p = inventory.parameters %} -DIR=$(dirname ${BASH_SOURCE[0]}) -ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)/ -KAPITAN="${ROOT}/kapitan" FILE=${1:-} -# Only GNU xargs supports --no-run-if-empty -XARGS="xargs --no-run-if-empty" -if ! echo | $XARGS 2>/dev/null; then - # Looks like we have BSD xargs, use -x instead - XARGS="xargs" -fi - -## if tesoro is enabled, no need to reveal {% if p.use_tesoro | default(false)%} +## if tesoro is enabled, no need to reveal apply () { FILEPATH=${1?} - ${DIR}/kubectl.sh apply --recursive -f "${FILEPATH}" + ${KUBECTL_SCRIPT} apply --recursive -f "${FILEPATH}" } {% else %} apply () { FILEPATH=${1?} - ${KAPITAN} refs --reveal -f "${FILEPATH}" | ${DIR}/kubectl.sh apply -f - + ${KAPITAN_COMMAND} refs --reveal -f "${FILEPATH}" | ${KUBECTL_SCRIPT} apply -f - } {% endif %} - - - if [[ ! 
-z $FILE ]] then # Apply files passed at the command line @@ -46,7 +36,7 @@ else fi # Apply files in specific order - for SECTION in pre-deploy manifests + for SECTION in manifests do echo "## run kubectl apply for ${SECTION}" DEPLOY_PATH=${DIR}/../${SECTION} @@ -56,4 +46,3 @@ else fi done fi - diff --git a/system/templates/scripts/kubernetes/delete_completed b/system/templates/scripts/kubernetes/delete_completed new file mode 100755 index 00000000..b026351a --- /dev/null +++ b/system/templates/scripts/kubernetes/delete_completed @@ -0,0 +1,6 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include + +${KUBECTL_COMMAND} delete pod --field-selector=status.phase==Failed diff --git a/system/templates/scripts/kubernetes/kubectl b/system/templates/scripts/kubernetes/kubectl new file mode 100755 index 00000000..1a20ab1b --- /dev/null +++ b/system/templates/scripts/kubernetes/kubectl @@ -0,0 +1,13 @@ +#!/bin/bash +# generated with Kapitan +{% set p = inventory.parameters %} +source $(dirname ${BASH_SOURCE[0]})/bash.include + + + +if [[ -p /dev/stdin ]] +then + cat | ${KUBECTL_COMMAND} "$@" +else + ${KUBECTL_COMMAND} "$@" +fi diff --git a/system/templates/scripts/kubernetes/setup_cluster b/system/templates/scripts/kubernetes/setup_cluster new file mode 100755 index 00000000..42a1c955 --- /dev/null +++ b/system/templates/scripts/kubernetes/setup_cluster @@ -0,0 +1,29 @@ +#!/bin/bash +# generated with Kapitan + +source $(dirname ${BASH_SOURCE[0]})/bash.include +setup_kubectl() { + ${DIR}/setup_cluster >/dev/null + ${DIR}/setup_context >/dev/null +} + +{% set p = inventory.parameters %} + +{% set cluster = p.cluster %} +{% if cluster.type == "gke" %} +CLUSTER_INFO=$(${KUBECTL_COMMAND} cluster-info || (setup_kubectl && ${KUBECTL_COMMAND} cluster-info)) + +CLUSTER_PUBLIC_IP=$(echo ${CLUSTER_INFO} | egrep -oE "\b([0-9]{1,3}\.){3}[0-9]{1,3}\b" | head -n 1 | iconv -f utf-8 -t ascii//translit) +INVENTORY_CLUSTER_PUBLIC_IP="{{p.cluster.public_endpoint|default('UNKNOWN')}}" + +# Check if the cluster public IP has changed +if [[ "${CLUSTER_PUBLIC_IP}" != "${INVENTORY_CLUSTER_PUBLIC_IP}" ]] +then + setup_kubectl +fi +${GCLOUD_COMMAND} container clusters get-credentials {{cluster.name}} --zone {{cluster.zone}} --project {{cluster.gcp_project_id}} +{% elif cluster.type == "kind" %} +KIND="kind" +$KIND create cluster -q --name {{cluster.name}} || echo "Kind cluster {{cluster.name}} already exists!" 
+$KIND export kubeconfig
+{% endif %}
diff --git a/system/templates/scripts/kubernetes/setup_context b/system/templates/scripts/kubernetes/setup_context
new file mode 100755
index 00000000..2305d727
--- /dev/null
+++ b/system/templates/scripts/kubernetes/setup_context
@@ -0,0 +1,9 @@
+#!/bin/bash
+# generated with Kapitan
+
+source $(dirname ${BASH_SOURCE[0]})/bash.include
+
+{% set p = inventory.parameters %}
+{% set cluster = p.cluster %}
+
+${KUBECTL_COMMAND} config set-context ${KUBECTL_CONTEXT} --cluster {{cluster.id}} --user {{cluster.user}} --namespace {{p.namespace}}
diff --git a/system/templates/scripts/postgres/postgres.include b/system/templates/scripts/postgres/postgres.include
new file mode 100644
index 00000000..56a393b4
--- /dev/null
+++ b/system/templates/scripts/postgres/postgres.include
@@ -0,0 +1,60 @@
+#!/bin/bash -e
+{% set params = input_params %}
+
+RUN_ARGS=""
+POSTGRES_DOCKER_IMAGE="postgres:14"
+SQL_PROXY_IMAGE="{{params.sqlproxy_image}}"
+POSTGRESQL_INSTANCE={{params.instance_name}}
+
+DOCKER_NETWORK=${POSTGRESQL_INSTANCE}
+SQL_PROXY_DOCKER_NAME={{params.docker_name}}
+PGPORT={{params.port}}
+CREDENTIALS=$HOME/.config/gcloud/application_default_credentials.json
+
+SQL_INSTANCE_ACTUAL_INSTANCE=$(echo ${POSTGRESQL_INSTANCE} | cut -d: -f3)
+SQL_INSTANCE_ACTUAL_PROJECT=$(echo ${POSTGRESQL_INSTANCE} | cut -d: -f1)
+SQL_PROXY_COMMAND="docker run -d --name=${SQL_PROXY_DOCKER_NAME} -u $(id -u ${USER}):$(id -g ${USER}) --rm --net=${DOCKER_NETWORK} -v ${CREDENTIALS}:/credentials.json -e GOOGLE_APPLICATION_CREDENTIALS=/credentials.json ${SQL_PROXY_IMAGE} /cloud_sql_proxy"
+
+psql_command() {
+    POSTGRES_UTILS="docker run --rm --net=${DOCKER_NETWORK} -i ${RUN_ARGS} ${POSTGRES_DOCKER_IMAGE}"
+    ${POSTGRES_UTILS} psql "$@"
+}
+
+function is_container_running {
+    local name="$1"
+    [[ "$(docker container ls --quiet --filter "name=${name}")" != "" ]]
+}
+
+# Kill the docker container on exit
+function cleanup {
+    if is_container_running "${SQL_PROXY_DOCKER_NAME}"; then
+        docker kill "${SQL_PROXY_DOCKER_NAME}" >> /dev/null
+    fi
+}
+
+
+
+pull_images() {
+    docker pull ${POSTGRES_DOCKER_IMAGE} >/dev/null &
+    docker pull ${SQL_PROXY_IMAGE} > /dev/null &
+}
+
+setup_docker() {
+    if [[ ! -f ${CREDENTIALS} ]]
+    then
+        echo Credentials not found. Please run:
+        echo gcloud auth application-default login
+        exit 1
+    fi
+
+    pull_images
+
+    # Creates ad-hoc network to avoid conflicts with other instances.
+    docker network create --driver=bridge --opt=com.docker.network.bridge.enable_ip_masquerade=true ${DOCKER_NETWORK} 2> /dev/null \
+        || true
+
+
+    # Setup cloud_sql_proxy
+    echo "Connecting to ${POSTGRESQL_INSTANCE} on project ${SQL_INSTANCE_ACTUAL_PROJECT} using docker/cloud_sql_proxy"
+    $SQL_PROXY_COMMAND -enable_iam_login -instances=${POSTGRESQL_INSTANCE}=tcp:0.0.0.0:${PGPORT} > /dev/null || true
+}
diff --git a/system/templates/scripts/postgres/psql b/system/templates/scripts/postgres/psql
new file mode 100755
index 00000000..99156471
--- /dev/null
+++ b/system/templates/scripts/postgres/psql
@@ -0,0 +1,23 @@
+#!/bin/bash
+# generated with Kapitan
+
+source $(dirname ${BASH_SOURCE[0]})/bash.include
+source $(dirname ${BASH_SOURCE[0]})/postgres.include
+
+trap cleanup EXIT SIGINT SIGTERM
+
+RED='\033[0;31m'
+NC='\033[0m' # No Color
+
+PGDATABASE=${1:-postgres}
+PGUSER=${USER}@google.com
+PGOPTIONS="--search_path=${PGDATABASE},public"
+
+
+setup_docker
+
+(umask 077 ; touch $HOME/.psql_history)
+
+
+export RUN_ARGS="-u $(id -u ${USER}):$(id -g ${USER}) -v $HOME/.psql_history:/tmp/psql_history -t -e PSQL_HISTORY=/tmp/psql_history -e PGOPTIONS=${PGOPTIONS} -e PGDATABASE=${PGDATABASE}"
+psql_command -h ${SQL_PROXY_DOCKER_NAME} -p ${PGPORT} -U ${PGUSER} "$@"
diff --git a/system/templates/scripts/terraform/terraform b/system/templates/scripts/terraform/terraform
new file mode 100755
index 00000000..1d3ded3d
--- /dev/null
+++ b/system/templates/scripts/terraform/terraform
@@ -0,0 +1,46 @@
+#!/bin/bash
+# generated with Kapitan
+
+source $(dirname ${BASH_SOURCE[0]})/bash.include
+
+########################################################################################
+# Check required binaries are installed
+
+error(){
+    echo "${@}"
+}
+
+DOCKER_ROOT=/src
+TF_DIR=${TARGET_RELATIVE_PATH_FROM_KAPITAN_BASEDIR}/terraform
+TF_DATA_DIR="${DOCKER_ROOT}/.TF_DATA_DIR/${TARGET_PATH}"
+OUTPUT_DIR="output/${TARGET_PATH}"
+LOCK_FILE=${TF_DIR}/.terraform.lock.hcl
+STATE_DIR=${KAPITAN_BASEDIR_RELATIVE_PATH_FROM_PWD}/system/resources/state/${TARGET_PATH}
+
+DEBUG=${DEBUG:-0}
+TERRAFORM_IMAGE=hashicorp/terraform:{{inventory.parameters.terraform_version}}
+
+# if running in a tty
+TTY_FLAG=""
+
+if [ -t 0 ]; then TTY_FLAG="-t"; fi
+
+TERRAFORM_BINARY="docker \
+    run --rm -i -u $UID \
+    ${TTY_FLAG} \
+    --network host \
+    -e TF_DATA_DIR=${TF_DATA_DIR} \
+    -e TF_LOG \
+    -w ${DOCKER_ROOT}/${TF_DIR} \
+    -v ${KAPITAN_ABSOLUTE_BASEDIR}:${DOCKER_ROOT}:delegated \
+    -v $HOME/.config/gcloud:/.config/gcloud:delegated \
+    ${TERRAFORM_IMAGE}"
+
+${TERRAFORM_BINARY} "$@"
+
+
+if [[ -f ${LOCK_FILE} ]]
+then
+    mkdir -p ${STATE_DIR}
+    cp ${LOCK_FILE} ${STATE_DIR}/.terraform.lock.hcl
+fi
diff --git a/system/templates/scripts/update-tags.sh b/system/templates/scripts/update-tags.sh
new file mode 100644
index 00000000..1884d3ff
--- /dev/null
+++ b/system/templates/scripts/update-tags.sh
@@ -0,0 +1,48 @@
+#!/bin/bash -e
+set -o nounset
+set -o errexit
+set -o pipefail
+
+GITHUB_BOT=kapitanbot
+
+if [[ -z "${GITHUB_ACCESS}" || -z "${TAG}" || -z "${REPO}" ]]; then
+    echo 'One or more variables are undefined, skipping'
+    exit 0
+fi
+
+set_tag() {
+    curl -u ${GITHUB_BOT}:"${GITHUB_ACCESS}" \
+        --write-out "%{http_code}" --silent --output /dev/null \
+        -X POST -H 'Content-Type: application/json' \
+        "https://api.github.com/repos/${REPO}/git/refs" -d"{ \"ref\": \"refs/tags/${TAG}\", \"sha\": \"${COMMIT_SHA}\" }"
+}
+
+delete_tag() {
+    curl -u ${GITHUB_BOT}:"${GITHUB_ACCESS}" \
+        --write-out "%{http_code}" --silent --output /dev/null \
+        -X DELETE "https://api.github.com/repos/${REPO}/git/refs/tags/${TAG}"
+}
+
+if [[ -n "${COMMIT_SHA}" ]]
+then
+    echo "Setting ${TAG} to ${COMMIT_SHA} on ${REPO}"
+    error_code=$(set_tag)
+    if [[ $error_code -eq 422 ]]
+    then
+        echo -n "Tag exists, deleting (204 is OK): "
+        delete_tag
+        echo ""
+        error_code=$(set_tag)
+        echo "Setting Tag (201 is OK): ${error_code}"
+    else
+        echo "Setting Tag (201 is OK): ${error_code}"
+    fi
+else
+    echo "COMMIT_SHA is not set, skipping"
+    exit 0
+fi
+
+if [[ $error_code -ne 201 ]]
+then
+    exit 1
+fi
diff --git a/templates/scripts/generate_sa_secrets.sh b/templates/scripts/generate_sa_secrets.sh
deleted file mode 100755
index 3e73a748..00000000
--- a/templates/scripts/generate_sa_secrets.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash -e
-
-{% set i = inventory.parameters %}
-TARGET={{i.target_name}}
-
-
-DIR=$(dirname ${BASH_SOURCE[0]})
-ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)
-KAPITAN_COMMAND=${ROOT}/kapitan
-
-{% for sa_key in i.service_accounts %}
-{% set sa = i.service_accounts[sa_key] %}
-echo "Generating secret for {{sa.name}}"
-gcloud --project {{i.google_project}} iam service-accounts keys \
-create - \
---iam-account={{sa.name}} | ${KAPITAN_COMMAND} refs --write {{sa.ref}} --base64 -f - -t ${TARGET}
-
-echo "Summary of available keys (please remove obsolete ones after deploying changes)"
-
-gcloud --project {{i.google_project}} iam service-accounts keys \
-list --iam-account={{sa.name}}
-
-#####
-{% endfor %}
diff --git a/templates/scripts/generate_tesoro_certs.sh b/templates/scripts/generate_tesoro_certs.sh
deleted file mode 100755
index 85585849..00000000
--- a/templates/scripts/generate_tesoro_certs.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-{% set p = inventory.parameters %}
-set -e
-
-SCRIPT_TMP_DIR="$(mktemp -d /tmp/kapitan.XXXXXXXXXX)"
-trap "rm -fr '${SCRIPT_TMP_DIR}'" INT TERM EXIT
-
-NAMESPACE={{p.namespace}}
-TARGET_NAME={{p.target_name}}
-
-# Generates new certificates
-CACERT_KEY=rootCA.key
-CACERT_PEM=rootCA.crt
-CERT_KEY=priv.key
-CERT_PEM=cert.pem
-CN=tesoro.${NAMESPACE}.svc
-
-pushd ${SCRIPT_TMP_DIR}
-openssl genrsa -out ${CACERT_KEY} 4096 > /dev/null
-openssl req -x509 -new -nodes -key ${CACERT_KEY} -subj "/CN=CA-${CN}" -sha256 -days 1024 -out ${CACERT_PEM} > /dev/null
-
-
-openssl genrsa -out ${CERT_KEY} 2048 > /dev/null
-openssl req -new -sha256 -key ${CERT_KEY} -subj "/CN=${CN}" -out csr.csr >/dev/null
-openssl x509 -req -in csr.csr -CA ${CACERT_PEM} -extfile <(printf "subjectAltName=DNS:${CN}") -CAkey ${CACERT_KEY} -CAcreateserial -out ${CERT_PEM} -days 500 -sha256 > /dev/null
-openssl x509 -in ${CERT_PEM} -noout
-popd
-kapitan refs -t {{p.target_name}} --write {{p.kapicorp.tesoro.refs.certificate}} --base64 -f ${SCRIPT_TMP_DIR}/${CERT_PEM}
-kapitan refs -t {{p.target_name}} --write {{p.kapicorp.tesoro.refs.private_key}} --base64 -f ${SCRIPT_TMP_DIR}/${CERT_KEY}
-kapitan refs -t {{p.target_name}} --write {{p.kapicorp.tesoro.refs.cacert}} --base64 -f ${SCRIPT_TMP_DIR}/${CACERT_PEM}
diff --git a/templates/scripts/kubernetes/kubectl.sh b/templates/scripts/kubernetes/kubectl.sh
deleted file mode 100755
index fa9eb638..00000000
--- a/templates/scripts/kubernetes/kubectl.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-DIR=$(dirname ${BASH_SOURCE[0]})
-${DIR}/setup_context.sh >/dev/null
-if [[ -p /dev/stdin ]]
-then
-  INPUT=$( cat )
-fi
-{% set i = inventory.parameters %}
-KUBECTL="kubectl --context {{i.target_name}}"
-echo "${INPUT}" | ${KUBECTL} "$@"
diff --git a/templates/scripts/kubernetes/setup_cluster.sh b/templates/scripts/kubernetes/setup_cluster.sh
deleted file mode 100755
index b15a8485..00000000
--- a/templates/scripts/kubernetes/setup_cluster.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -o nounset -o pipefail -o noclobber -o errexit
-
-{% set p = inventory.parameters %}
-
-{% set cluster = p.cluster %}
-{% if cluster.type == "gke" %}
-GCLOUD="gcloud"
-${GCLOUD} container clusters get-credentials {{cluster.name}} --zone {{cluster.zone}} --project {{cluster.google_project}}
-{% elif cluster.type == "kind" %}
-KIND="kind"
-$KIND create cluster -q --name {{cluster.name}} || echo "Kind cluster {{cluster.name}} already exists!"
-$KIND export kubeconfig
-{% endif %}
diff --git a/templates/scripts/kubernetes/setup_context.sh b/templates/scripts/kubernetes/setup_context.sh
deleted file mode 100755
index 67d3b008..00000000
--- a/templates/scripts/kubernetes/setup_context.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -o nounset -o pipefail -o noclobber -o errexit
-KUBECTL="kubectl"
-
-{% set p = inventory.parameters %}
-
-{% set cluster = p.cluster %}
-${KUBECTL} config set-context {{p.target_name}} --cluster {{cluster.id}} --user {{cluster.user}} --namespace {{p.namespace}}
-
diff --git a/templates/scripts/terraform/terraform.sh b/templates/scripts/terraform/terraform.sh
deleted file mode 100755
index 79d1f945..00000000
--- a/templates/scripts/terraform/terraform.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-
-set -e # If a command fails, the whole script exit
-set -u # Treat unset variables as an error, and immediately exit.
-set -o pipefail # this will make your script exit if any command in a pipeline errors
-
-
-DIR=$(realpath $(dirname ${BASH_SOURCE[0]}))
-ROOT=$(cd "${DIR}"; git rev-parse --show-toplevel)
-TARGET_NAME={{inventory.parameters.target_name}}
-GCP_PROJECT={{inventory.parameters.google.project}}
-
-########################################################################################
-# Check required binaries are installed
-
-error(){
-    echo "${@}"
-}
-
-check_installed() {
-    CMD=$1
-    if ! $(which ${CMD} > /dev/null); then
-        error "${CMD} not installed. Exiting..."
-    fi
-}
-
-check_installed terraform
-
-########################################################################################
-# Variables
-export DIR=$(realpath $(dirname ${BASH_SOURCE[0]})) # Folder where this script is
-export TF_DIR=$(realpath ${DIR}/../terraform) # Folder where TF files are
-export TF_DATA_DIR=$(realpath -m ${DIR}/../../../.TF_DATA_DIR/${GCP_PROJECT}) # Folder for TF initialization (preferable outside of compiled)
-export OUTPUT_DIR=$(realpath -m ${DIR}/../../../output/${GCP_PROJECT}) # Folder for storing output files (preferable outside of compiled)
-export TERRAFORM="terraform"
-DEBUG=${DEBUG:-0}
-
-########################################################################################
-# MAIN
-
-if [ $DEBUG -ne 0 ]; then
-    debug
-fi
-
-pushd $TF_DIR &> /dev/null
-
-terraform "$@"
-
-
-if [[ -f $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl ]]
-then
-    mkdir -p $ROOT/resources/state/${TARGET_NAME}/
-    cp $ROOT/compiled/${TARGET_NAME}/terraform/.terraform.lock.hcl \
-       $ROOT/resources/state/${TARGET_NAME}/.terraform.lock.hcl
\ No newline at end of file
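A brief usage sketch of the compiled helpers introduced in this patch, not part of the patch itself. The target name "tutorial", the compiled output path and the reference name below are illustrative assumptions only; substitute your own target.

# hypothetical examples, assuming a target called "tutorial" compiled under compiled/tutorial/scripts
# store a plain-text value read from stdin via the set_reference wrapper
echo -n "changeme" | ./compiled/tutorial/scripts/set_reference plain:targets/tutorial/mysql_password

# reveal refs and apply the target's manifests through the apply wrapper
./compiled/tutorial/scripts/apply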