From 45fcce74a6e202397b3856d85643750b28e2917b Mon Sep 17 00:00:00 2001
From: He Liu
Date: Mon, 6 May 2024 15:43:00 +0800
Subject: [PATCH] enable external OIDC in CAPI

---
 ...s-private-release-4.15__amd64-nightly.yaml |  22 ++-
 ...-tests-private-release-4.15-periodics.yaml |  20 +--
 .../cucushift/hypershift-extended/capi/OWNERS |   2 +
 .../hypershift-extended/capi/clear/OWNERS     |   2 +
 ...hift-extended-capi-clear-ref.metadata.json |   6 +-
 .../capi/deprovision/OWNERS                   |   2 +
 ...xtended-capi-deprovision-ref.metadata.json |   6 +-
 .../hypershift-extended/capi/enable-hc/OWNERS |   8 +
 ...rshift-extended-capi-enable-hc-commands.sh |  58 ++++++++
 ...-extended-capi-enable-hc-ref.metadata.json |  15 ++
 ...ypershift-extended-capi-enable-hc-ref.yaml |  19 +++
 .../capi/health-check/OWNERS                  |   2 +
 ...ift-extended-capi-health-check-commands.sh |  30 +++-
 ...tended-capi-health-check-ref.metadata.json |   6 +-
 .../hypershift-extended/capi/init/OWNERS      |   2 +
 ...-hypershift-extended-capi-init-commands.sh |   9 +-
 ...shift-extended-capi-init-ref.metadata.json |   6 +-
 .../hypershift-extended/capi/provision/OWNERS |   2 +
 ...rshift-extended-capi-provision-commands.sh | 139 +++++++++++++++---
 ...-extended-capi-provision-ref.metadata.json |   6 +-
 ...ypershift-extended-capi-provision-ref.yaml |  19 ++-
 .../capi/vpc-peering/OWNERS                   |   2 +
 ...hift-extended-capi-vpc-peering-commands.sh |  37 +----
 ...xtended-capi-vpc-peering-ref.metadata.json |   6 +-
 ...osa-aws-sts-hcp-capi-private-workflow.yaml |   9 +-
 .../capi/rosa-aws-sts-hcp-capi-workflow.yaml  |  12 +-
 26 files changed, 331 insertions(+), 116 deletions(-)
 create mode 100644 ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/OWNERS
 create mode 100644 ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-commands.sh
 create mode 100644 ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.metadata.json
 create mode 100644 ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.yaml

diff --git a/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml b/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml
index 7d33cf2d1db9..1dd7e9b5d56c 100644
--- a/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml
+++ b/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml
@@ -1036,33 +1036,31 @@ tests:
     test:
     - chain: openshift-e2e-test-hypershift-qe
     workflow: rosa-aws-sts-hcp-cilium
-- as: aws-rosa-hcp-capi-stage-critical-f2
-  cron: 5 2 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29 * *
+- as: aws-rosa-hcp-capi-stage-f7
+  cron: 12 6 7,14,23,30 * *
   steps:
     cluster_profile: aws-sd-qe
     env:
       BASE_DOMAIN: qe.devcluster.openshift.com
-      E2E_RUN_TAGS: '@rosa'
-      OPENSHIFT_VERSION: 4.15.0
+      OPENSHIFT_VERSION: "4.15"
       REGION: us-west-2
       TEST_FILTERS: ~ChkUpgrade&;~NonPreRelease&;~Serial&;~Disruptive&;~DisconnectedOnly&;~HyperShiftMGMT&;~MicroShiftOnly&;~NonHyperShiftHOST&;ROSA&
-      TEST_IMPORTANCE: Critical
     test:
-    - chain: openshift-e2e-test-hypershift-qe
+    - ref: openshift-extended-test
+    - ref: openshift-e2e-test-qe-report
     workflow: rosa-aws-sts-hcp-capi
-- as: aws-rosa-hcp-capi-private-stage-critical-f7
-  cron: 15 14 3,10,17,24 * *
+- as: 
aws-rosa-hcp-capi-private-stage-f7 + cron: 9 23 3,12,19,26 * * steps: cluster_profile: aws-sd-qe env: BASE_DOMAIN: qe.devcluster.openshift.com - E2E_RUN_TAGS: '@rosa' - OPENSHIFT_VERSION: 4.15.0 + OPENSHIFT_VERSION: "4.15" REGION: us-west-2 TEST_FILTERS: ~ChkUpgrade&;~NonPreRelease&;~Serial&;~Disruptive&;~DisconnectedOnly&;~HyperShiftMGMT&;~MicroShiftOnly&;~NonHyperShiftHOST&;ROSA& - TEST_IMPORTANCE: Critical test: - - chain: openshift-e2e-test-hypershift-qe + - ref: openshift-extended-test + - ref: openshift-e2e-test-qe-report workflow: rosa-aws-sts-hcp-capi-private - as: aws-rosa-hcp-int-full-f7 cron: 29 23 4,13,20,27 * * diff --git a/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml b/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml index 56a0c230a01d..79ebd652250d 100644 --- a/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml +++ b/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml @@ -11382,7 +11382,7 @@ periodics: secretName: result-aggregator - agent: kubernetes cluster: build05 - cron: 15 14 3,10,17,24 * * + cron: 9 23 3,12,19,26 * * decorate: true decoration_config: skip_cloning: true @@ -11397,7 +11397,7 @@ periodics: ci.openshift.io/generator: prowgen job-release: "4.15" pj-rehearse.openshift.io/can-be-rehearsed: "true" - name: periodic-ci-openshift-openshift-tests-private-release-4.15-amd64-nightly-aws-rosa-hcp-capi-private-stage-critical-f7 + name: periodic-ci-openshift-openshift-tests-private-release-4.15-amd64-nightly-aws-rosa-hcp-capi-private-stage-f7 reporter_config: slack: channel: '#managed-hypershift-ci-watcher' @@ -11418,8 +11418,8 @@ periodics: - --oauth-token-path=/usr/local/github-credentials/oauth - --report-credentials-file=/etc/report/credentials - --secret-dir=/secrets/ci-pull-credentials - - --secret-dir=/usr/local/aws-rosa-hcp-capi-private-stage-critical-f7-cluster-profile - - --target=aws-rosa-hcp-capi-private-stage-critical-f7 + - --secret-dir=/usr/local/aws-rosa-hcp-capi-private-stage-f7-cluster-profile + - --target=aws-rosa-hcp-capi-private-stage-f7 - --variant=amd64-nightly command: - ci-operator @@ -11436,7 +11436,7 @@ periodics: - mountPath: /secrets/ci-pull-credentials name: ci-pull-credentials readOnly: true - - mountPath: /usr/local/aws-rosa-hcp-capi-private-stage-critical-f7-cluster-profile + - mountPath: /usr/local/aws-rosa-hcp-capi-private-stage-f7-cluster-profile name: cluster-profile - mountPath: /secrets/gcs name: gcs-credentials @@ -11481,7 +11481,7 @@ periodics: secretName: result-aggregator - agent: kubernetes cluster: build05 - cron: 5 2 1,3,5,7,9,11,13,15,17,19,21,23,25,27,29 * * + cron: 12 6 7,14,23,30 * * decorate: true decoration_config: skip_cloning: true @@ -11496,7 +11496,7 @@ periodics: ci.openshift.io/generator: prowgen job-release: "4.15" pj-rehearse.openshift.io/can-be-rehearsed: "true" - name: periodic-ci-openshift-openshift-tests-private-release-4.15-amd64-nightly-aws-rosa-hcp-capi-stage-critical-f2 + name: periodic-ci-openshift-openshift-tests-private-release-4.15-amd64-nightly-aws-rosa-hcp-capi-stage-f7 reporter_config: slack: channel: '#managed-hypershift-ci-watcher' @@ -11517,8 +11517,8 @@ periodics: - --oauth-token-path=/usr/local/github-credentials/oauth - --report-credentials-file=/etc/report/credentials - --secret-dir=/secrets/ci-pull-credentials - - 
--secret-dir=/usr/local/aws-rosa-hcp-capi-stage-critical-f2-cluster-profile - - --target=aws-rosa-hcp-capi-stage-critical-f2 + - --secret-dir=/usr/local/aws-rosa-hcp-capi-stage-f7-cluster-profile + - --target=aws-rosa-hcp-capi-stage-f7 - --variant=amd64-nightly command: - ci-operator @@ -11535,7 +11535,7 @@ periodics: - mountPath: /secrets/ci-pull-credentials name: ci-pull-credentials readOnly: true - - mountPath: /usr/local/aws-rosa-hcp-capi-stage-critical-f2-cluster-profile + - mountPath: /usr/local/aws-rosa-hcp-capi-stage-f7-cluster-profile name: cluster-profile - mountPath: /secrets/gcs name: gcs-credentials diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/cucushift-hypershift-extended-capi-clear-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/cucushift-hypershift-extended-capi-clear-ref.metadata.json index 952aeb4a2ae1..913890da5e9b 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/cucushift-hypershift-extended-capi-clear-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/clear/cucushift-hypershift-extended-capi-clear-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/cucushift-hypershift-extended-capi-deprovision-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/cucushift-hypershift-extended-capi-deprovision-ref.metadata.json index 4945ef1aa88f..c248b41db569 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/cucushift-hypershift-extended-capi-deprovision-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/deprovision/cucushift-hypershift-extended-capi-deprovision-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + 
"heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/OWNERS new file mode 100644 index 000000000000..878bbf66f43a --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/OWNERS @@ -0,0 +1,8 @@ +approvers: + - LiangquanLi930 + - heliubj18 + - fxierh +reviewers: + - LiangquanLi930 + - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-commands.sh new file mode 100644 index 000000000000..1c8843f12287 --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-commands.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +set -euo pipefail + +export KUBECONFIG="${SHARED_DIR}/kubeconfig" +if [[ -f "${SHARED_DIR}/mgmt_kubeconfig" ]]; then + export KUBECONFIG="${SHARED_DIR}/mgmt_kubeconfig" +fi + +# get cluster namesapce +CLUSTER_NAME=$(cat "${SHARED_DIR}/cluster-name") +if [[ -z "${CLUSTER_NAME}" ]] ; then + echo "Error: cluster name not found" + exit 1 +fi + +read -r namespace _ _ <<< "$(oc get cluster -A | grep ${CLUSTER_NAME})" +if [[ -z "${namespace}" ]]; then + echo "Error: capi cluster name not found, ${CLUSTER_NAME}" + exit 1 +fi + +secret_name="${CLUSTER_NAME}-kubeconfig" +if [[ "${ENABLE_EXTERNAL_OIDC}" == "true" ]]; then + secret_name="${CLUSTER_NAME}-bootstrap-kubeconfig" +fi + +max_retries=10 +retry_delay=30 +retries=0 +secret="" +while (( retries < max_retries )); do + secret=$(oc get secret -n ${namespace} ${secret_name} --ignore-not-found -ojsonpath='{.data.value}') + if [[ ! -z "$secret" ]]; then + echo "find the secret ${secret_name} in ${namespace}" + break + fi + + retries=$(( retries + 1 )) + if (( retries < max_retries )); then + echo "Retrying in $retry_delay seconds..." + sleep $retry_delay + else + oc get secret -n ${namespace} + echo "capi kubeconfig not found, exit" + exit 1 + fi +done + +if [[ ! 
-f "${SHARED_DIR}/mgmt_kubeconfig" ]] ; then + mv $KUBECONFIG "${SHARED_DIR}/mgmt_kubeconfig" +fi + +echo "${secret}" | base64 -d > "${SHARED_DIR}/kubeconfig" +echo "hosted cluster kubeconfig is switched" +oc whoami + + diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.metadata.json new file mode 100644 index 000000000000..a60179ae02ac --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.metadata.json @@ -0,0 +1,15 @@ +{ + "path": "cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.yaml", + "owners": { + "approvers": [ + "LiangquanLi930", + "heliubj18", + "fxierh" + ], + "reviewers": [ + "LiangquanLi930", + "heliubj18", + "fxierh" + ] + } +} \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.yaml b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.yaml new file mode 100644 index 000000000000..4a46269d3951 --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/enable-hc/cucushift-hypershift-extended-capi-enable-hc-ref.yaml @@ -0,0 +1,19 @@ +ref: + as: cucushift-hypershift-extended-capi-enable-hc + from_image: + namespace: ocp + name: "4.12" + tag: upi-installer + grace_period: 5m + cli: latest + commands: cucushift-hypershift-extended-capi-enable-hc-commands.sh + resources: + requests: + cpu: 100m + memory: 100Mi + env: + - name: ENABLE_EXTERNAL_OIDC + default: "false" + documentation: Enable external OIDC. + documentation: |- + prepare some resources to install capi and capa controllers diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-commands.sh index dc2821163152..5eefda026a17 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-commands.sh +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-commands.sh @@ -14,11 +14,11 @@ function set_proxy () { } function rosa_login() { - ROSA_VERSION=$(rosa version) + # ROSA_VERSION=$(rosa version) ROSA_TOKEN=$(cat "${CLUSTER_PROFILE_DIR}/ocm-token") if [[ ! 
-z "${ROSA_TOKEN}" ]]; then - echo "Logging into ${OCM_LOGIN_ENV} with offline token using rosa cli ${ROSA_VERSION}" + echo "Logging into ${OCM_LOGIN_ENV} with offline token using rosa cli" rosa login --env "${OCM_LOGIN_ENV}" --token "${ROSA_TOKEN}" ocm login --url "${OCM_LOGIN_ENV}" --token "${ROSA_TOKEN}" else @@ -71,11 +71,12 @@ fi echo "check machinepool, rosamachinepool status" machinepools=$(oc get MachinePools -n "${namespace}" -ojsonpath='{.items[?(@.spec.clusterName=="'"${CLUSTER_NAME}"'")].metadata.name}') for machinepool in ${machinepools} ; do - mp_status=$(oc get MachinePool "${machinepool}" -n "${namespace}" -ojsonpath='{.status.phase}') - if [[ "${mp_status}" != "Running" ]]; then - echo "Error: machinepool ${machinepool} is not in the Running status: ${mp_status}" - exit 1 - fi +# ignore machinepool status, it is still in the ScalingUp status when external oidc +# mp_status=$(oc get MachinePool "${machinepool}" -n "${namespace}" -ojsonpath='{.status.phase}') +# if [[ "${mp_status}" != "Running" ]]; then +# echo "Error: machinepool ${machinepool} is not in the Running status: ${mp_status}" +# exit 1 +# fi rosamachinepool_name=$(oc get MachinePool -n "${namespace}" "${machinepool}" -ojsonpath='{.spec.template.spec.infrastructureRef.name}') is_ready=$(oc get rosamachinepool "${rosamachinepool_name}" -n "${namespace}" -ojsonpath='{.status.ready}') @@ -191,7 +192,20 @@ fi tags=$(jq -r '.spec.additionalTags //""' < "${capi_cp_json_file}") if [[ -n "${tags}" ]]; then echo "check rosacontrolplane additionalTags" - hc_dft_sg=$(cat "${SHARED_DIR}/capi_hcp_default_security_group") + hc_dft_sg="" + if [[ -f "${SHARED_DIR}/capi_hcp_default_security_group" ]] ; then + hc_dft_sg=$(cat "${SHARED_DIR}/capi_hcp_default_security_group") + else + cluster_id=$(cat "${SHARED_DIR}/cluster-id") + hc_vpc_id=$(cat "${SHARED_DIR}/vpc_id") + hc_dft_sg=$(aws ec2 describe-security-groups --region ${REGION} --filters "Name=vpc-id,Values=${hc_vpc_id}" "Name=group-name,Values=${cluster_id}-default-sg" --query 'SecurityGroups[].GroupId' --output text) + fi + + if [[ -z "${hc_dft_sg}" ]] ; then + echo "default security group not found error" + exit 1 + fi + echo "${tags}" | jq -r 'to_entries[] | "\(.key) \(.value)"' | while read key value; do contain_key=$(jq -e '.aws.tags | contains({"'"${key}"'": "'"${value}"'"})' < "${rosa_hcp_info_file}") if [[ "${contain_key}" != "true" ]] ; then diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-ref.metadata.json index 3d283ce386bf..a3d6b76f9bae 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/health-check/cucushift-hypershift-extended-capi-health-check-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/OWNERS +++ 
b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-commands.sh index 3be5e1c6fa8d..93fa9d3fc957 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-commands.sh +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-commands.sh @@ -6,19 +6,16 @@ export AWS_SHARED_CREDENTIALS_FILE="${CLUSTER_PROFILE_DIR}/.awscred" export AWS_REGION=${REGION} export AWS_PAGER="" -#todo debug only - +# debug only # aws s3 cp s3://heli-test/kubeconfig ${SHARED_DIR}/kubeconfig -# cp ${SHARED_DIR}/kubeconfig ${SHARED_DIR}/mgmt_kubeconfig - # download clusterctl and clusterawsadm mkdir -p /tmp/bin export PATH=/tmp/bin:$PATH -curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/clusterctl-linux-amd64 -o /tmp/bin/clusterctl && \ +curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/clusterctl-linux-amd64 -o /tmp/bin/clusterctl && \ chmod +x /tmp/bin/clusterctl -curl -L https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.4.1/clusterawsadm-linux-amd64 -o /tmp/bin/clusterawsadm && \ +curl -L https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.5.0/clusterawsadm_v2.5.0_linux_amd64 -o /tmp/bin/clusterawsadm && \ chmod +x /tmp/bin/clusterawsadm export KUBECONFIG="${SHARED_DIR}/kubeconfig" diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-ref.metadata.json index 2b7f517e37df..de28eb8fb1f2 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/init/cucushift-hypershift-extended-capi-init-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-commands.sh index 01569174645e..6b48944a7047 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-commands.sh +++ 
b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-commands.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -euo pipefail +set -xeuo pipefail function retry() { local check_func=$1 @@ -22,7 +22,7 @@ function retry() { fi done - echo "Failed to run check function $1 after $max_retries attempts." + echo "Failed to run check function $check_func after $max_retries attempts." return 1 } @@ -32,6 +32,7 @@ function is_hcp_started() { if [[ -n "${cluster_res}" ]] ; then return 0 fi + rosa logs install -c ${CLUSTER_NAME} return 1 } @@ -56,11 +57,12 @@ function rosa_login() { ocm_api_url="https://api.integration.openshift.com" fi - ROSA_VERSION=$(rosa version) + # there is a bug for rosa version that would cause panic + # ROSA_VERSION=$(rosa version) ROSA_TOKEN=$(cat "${CLUSTER_PROFILE_DIR}/ocm-token") if [[ ! -z "${ROSA_TOKEN}" ]]; then - echo "Logging into ${OCM_LOGIN_ENV} with offline token using rosa cli ${ROSA_VERSION}" + # echo "Logging into ${OCM_LOGIN_ENV} with offline token using rosa cli ${ROSA_VERSION}" rosa login --env "${OCM_LOGIN_ENV}" --token "${ROSA_TOKEN}" ocm login --url "${OCM_LOGIN_ENV}" --token "${ROSA_TOKEN}" else @@ -72,24 +74,82 @@ function rosa_login() { oc create secret -n default generic rosa-creds-secret --from-literal=ocmToken="${ROSA_TOKEN}" --from-literal=ocmApiUrl="${ocm_api_url}" } -function export_envs() { - # kubeconfig - export KUBECONFIG="${SHARED_DIR}/kubeconfig" - if [[ -f "${SHARED_DIR}/mgmt_kubeconfig" ]]; then - export KUBECONFIG="${SHARED_DIR}/mgmt_kubeconfig" +function find_openshift_version() { + # Get the openshift version + CHANNEL_GROUP=stable + version_cmd="rosa list versions --hosted-cp --channel-group ${CHANNEL_GROUP} -o json" + if [[ ${AVAILABLE_UPGRADE} == "yes" ]] ; then + version_cmd="$version_cmd | jq -r '.[] | select(.available_upgrades!=null) .raw_id'" + else + version_cmd="$version_cmd | jq -r '.[].raw_id'" + fi + versionList=$(eval $version_cmd) + echo -e "Available cluster versions:\n${versionList}" + + if [[ -z "$OPENSHIFT_VERSION" ]]; then + OPENSHIFT_VERSION=$(echo "$versionList" | head -1) + elif [[ $OPENSHIFT_VERSION =~ ^[0-9]+\.[0-9]+$ ]]; then + OPENSHIFT_VERSION=$(echo "$versionList" | grep -E "^${OPENSHIFT_VERSION}" | head -1 || true) + else + # Match the whole line + OPENSHIFT_VERSION=$(echo "$versionList" | grep -x "${OPENSHIFT_VERSION}" || true) + fi + + if [[ -z "$OPENSHIFT_VERSION" ]]; then + echo "Requested cluster version not available!" 
+ exit 1 fi +} - # aws env - export AWS_SHARED_CREDENTIALS_FILE="${CLUSTER_PROFILE_DIR}/.awscred" - export AWS_REGION=${REGION} - export AWS_PAGER="" +function set_eternal_azure_oidc() { + ISSUER_URL="$(cat /var/run/hypershift-ext-oidc-app-cli/issuer-url)" + CLI_CLIENT_ID="$(cat /var/run/hypershift-ext-oidc-app-cli/client-id)" + CONSOLE_CLIENT_ID="$(cat /var/run/hypershift-ext-oidc-app-console/client-id)" + CONSOLE_CLIENT_SECRET="$(cat /var/run/hypershift-ext-oidc-app-console/client-secret)" + CONSOLE_CLIENT_SECRET_NAME=console-secret + + exist=$(oc -n default get secret ${CONSOLE_CLIENT_SECRET_NAME} --ignore-not-found) + if [[ -n "${exist}" ]] ; then + oc delete -n default secret ${CONSOLE_CLIENT_SECRET_NAME} + fi + oc -n default create secret generic ${CONSOLE_CLIENT_SECRET_NAME} --from-literal=clientSecret="${CONSOLE_CLIENT_SECRET}" + + # - componentName: cli + # componentNamespace: openshift-console + # clientID: ${CLI_CLIENT_ID} + export EXTERNAL_AUTH_PROVIDERS=" enableExternalAuthProviders: true + externalAuthProviders: + - name: entra-id + issuer: + issuerURL: ${ISSUER_URL} + audiences: + - ${CONSOLE_CLIENT_ID} + - ${CLI_CLIENT_ID} + oidcClients: + - componentName: console + componentNamespace: openshift-console + clientID: ${CONSOLE_CLIENT_ID} + clientSecret: + name: ${CONSOLE_CLIENT_SECRET_NAME} + claimMappings: + username: + claim: email + prefixPolicy: Prefix + prefix: \"oidc-user-test:\" + groups: + claim: groups + prefix: \"oidc-groups-test:\"" +} +function export_envs() { # export capi env variables prefix="ci-capi-hcp-test-long-name" subfix=$(openssl rand -hex 10) CLUSTER_NAME=${CLUSTER_NAME:-"$prefix-$subfix"} echo "${CLUSTER_NAME}" > "${SHARED_DIR}/cluster-name" export CLUSTER_NAME=${CLUSTER_NAME} + + find_openshift_version export OPENSHIFT_VERSION=${OPENSHIFT_VERSION} AWS_ACCOUNT_ID=$(aws sts get-caller-identity | jq '.Account' | cut -d'"' -f2 | tr -d '\n') @@ -98,10 +158,11 @@ function export_envs() { OIDC_CONFIG_ID=$(cat "${SHARED_DIR}/oidc-config" | jq -r '.id') export OIDC_CONFIG_ID=${OIDC_CONFIG_ID} - ACCOUNT_ROLES_PREFIX=$(cat "${SHARED_DIR}/account-roles-prefix") + CLUSTER_PREFIX=$(head -n 1 "${SHARED_DIR}/cluster-prefix") + ACCOUNT_ROLES_PREFIX=$CLUSTER_PREFIX export ACCOUNT_ROLES_PREFIX=${ACCOUNT_ROLES_PREFIX} - OPERATOR_ROLES_PREFIX=$(cat "${SHARED_DIR}/operator-roles-prefix") + OPERATOR_ROLES_PREFIX=$CLUSTER_PREFIX export OPERATOR_ROLES_PREFIX=${OPERATOR_ROLES_PREFIX} OPERATOR_ROLES_ARNS_FILE="${SHARED_DIR}/operator-roles-arns" @@ -178,11 +239,11 @@ ${ADDITIONAL_SECURITY_GROUPS_YAML}" fi if [[ -n "${CLUSTER_SECTOR}" ]]; then - psList=$(ocm get /api/osd_fleet_mgmt/v1/service_clusters --parameter search="sector is '${CLUSTER_SECTOR}' and region is '${CLOUD_PROVIDER_REGION}' and status in ('ready')" | jq -r '.items[].provision_shard_reference.id') + psList=$(ocm get /api/osd_fleet_mgmt/v1/service_clusters --parameter search="sector is '${CLUSTER_SECTOR}' and region is '${REGION}' and status in ('ready')" | jq -r '.items[].provision_shard_reference.id') if [[ -z "$psList" ]]; then echo "no ready provision shard found, trying to find maintenance status provision shard" # try to find maintenance mode SC, currently osdfm api doesn't support status in ('ready', 'maintenance') query. 
- psList=$(ocm get /api/osd_fleet_mgmt/v1/service_clusters --parameter search="sector is '${CLUSTER_SECTOR}' and region is '${CLOUD_PROVIDER_REGION}' and status in ('maintenance')" | jq -r '.items[].provision_shard_reference.id') + psList=$(ocm get /api/osd_fleet_mgmt/v1/service_clusters --parameter search="sector is '${CLUSTER_SECTOR}' and region is '${REGION}' and status in ('maintenance')" | jq -r '.items[].provision_shard_reference.id') if [[ -z "$psList" ]]; then echo "No available provision shard!" exit 1 @@ -208,15 +269,25 @@ ${ADDITIONAL_SECURITY_GROUPS_YAML}" export NODEPOOL_NAME="nodepool-0" -# # some other optional spec of rosacontrolplane -# export MACHINE_CIDR=${MACHINE_CIDR} -# export NETWORK_TYPE=${NETWORK_TYPE} -# export ENDPOINT_ACCESS=${ENDPOINT_ACCESS} + if [[ "${ENABLE_EXTERNAL_OIDC}" == "true" ]]; then + set_eternal_azure_oidc + fi } # main -export_envs +# kubeconfig +export KUBECONFIG="${SHARED_DIR}/kubeconfig" +if [[ -f "${SHARED_DIR}/mgmt_kubeconfig" ]]; then + export KUBECONFIG="${SHARED_DIR}/mgmt_kubeconfig" +fi + +# aws env +export AWS_SHARED_CREDENTIALS_FILE="${CLUSTER_PROFILE_DIR}/.awscred" +export AWS_REGION=${REGION} +export AWS_PAGER="" + rosa_login +export_envs download_envsubst # create AWSClusterControllerIdentity @@ -260,6 +331,7 @@ kind: ROSAControlPlane metadata: name: "${CLUSTER_NAME}-control-plane" spec: +${EXTERNAL_AUTH_PROVIDERS} rosaClusterName: ${CLUSTER_NAME:0:54} version: "${OPENSHIFT_VERSION}" region: "${AWS_REGION}" @@ -401,6 +473,29 @@ if [[ "${INFRA_ID}" == "null" ]]; then fi echo "${INFRA_ID}" > "${SHARED_DIR}/infra_id" +# now the rosa steps are bound to this config file in the SHARED_DIR, generate it to reuse those steps +cluster_config_file="${SHARED_DIR}/cluster-config" +cat > ${cluster_config_file} << EOF +{ + "name": "${CLUSTER_NAME}", + "sts": "true", + "hypershift": "true", + "region": "${REGION}", + "version": { + "channel_group": "stable", + "raw_id": "${OPENSHIFT_VERSION}", + "major_version": "$(echo ${OPENSHIFT_VERSION} | awk -F. 
'{print $1"."$2}')" + }, + "tags": "${TAGS}", + "disable_scp_checks": "true", + "disable_workload_monitoring": "false", + "etcd_encryption": ${ETCD_ENCRYPTION}, + "enable_customer_managed_key": "false", + "fips": "false", + "private": "${ENDPOINT_ACCESS}" +} +EOF + # do not check worker node here # wait for cluster machinepool ready # retry is_machine_pool_ready "${NODEPOOL_NAME}" diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.metadata.json index 639e4885f465..62828ba0737b 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.yaml b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.yaml index 5973156fccae..d567b0bbfbad 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.yaml +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/provision/cucushift-hypershift-extended-capi-provision-ref.yaml @@ -9,6 +9,13 @@ ref: requests: cpu: 100m memory: 100Mi + credentials: + - mount_path: /var/run/hypershift-ext-oidc-app-cli + name: hypershift-ext-oidc-app-cli + namespace: test-credentials + - mount_path: /var/run/hypershift-ext-oidc-app-console + name: hypershift-ext-oidc-app-console + namespace: test-credentials env: - name: OCM_LOGIN_ENV default: "staging" @@ -22,12 +29,6 @@ ref: - name: AVAILABILITY_ZONES default: "" documentation: The availability zones to use when installing a non-BYOVPC cluster. Format should be a comma-separated list, etc. 'a,b'. - - name: ACCOUNT_ROLES_PREFIX - default: "" - documentation: User-defined prefix for all generated AWS resources. if not specified, will use the namespace name as the prefix. - - name: OPERATOR_ROLES_PREFIX - default: "" - documentation: User-defined prefix for generated AWS operator policies. - name: REGION default: "us-west-2" documentation: "The AWS region of the cluster." @@ -73,6 +74,12 @@ ref: - name: CLUSTER_SECTOR default: "" documentation: Sector groups a set of service clusters for HCP. The supported values are [canary, main]. + - name: ENABLE_EXTERNAL_OIDC + default: "false" + documentation: Enable external OIDC. + - name: AVAILABLE_UPGRADE + default: "no" + documentation: Set to 'yes' to pick up the openshift version that could be upgraded. 
- name: NODE_DRAIN_GRACE_PERIOD default: "" documentation: |- diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/OWNERS index 6a395b07ddd1..878bbf66f43a 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/OWNERS +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/OWNERS @@ -1,6 +1,8 @@ approvers: - LiangquanLi930 - heliubj18 + - fxierh reviewers: - LiangquanLi930 - heliubj18 + - fxierh diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-commands.sh index 068230b1d54b..0979ec1a25da 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-commands.sh +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-commands.sh @@ -16,10 +16,11 @@ function set_proxy () { function retry() { local check_func=$1 shift - local max_retries=20 + local max_retries=30 local retry_delay=60 local retries=0 + echo "retry $check_func" while (( retries < max_retries )); do if $check_func "$@"; then return 0 @@ -47,15 +48,6 @@ function check_vpc_peering_connection() { return 0 } -function check_kubeconfig_secret() { - local res - res=$(oc get secret -A | grep ${CLUSTER_NAME}-kubeconfig) - if [[ -z "$res" ]]; then - return 1 - fi - return 0 -} - set_proxy export AWS_SHARED_CREDENTIALS_FILE="${CLUSTER_PROFILE_DIR}/.awscred" @@ -111,28 +103,9 @@ done # If the hosted cluster has been created, update the default security group to enable access from the management cluster's CAPI controller to the hosted cluster's API server. # in a standard rosa hcp, apiserver port is always 443, we need to expose 443 to the ip range of mgmt cidr. # capi kubeconfig is needed here to check capi resources -if [[ ! 
-f "${SHARED_DIR}/mgmt_kubeconfig" ]]; then - echo "capi mgmt kubeconfig file mgmt_kubeconfig not found error" - exit 1 -fi -export KUBECONFIG="${SHARED_DIR}/mgmt_kubeconfig" -CLUSTER_NAME=$(cat "${SHARED_DIR}/cluster-name") -if [[ -z "${CLUSTER_NAME}" ]] ; then - echo "Error: cluster name not found" - exit 1 -fi - -read -r namespace _ status _ <<< "$(oc get cluster -A --ignore-not-found | grep ${CLUSTER_NAME})" -if [[ -n "${namespace}" ]] ; then - echo "found ${CLUSTER_NAME} in namespace ${namespace}, status is ${status}" - cluster_id=$(cat "${SHARED_DIR}/cluster-id") - dft_security_group_id=$(aws ec2 describe-security-groups --region ${REGION} --filters "Name=vpc-id,Values=${hc_vpc_id}" "Name=group-name,Values=${cluster_id}-default-sg" --query 'SecurityGroups[].GroupId' --output text) - aws ec2 authorize-security-group-ingress --region ${REGION} --group-id ${dft_security_group_id} --protocol tcp --port 443 --cidr ${mgmt_vpc_cidr} - retry check_kubeconfig_secret - - set +x - oc get secret -n ${namespace} ${CLUSTER_NAME}-kubeconfig --ignore-not-found -ojsonpath='{.data.value}' | base64 -d > "${SHARED_DIR}/kubeconfig" -fi +cluster_id=$(cat "${SHARED_DIR}/cluster-id") +dft_security_group_id=$(aws ec2 describe-security-groups --region ${REGION} --filters "Name=vpc-id,Values=${hc_vpc_id}" "Name=group-name,Values=${cluster_id}-default-sg" --query 'SecurityGroups[].GroupId' --output text) +aws ec2 authorize-security-group-ingress --region ${REGION} --group-id ${dft_security_group_id} --protocol tcp --port 443 --cidr ${mgmt_vpc_cidr} echo "${dft_security_group_id}" > ${SHARED_DIR}/capi_hcp_default_security_group echo "vpc-peering config done" diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-ref.metadata.json b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-ref.metadata.json index 2cd72eee1b7a..ba01e68aaefa 100644 --- a/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-ref.metadata.json +++ b/ci-operator/step-registry/cucushift/hypershift-extended/capi/vpc-peering/cucushift-hypershift-extended-capi-vpc-peering-ref.metadata.json @@ -3,11 +3,13 @@ "owners": { "approvers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ], "reviewers": [ "LiangquanLi930", - "heliubj18" + "heliubj18", + "fxierh" ] } } \ No newline at end of file diff --git a/ci-operator/step-registry/rosa/aws/sts/hcp/capi-private/rosa-aws-sts-hcp-capi-private-workflow.yaml b/ci-operator/step-registry/rosa/aws/sts/hcp/capi-private/rosa-aws-sts-hcp-capi-private-workflow.yaml index 47a516670b04..f84b32055d14 100644 --- a/ci-operator/step-registry/rosa/aws/sts/hcp/capi-private/rosa-aws-sts-hcp-capi-private-workflow.yaml +++ b/ci-operator/step-registry/rosa/aws/sts/hcp/capi-private/rosa-aws-sts-hcp-capi-private-workflow.yaml @@ -17,6 +17,7 @@ workflow: DEFAULT_MP_MAX_REPLICAS: 6 DEFAULT_MP_MIN_REPLICAS: 3 NODE_DRAIN_GRACE_PERIOD: "10m" + ENABLE_EXTERNAL_OIDC: "true" pre: - chain: cucushift-installer-rehearse-aws-ipi-ovn-provision - ref: aws-provision-vpc-shared @@ -30,12 +31,14 @@ workflow: - ref: cucushift-hypershift-extended-capi-init - ref: cucushift-hypershift-extended-capi-provision - ref: cucushift-hypershift-extended-capi-vpc-peering + - ref: cucushift-hypershift-extended-capi-enable-hc - ref: cucushift-hypershift-extended-cilium - - ref: rosa-cluster-notify-error + # - ref: 
rosa-cluster-notify-error - ref: rosa-cluster-wait-ready-operators - ref: aws-provision-tags-for-byo-vpc - - ref: osd-ccs-conf-idp-htpasswd-multi-users - ref: rosa-cluster-wait-ready-nodes + - ref: openshift-extended-web-tests-ext-oidc-cli-login + - ref: cucushift-hypershift-extended-external-oidc-grant-user-role - ref: cucushift-hypershift-extended-capi-health-check post: - ref: cucushift-hypershift-extended-capi-deprovision @@ -45,7 +48,7 @@ workflow: - chain: rosa-sts-oidc-config-delete - ref: aws-deprovision-security-group - ref: aws-deprovision-stacks - - chain: ipi-deprovision + - ref: ipi-deprovision-deprovision documentation: |- This workflow installs a rosa private hcp cluster by capi. The cluster is set with htpasswd idp, and the login informations are stored under $SHARED_DIR/api.login. After finish testing, the cluster will be deprovsioned. diff --git a/ci-operator/step-registry/rosa/aws/sts/hcp/capi/rosa-aws-sts-hcp-capi-workflow.yaml b/ci-operator/step-registry/rosa/aws/sts/hcp/capi/rosa-aws-sts-hcp-capi-workflow.yaml index 5a7ea2829259..bfd67562e949 100644 --- a/ci-operator/step-registry/rosa/aws/sts/hcp/capi/rosa-aws-sts-hcp-capi-workflow.yaml +++ b/ci-operator/step-registry/rosa/aws/sts/hcp/capi/rosa-aws-sts-hcp-capi-workflow.yaml @@ -4,6 +4,7 @@ workflow: env: HOSTED_CP: "true" ZONES_COUNT: "1" + ENABLE_EXTERNAL_OIDC: "true" pre: - chain: cucushift-installer-rehearse-aws-ipi-ovn-provision - ref: aws-provision-vpc-shared @@ -11,18 +12,19 @@ workflow: - chain: rosa-sts-oidc-config-create - ref: cucushift-hypershift-extended-capi-init - ref: cucushift-hypershift-extended-capi-provision - - ref: rosa-cluster-notify-error - - ref: rosa-conf-idp-htpasswd + - ref: cucushift-hypershift-extended-capi-enable-hc + # - ref: rosa-cluster-notify-error - ref: rosa-cluster-wait-ready-operators - ref: aws-provision-tags-for-byo-vpc - - ref: osd-ccs-conf-idp-htpasswd-multi-users - ref: rosa-cluster-wait-ready-nodes + - ref: openshift-extended-web-tests-ext-oidc-cli-login + - ref: cucushift-hypershift-extended-external-oidc-grant-user-role + - ref: cucushift-hypershift-extended-capi-health-check post: - ref: cucushift-hypershift-extended-capi-deprovision - ref: cucushift-hypershift-extended-capi-clear - - chain: rosa-sts-oidc-config-delete - ref: aws-deprovision-stacks - - chain: cucushift-installer-rehearse-aws-ipi-deprovision + - ref: ipi-deprovision-deprovision documentation: |- This workflow installs a rosa hcp cluster by capi. The cluster is set with htpasswd idp, and the login informations are stored under $SHARED_DIR/api.login. After finish testing, the cluster will be deprovsioned.
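Note for reviewers: once envsubst runs, the EXTERNAL_AUTH_PROVIDERS variable assembled in the provision step renders into the ROSAControlPlane spec roughly as sketched below. The issuer URL and client IDs here are placeholders; the real values come from the hypershift-ext-oidc-app-cli and hypershift-ext-oidc-app-console credentials mounted into the step, and the remaining fields (rosaClusterName, version, region, and so on) follow as before.

  spec:
    enableExternalAuthProviders: true
    externalAuthProviders:
    - name: entra-id
      issuer:
        issuerURL: https://login.example.com/placeholder-tenant/v2.0   # placeholder
        audiences:
        - placeholder-console-client-id                                # placeholder
        - placeholder-cli-client-id                                    # placeholder
      oidcClients:
      - componentName: console
        componentNamespace: openshift-console
        clientID: placeholder-console-client-id                        # placeholder
        clientSecret:
          name: console-secret
      claimMappings:
        username:
          claim: email
          prefixPolicy: Prefix
          prefix: "oidc-user-test:"
        groups:
          claim: groups
          prefix: "oidc-groups-test:"
    # rosaClusterName, version, region, etc. continue here unchanged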
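Similarly, a minimal sketch of the kubeconfig switch performed by the new cucushift-hypershift-extended-capi-enable-hc step, assuming a logged-in management cluster context and that CLUSTER_NAME and SHARED_DIR are set. The grep/awk namespace lookup is an illustrative stand-in for the read -r used in the script, and without external OIDC the secret is named <cluster>-kubeconfig instead:

  # find the namespace that holds the CAPI Cluster object
  namespace=$(oc get cluster -A | grep "${CLUSTER_NAME}" | awk '{print $1}')

  # with external OIDC enabled, the usable admin kubeconfig is in the bootstrap secret
  secret_name="${CLUSTER_NAME}-bootstrap-kubeconfig"

  # extract it and point subsequent steps at the hosted cluster
  oc get secret -n "${namespace}" "${secret_name}" -ojsonpath='{.data.value}' \
      | base64 -d > "${SHARED_DIR}/kubeconfig"
  KUBECONFIG="${SHARED_DIR}/kubeconfig" oc whoami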