diff --git a/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml b/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml index 646ea94bd8a08..486df89b0505d 100644 --- a/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml +++ b/ci-operator/config/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15__amd64-nightly.yaml @@ -990,6 +990,18 @@ tests: test: - chain: openshift-e2e-test-qe workflow: rosa-aws-sts-shared-vpc +- as: aws-rosa-sts-hcp-cilium-stage-full-f2 + cron: 4 12 2,4,6,8,10,12,14,16,18,20,22,24,26,28,30 * * + steps: + cluster_profile: aws-sd-qe + env: + CHANNEL_GROUP: nightly + E2E_RUN_TAGS: '@rosa and @hypershift-hosted' + OPENSHIFT_VERSION: "4.15" + TEST_FILTERS: ~ChkUpgrade&;~NonPreRelease&;~Serial&;~Disruptive&;~DisconnectedOnly&;~HyperShiftMGMT&;~MicroShiftOnly&;~NonHyperShiftHOST&;ROSA& + test: + - chain: openshift-e2e-test-hypershift-qe + workflow: rosa-aws-sts-hypershift-cilium - as: aws-rosa-sts-hcp-int-full-f7 cron: 29 23 4,13,20,27 * * steps: diff --git a/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml b/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml index 66e3a2f8a89b8..93c0b8bda36b5 100644 --- a/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml +++ b/ci-operator/jobs/openshift/openshift-tests-private/openshift-openshift-tests-private-release-4.15-periodics.yaml @@ -14828,6 +14828,105 @@ periodics: - name: result-aggregator secret: secretName: result-aggregator +- agent: kubernetes + cluster: build03 + cron: 4 12 2,4,6,8,10,12,14,16,18,20,22,24,26,28,30 * * + decorate: true + decoration_config: + skip_cloning: true + extra_refs: + - base_ref: release-4.15 + org: openshift + repo: openshift-tests-private + labels: + ci-operator.openshift.io/cloud: aws + ci-operator.openshift.io/cloud-cluster-profile: aws-sd-qe + ci-operator.openshift.io/variant: amd64-nightly + ci.openshift.io/generator: prowgen + job-release: "4.15" + pj-rehearse.openshift.io/can-be-rehearsed: "true" + name: periodic-ci-openshift-openshift-tests-private-release-4.15-amd64-nightly-aws-rosa-sts-hcp-cilium-stage-full-f2 + reporter_config: + slack: + channel: '#managed-hypershift-ci-watcher' + job_states_to_report: + - failure + - error + - success + report_template: '{{if eq .Status.State "success"}} :rainbow: Job *{{.Spec.Job}}* + ended with *{{.Status.State}}*. <{{.Status.URL}}|View logs> :rainbow: {{else}} + :volcano: Job *{{.Spec.Job}}* ended with *{{.Status.State}}*. 
<{{.Status.URL}}|View + logs> :volcano: {{end}}' + spec: + containers: + - args: + - --gcs-upload-secret=/secrets/gcs/service-account.json + - --image-import-pull-secret=/etc/pull-secret/.dockerconfigjson + - --lease-server-credentials-file=/etc/boskos/credentials + - --oauth-token-path=/usr/local/github-credentials/oauth + - --report-credentials-file=/etc/report/credentials + - --secret-dir=/secrets/ci-pull-credentials + - --secret-dir=/usr/local/aws-rosa-sts-hcp-cilium-stage-full-f2-cluster-profile + - --target=aws-rosa-sts-hcp-cilium-stage-full-f2 + - --variant=amd64-nightly + command: + - ci-operator + image: ci-operator:latest + imagePullPolicy: Always + name: "" + resources: + requests: + cpu: 10m + volumeMounts: + - mountPath: /etc/boskos + name: boskos + readOnly: true + - mountPath: /secrets/ci-pull-credentials + name: ci-pull-credentials + readOnly: true + - mountPath: /usr/local/aws-rosa-sts-hcp-cilium-stage-full-f2-cluster-profile + name: cluster-profile + - mountPath: /secrets/gcs + name: gcs-credentials + readOnly: true + - mountPath: /usr/local/github-credentials + name: github-credentials-openshift-ci-robot-private-git-cloner + readOnly: true + - mountPath: /secrets/manifest-tool + name: manifest-tool-local-pusher + readOnly: true + - mountPath: /etc/pull-secret + name: pull-secret + readOnly: true + - mountPath: /etc/report + name: result-aggregator + readOnly: true + serviceAccountName: ci-operator + volumes: + - name: boskos + secret: + items: + - key: credentials + path: credentials + secretName: boskos-credentials + - name: ci-pull-credentials + secret: + secretName: ci-pull-credentials + - name: cluster-profile + secret: + secretName: cluster-secrets-aws-sd-qe + - name: github-credentials-openshift-ci-robot-private-git-cloner + secret: + secretName: github-credentials-openshift-ci-robot-private-git-cloner + - name: manifest-tool-local-pusher + secret: + secretName: manifest-tool-local-pusher + - name: pull-secret + secret: + secretName: registry-pull-credentials + - name: result-aggregator + secret: + secretName: result-aggregator - agent: kubernetes cluster: build03 cron: 29 23 4,13,20,27 * * diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/cilium/OWNERS b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/OWNERS new file mode 100644 index 0000000000000..6a395b07ddd1f --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/OWNERS @@ -0,0 +1,6 @@ +approvers: + - LiangquanLi930 + - heliubj18 +reviewers: + - LiangquanLi930 + - heliubj18 diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-commands.sh b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-commands.sh new file mode 100644 index 0000000000000..caf25d276f4de --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-commands.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +set -xeuo pipefail + +export KUBECONFIG="${SHARED_DIR}/kubeconfig" +if [[ -f "${SHARED_DIR}/nested_kubeconfig" ]]; then + export KUBECONFIG="${SHARED_DIR}/nested_kubeconfig" +fi + +cilium_ns=$(oc get ns cilium --ignore-not-found) +if [[ -z "$cilium_ns" ]]; then + oc create ns cilium +fi + +oc label ns cilium security.openshift.io/scc.podSecurityLabelSync=false pod-security.kubernetes.io/enforce=privileged pod-security.kubernetes.io/audit=privileged pod-security.kubernetes.io/warn=privileged --overwrite + +# apply isovalent 
cilium 1.14.5 CNI +version="1.14.5" +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-03-cilium-ciliumconfigs-crd.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00000-cilium-namespace.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00001-cilium-olm-serviceaccount.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00002-cilium-olm-deployment.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00003-cilium-olm-service.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00004-cilium-olm-leader-election-role.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00005-cilium-olm-role.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00006-leader-election-rolebinding.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00007-cilium-olm-rolebinding.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00008-cilium-cilium-olm-clusterrole.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00009-cilium-cilium-clusterrole.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00010-cilium-cilium-olm-clusterrolebinding.yaml +oc apply -f https://raw.githubusercontent.com/isovalent/olm-for-cilium/main/manifests/cilium.v${version}/cluster-network-06-cilium-00011-cilium-cilium-clusterrolebinding.yaml + + +PODCIDR=$(oc get network cluster -o jsonpath='{.spec.clusterNetwork[0].cidr}') +HOSTPREFIX=$(oc get network cluster -o jsonpath='{.spec.clusterNetwork[0].hostPrefix}') +echo 'apiVersion: cilium.io/v1alpha1 + kind: CiliumConfig + metadata: + name: cilium + namespace: cilium + spec: + debug: + enabled: true + k8s: + requireIPv4PodCIDR: true + logSystemLoad: true + bpf: + preallocateMaps: true + etcd: + leaseTTL: 30s + ipv4: + enabled: true + ipv6: + enabled: false + identityChangeGracePeriod: 0s + ipam: + mode: "cluster-pool" + operator: + clusterPoolIPv4PodCIDRList: + - "${PODCIDR}" + clusterPoolIPv4MaskSize: "${HOSTPREFIX}" + nativeRoutingCIDR: "${PODCIDR}" + endpointRoutes: {enabled: true} + clusterHealthPort: 9940 + tunnelPort: 4789 + cni: + binPath: "/var/lib/cni/bin" + confPath: "/var/run/multus/cni/net.d" + chainingMode: portmap + prometheus: + serviceMonitor: {enabled: false} + hubble: + tls: {enabled: false} + sessionAffinity: true +' | envsubst > /tmp/ciliumconfig.json + +cat /tmp/ciliumconfig.json +oc apply -f /tmp/ciliumconfig.json +oc wait --for=condition=Ready pod -n cilium --all --timeout=5m \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.metadata.json 
b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.metadata.json new file mode 100644 index 0000000000000..d87168bd67edd --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.metadata.json @@ -0,0 +1,13 @@ +{ + "path": "cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.yaml", + "owners": { + "approvers": [ + "LiangquanLi930", + "heliubj18" + ], + "reviewers": [ + "LiangquanLi930", + "heliubj18" + ] + } +} \ No newline at end of file diff --git a/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.yaml b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.yaml new file mode 100644 index 0000000000000..2087ab8dcab0d --- /dev/null +++ b/ci-operator/step-registry/cucushift/hypershift-extended/cilium/cucushift-hypershift-extended-cilium-ref.yaml @@ -0,0 +1,15 @@ +ref: + as: cucushift-hypershift-extended-cilium + from_image: + namespace: ocp + name: "4.12" + tag: upi-installer + grace_period: 5m + cli: latest + commands: cucushift-hypershift-extended-cilium-commands.sh + resources: + requests: + cpu: 100m + memory: 100Mi + documentation: |- + install cilium CNI for the hosted cluster. In this case, the HostedCluster.spec.networking.networkType should be Other diff --git a/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/OWNERS b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/OWNERS new file mode 100644 index 0000000000000..b9f78f1655978 --- /dev/null +++ b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/OWNERS @@ -0,0 +1,14 @@ +reviewers: +- yasun1 +- xueli181114 +- yuwang-RH +- tzhou5 +- yingzhanredhat +- yufchang +- radtriste +approvers: +- yasun1 +- xueli181114 +- yuwang-RH +- yufchang +- radtriste diff --git a/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-commands.sh b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-commands.sh new file mode 100755 index 0000000000000..de2c6dcd0e7c9 --- /dev/null +++ b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-commands.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -o nounset +set -o errexit +set -o pipefail + +trap 'CHILDREN=$(jobs -p); if test -n "${CHILDREN}"; then kill ${CHILDREN} && wait; fi' TERM + +# Obtain aws credentials +AWSCRED="${CLUSTER_PROFILE_DIR}/.awscred" +if [[ -f "${AWSCRED}" ]]; then + AWS_ACCOUNT_ID=$(cat "${AWSCRED}" | grep aws_account_id | tr -d ' ' | cut -d '=' -f 2) + AWS_ACCESS_KEY_ID=$(cat "${AWSCRED}" | grep aws_access_key_id | tr -d ' ' | cut -d '=' -f 2) + AWS_SECRET_ACCESS_KEY=$(cat "${AWSCRED}" | grep aws_secret_access_key | tr -d ' ' | cut -d '=' -f 2) +else + echo "Did not find compatible cloud provider cluster_profile" + exit 1 +fi + +# Log in +OCM_VERSION=$(ocm version) +OCM_TOKEN=$(cat "${CLUSTER_PROFILE_DIR}/ocm-token") +echo "Logging into ${OCM_LOGIN_ENV} with offline token using ocm cli ${OCM_VERSION}" +ocm login --url "${OCM_LOGIN_ENV}" --token "${OCM_TOKEN}" + +CLUSTER_ID=$(cat "${SHARED_DIR}/cluster-id") +ocm get /api/clusters_mgmt/v1/clusters/${CLUSTER_ID}/credentials | jq -r .kubeconfig > "${SHARED_DIR}/kubeconfig" \ No newline at end of file diff --git 
a/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.metadata.json b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.metadata.json
new file mode 100644
index 0000000000000..f5bbe4acca69a
--- /dev/null
+++ b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.metadata.json
@@ -0,0 +1,23 @@
+{
+  "path": "osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.yaml",
+  "owners": {
+    "approvers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "yufchang",
+      "radtriste",
+      "heliubj18"
+    ],
+    "reviewers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "tzhou5",
+      "yingzhanredhat",
+      "yufchang",
+      "radtriste",
+      "heliubj18"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.yaml b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.yaml
new file mode 100644
index 0000000000000..26fe25ebc1a91
--- /dev/null
+++ b/ci-operator/step-registry/osd-ccs/cluster/provision/admin-kubeconfig/osd-ccs-cluster-provision-admin-kubeconfig-ref.yaml
@@ -0,0 +1,16 @@
+ref:
+  as: osd-ccs-cluster-provision-admin-kubeconfig
+  from: cli-ocm
+  grace_period: 10m
+  commands: osd-ccs-cluster-provision-admin-kubeconfig-commands.sh
+  resources:
+    requests:
+      cpu: 100m
+      memory: 300Mi
+  timeout: 2h0m0s
+  env:
+  - name: OCM_LOGIN_ENV
+    default: "staging"
+    documentation: The environment for ocm login. The supported values are [production, staging].
+  documentation: |-
+    Using ocm cli to retrieve the admin kubeconfig of the provisioned OSD CCS cluster and save it to ${SHARED_DIR}/kubeconfig. The cluster profile should include the offline token ocm-token to log in.
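A note on how this new ref is consumed (not part of the diff): it writes the hosted cluster's admin kubeconfig to ${SHARED_DIR}/kubeconfig, which the cucushift-hypershift-extended-cilium step above then exports (preferring nested_kubeconfig when that file exists). A minimal sketch of a follow-on step sanity-checking that kubeconfig, assuming only that oc is available in the step image:

#!/bin/bash
set -euo pipefail

# Kubeconfig written by osd-ccs-cluster-provision-admin-kubeconfig.
export KUBECONFIG="${SHARED_DIR}/kubeconfig"

# Illustrative checks only: confirm the API is reachable and that the
# credentials retrieved from OCM actually authenticate against the cluster.
oc whoami
oc get clusterversion
oc get nodes -o wide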
diff --git a/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/OWNERS b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/OWNERS
new file mode 100644
index 0000000000000..02aa413e220ff
--- /dev/null
+++ b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/OWNERS
@@ -0,0 +1,20 @@
+reviewers:
+- yasun1
+- xueli181114
+- yuwang-RH
+- tzhou5
+- yingzhanredhat
+- yufchang
+- jtaleric
+- svetsa-rh
+- radtriste
+- heliubj18
+approvers:
+- yasun1
+- xueli181114
+- yuwang-RH
+- yufchang
+- jtaleric
+- svetsa-rh
+- radtriste
+- heliubj18
\ No newline at end of file
diff --git a/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.metadata.json b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.metadata.json
new file mode 100644
index 0000000000000..035c3b32869d6
--- /dev/null
+++ b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.metadata.json
@@ -0,0 +1,27 @@
+{
+  "path": "rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.yaml",
+  "owners": {
+    "approvers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "yufchang",
+      "jtaleric",
+      "svetsa-rh",
+      "radtriste",
+      "heliubj18"
+    ],
+    "reviewers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "tzhou5",
+      "yingzhanredhat",
+      "yufchang",
+      "jtaleric",
+      "svetsa-rh",
+      "radtriste",
+      "heliubj18"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.yaml b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.yaml
new file mode 100644
index 0000000000000..240fa7e6d1185
--- /dev/null
+++ b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-chain.yaml
@@ -0,0 +1,21 @@
+chain:
+  as: rosa-aws-sts-hypershift-cilium
+  env:
+  - name: HOSTED_CP
+    default: "true"
+  - name: NO_CNI
+    default: "true"
+  steps:
+  - ref: aws-provision-vpc-shared
+  - ref: aws-provision-tags-for-byo-vpc-ocm-pre
+  - chain: rosa-sts-oidc-config-create
+  - ref: rosa-cluster-provision
+  - ref: osd-ccs-cluster-provision-admin-kubeconfig
+  - ref: cucushift-hypershift-extended-cilium
+  - ref: rosa-cluster-notify-error
+  - ref: rosa-cluster-wait-ready-operators
+  - ref: rosa-conf-idp-htpasswd
+  - ref: aws-provision-tags-for-byo-vpc
+  documentation: >-
+    This chain installs a rosa hypershift cluster with cilium CNI. The cluster is configured
+    with the HTPasswd IDP and provides the cluster-admin user 'rosa-admin' to log in to the cluster.
\ No newline at end of file
diff --git a/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.metadata.json b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.metadata.json
new file mode 100644
index 0000000000000..cdf5046491445
--- /dev/null
+++ b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.metadata.json
@@ -0,0 +1,27 @@
+{
+  "path": "rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.yaml",
+  "owners": {
+    "approvers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "yufchang",
+      "jtaleric",
+      "svetsa-rh",
+      "radtriste",
+      "heliubj18"
+    ],
+    "reviewers": [
+      "yasun1",
+      "xueli181114",
+      "yuwang-RH",
+      "tzhou5",
+      "yingzhanredhat",
+      "yufchang",
+      "jtaleric",
+      "svetsa-rh",
+      "radtriste",
+      "heliubj18"
+    ]
+  }
+}
\ No newline at end of file
diff --git a/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.yaml b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.yaml
new file mode 100644
index 0000000000000..aa3c53f9aa180
--- /dev/null
+++ b/ci-operator/step-registry/rosa/aws/sts/hypershift/cilium/rosa-aws-sts-hypershift-cilium-workflow.yaml
@@ -0,0 +1,19 @@
+workflow:
+  as: rosa-aws-sts-hypershift-cilium
+  steps:
+    env:
+      HOSTED_CP: "true"
+      ZONES_COUNT: "1"
+      COMPUTE_MACHINE_TYPE: "m5.2xlarge"
+      REPLICAS: "3"
+      DISABLE_WORKLOAD_MONITORING: "false"
+      NO_CNI: "true"
+    pre:
+    - chain: rosa-aws-sts-hypershift-cilium
+    - ref: osd-ccs-conf-idp-htpasswd-multi-users
+    - ref: rosa-cluster-wait-ready-nodes
+    post:
+    - chain: rosa-aws-sts-hypershift-deprovision
+  documentation: |-
+    This workflow installs a rosa hypershift cluster with cilium CNI. The cluster is set up with the htpasswd IDP, and the login information is stored under $SHARED_DIR/api.login.
+    After testing finishes, the cluster is deprovisioned.
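Once this workflow has provisioned the hosted cluster and installed Cilium, a quick manual sanity check could look like the sketch below (illustrative only, not part of any step). It assumes only oc on the PATH and the admin kubeconfig produced earlier in the chain; the cilium namespace and the CiliumConfig resource are the ones created by the cucushift-hypershift-extended-cilium step above.

#!/bin/bash
set -euo pipefail

export KUBECONFIG="${SHARED_DIR}/kubeconfig"

# Worker nodes should be Ready (rosa-cluster-wait-ready-nodes waits for this).
oc get nodes

# The CiliumConfig applied by cucushift-hypershift-extended-cilium should exist
# and every pod in the cilium namespace should be Ready.
oc get ciliumconfig -n cilium
oc get pods -n cilium -o wide

# The pod CIDR that Cilium's cluster-pool IPAM was configured from.
oc get network cluster -o jsonpath='{.spec.clusterNetwork[0].cidr}'; echo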
diff --git a/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-commands.sh b/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-commands.sh index 825afd6f0edb3..e2872fd9c4387 100755 --- a/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-commands.sh +++ b/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-commands.sh @@ -29,6 +29,7 @@ PRIVATE_SUBNET_ONLY="false" CLUSTER_TIMEOUT=${CLUSTER_TIMEOUT} ENABLE_SHARED_VPC=${ENABLE_SHARED_VPC:-"no"} ADDITIONAL_SECURITY_GROUP=${ADDITIONAL_SECURITY_GROUP:-false} +NO_CNI=${NO_CNI:-false} # Record Cluster Configurations cluster_config_file="${SHARED_DIR}/cluster-config" @@ -416,6 +417,12 @@ if [[ "$DRY_RUN" == "true" ]]; then DRY_RUN_SWITCH="--dry-run" fi +NO_CNI_SWITCH="" +if [[ "$NO_CNI" == "true" ]]; then + NO_CNI_SWITCH="--no-cni" +fi + + # Save the cluster config to ARTIFACT_DIR cat "${SHARED_DIR}/cluster-config" | sed "s/$AWS_ACCOUNT_ID/$AWS_ACCOUNT_ID_MASK/g" > "${ARTIFACT_DIR}/cluster-config" @@ -487,6 +494,7 @@ ${COMPUTER_NODE_ZONES_SWITCH} \ ${COMPUTER_NODE_DISK_SIZE_SWITCH} \ ${SHARED_VPC_SWITCH} \ ${SECURITY_GROUP_ID_SWITCH} \ +${NO_CNI_SWITCH} \ ${DRY_RUN_SWITCH} " | sed -E 's/\s{2,}/ /g' > "${SHARED_DIR}/create_cluster.sh" cat "${SHARED_DIR}/create_cluster.sh" | sed "s/$AWS_ACCOUNT_ID/$AWS_ACCOUNT_ID_MASK/g" > "${ARTIFACT_DIR}/create_cluster.sh" @@ -527,6 +535,7 @@ rosa create cluster -y \ ${COMPUTER_NODE_DISK_SIZE_SWITCH} \ ${SHARED_VPC_SWITCH} \ ${SECURITY_GROUP_ID_SWITCH} \ + ${NO_CNI_SWITCH} \ ${DRY_RUN_SWITCH} \ | sed "s/$AWS_ACCOUNT_ID/$AWS_ACCOUNT_ID_MASK/g" > "${CLUSTER_INFO}" diff --git a/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-ref.yaml b/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-ref.yaml index 0471c2088a0a4..5cecc4c4b27e2 100644 --- a/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-ref.yaml +++ b/ci-operator/step-registry/rosa/cluster/provision/rosa-cluster-provision-ref.yaml @@ -117,6 +117,9 @@ ref: - name: CLUSTER_TIMEOUT default: "7200" documentation: Set to number of seconds for the cluster to timeout if it's not ready. + - name: NO_CNI + default: "false" + documentation: Set to 'true' if you want to deploy a cluster without CNI. documentation: |- Using rosa cli to create a rosa cluster with the provided cluster profile. The cluster profile should include the offline token ocm-token to login and .awscred to init the rosa environment.
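The NO_CNI change follows the same optional-switch pattern the provision script already uses for DRY_RUN and the other toggles: an empty variable that only becomes a CLI flag when the env var is "true", so the generated rosa create cluster command stays valid either way. A condensed sketch of that pattern, with the variable names mirroring the diff and a hypothetical cluster name for illustration:

#!/bin/bash
set -euo pipefail

# Defaults mirror the step's env handling: unset means the feature is off.
NO_CNI=${NO_CNI:-false}
DRY_RUN=${DRY_RUN:-false}

# Each toggle expands to either an empty string or its flag.
NO_CNI_SWITCH=""
if [[ "$NO_CNI" == "true" ]]; then
  NO_CNI_SWITCH="--no-cni"
fi

DRY_RUN_SWITCH=""
if [[ "$DRY_RUN" == "true" ]]; then
  DRY_RUN_SWITCH="--dry-run"
fi

# Unquoted expansion drops empty switches, so disabled toggles simply vanish
# from the rendered command.
echo rosa create cluster -y \
  --cluster-name "my-rosa-hcp" \
  ${NO_CNI_SWITCH} \
  ${DRY_RUN_SWITCH}

In the actual step the rendered command is additionally written to ${SHARED_DIR}/create_cluster.sh and copied to ${ARTIFACT_DIR} with the AWS account ID masked, exactly as the pre-existing switches are handled.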