From 63a54338c0f8d7b6be0bb58c407f45eb33dd13c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pablo=20Iranzo=20G=C3=B3mez?=
Date: Fri, 26 Jun 2020 15:43:08 +0200
Subject: [PATCH] Add pre-commit configuration and execute over repo files
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Pablo Iranzo Gómez
---
 .flake8                                   |   5 +
 .gitignore                                |   1 -
 .pre-commit-config.yaml                   |  53 ++++
 .yaspeller.json                           |  82 ++++++
 Dockerfile.test-infra                     |   2 +-
 README.md                                 |  65 ++++-
 create_full_environment.sh                |  22 +-
 discovery-infra/bm_inventory_api.py       |  66 +++--
 discovery-infra/consts.py                 |   1 +
 discovery-infra/delete_nodes.py           |  54 +++-
 discovery-infra/install_cluster.py        |  91 +++++--
 discovery-infra/logger.py                 |  10 +-
 discovery-infra/start_discovery.py        | 313 ++++++++++++++++------
 discovery-infra/update_bm_inventory_cm.py |  24 +-
 discovery-infra/utils.py                  | 163 +++++++----
 discovery-infra/virsh_cleanup.py          |  60 ++++-
 install_env_and_run_full_flow.sh          |   2 +-
 requirements.txt                          |   6 +-
 scripts/assisted_deployment.sh            |  39 ++-
 scripts/deploy_bm_inventory.sh            |   3 +-
 scripts/deploy_ui.sh                      |  10 +-
 scripts/install_environment.sh            | 172 ++++++------
 scripts/install_minikube.sh               |  64 ++---
 scripts/run_minikube.sh                   |  16 +-
 scripts/utils.sh                          |  39 ++-
 skipper.yaml                              |  64 ++---
 terraform_files/terraform.tfvars.json     |  12 +-
 terraform_files/variables-libvirt.tf      |   1 -
 terraform_files/volume/outputs.tf         |   1 -
 29 files changed, 975 insertions(+), 466 deletions(-)
 create mode 100644 .flake8
 create mode 100644 .pre-commit-config.yaml
 create mode 100644 .yaspeller.json

diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000000..90677040f2a
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,5 @@
+[flake8]
+ignore = E203, E266, E501, W503, F403, F401, E402, E722, C901
+max-line-length = 79
+max-complexity = 18
+select = B,C,E,F,W,T4,B9
diff --git a/.gitignore b/.gitignore
index f7373d8b27c..ec17f701472 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,3 @@ bm-inventory
 *__pycache__*
 minikube
 *.log
-
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..84b48c594cf
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,53 @@
+fail_fast: true
+repos:
+  - repo: meta
+    hooks:
+      - id: check-useless-excludes
+  - repo: https://github.com/prettier/prettier
+    rev: 2.0.2
+    hooks:
+      - id: prettier
+        files: \.(css|js|md|markdown|json)
+  - repo: https://github.com/python/black
+    rev: 19.10b0
+    hooks:
+      - id: black
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.5.0
+    hooks:
+      - id: check-added-large-files
+      - id: check-ast
+      - id: check-case-conflict
+      - id: check-executables-have-shebangs
+      - id: check-json
+      - id: check-merge-conflict
+      - id: check-symlinks
+      - id: check-xml
+      - id: check-yaml
+        args: [--unsafe]
+      - id: end-of-file-fixer
+      - id: fix-encoding-pragma
+      - id: forbid-new-submodules
+      - id: requirements-txt-fixer
+      - id: sort-simple-yaml
+      - id: trailing-whitespace
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v2.5.0
+    hooks:
+      - id: flake8
+  - repo: https://github.com/pecigonzalo/pre-commit-shfmt.git
+    rev: 9ee28e3f14556aa88dd5255f2e091d1d2f607bb7
+    hooks:
+      - id: shell-fmt
+        args:
+          - --indent=4
+  - repo: https://github.com/asottile/blacken-docs
+    rev: v1.6.0
+    hooks:
+      - id: blacken-docs
+# - repo: https://github.com/hcodes/yaspeller.git
+#   rev: v6.1.0
+#   hooks:
+#     - id: yaspeller
+#       files: ".md"
+#       types: [markdown]
diff --git a/.yaspeller.json b/.yaspeller.json
new file mode 100644
index 00000000000..82d8d1e3454
--- /dev/null
+++ b/.yaspeller.json
@@ -0,0 +1,82 @@
+{
+  "ignoreUrls": true,
+  "findRepeatWords": true,
+  "maxRequests": 5,
+  "ignoreDigits": true,
+  "lang": "en",
+  "dictionary": [
+    "Ansible",
+    "baremetal",
+    "BareMetalHost",
+    "CI",
+    "CIDR",
+    "ConfigMap",
+    "DCO",
+    "dhcp",
+    "dhcpd",
+    "dnsmasq",
+    "endpoint",
+    "filesystem",
+    "filesystems",
+    "GA",
+    "GitHub",
+    "hostname",
+    "hostnames",
+    "iDRAC",
+    "iLO",
+    "IPI",
+    "IPMI",
+    "KNI",
+    "machineset",
+    "Markdown",
+    "nameserver",
+    "NICs",
+    "OCP",
+    "openshift",
+    "OpenShift",
+    "orchestrator",
+    "pingable",
+    "playbook",
+    "playbooks",
+    "podman",
+    "provisioner",
+    "PRs",
+    "PXE",
+    "RHCOS",
+    "RHEL",
+    "routable",
+    "subzone",
+    "Unreachable",
+    "VIPs",
+    "VLAN",
+    "VM",
+    "YAML",
+    "DPDK",
+    "PTP",
+    "Kubernetes",
+    "virtualization",
+    "NMstate",
+    "Hugepages",
+    "NFV",
+    "BM",
+    "SR-IOV",
+    "CNV",
+    "Netlify",
+    "SCTP",
+    "sexualized",
+    "asciidoc",
+    "FQDN",
+    "MachineConfig",
+    "VMs",
+    "NTP",
+    "QEMU",
+    "kubelet",
+    "ansible",
+    "endfor",
+    "Jekyll",
+    "ipi",
+    "versioned",
+    "unversioned",
+    "devprev"
+  ]
+}
diff --git a/Dockerfile.test-infra b/Dockerfile.test-infra
index 0b38b4fa6d9..3bd4a60ea5b 100644
--- a/Dockerfile.test-infra
+++ b/Dockerfile.test-infra
@@ -17,7 +17,7 @@ VOLUME [ "/var/lib/libvirt/" ]
 
 RUN wget `wget https://www.terraform.io/downloads.html -q -O - | grep -oP "(https://releases.hashicorp.com/terraform/.*linux_amd64\.zip)(?=\")" | head -n 1` && unzip terraform*.zip -d /usr/bin/ && rm -rf terraform*.zip
 
-RUN wget https://dl.google.com/go/go1.13.4.linux-amd64.tar.gz && tar -C /usr/local -xf go1.13.4.linux-amd64.tar.gz && rm -f go1.13.4.linux-amd64.tar.gz 
+RUN wget https://dl.google.com/go/go1.13.4.linux-amd64.tar.gz && tar -C /usr/local -xf go1.13.4.linux-amd64.tar.gz && rm -f go1.13.4.linux-amd64.tar.gz
 ENV PATH /usr/local/go/bin:$PATH
 RUN mkdir -p ~/.terraform.d/plugins
 RUN go get -v -u github.com/dmacvicar/terraform-provider-libvirt && cd ~/.terraform.d/plugins && go build -a -v github.com/dmacvicar/terraform-provider-libvirt && rm -rf ~/go
diff --git a/README.md b/README.md
index 167341dece1..dd74e157379 100644
--- a/README.md
+++ b/README.md
@@ -1,39 +1,49 @@
 # Test-Infra
+
 This project deploys the OpenShift Assisted Installer in Minikube and spawns libvirt VMs that represent bare metal hosts.
 
 # Prerequisites
+
 - CentOS 8 or RHEL 8 host
 - File system that supports d_type
 - Ideally on a bare metal host with at least 64G of RAM.
 - Run as a user with passwordless sudo access or be ready to enter sudo password for prepare phase.
 - Get a valid pull secret (JSON string) from [redhat.com](https://cloud.redhat.com/openshift/install/pull-secret) if you want to test the installation (not needed for testing only the discovery flow). Export it as
+
 ```bash
 export PULL_SECRET=''
 ```
 
 # Instructions
-
 ## Host preparation
+
 On the bare metal host:
+
 ```bash
 dnf install -y git make
 cd /home/test # don't do it on /root, it will break build image mounts and fail to run
 git clone https://github.com/tsorya/test-infra.git
 ```
+
 When using this infra for the first time on a host, run:
+
 ```bash
 make create_full_environment
 ```
+
 This will install required packages, configure libvirt, pull relevant Docker images, and start Minikube.
 
 ## Usage
+
 There are different options to use test-infra, which can be found in the makefile.
 
 ## Full flow cases
+
 The following is a list of stages that will be run:
+
 1. Start Minikube if not started yet
-1. Deploy services for assisted deployment on Minikube
+1. Deploy services for assisted deployment on Minikube
 1. Create cluster in bm-inventory service
 1. Download ISO image
 1. Spawn required number of VMs from downloaded ISO with parameters that can be configured by OS env (check makefile)
@@ -46,86 +56,107 @@ The following is a list of stages that will be run:
 1. Verifying cluster is in state "installed"
 1. Download kubeconfig to build/kubeconfig
-
 **Note**: Please make sure no previous cluster is running before running a new one (it will rewrite its build files).
 
 ### Run full flow with install
+
 To run the full flow, including installation:
+
 ```bash
 make run_full_flow_with_install
 ```
+
 Or to run it together with create_full_environment (requires sudo password):
-````bash
+
+```bash
 make all
-````
+```
+
 ### Run full flow without install
+
 To run the flow without the installation stage:
+
 ```bash
 make run_full_flow
 ```
 
 ### Run only deploy nodes (without pre-deploy of all assisted services)
+
 ```bash
 make deploy_nodes
 or
 make deploy_nodes_with_install
 ```
 
 ### Redeploy nodes
+
 ```bash
 make redeploy_nodes
 or
 make redeploy_nodes_with_install
 ```
 
 ### Redeploy with assisted services
+
 ```bash
 make redeploy_all
 or
 make redeploy_all_with_install
 ```
 
 ## Cleaning
+
 Cleaning test-infra environment.
 
 ### Clean all including minikube
+
 ```bash
 make destroy
 ```
 
 ### Clean nodes only
+
 ```bash
 make destroy_nodes
 ```
 
 ### Delete all virsh resources
+
 Sometimes you may need to delete all libvirt resources.
+
 ```bash
 make delete_all_virsh_resources
 ```
 
 ### Install cluster
+
 Install cluster after nodes were deployed. Can take ClusterId as os env
+
 ```bash
-make install_cluster 
+make install_cluster
 ```
 
 ### Create cluster and download ISO
+
 ```bash
 make download_iso
 ```
 
 ### deploy_bm_inventory and Create cluster and download ISO
+
 ```bash
 make download_iso_for_remote_use
 ```
 
 ### start_minikube and deploy UI and open port forwarding on port 6008, allows connecting to it from a browser
+
 ```bash
 make deploy_ui
 ```
+
 ### Kill all open port forwarding commands, will be part of destroy target
+
 ```bash
 make kill_all_port_forwardings
 ```
-
 ## OS parameters used for configurations
-~~~~
+
+```
 BMI_BRANCH         bm-inventory branch to use, default: master
 IMAGE              path to ISO to spawn VM with, if set vms will be spawned with this iso without creating cluster
 NUM_MASTERS        number of VMs to spawn as masters, default: 3
@@ -149,42 +180,50 @@ INSTALLER_IMAGE:   assisted-installer image to use, will update bm-inventory co
 SERVICE:           bm-inventory image to use
 DEPLOY_TAG:        the tag to be used for all images (bm-inventory, assisted-installer, agent, etc) this will override any other os params
-~~~~
+```
 
 ## Test bm-inventory image
+
 ```bash
 make redeploy_all SERVICE=
-or 
+or
 export PULL_SECRET=''; make redeploy_all_with_install SERVICE=
 ```
 
 ## Test agent image
+
 ```bash
-make redeploy_all AGENT_DOCKER_IMAGE= 
+make redeploy_all AGENT_DOCKER_IMAGE=
 or
 make redeploy_all_with_install AGENT_DOCKER_IMAGE=
 ```
 
 ## Test installer image
+
 ```bash
-make redeploy_all INSTALLER_IMAGE= 
+make redeploy_all INSTALLER_IMAGE=
 or
 export PULL_SECRET=''; make redeploy_all_with_install INSTALLER_IMAGE=
 ```
 
 ## Test installer, bm-inventory and agent images in the same flow
+
 ```bash
 make redeploy_all INSTALLER_IMAGE= AGENT_DOCKER_IMAGE= SERVICE=
-or 
+or
 export PULL_SECRET=''; make redeploy_all_with_install INSTALLER_IMAGE= AGENT_DOCKER_IMAGE= SERVICE=
 ```
+
 # Test infra image
 
 ## Create and push new image: will create new bm-inventory client, build new image and push image
+
 ```bash
 make build_and_push_image IMAGE_NAME= IMAGE_TAG=
 ```
+
 ## Use new image: will pull image from hub, check that image is public, if tag is not latest update skipper YAML
+
 ```bash
 make image_build IMAGE_NAME= IMAGE_TAG=
 ```
diff --git a/create_full_environment.sh b/create_full_environment.sh
index 4f56380df3f..31a59e9cb0e 100755
--- a/create_full_environment.sh
+++ b/create_full_environment.sh
@@ -2,15 +2,15 @@
 
 set -o errexit
 
-function error () {
+function error() {
     echo $@ 1>&2
 }
 
 # Check OS
 OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"')
 if [[ ! ${OS} =~ ^(centos)$ ]] && [[ ! ${OS} =~ ^(rhel)$ ]] && [[ ! ${OS} =~ ^(fedora)$ ]]; then
-  error "\"${OS}\" is an unsupported OS. We support only CentOS, RHEL or FEDORA."
-  exit 1
+    error "\"${OS}\" is an unsupported OS. We support only CentOS, RHEL or FEDORA."
+    exit 1
 fi
 
 #Check CentOS version
 VER=$(awk -F= '/^VERSION_ID=/ { print $2 }' /etc/os-release | tr -d '"' | cut -f1 -d'.')
 VER_SUPPORTED=8
 
 if [[ ${OS} =~ ^(centos)$ && ${VER} -ne ${VER_SUPPORTED} ]]; then
-  error "CentOS version ${VER_SUPPORTED} is required."
-  exit 1
-elif [[ ${OS} =~ ^(rhel)$ && ${VER} -ne ${VER_SUPPORTED} ]]
-then
-  error "RHEL version ${VER_SUPPORTED} is required."
-  exit 1
+    error "CentOS version ${VER_SUPPORTED} is required."
+    exit 1
+elif [[ ${OS} =~ ^(rhel)$ && ${VER} -ne ${VER_SUPPORTED} ]]; then
+    error "RHEL version ${VER_SUPPORTED} is required."
+    exit 1
 fi
 
 # TODO add minimum version fedora validation
-
 echo "Installing environment"
 scripts/install_environment.sh
 echo "Done installing"
@@ -40,6 +38,6 @@ echo "Installing minikube and oc"
 make install_minikube
 
 if [ -z "${NO_MINIKUBE}" ]; then
-  echo "Install and start minikube"
-  make start_minikube
+    echo "Install and start minikube"
+    make start_minikube
 fi
diff --git a/discovery-infra/bm_inventory_api.py b/discovery-infra/bm_inventory_api.py
index a1e98a25d54..988a6ec9e93 100644
--- a/discovery-infra/bm_inventory_api.py
+++ b/discovery-infra/bm_inventory_api.py
@@ -1,14 +1,15 @@
-import waiting
+# -*- coding: utf-8 -*-
 import json
-from tqdm import tqdm
-import utils
+
 import consts
+import utils
+import waiting
 from bm_inventory_client import ApiClient, Configuration, api, models
 from logger import log
+from tqdm import tqdm
 
 
 class InventoryClient(object):
-
     def __init__(self, inventory_url):
         self.inventory_url = inventory_url
         configs = Configuration()
@@ -18,13 +19,18 @@ def __init__(self, inventory_url):
 
     def wait_for_api_readiness(self):
         log.info("Waiting for inventory api to be ready")
-        waiting.wait(lambda: self.clusters_list() is not None,
-                     timeout_seconds=consts.WAIT_FOR_BM_API,
-                     sleep_seconds=5, waiting_for="Wait till inventory is ready",
-                     expected_exceptions=Exception)
+        waiting.wait(
+            lambda: self.clusters_list() is not None,
+            timeout_seconds=consts.WAIT_FOR_BM_API,
+            sleep_seconds=5,
+            waiting_for="Wait till inventory is ready",
+            expected_exceptions=Exception,
+        )
 
     def create_cluster(self, name, ssh_public_key=None, **cluster_params):
-        cluster = models.ClusterCreateParams(name=name, ssh_public_key=ssh_public_key, **cluster_params)
+        cluster = models.ClusterCreateParams(
+            name=name, ssh_public_key=ssh_public_key, **cluster_params
+        )
         log.info("Creating cluster with params %s", cluster.__dict__)
         result = self.client.register_cluster(new_cluster_params=cluster)
         return result
@@ -49,7 +55,7 @@ def cluster_get(self, cluster_id):
 
     def _download(self, response, file_path):
         progress = tqdm(iterable=response.read_chunked())
-        with open(file_path, 'wb') as f:
+        with open(file_path, "wb") as f:
             for chunk in progress:
                f.write(chunk)
         progress.close()
 
@@ -60,26 +66,37 @@ def generate_image(self, cluster_id, ssh_key, proxy_url=None):
         if proxy_url:
             image_create_params.proxy_url = proxy_url
         log.info("Generating image with params %s", image_create_params.__dict__)
-        return self.client.generate_cluster_iso(cluster_id=cluster_id, image_create_params=image_create_params)
+        return self.client.generate_cluster_iso(
+            cluster_id=cluster_id, image_create_params=image_create_params
+        )
 
     def download_image(self, cluster_id, image_path):
         log.info("Downloading image for cluster %s to %s", cluster_id, image_path)
-        response = self.client.download_cluster_iso(cluster_id=cluster_id,
-                                                    _preload_content=False)
+        response = self.client.download_cluster_iso(
+            cluster_id=cluster_id, _preload_content=False
+        )
         self._download(response=response, file_path=image_path)
 
-    def generate_and_download_image(self, cluster_id, ssh_key, image_path, proxy_url=None):
+    def generate_and_download_image(
+        self, cluster_id, ssh_key, image_path, proxy_url=None
+    ):
         self.generate_image(cluster_id=cluster_id, ssh_key=ssh_key, proxy_url=proxy_url)
         self.download_image(cluster_id=cluster_id, image_path=image_path)
 
     def set_hosts_roles(self, cluster_id, hosts_with_roles):
-        log.info("Setting roles for hosts %s in cluster %s", hosts_with_roles, cluster_id)
+        log.info(
+            "Setting roles for hosts %s in cluster %s", hosts_with_roles, cluster_id
+        )
         hosts = models.ClusterUpdateParams(hosts_roles=hosts_with_roles)
-        return self.client.update_cluster(cluster_id=cluster_id, cluster_update_params=hosts)
+        return self.client.update_cluster(
+            cluster_id=cluster_id, cluster_update_params=hosts
+        )
 
     def update_cluster(self, cluster_id, update_params):
         log.info("Updating cluster %s with params %s", cluster_id, update_params)
-        return self.client.update_cluster(cluster_id=cluster_id, cluster_update_params=update_params)
+        return self.client.update_cluster(
+            cluster_id=cluster_id, cluster_update_params=update_params
+        )
 
     def delete_cluster(self, cluster_id):
         log.info("Deleting cluster %s", cluster_id)
@@ -103,18 +120,25 @@ def get_host_by_mac(self, cluster_id, mac):
 
     def download_and_save_file(self, cluster_id, file_name, file_path):
         log.info("Downloading %s to %s", file_name, file_path)
-        response = self.client.download_cluster_files(cluster_id=cluster_id, file_name=file_name,
-                                                      _preload_content=False)
+        response = self.client.download_cluster_files(
+            cluster_id=cluster_id, file_name=file_name, _preload_content=False
+        )
         with open(file_path, "wb") as _file:
             _file.write(response.data)
 
     def download_kubeconfig_no_ingress(self, cluster_id, kubeconfig_path):
         log.info("Downloading kubeconfig-noingress to %s", kubeconfig_path)
-        self.download_and_save_file(cluster_id=cluster_id, file_name="kubeconfig-noingress", file_path=kubeconfig_path)
+        self.download_and_save_file(
+            cluster_id=cluster_id,
+            file_name="kubeconfig-noingress",
+            file_path=kubeconfig_path,
+        )
 
     def download_kubeconfig(self, cluster_id, kubeconfig_path):
         log.info("Downloading kubeconfig to %s", kubeconfig_path)
-        response = self.client.download_cluster_kubeconfig(cluster_id=cluster_id, _preload_content=False)
+        response = self.client.download_cluster_kubeconfig(
+            cluster_id=cluster_id, _preload_content=False
+        )
         with open(kubeconfig_path, "wb") as _file:
             _file.write(response.data)
diff --git a/discovery-infra/consts.py b/discovery-infra/consts.py
index f0b4b2c92a1..85d5bea4304 100644
--- a/discovery-infra/consts.py
+++ b/discovery-infra/consts.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import os
 
 TF_FOLDER = "build/terraform"
diff --git a/discovery-infra/delete_nodes.py b/discovery-infra/delete_nodes.py
index 42a578ad4ed..32fd04f63a0 100755
--- a/discovery-infra/delete_nodes.py
+++ b/discovery-infra/delete_nodes.py
@@ -1,11 +1,13 @@
 #!/usr/bin/python3
+# -*- coding: utf-8 -*-
 import argparse
 import shutil
+
+import bm_inventory_api
 import consts
 import utils
 import virsh_cleanup
-import bm_inventory_api
 from logger import log
 
 
@@ -14,7 +16,9 @@ def try_to_delete_cluster(tfvars):
     try:
         cluster_id = tfvars.get("cluster_inventory_id")
         if cluster_id:
-            client = bm_inventory_api.create_client(args.inventory_url, wait_for_url=False)
+            client = bm_inventory_api.create_client(
+                args.inventory_url, wait_for_url=False
+            )
             client.delete_cluster(cluster_id=cluster_id)
     # TODO add different exception validations
     except Exception as exc:
@@ -25,17 +29,23 @@ def try_to_delete_cluster(tfvars):
 def delete_nodes(tfvars):
     try:
         log.info("Start running terraform delete")
-        cmd = "cd %s && terraform destroy -auto-approve " \
-              "-input=false -state=terraform.tfstate -state-out=terraform.tfstate " \
-              "-var-file=terraform.tfvars.json" % consts.TF_FOLDER
+        cmd = (
+            "cd %s && terraform destroy -auto-approve "
+            "-input=false -state=terraform.tfstate -state-out=terraform.tfstate "
+            "-var-file=terraform.tfvars.json" % consts.TF_FOLDER
+        )
         utils.run_command_with_output(cmd)
     except:
         log.exception("Failed to run terraform delete, deleting %s", consts.TF_FOLDER)
         shutil.rmtree(consts.TF_FOLDER)
     finally:
-        virsh_cleanup.clean_virsh_resources(virsh_cleanup.DEFAULT_SKIP_LIST,
-                                            [tfvars.get("cluster_name", consts.TEST_INFRA),
-                                             tfvars.get("libvirt_network_name", consts.TEST_INFRA)])
+        virsh_cleanup.clean_virsh_resources(
+            virsh_cleanup.DEFAULT_SKIP_LIST,
+            [
+                tfvars.get("cluster_name", consts.TEST_INFRA),
+                tfvars.get("libvirt_network_name", consts.TEST_INFRA),
+            ],
+        )
 
 
 # Deletes every single virsh resource, leaves only defaults
@@ -58,10 +68,28 @@ def main():
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Run delete nodes flow')
-    parser.add_argument('-iU', '--inventory-url', help="Full url of remote inventory", type=str, default="")
-    parser.add_argument('-id', '--cluster-id', help='Cluster id to install', type=str, default=None)
-    parser.add_argument('-n', '--only-nodes', help='Delete only nodes, without cluster', action="store_true")
-    parser.add_argument('-a', '--delete-all', help='Delete only nodes, without cluster', action="store_true")
+    parser = argparse.ArgumentParser(description="Run delete nodes flow")
+    parser.add_argument(
+        "-iU",
+        "--inventory-url",
+        help="Full url of remote inventory",
+        type=str,
+        default="",
+    )
+    parser.add_argument(
+        "-id", "--cluster-id", help="Cluster id to install", type=str, default=None
+    )
+    parser.add_argument(
+        "-n",
+        "--only-nodes",
+        help="Delete only nodes, without cluster",
+        action="store_true",
+    )
+    parser.add_argument(
+        "-a",
+        "--delete-all",
+        help="Delete only nodes, without cluster",
+        action="store_true",
+    )
     args = parser.parse_args()
     main()
diff --git a/discovery-infra/install_cluster.py b/discovery-infra/install_cluster.py
index 704ce0340a5..2fee0003431 100755
--- a/discovery-infra/install_cluster.py
+++ b/discovery-infra/install_cluster.py
@@ -1,10 +1,12 @@
 #!/usr/bin/python3
+# -*- coding: utf-8 -*-
 import argparse
-import waiting
-import utils
-import consts
+
 import bm_inventory_api
+import consts
+import utils
+import waiting
 from logger import log
 
 
@@ -24,20 +26,29 @@ def verify_pull_secret(cluster, client, pull_secret):
 def _install_cluster(client, cluster):
     cluster = client.install_cluster(cluster_id=cluster.id)
-    utils.wait_till_all_hosts_are_in_status(client=client, cluster_id=cluster.id,
-                                            nodes_count=len(cluster.hosts), statuses=[consts.NodesStatus.INSTALLING],
-                                            interval=30)
+    utils.wait_till_all_hosts_are_in_status(
+        client=client,
+        cluster_id=cluster.id,
+        nodes_count=len(cluster.hosts),
+        statuses=[consts.NodesStatus.INSTALLING],
+        interval=30,
+    )
 
 
-def wait_till_installed(client, cluster, timeout=60*60*2):
+def wait_till_installed(client, cluster, timeout=60 * 60 * 2):
     log.info("Waiting %s till cluster finished installation", timeout)
     # TODO: Change host validation for only previous known hosts
-    utils.wait_till_all_hosts_are_in_status(client=client, cluster_id=cluster.id,
-                                            nodes_count=len(cluster.hosts),
-                                            statuses=[consts.NodesStatus.INSTALLED],
-                                            timeout=timeout, interval=60)
-    utils.wait_till_cluster_is_in_status(client=client, cluster_id=cluster.id,
-                                         statuses=[consts.ClusterStatus.INSTALLED])
+    utils.wait_till_all_hosts_are_in_status(
+        client=client,
+        cluster_id=cluster.id,
+        nodes_count=len(cluster.hosts),
+        statuses=[consts.NodesStatus.INSTALLED],
+        timeout=timeout,
+        interval=60,
+    )
+    utils.wait_till_cluster_is_in_status(
+        client=client, cluster_id=cluster.id, statuses=[consts.ClusterStatus.INSTALLED]
+    )
 
 
 # Runs installation flow :
@@ -51,8 +62,11 @@ def run_install_flow(client, cluster_id, kubeconfig_path, pull_secret):
     log.info("Verifying pull secret")
     verify_pull_secret(client=client, cluster=cluster, pull_secret=pull_secret)
     log.info("Wait till cluster is ready")
-    utils.wait_till_cluster_is_in_status(client=client, cluster_id=cluster_id,
-                                         statuses=[consts.ClusterStatus.READY, consts.ClusterStatus.INSTALLING])
+    utils.wait_till_cluster_is_in_status(
+        client=client,
+        cluster_id=cluster_id,
+        statuses=[consts.ClusterStatus.READY, consts.ClusterStatus.INSTALLING],
+    )
     cluster = client.cluster_get(cluster_id)
     if cluster.status == consts.ClusterStatus.READY:
         log.info("Install cluster %s", cluster_id)
@@ -62,16 +76,23 @@ def run_install_flow(client, cluster_id, kubeconfig_path, pull_secret):
         log.info("Cluster is already in installing status, skipping install command")
 
     log.info("Download kubeconfig-noingress")
-    client.download_kubeconfig_no_ingress(cluster_id=cluster_id, kubeconfig_path=kubeconfig_path)
+    client.download_kubeconfig_no_ingress(
+        cluster_id=cluster_id, kubeconfig_path=kubeconfig_path
+    )
 
     wait_till_installed(client=client, cluster=cluster)
 
     log.info("Download kubeconfig")
-    waiting.wait(lambda: client.download_kubeconfig(cluster_id=cluster_id, kubeconfig_path=kubeconfig_path) is None,
-                 timeout_seconds=240,
-                 sleep_seconds=20,
-                 expected_exceptions=Exception,
-                 waiting_for="Kubeconfig")
+    waiting.wait(
+        lambda: client.download_kubeconfig(
+            cluster_id=cluster_id, kubeconfig_path=kubeconfig_path
+        )
+        is None,
+        timeout_seconds=240,
+        sleep_seconds=20,
+        expected_exceptions=Exception,
+        waiting_for="Kubeconfig",
+    )
 
 
 def main():
@@ -81,16 +102,28 @@ def main():
     if not args.cluster_id:
         args.cluster_id = utils.get_tfvars()["cluster_inventory_id"]
     client = bm_inventory_api.create_client(wait_for_url=False)
-    run_install_flow(client=client, cluster_id=args.cluster_id,
-                     kubeconfig_path=args.kubeconfig_path,
-                     pull_secret=args.pull_secret)
+    run_install_flow(
+        client=client,
+        cluster_id=args.cluster_id,
+        kubeconfig_path=args.kubeconfig_path,
+        pull_secret=args.pull_secret,
+    )
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Run discovery flow')
-    parser.add_argument('-id', '--cluster-id', help='Cluster id to install', type=str, default=None)
-    parser.add_argument('-k', '--kubeconfig-path', help='Path to downloaded kubeconfig', type=str,
-                        default="build/kubeconfig")
-    parser.add_argument('-ps', '--pull-secret', help='Pull secret', type=str, default="")
+    parser = argparse.ArgumentParser(description="Run discovery flow")
+    parser.add_argument(
+        "-id", "--cluster-id", help="Cluster id to install", type=str, default=None
+    )
+    parser.add_argument(
+        "-k",
+        "--kubeconfig-path",
+        help="Path to downloaded kubeconfig",
+        type=str,
+        default="build/kubeconfig",
+    )
+    parser.add_argument(
+        "-ps", "--pull-secret", help="Pull secret", type=str, default=""
+    )
     args = parser.parse_args()
     main()
diff --git a/discovery-infra/logger.py b/discovery-infra/logger.py
index 5be8d589ba0..5c56b5d9add 100644
--- a/discovery-infra/logger.py
+++ b/discovery-infra/logger.py
@@ -1,16 +1,20 @@
+# -*- coding: utf-8 -*-
 import logging
 import sys
 
 logging.getLogger("requests").setLevel(logging.ERROR)
 logging.getLogger("urllib3").setLevel(logging.ERROR)
 
-log = logging.getLogger('')
+log = logging.getLogger("")
 log.setLevel(logging.DEBUG)
 format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
 
 ch = logging.StreamHandler(sys.stdout)
-ch.setFormatter(logging.Formatter('%(asctime)s %(levelname)-10s %(message)s \t'
-                                  '(%(pathname)s:%(lineno)d)'))
+ch.setFormatter(
+    logging.Formatter(
+        "%(asctime)s %(levelname)-10s %(message)s \t" "(%(pathname)s:%(lineno)d)"
+    )
+)
 log.addHandler(ch)
 
 fh = logging.FileHandler(filename="test_infra.log")
diff --git a/discovery-infra/start_discovery.py b/discovery-infra/start_discovery.py
index d40389a5cd3..d13f94c5551 100755
--- a/discovery-infra/start_discovery.py
+++ b/discovery-infra/start_discovery.py
@@ -1,20 +1,22 @@
 #!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+import ipaddress
 import json
-import waiting
 import os
 import pprint
-import argparse
-import ipaddress
+import time
 import uuid
 from distutils.dir_util import copy_tree
 from pathlib import Path
-import utils
-import consts
+
 import bm_inventory_api
+import consts
 import install_cluster
+import utils
+import waiting
 from logger import log
-import time
 
 
 # Creates ip list, if will be needed in any other place, please move to utils
@@ -30,15 +32,25 @@ def fill_tfvars(image_path, storage_path, master_count, nodes_details):
     with open(consts.TFVARS_JSON_FILE) as _file:
         tfvars = json.load(_file)
 
-    network_subnet_starting_ip = str(ipaddress.ip_address(ipaddress.IPv4Network(
-        nodes_details["machine_cidr"]).network_address) + 10)
+    network_subnet_starting_ip = str(
+        ipaddress.ip_address(
+            ipaddress.IPv4Network(nodes_details["machine_cidr"]).network_address
+        )
+        + 10
+    )
     tfvars["image_path"] = image_path
     tfvars["master_count"] = min(master_count, consts.NUMBER_OF_MASTERS)
-    tfvars["libvirt_master_ips"] = _create_ip_address_list(min(master_count, consts.NUMBER_OF_MASTERS),
-                                                           starting_ip_addr=network_subnet_starting_ip)
+    tfvars["libvirt_master_ips"] = _create_ip_address_list(
+        min(master_count, consts.NUMBER_OF_MASTERS),
+        starting_ip_addr=network_subnet_starting_ip,
+    )
     tfvars["api_vip"] = _get_vips_ips()[0]
-    tfvars["libvirt_worker_ips"] = _create_ip_address_list(nodes_details["worker_count"], starting_ip_addr=str(
-        ipaddress.ip_address(consts.STARTING_IP_ADDRESS) + tfvars["master_count"]))
+    tfvars["libvirt_worker_ips"] = _create_ip_address_list(
+        nodes_details["worker_count"],
+        starting_ip_addr=str(
+            ipaddress.ip_address(consts.STARTING_IP_ADDRESS) + tfvars["master_count"]
+        ),
+    )
     tfvars["libvirt_storage_pool_path"] = storage_path
     tfvars.update(nodes_details)
 
@@ -56,22 +68,34 @@ def create_nodes(image_path, storage_path, master_count, nodes_details):
 
 
 # Starts terraform nodes creation, waits till all nodes will get ip and will move to known status
-def create_nodes_and_wait_till_registered(inventory_client, cluster, image_path, storage_path,
-                                          master_count, nodes_details):
+def create_nodes_and_wait_till_registered(
+    inventory_client, cluster, image_path, storage_path, master_count, nodes_details
+):
     nodes_count = master_count + nodes_details["worker_count"]
-    create_nodes(image_path, storage_path=storage_path, master_count=master_count, nodes_details=nodes_details)
+    create_nodes(
+        image_path,
+        storage_path=storage_path,
+        master_count=master_count,
+        nodes_details=nodes_details,
+    )
 
     # TODO: Check for only new nodes
-    utils.wait_till_nodes_are_ready(nodes_count=nodes_count, network_name=nodes_details["libvirt_network_name"])
+    utils.wait_till_nodes_are_ready(
+        nodes_count=nodes_count, network_name=nodes_details["libvirt_network_name"]
+    )
     if not inventory_client:
         log.info("No inventory url, will not wait till nodes registration")
         return
 
     log.info("Wait till nodes will be registered")
-    waiting.wait(lambda: utils.are_all_libvirt_nodes_in_cluster_hosts(inventory_client, cluster.id,
-                                                                      nodes_details["libvirt_network_name"]),
-                 timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
-                 sleep_seconds=10, waiting_for="Nodes to be registered in inventory service")
+    waiting.wait(
+        lambda: utils.are_all_libvirt_nodes_in_cluster_hosts(
+            inventory_client, cluster.id, nodes_details["libvirt_network_name"]
+        ),
+        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
+        sleep_seconds=10,
+        waiting_for="Nodes to be registered in inventory service",
+    )
     log.info("Registered nodes are:")
     pprint.pprint(inventory_client.get_cluster_hosts(cluster.id))
 
@@ -90,7 +114,9 @@ def set_hosts_roles(client, cluster_id, network_name):
         if libvirt_mac.lower() in map(lambda nic: nic["mac"].lower(), hw["nics"]):
             added_hosts.append({"id": host["id"], "role": libvirt_metadata["role"]})
 
-    assert len(libvirt_nodes) == len(added_hosts), "All nodes should have matching inventory hosts"
+    assert len(libvirt_nodes) == len(
+        added_hosts
+    ), "All nodes should have matching inventory hosts"
     client.set_hosts_roles(cluster_id=cluster_id, hosts_with_roles=added_hosts)
 
 
@@ -103,35 +129,44 @@ def set_cluster_vips(client, cluster_id):
 
 
 def _get_vips_ips():
-    network_subnet_starting_ip = str(ipaddress.ip_address(ipaddress.IPv4Network(
-        args.vm_network_cidr).network_address) + 100)
-    ips = _create_ip_address_list(2, starting_ip_addr=str(
-        ipaddress.ip_address(network_subnet_starting_ip)))
+    network_subnet_starting_ip = str(
+        ipaddress.ip_address(
+            ipaddress.IPv4Network(args.vm_network_cidr).network_address
+        )
+        + 100
+    )
+    ips = _create_ip_address_list(
+        2, starting_ip_addr=str(ipaddress.ip_address(network_subnet_starting_ip))
+    )
     return ips[0], ips[1]
 
 
 # TODO add config file
 # Converts params from args to bm-inventory cluster params
 def _cluster_create_params():
-    params = {"openshift_version": args.openshift_version,
-              "base_dns_domain": args.base_dns_domain,
-              "cluster_network_cidr": args.cluster_network,
-              "cluster_network_host_prefix": args.host_prefix,
-              "service_network_cidr": args.service_network,
-              "pull_secret": args.pull_secret}
+    params = {
+        "openshift_version": args.openshift_version,
+        "base_dns_domain": args.base_dns_domain,
+ "cluster_network_cidr": args.cluster_network, + "cluster_network_host_prefix": args.host_prefix, + "service_network_cidr": args.service_network, + "pull_secret": args.pull_secret, + } return params # convert params from args to terraform tfvars def _create_node_details(cluster_name): - return {"libvirt_worker_memory": args.worker_memory, - "libvirt_master_memory": args.master_memory, - "worker_count": args.number_of_workers, - "cluster_name": cluster_name, - "cluster_domain": args.base_dns_domain, - "machine_cidr": args.vm_network_cidr, - "libvirt_network_name": args.network_name, - "libvirt_network_if": args.network_bridge} + return { + "libvirt_worker_memory": args.worker_memory, + "libvirt_master_memory": args.master_memory, + "worker_count": args.number_of_workers, + "cluster_name": cluster_name, + "cluster_domain": args.base_dns_domain, + "machine_cidr": args.vm_network_cidr, + "libvirt_network_name": args.network_name, + "libvirt_network_if": args.network_bridge, + } # Create vms from downloaded iso that will connect to bm-inventory and register @@ -140,34 +175,47 @@ def nodes_flow(client, cluster_name, cluster): nodes_details = _create_node_details(cluster_name) if cluster: nodes_details["cluster_inventory_id"] = cluster.id - create_nodes_and_wait_till_registered(inventory_client=client, - cluster=cluster, - image_path=args.image or consts.IMAGE_PATH, - storage_path=args.storage_path, - master_count=args.master_count, - nodes_details=nodes_details) + create_nodes_and_wait_till_registered( + inventory_client=client, + cluster=cluster, + image_path=args.image or consts.IMAGE_PATH, + storage_path=args.storage_path, + master_count=args.master_count, + nodes_details=nodes_details, + ) if client: cluster_info = client.cluster_get(cluster.id) macs = utils.get_libvirt_nodes_macs(nodes_details["libvirt_network_name"]) if not (cluster_info.api_vip and cluster_info.ingress_vip): - utils.wait_till_hosts_with_macs_are_in_status(client=client, cluster_id=cluster.id, macs=macs, - statuses=[consts.NodesStatus.INSUFFICIENT]) + utils.wait_till_hosts_with_macs_are_in_status( + client=client, + cluster_id=cluster.id, + macs=macs, + statuses=[consts.NodesStatus.INSUFFICIENT], + ) set_cluster_vips(client, cluster.id) else: log.info("VIPs already configured") set_hosts_roles(client, cluster.id, nodes_details["libvirt_network_name"]) - utils.wait_till_hosts_with_macs_are_in_status(client=client, cluster_id=cluster.id, macs=macs, - statuses=[consts.NodesStatus.KNOWN]) + utils.wait_till_hosts_with_macs_are_in_status( + client=client, + cluster_id=cluster.id, + macs=macs, + statuses=[consts.NodesStatus.KNOWN], + ) log.info("Printing after setting roles") pprint.pprint(client.get_cluster_hosts(cluster.id)) if args.install_cluster: time.sleep(10) - install_cluster.run_install_flow(client=client, cluster_id=cluster.id, - kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH, - pull_secret=args.pull_secret) + install_cluster.run_install_flow( + client=client, + cluster_id=cluster.id, + kubeconfig_path=consts.DEFAULT_CLUSTER_KUBECONFIG_PATH, + pull_secret=args.pull_secret, + ) def main(): @@ -181,13 +229,16 @@ def main(): if args.cluster_id: cluster = client.cluster_get(cluster_id=args.cluster_id) else: - cluster = client.create_cluster(cluster_name, - ssh_public_key=args.ssh_key, - **_cluster_create_params() - ) + cluster = client.create_cluster( + cluster_name, ssh_public_key=args.ssh_key, **_cluster_create_params() + ) - client.generate_and_download_image(cluster_id=cluster.id, image_path=consts.IMAGE_PATH, 
ssh_key=args.ssh_key, - proxy_url=args.proxy_url) + client.generate_and_download_image( + cluster_id=cluster.id, + image_path=consts.IMAGE_PATH, + ssh_key=args.ssh_key, + proxy_url=args.proxy_url, + ) # Iso only, cluster will be up and iso downloaded but vm will not be created if not args.iso_only: @@ -195,35 +246,123 @@ def main(): if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Run discovery flow') - parser.add_argument('-i', '--image', help='Run terraform with given image', type=str, default="") - parser.add_argument('-n', '--master-count', help='Masters count to spawn', type=int, default=3) - parser.add_argument('-p', '--storage-path', help="Path to storage pool", type=str, - default=consts.STORAGE_PATH) - parser.add_argument('-si', '--skip-inventory', help='Node count to spawn', action="store_true") - parser.add_argument('-k', '--ssh-key', help="Path to ssh key", type=str, - default="") - parser.add_argument('-mm', '--master-memory', help='Master memory (ram) in mb', type=int, default=8192) - parser.add_argument('-wm', '--worker-memory', help='Worker memory (ram) in mb', type=int, default=8192) - parser.add_argument('-nw', '--number-of-workers', help='Workers count to spawn', type=int, default=0) - parser.add_argument('-cn', '--cluster-network', help='Cluster network with cidr', type=str, default="10.128.0.0/14") - parser.add_argument('-hp', '--host-prefix', help='Host prefix to use', type=int, default=23) - parser.add_argument('-sn', '--service-network', help='Network for services', type=str, default="172.30.0.0/16") - parser.add_argument('-ps', '--pull-secret', help='Pull secret', type=str, default="") - parser.add_argument('-ov', '--openshift-version', help='Openshift version', type=str, default="4.5") - parser.add_argument('-bd', '--base-dns-domain', help='Base dns domain', type=str, default="redhat.com") - parser.add_argument('-cN', '--cluster-name', help='Cluster name', type=str, default="") - parser.add_argument('-vN', '--vm-network-cidr', help="Vm network cidr", type=str, default="192.168.126.0/24") - parser.add_argument('-nN', '--network-name', help="Network name", type=str, default="test-infra-net") - parser.add_argument('-in', '--install-cluster', help="Install cluster, will take latest id", action="store_true") - parser.add_argument('-nB', '--network-bridge', help="Network bridge to use", type=str, default="tt0") - parser.add_argument('-iO', '--iso-only', help="Create cluster and download iso, no need to spawn cluster", - action="store_true") - parser.add_argument('-pU', '--proxy-url', help="Proxy url to pass to inventory cluster", type=str, default="") - parser.add_argument('-rv', '--run-with-vips', help="Run cluster create with adding vips " - "from the same subnet as vms", type=str, default="no") - parser.add_argument('-iU', '--inventory-url', help="Full url of remote inventory", type=str, default="") - parser.add_argument('-id', '--cluster-id', help='Cluster id to install', type=str, default=None) + parser = argparse.ArgumentParser(description="Run discovery flow") + parser.add_argument( + "-i", "--image", help="Run terraform with given image", type=str, default="" + ) + parser.add_argument( + "-n", "--master-count", help="Masters count to spawn", type=int, default=3 + ) + parser.add_argument( + "-p", + "--storage-path", + help="Path to storage pool", + type=str, + default=consts.STORAGE_PATH, + ) + parser.add_argument( + "-si", "--skip-inventory", help="Node count to spawn", action="store_true" + ) + parser.add_argument("-k", "--ssh-key", 
help="Path to ssh key", type=str, default="") + parser.add_argument( + "-mm", + "--master-memory", + help="Master memory (ram) in mb", + type=int, + default=8192, + ) + parser.add_argument( + "-wm", + "--worker-memory", + help="Worker memory (ram) in mb", + type=int, + default=8192, + ) + parser.add_argument( + "-nw", "--number-of-workers", help="Workers count to spawn", type=int, default=0 + ) + parser.add_argument( + "-cn", + "--cluster-network", + help="Cluster network with cidr", + type=str, + default="10.128.0.0/14", + ) + parser.add_argument( + "-hp", "--host-prefix", help="Host prefix to use", type=int, default=23 + ) + parser.add_argument( + "-sn", + "--service-network", + help="Network for services", + type=str, + default="172.30.0.0/16", + ) + parser.add_argument( + "-ps", "--pull-secret", help="Pull secret", type=str, default="" + ) + parser.add_argument( + "-ov", "--openshift-version", help="Openshift version", type=str, default="4.5" + ) + parser.add_argument( + "-bd", + "--base-dns-domain", + help="Base dns domain", + type=str, + default="redhat.com", + ) + parser.add_argument( + "-cN", "--cluster-name", help="Cluster name", type=str, default="" + ) + parser.add_argument( + "-vN", + "--vm-network-cidr", + help="Vm network cidr", + type=str, + default="192.168.126.0/24", + ) + parser.add_argument( + "-nN", "--network-name", help="Network name", type=str, default="test-infra-net" + ) + parser.add_argument( + "-in", + "--install-cluster", + help="Install cluster, will take latest id", + action="store_true", + ) + parser.add_argument( + "-nB", "--network-bridge", help="Network bridge to use", type=str, default="tt0" + ) + parser.add_argument( + "-iO", + "--iso-only", + help="Create cluster and download iso, no need to spawn cluster", + action="store_true", + ) + parser.add_argument( + "-pU", + "--proxy-url", + help="Proxy url to pass to inventory cluster", + type=str, + default="", + ) + parser.add_argument( + "-rv", + "--run-with-vips", + help="Run cluster create with adding vips " "from the same subnet as vms", + type=str, + default="no", + ) + parser.add_argument( + "-iU", + "--inventory-url", + help="Full url of remote inventory", + type=str, + default="", + ) + parser.add_argument( + "-id", "--cluster-id", help="Cluster id to install", type=str, default=None + ) args = parser.parse_args() if not args.pull_secret and args.install_cluster: diff --git a/discovery-infra/update_bm_inventory_cm.py b/discovery-infra/update_bm_inventory_cm.py index beb8a9f67f7..0de3a1b7197 100755 --- a/discovery-infra/update_bm_inventory_cm.py +++ b/discovery-infra/update_bm_inventory_cm.py @@ -1,17 +1,27 @@ #!/usr/bin/python3 +# -*- coding: utf-8 -*- # Idea is to pass os environments to bm-inventory config map, to make an easy way to configure bm-inventory -import yaml import os +import yaml + CM_PATH = "bm-inventory/deploy/bm-inventory-configmap.yaml" -ENVS = [("HW_VALIDATOR_MIN_CPU_CORES", "2"), ("HW_VALIDATOR_MIN_CPU_CORES_WORKER", "2"), - ("HW_VALIDATOR_MIN_CPU_CORES_MASTER", "4"), ("HW_VALIDATOR_MIN_RAM_GIB", "3"), - ("HW_VALIDATOR_MIN_RAM_GIB_WORKER", "3"), ("HW_VALIDATOR_MIN_RAM_GIB_MASTER", "8"), - ("HW_VALIDATOR_MIN_DISK_SIZE_GIB", "10"), ("INSTALLER_IMAGE", ""), - ("INVENTORY_URL", ""), ("INVENTORY_PORT", ""), ("AGENT_DOCKER_IMAGE", ""), - ("KUBECONFIG_GENERATE_IMAGE", "")] +ENVS = [ + ("HW_VALIDATOR_MIN_CPU_CORES", "2"), + ("HW_VALIDATOR_MIN_CPU_CORES_WORKER", "2"), + ("HW_VALIDATOR_MIN_CPU_CORES_MASTER", "4"), + ("HW_VALIDATOR_MIN_RAM_GIB", "3"), + ("HW_VALIDATOR_MIN_RAM_GIB_WORKER", 
"3"), + ("HW_VALIDATOR_MIN_RAM_GIB_MASTER", "8"), + ("HW_VALIDATOR_MIN_DISK_SIZE_GIB", "10"), + ("INSTALLER_IMAGE", ""), + ("INVENTORY_URL", ""), + ("INVENTORY_PORT", ""), + ("AGENT_DOCKER_IMAGE", ""), + ("KUBECONFIG_GENERATE_IMAGE", ""), +] def read_yaml(): diff --git a/discovery-infra/utils.py b/discovery-infra/utils.py index d821db266f5..c105b3bf311 100644 --- a/discovery-infra/utils.py +++ b/discovery-infra/utils.py @@ -1,30 +1,40 @@ +# -*- coding: utf-8 -*- +import itertools +import json import os +import shlex import shutil -import itertools import subprocess from pathlib import Path -import shlex -import waiting -import json -from retry import retry + import consts -from logger import log import libvirt +import waiting +from logger import log +from retry import retry -conn = libvirt.open('qemu:///system') +conn = libvirt.open("qemu:///system") def run_command(command, shell=False): command = command if shell else shlex.split(command) - process = subprocess.run(command, shell=shell, check=True, stdout=subprocess.PIPE, universal_newlines=True) + process = subprocess.run( + command, + shell=shell, + check=True, + stdout=subprocess.PIPE, + universal_newlines=True, + ) output = process.stdout.strip() return output def run_command_with_output(command): - with subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p: + with subprocess.Popen( + command, shell=True, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True + ) as p: for line in p.stdout: - print(line, end='') # process line here + print(line, end="") # process line here if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, p.args) @@ -53,12 +63,18 @@ def get_network_leases(network_name): def wait_till_nodes_are_ready(nodes_count, network_name): log.info("Wait till %s nodes will be ready and have ips", nodes_count) try: - waiting.wait(lambda: len(get_network_leases(network_name)) >= nodes_count, - timeout_seconds=consts.NODES_REGISTERED_TIMEOUT * nodes_count, - sleep_seconds=10, waiting_for="Nodes to have ips") + waiting.wait( + lambda: len(get_network_leases(network_name)) >= nodes_count, + timeout_seconds=consts.NODES_REGISTERED_TIMEOUT * nodes_count, + sleep_seconds=10, + waiting_for="Nodes to have ips", + ) log.info("All nodes have booted and got ips") except: - log.error("Not all nodes are ready. Current dhcp leases are %s", get_network_leases(network_name)) + log.error( + "Not all nodes are ready. Current dhcp leases are %s", + get_network_leases(network_name), + ) raise @@ -68,13 +84,19 @@ def get_libvirt_nodes_mac_role_ip_and_name(network_name): try: leases = get_network_leases(network_name) for lease in leases: - nodes_data[lease["mac"]] = {"ip": lease["ipaddr"], - "name": lease["hostname"], - "role": consts.NodeRoles.WORKER if - consts.NodeRoles.WORKER in lease["hostname"] else consts.NodeRoles.MASTER} + nodes_data[lease["mac"]] = { + "ip": lease["ipaddr"], + "name": lease["hostname"], + "role": consts.NodeRoles.WORKER + if consts.NodeRoles.WORKER in lease["hostname"] + else consts.NodeRoles.MASTER, + } return nodes_data except: - log.error("Failed to get nodes macs from libvirt. Output is %s", get_network_leases(network_name)) + log.error( + "Failed to get nodes macs from libvirt. 
Output is %s", + get_network_leases(network_name), + ) raise @@ -84,8 +106,10 @@ def get_libvirt_nodes_macs(network_name): def are_all_libvirt_nodes_in_cluster_hosts(client, cluster_id, network_name): hosts_macs = client.get_hosts_id_with_macs(cluster_id) - return all(mac.lower() in - map(str.lower, itertools.chain(*hosts_macs.values())) for mac in get_libvirt_nodes_macs(network_name)) + return all( + mac.lower() in map(str.lower, itertools.chain(*hosts_macs.values())) + for mac in get_libvirt_nodes_macs(network_name) + ) def get_cluster_hosts_with_mac(client, cluster_id, macs): @@ -100,59 +124,108 @@ def get_tfvars(): return tfvars -def are_hosts_in_status(client, cluster_id, hosts, nodes_count, statuses, fall_on_error_status=True): +def are_hosts_in_status( + client, cluster_id, hosts, nodes_count, statuses, fall_on_error_status=True +): hosts_in_status = [host for host in hosts if host["status"] in statuses] if len(hosts_in_status) >= nodes_count: return True - elif fall_on_error_status and len([host for host in hosts if host["status"] == consts.NodesStatus.ERROR]) > 0: - hosts_in_error = [host for host in hosts if host["status"] == consts.NodesStatus.ERROR] - log.error("Some of the hosts are in insufficient or error status. Hosts in error %s", hosts_in_error) + elif ( + fall_on_error_status + and len([host for host in hosts if host["status"] == consts.NodesStatus.ERROR]) + > 0 + ): + hosts_in_error = [ + host for host in hosts if host["status"] == consts.NodesStatus.ERROR + ] + log.error( + "Some of the hosts are in insufficient or error status. Hosts in error %s", + hosts_in_error, + ) raise Exception("All the nodes must be in valid status, but got some in error") - log.info("Asked hosts to be in one of the statuses from %s and currently hosts statuses are %s", statuses, - [(host["id"], host["status"], host["status_info"]) for host in hosts]) + log.info( + "Asked hosts to be in one of the statuses from %s and currently hosts statuses are %s", + statuses, + [(host["id"], host["status"], host["status_info"]) for host in hosts], + ) return False -def wait_till_hosts_with_macs_are_in_status(client, cluster_id, macs, statuses, - timeout=consts.NODES_REGISTERED_TIMEOUT, - fall_on_error_status=True, interval=5): +def wait_till_hosts_with_macs_are_in_status( + client, + cluster_id, + macs, + statuses, + timeout=consts.NODES_REGISTERED_TIMEOUT, + fall_on_error_status=True, + interval=5, +): log.info("Wait till %s nodes are in one of the statuses %s", len(macs), statuses) try: - waiting.wait(lambda: are_hosts_in_status(client, cluster_id, get_cluster_hosts_with_mac(client, cluster_id, macs), - len(macs), statuses, fall_on_error_status), - timeout_seconds=timeout, - sleep_seconds=interval, waiting_for="Nodes to be in of the statuses %s" % statuses) + waiting.wait( + lambda: are_hosts_in_status( + client, + cluster_id, + get_cluster_hosts_with_mac(client, cluster_id, macs), + len(macs), + statuses, + fall_on_error_status, + ), + timeout_seconds=timeout, + sleep_seconds=interval, + waiting_for="Nodes to be in of the statuses %s" % statuses, + ) except: hosts = get_cluster_hosts_with_mac(client, cluster_id, macs) log.info("All nodes: %s", hosts) raise -def wait_till_all_hosts_are_in_status(client, cluster_id, nodes_count, statuses, - timeout=consts.NODES_REGISTERED_TIMEOUT, - fall_on_error_status=True, interval=5): +def wait_till_all_hosts_are_in_status( + client, + cluster_id, + nodes_count, + statuses, + timeout=consts.NODES_REGISTERED_TIMEOUT, + fall_on_error_status=True, + interval=5, +): hosts 
= client.get_cluster_hosts(cluster_id) log.info("Wait till %s nodes are in one of the statuses %s", nodes_count, statuses) try: - waiting.wait(lambda: are_hosts_in_status(client, cluster_id, client.get_cluster_hosts(cluster_id), - nodes_count, statuses, fall_on_error_status), - timeout_seconds=timeout, - sleep_seconds=interval, waiting_for="Nodes to be in of the statuses %s" % statuses) + waiting.wait( + lambda: are_hosts_in_status( + client, + cluster_id, + client.get_cluster_hosts(cluster_id), + nodes_count, + statuses, + fall_on_error_status, + ), + timeout_seconds=timeout, + sleep_seconds=interval, + waiting_for="Nodes to be in of the statuses %s" % statuses, + ) except: hosts = client.get_cluster_hosts(cluster_id) log.info("All nodes: %s", hosts) raise -def wait_till_cluster_is_in_status(client, cluster_id, statuses, timeout=consts.NODES_REGISTERED_TIMEOUT, interval=30): +def wait_till_cluster_is_in_status( + client, cluster_id, statuses, timeout=consts.NODES_REGISTERED_TIMEOUT, interval=30 +): log.info("Wait till cluster %s is in status %s", cluster_id, statuses) try: - waiting.wait(lambda: client.cluster_get(cluster_id).status in statuses, - timeout_seconds=timeout, - sleep_seconds=interval, waiting_for="Cluster to be in status %s" % statuses) + waiting.wait( + lambda: client.cluster_get(cluster_id).status in statuses, + timeout_seconds=timeout, + sleep_seconds=interval, + waiting_for="Cluster to be in status %s" % statuses, + ) except: log.info("Cluster: %s", client.cluster_get(cluster_id)) raise diff --git a/discovery-infra/virsh_cleanup.py b/discovery-infra/virsh_cleanup.py index 53e9384c03f..dbe14c6e487 100755 --- a/discovery-infra/virsh_cleanup.py +++ b/discovery-infra/virsh_cleanup.py @@ -1,7 +1,9 @@ #!/usr/bin/python3 +# -*- coding: utf-8 -*- import argparse import subprocess + from logger import log DEFAULT_SKIP_LIST = ["default"] @@ -9,14 +11,22 @@ def run_command(command, check=False, resource_filter=None): if resource_filter: - command += "| grep -E \"%s\"" % "|".join(resource_filter) - process = subprocess.run(command, shell=True, check=check, stdout=subprocess.PIPE, universal_newlines=True) + command += '| grep -E "%s"' % "|".join(resource_filter) + process = subprocess.run( + command, + shell=True, + check=check, + stdout=subprocess.PIPE, + universal_newlines=True, + ) output = process.stdout.strip() return output def clean_domains(skip_list, resource_filter): - domains = run_command("virsh -c qemu:///system list --all --name", resource_filter=resource_filter) + domains = run_command( + "virsh -c qemu:///system list --all --name", resource_filter=resource_filter + ) domains = domains.splitlines() for domain in domains: log.info("Deleting domain %s", domain) @@ -26,16 +36,24 @@ def clean_domains(skip_list, resource_filter): def clean_volumes(pool): - volumes_with_path = run_command("virsh -c qemu:///system vol-list %s | tail -n +3" % pool).splitlines() + volumes_with_path = run_command( + "virsh -c qemu:///system vol-list %s | tail -n +3" % pool + ).splitlines() for volume_with_path in volumes_with_path: volume, _ = volume_with_path.split() if volume: log.info("Deleting volume %s in pool %s", volume, pool) - run_command("virsh -c qemu:///system vol-delete --pool %s %s" % (pool, volume), check=False) + run_command( + "virsh -c qemu:///system vol-delete --pool %s %s" % (pool, volume), + check=False, + ) def clean_pools(skip_list, resource_filter): - pools = run_command("virsh -c qemu:///system pool-list --all --name", resource_filter=resource_filter) + pools = 
run_command( + "virsh -c qemu:///system pool-list --all --name", + resource_filter=resource_filter, + ) pools = pools.splitlines() for pool in pools: if pool and pool not in skip_list: @@ -46,7 +64,9 @@ def clean_pools(skip_list, resource_filter): def clean_networks(skip_list, resource_filter): - networks = run_command("virsh -c qemu:///system net-list --all --name", resource_filter=resource_filter) + networks = run_command( + "virsh -c qemu:///system net-list --all --name", resource_filter=resource_filter + ) networks = networks.splitlines() for net in networks: if net and net not in skip_list: @@ -75,11 +95,27 @@ def main(p_args): if __name__ == "__main__": - parser = argparse.ArgumentParser(description='Description of your program') + parser = argparse.ArgumentParser(description="Description of your program") group = parser.add_mutually_exclusive_group() - group.add_argument('-a', '--all', help='Clean all virsh resources', action="store_true") - group.add_argument('-m', '--minikube', help='Clean minikube resources', action="store_true") - group.add_argument('-sm', '--skip-minikube', help='Clean all but skip minikube resources', action="store_true") - group.add_argument('-f', '--filter', help='List of filter of resources to delete', nargs="*",type=str, default=None) + group.add_argument( + "-a", "--all", help="Clean all virsh resources", action="store_true" + ) + group.add_argument( + "-m", "--minikube", help="Clean minikube resources", action="store_true" + ) + group.add_argument( + "-sm", + "--skip-minikube", + help="Clean all but skip minikube resources", + action="store_true", + ) + group.add_argument( + "-f", + "--filter", + help="List of filter of resources to delete", + nargs="*", + type=str, + default=None, + ) args = parser.parse_args() main(args) diff --git a/install_env_and_run_full_flow.sh b/install_env_and_run_full_flow.sh index 8cff2b38a0b..9cc039d1469 100755 --- a/install_env_and_run_full_flow.sh +++ b/install_env_and_run_full_flow.sh @@ -3,7 +3,7 @@ source create_full_environment.sh retVal=$? if [ $retVal -ne 0 ]; then - exit $retVal + exit $retVal fi source scripts/assisted_deployment.sh diff --git a/requirements.txt b/requirements.txt index a133d1bcae8..3cd2bacd2b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ +boto3==1.13.11 +pyyaml==5.3.1 requests==2.20.0 -waiting==1.4.1 retry==0.9.2 tqdm==4.46.0 -pyyaml==5.3.1 -boto3==1.13.11 +waiting==1.4.1 diff --git a/scripts/assisted_deployment.sh b/scripts/assisted_deployment.sh index 709c8b23e39..1333b31046a 100755 --- a/scripts/assisted_deployment.sh +++ b/scripts/assisted_deployment.sh @@ -5,45 +5,42 @@ function destroy_all() { } function set_dns() { - API_VIP=$(ip route show dev ${NETWORK_BRIDGE:-"tt0"} | cut -d\ -f7) - FILE="/etc/NetworkManager/conf.d/dnsmasq.conf" - if ! [ -f "$FILE" ]; then - echo -e "[main]\ndns=dnsmasq" | sudo tee $FILE - fi - sudo truncate -s0 /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf - echo "server=/api.${CLUSTER_NAME}.${BASE_DOMAIN}/${API_VIP}" | sudo tee -a /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf - sudo systemctl reload NetworkManager + API_VIP=$(ip route show dev ${NETWORK_BRIDGE:-"tt0"} | cut -d\ -f7) + FILE="/etc/NetworkManager/conf.d/dnsmasq.conf" + if ! 
[ -f "$FILE" ]; then + echo -e "[main]\ndns=dnsmasq" | sudo tee $FILE + fi + sudo truncate -s0 /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf + echo "server=/api.${CLUSTER_NAME}.${BASE_DOMAIN}/${API_VIP}" | sudo tee -a /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf + sudo systemctl reload NetworkManager } # Delete after pushing fix to dev-scripts function wait_for_cluster() { - echo "Nothing to do" + echo "Nothing to do" } - #TODO ADD ALL RELEVANT OS ENVS function run() { - make $1 NUM_MASTERS=$NUM_MASTERS NUM_WORKERS=$NUM_WORKERS KUBECONFIG=$PWD/minikube_kubeconfig BASE_DOMAIN=$BASE_DOMAIN CLUSTER_NAME=$CLUSTER_NAME - retVal=$? - echo retVal - if [ $retVal -ne 0 ]; then - exit $retVal - fi + make $1 NUM_MASTERS=$NUM_MASTERS NUM_WORKERS=$NUM_WORKERS KUBECONFIG=$PWD/minikube_kubeconfig BASE_DOMAIN=$BASE_DOMAIN CLUSTER_NAME=$CLUSTER_NAME + retVal=$? + echo retVal + if [ $retVal -ne 0 ]; then + exit $retVal + fi } - function run_skipper_make_command() { make $1 retVal=$? echo retVal if [ $retVal -ne 0 ]; then - exit $retVal + exit $retVal fi } - function run_without_os_envs() { - run_skipper_make_command $1 + run_skipper_make_command $1 } -"$@" \ No newline at end of file +"$@" diff --git a/scripts/deploy_bm_inventory.sh b/scripts/deploy_bm_inventory.sh index 9f5a697edd5..8aefd3d575d 100755 --- a/scripts/deploy_bm_inventory.sh +++ b/scripts/deploy_bm_inventory.sh @@ -3,7 +3,6 @@ set -euo pipefail source scripts/utils.sh - export KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} export SERVICE_NAME=bm-inventory export INVENTORY_URL=$(get_main_ip) @@ -13,7 +12,7 @@ mkdir -p build print_log "Updating bm_inventory params" skipper run discovery-infra/update_bm_inventory_cm.py -skipper run "make -C bm-inventory/ deploy-all" ${SKIPPER_PARAMS} DEPLOY_TAG=${DEPLOY_TAG} +skipper run "make -C bm-inventory/ deploy-all" ${SKIPPER_PARAMS} DEPLOY_TAG=${DEPLOY_TAG} print_log "Wait till ${SERVICE_NAME} api is ready" wait_for_url_and_run "$(minikube service ${SERVICE_NAME} --url -n assisted-installer)" "echo \"waiting for ${SERVICE_NAME}\"" diff --git a/scripts/deploy_ui.sh b/scripts/deploy_ui.sh index e6b51a1ba92..08bdc1d2a8f 100755 --- a/scripts/deploy_ui.sh +++ b/scripts/deploy_ui.sh @@ -11,22 +11,22 @@ export UI_DEPLOY_FILE=build/ui_deploy.yaml export UI_SERVICE_NAME=ocp-metal-ui export NO_UI=${NO_UI:-n} if [ "${CONTAINER_COMMAND}" = "podman" ]; then - export PODMAN_FLAGS="--pull=always" + export PODMAN_FLAGS="--pull=always" else - export PODMAN_FLAGS="" + export PODMAN_FLAGS="" fi -if [ "${NO_UI}" != "n" ];then +if [ "${NO_UI}" != "n" ]; then exit 0 fi mkdir -p build #In case deploy tag is empty use latest -[[ -z "${DEPLOY_TAG}" ]] && export DEPLOY_TAG=latest +[[ -z "${DEPLOY_TAG}" ]] && export DEPLOY_TAG=latest print_log "Starting ui" -${CONTAINER_COMMAND} run ${PODMAN_FLAGS} --rm quay.io/ocpmetal/ocp-metal-ui:latest /deploy/deploy_config.sh -i quay.io/ocpmetal/ocp-metal-ui:${DEPLOY_TAG} > ${UI_DEPLOY_FILE} +${CONTAINER_COMMAND} run ${PODMAN_FLAGS} --rm quay.io/ocpmetal/ocp-metal-ui:latest /deploy/deploy_config.sh -i quay.io/ocpmetal/ocp-metal-ui:${DEPLOY_TAG} >${UI_DEPLOY_FILE} kubectl --kubeconfig=${KUBECONFIG} apply -f ${UI_DEPLOY_FILE} print_log "Wait till ui api is ready" diff --git a/scripts/install_environment.sh b/scripts/install_environment.sh index d55d48b19d7..b99f73cc687 100755 --- a/scripts/install_environment.sh +++ b/scripts/install_environment.sh @@ -1,9 +1,9 @@ +#!/bin/bash set -euo pipefail export EXTERNAL_PORT=${EXTERNAL_PORT:-y} export 
diff --git a/scripts/install_environment.sh b/scripts/install_environment.sh
index d55d48b19d7..b99f73cc687 100755
--- a/scripts/install_environment.sh
+++ b/scripts/install_environment.sh
@@ -1,9 +1,9 @@
+#!/bin/bash
 set -euo pipefail
 
 export EXTERNAL_PORT=${EXTERNAL_PORT:-y}
 export ADD_USER_TO_SUDO=${ADD_USER_TO_SUDO:-n}
-
 function version_is_greater() {
     if [ "$(printf '%s\n' "$2" "$1" | sort -V | head -n1)" = "$2" ]; then
         return
@@ -12,115 +12,113 @@ function version_is_greater() {
     false
 }
 
-
 function install_libvirt() {
-  echo "Installing libvirt..."
-  sudo dnf install -y libvirt libvirt-devel libvirt-daemon-kvm qemu-kvm
-  sudo systemctl enable --now libvirtd
-
-  current_version="$(libvirtd --version | awk '{print $3}')"
-  minimum_version="5.5.100"
-
-  echo "Setting libvirt values"
-  sudo sed -i -e 's/#listen_tls/listen_tls/g' /etc/libvirt/libvirtd.conf
-  sudo sed -i -e 's/#listen_tcp/listen_tcp/g' /etc/libvirt/libvirtd.conf
-  sudo sed -i -e 's/#auth_tcp = "sasl"/auth_tcp = "none"/g' /etc/libvirt/libvirtd.conf
-  sudo sed -i -e 's/#tcp_port/tcp_port/g' /etc/libvirt/libvirtd.conf
-  sudo sed -i -e 's/#security_driver = "selinux"/security_driver = "none"/g' /etc/libvirt/qemu.conf
-
-  if ! version_is_greater "$current_version" "$minimum_version"; then
-    echo "Adding --listen flag to libvirt"
-    sudo sed -i -e 's/#LIBVIRTD_ARGS="--listen"/LIBVIRTD_ARGS="--listen"/g' /etc/sysconfig/libvirtd
-    sudo systemctl restart libvirtd
-  else
-    echo "libvirtd version is greater then 5.5.x, starting libvirtd-tcp.socket"
-    sudo systemctl stop libvirtd
-    sudo systemctl restart libvirtd.socket
-    sudo systemctl enable --now libvirtd-tcp.socket
-    sudo systemctl start libvirtd-tcp.socket
-    sudo systemctl start libvirtd
-  fi
-
-  current_user=$(whoami)
-  echo "Adding user ${current_user} to libvirt and qemu groups"
-  sudo gpasswd -a $current_user libvirt
-  sudo gpasswd -a $current_user qemu
+    echo "Installing libvirt..."
+    sudo dnf install -y libvirt libvirt-devel libvirt-daemon-kvm qemu-kvm
+    sudo systemctl enable --now libvirtd
+
+    current_version="$(libvirtd --version | awk '{print $3}')"
+    minimum_version="5.5.100"
+
+    echo "Setting libvirt values"
+    sudo sed -i -e 's/#listen_tls/listen_tls/g' /etc/libvirt/libvirtd.conf
+    sudo sed -i -e 's/#listen_tcp/listen_tcp/g' /etc/libvirt/libvirtd.conf
+    sudo sed -i -e 's/#auth_tcp = "sasl"/auth_tcp = "none"/g' /etc/libvirt/libvirtd.conf
+    sudo sed -i -e 's/#tcp_port/tcp_port/g' /etc/libvirt/libvirtd.conf
+    sudo sed -i -e 's/#security_driver = "selinux"/security_driver = "none"/g' /etc/libvirt/qemu.conf
+
+    if ! version_is_greater "$current_version" "$minimum_version"; then
+        echo "Adding --listen flag to libvirt"
+        sudo sed -i -e 's/#LIBVIRTD_ARGS="--listen"/LIBVIRTD_ARGS="--listen"/g' /etc/sysconfig/libvirtd
+        sudo systemctl restart libvirtd
+    else
+        echo "libvirtd version is greater than 5.5.x, starting libvirtd-tcp.socket"
+        sudo systemctl stop libvirtd
+        sudo systemctl restart libvirtd.socket
+        sudo systemctl enable --now libvirtd-tcp.socket
+        sudo systemctl start libvirtd-tcp.socket
+        sudo systemctl start libvirtd
+    fi
+
+    current_user=$(whoami)
+    echo "Adding user ${current_user} to libvirt and qemu groups"
+    sudo gpasswd -a $current_user libvirt
+    sudo gpasswd -a $current_user qemu
 }
 
 function install_runtime_container() {
-  echo "Installing container runitme package"
-  if ! [ -x "$(command -v docker)" ] && ! [ -x "$(command -v podman)" ]; then
-    sudo dnf install podman -y
-  elif [ -x "$(command -v podman)" ]; then
-    current_version="$(podman -v | awk '{print $3}')"
-    minimum_version="1.6.4"
-    if ! version_is_greater "$current_version" "$minimum_version"; then
-      sudo dnf install podman-$minimum_version -y
+    echo "Installing container runtime package"
+    if ! [ -x "$(command -v docker)" ] && ! [ -x "$(command -v podman)" ]; then
+        sudo dnf install podman -y
+    elif [ -x "$(command -v podman)" ]; then
+        current_version="$(podman -v | awk '{print $3}')"
+        minimum_version="1.6.4"
+        if ! version_is_greater "$current_version" "$minimum_version"; then
+            sudo dnf install podman-$minimum_version -y
+        fi
+    else
+        echo "docker or podman is already installed"
     fi
-  else
-    echo "docker or podman is already installed"
-  fi
 }
 
-function install_packages(){
-  echo "Installing dnf packages"
-  sudo dnf install -y make python3 python3-pip git jq bash-completion xinetd
-  sudo systemctl enable --now xinetd
+function install_packages() {
+    echo "Installing dnf packages"
+    sudo dnf install -y make python3 python3-pip git jq bash-completion xinetd
+    sudo systemctl enable --now xinetd
 }
 
 function install_skipper() {
-  echo "Installing skipper and adding ~/.local/bin to PATH"
-  pip3 install strato-skipper==1.22.0 --user
+    echo "Installing skipper and adding ~/.local/bin to PATH"
+    pip3 install strato-skipper==1.22.0 --user
 
-  #grep -qxF "export PATH=~/.local/bin:$PATH" ~/.bashrc || echo "export PATH=~/.local/bin:$PATH" >> ~/.bashrc
-  #export PATH="$PATH:~/.local/bin"
+    #grep -qxF "export PATH=~/.local/bin:$PATH" ~/.bashrc || echo "export PATH=~/.local/bin:$PATH" >> ~/.bashrc
+    #export PATH="$PATH:~/.local/bin"
 
-  if ! [ -x "$(command -v skipper)" ]; then
-    sudo cp ~/.local/bin/skipper /usr/local/bin
-  fi
+    if ! [ -x "$(command -v skipper)" ]; then
+        sudo cp ~/.local/bin/skipper /usr/local/bin
+    fi
 }
 
 function config_firewalld() {
-  echo "Config firewall"
-  sudo systemctl start firewalld
-  if [ "${EXTERNAL_PORT}" = "y" ];then
-    echo "configuring external ports"
-    sudo firewall-cmd --zone=public --add-port=6000/tcp
-    sudo firewall-cmd --zone=public --add-port=6008/tcp
-  fi
-  echo "configuring libvirt zone ports ports"
-  sudo firewall-cmd --zone=libvirt --add-port=6000/tcp
-  sudo firewall-cmd --zone=libvirt --add-port=6008/tcp
-  # sudo firewall-cmd --reload
-  echo "Restarting libvirt after firewalld changes"
-  sudo systemctl restart libvirtd
+    echo "Config firewall"
+    sudo systemctl start firewalld
+    if [ "${EXTERNAL_PORT}" = "y" ]; then
+        echo "configuring external ports"
+        sudo firewall-cmd --zone=public --add-port=6000/tcp
+        sudo firewall-cmd --zone=public --add-port=6008/tcp
+    fi
+    echo "configuring libvirt zone ports"
+    sudo firewall-cmd --zone=libvirt --add-port=6000/tcp
+    sudo firewall-cmd --zone=libvirt --add-port=6008/tcp
+    # sudo firewall-cmd --reload
+    echo "Restarting libvirt after firewalld changes"
+    sudo systemctl restart libvirtd
 }
 
 function additional_configs() {
-  if [ "${ADD_USER_TO_SUDO}" != "n" ];then
-    current_user=$(whoami)
-    echo "Make $current_user sudo passwordless"
-    echo "$current_user ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$current_user
-  fi
-
-  if sudo virsh net-list --all | grep default | grep inactive; then
-    echo "default network is inactive, fixing it"
-    if sudo ip link del virbr0-nic; then
-      echo "Deleting virbr0-nic"
+    if [ "${ADD_USER_TO_SUDO}" != "n" ]; then
+        current_user=$(whoami)
+        echo "Make $current_user sudo passwordless"
+        echo "$current_user ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$current_user
     fi
-    if sudo ip link del virbr0; then
-      echo "Deleting virbr0"
+
+    if sudo virsh net-list --all | grep default | grep inactive; then
+        echo "default network is inactive, fixing it"
+        if sudo ip link del virbr0-nic; then
+            echo "Deleting virbr0-nic"
+        fi
+        if sudo ip link del virbr0; then
+            echo "Deleting virbr0"
+        fi
+        sudo virsh net-start default
+    fi
-    sudo virsh net-start default
-  fi
-  touch ~/.gitconfig
-  sudo chmod ugo+rx "$(dirname "$(pwd)")"
-  echo "disaling selinux by setenforce 0"
-  sudo setenforce 0
+    touch ~/.gitconfig
+    sudo chmod ugo+rx "$(dirname "$(pwd)")"
+    echo "disabling selinux by setenforce 0"
+    sudo setenforce 0
 }
-
 install_packages
 install_libvirt
 install_runtime_container
diff --git a/scripts/install_minikube.sh b/scripts/install_minikube.sh
index 2cdbc39956a..a3475241d30 100755
--- a/scripts/install_minikube.sh
+++ b/scripts/install_minikube.sh
@@ -1,46 +1,46 @@
-
-export SUDO=$(if [ -x "$(command -v sudo)" ];then echo "sudo" ; else echo "";fi)
+#!/bin/bash
+export SUDO=$(if [ -x "$(command -v sudo)" ]; then echo "sudo"; else echo ""; fi)
 
 function install_minikube() {
-  if ! [ -x "$(command -v minikube)" ]; then
-    echo "Installing minikube..."
-    curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.8.2/minikube-linux-amd64
-    chmod +x minikube
-    ${SUDO} cp minikube /usr/bin/
-else
-    echo "minikube is already installed"
-fi
+    if ! [ -x "$(command -v minikube)" ]; then
+        echo "Installing minikube..."
+        curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.8.2/minikube-linux-amd64
+        chmod +x minikube
+        ${SUDO} cp minikube /usr/bin/
+    else
+        echo "minikube is already installed"
+    fi
 }
 
 function install_kubectl() {
-  if ! [ -x "$(command -v kubectl)" ]; then
-    echo "Installing kubectl..."
-    curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl
-    chmod +x kubectl
-    ${SUDO} mv kubectl /usr/bin/
-else
-    echo "kubectl is already installed"
-fi
+    if ! [ -x "$(command -v kubectl)" ]; then
+        echo "Installing kubectl..."
+        curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.17.0/bin/linux/amd64/kubectl
+        chmod +x kubectl
+        ${SUDO} mv kubectl /usr/bin/
+    else
+        echo "kubectl is already installed"
+    fi
 }
 
 function install_kvm2_driver() {
-  if ! [ -x "$(command -v docker-machine-driver-kvm2)" ]; then
-    echo "Installing kvm2_driver..."
-    curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-kvm2
-    chmod +x docker-machine-driver-kvm2
-    ${SUDO} mv docker-machine-driver-kvm2 /usr/bin/
-else
-    echo "docker-machine-driver-kvm2 is already installed"
-fi
+    if ! [ -x "$(command -v docker-machine-driver-kvm2)" ]; then
+        echo "Installing kvm2_driver..."
+        curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-kvm2
+        chmod +x docker-machine-driver-kvm2
+        ${SUDO} mv docker-machine-driver-kvm2 /usr/bin/
+    else
+        echo "docker-machine-driver-kvm2 is already installed"
+    fi
 }
 
 function install_oc() {
-  if ! [ -x "$(command -v oc)" ]; then
-    echo "Installing oc..."
-    curl -Lo oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/oc/${OPENSHIFT_VERSION:-4.5}/linux/oc.tar.gz && ${SUDO} tar -C /usr/local/bin -xf oc.tar.gz && rm -f oc.tar.gz
-  else
-    echo "oc is already installed"
-  fi
+    if ! [ -x "$(command -v oc)" ]; then
+        echo "Installing oc..."
+        curl -Lo oc.tar.gz https://mirror.openshift.com/pub/openshift-v4/clients/oc/${OPENSHIFT_VERSION:-4.5}/linux/oc.tar.gz && ${SUDO} tar -C /usr/local/bin -xf oc.tar.gz && rm -f oc.tar.gz
+    else
+        echo "oc is already installed"
+    fi
 }
 
 install_minikube
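All four install_* helpers above share the same guard: probe with `command -v` so rerunning the script is a no-op when the binary already exists. Condensed to its skeleton (sometool and the URL are placeholders, not part of the patch):

if ! [ -x "$(command -v sometool)" ]; then
    # fetch, mark executable, and move into PATH only on first run
    curl -Lo sometool https://example.com/sometool
    chmod +x sometool
    ${SUDO} mv sometool /usr/bin/
else
    echo "sometool is already installed"
fi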
diff --git a/scripts/run_minikube.sh b/scripts/run_minikube.sh
index ddb7aa6736d..a0e625711c1 100755
--- a/scripts/run_minikube.sh
+++ b/scripts/run_minikube.sh
@@ -1,17 +1,17 @@
-
+#!/bin/bash
 function configure_minikube() {
-  echo "Configuring minikube..."
-  minikube config set ShowBootstrapperDeprecationNotification false
-  minikube config set WantUpdateNotification false
-  minikube config set WantReportErrorPrompt false
-  minikube config set WantKubectlDownloadMsg false
+    echo "Configuring minikube..."
+    minikube config set ShowBootstrapperDeprecationNotification false
+    minikube config set WantUpdateNotification false
+    minikube config set WantReportErrorPrompt false
+    minikube config set WantKubectlDownloadMsg false
 }
 
 function init_minikube() {
     #If the vm exists, it has already been initialized
     if [[ "$(virsh -c qemu:///system list --all)" != *"minikube"* ]]; then
-        #minikube start --kvm-network=test-infra-net --vm-driver=kvm2 --memory=4096 --force
-        minikube start --vm-driver=kvm2 --memory=4096 --force
+        #minikube start --kvm-network=test-infra-net --vm-driver=kvm2 --memory=4096 --force
+        minikube start --vm-driver=kvm2 --memory=4096 --force
     fi
 }
 
diff --git a/scripts/utils.sh b/scripts/utils.sh
index f71b8d6f305..bb36cf0284a 100755
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -4,21 +4,20 @@ set -o nounset
 
 export KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}
 
-
 function print_log() {
-  echo "$(basename $0): $1"
+    echo "$(basename $0): $1"
 }
 
 function url_reachable() {
-    curl -s $1 --max-time 4 > /dev/null
+    curl -s $1 --max-time 4 >/dev/null
     return $?
 }
 
 function spawn_port_forwarding_command() {
-  service_name=$1
-  external_port=$2
+    service_name=$1
+    external_port=$2
 
-  cat << EOF > build/xinetd-${service_name}
+    cat <<EOF >build/xinetd-${service_name}
 service ${service_name}
 {
@@ -34,17 +33,17 @@ service ${service_name}
     per_source = UNLIMITED
 }
 EOF
-  sudo mv build/xinetd-${service_name} /etc/xinetd.d/${service_name} --force
-  sudo systemctl start xinetd
-  sudo systemctl reload xinetd
+    sudo mv build/xinetd-${service_name} /etc/xinetd.d/${service_name} --force
+    sudo systemctl start xinetd
+    sudo systemctl reload xinetd
 }
 
 function run_in_background() {
-  bash -c "nohup $1 >/dev/null 2>&1 &"
+    bash -c "nohup $1 >/dev/null 2>&1 &"
 }
 
 function kill_all_port_forwardings() {
-  sudo systemctl stop xinetd
+    sudo systemctl stop xinetd
 }
 
 function get_main_ip() {
@@ -59,20 +58,20 @@ function wait_for_url_and_run() {
 
     until [ $RETRIES -eq 0 ] || [ $STATUS -eq 0 ]; do
 
-        RETRIES=$((RETRIES-1))
+        RETRIES=$((RETRIES - 1))
 
-        echo "Running given function"
-        $2
+        echo "Running given function"
+        $2
 
-        echo "Sleeping for 30 seconds"
-        sleep 30s
+        echo "Sleeping for 30 seconds"
+        sleep 30s
 
-        echo "Verifying URL and port are accessible"
-        url_reachable "$1" && STATUS=$? || STATUS=$?
+        echo "Verifying URL and port are accessible"
+        url_reachable "$1" && STATUS=$? || STATUS=$?
done if [ $RETRIES -eq 0 ]; then - echo "Timeout reached, URL $1 not reachable" - exit 1 + echo "Timeout reached, URL $1 not reachable" + exit 1 fi } diff --git a/skipper.yaml b/skipper.yaml index 0e4ff1a08af..fe64e32dd3b 100644 --- a/skipper.yaml +++ b/skipper.yaml @@ -3,36 +3,36 @@ build-container-image: test-infra build-container-tag: latest volumes: - - $HOME/.cache/go-build/:/go/pkg/mod/ - - $HOME/.minikube/:$HOME/.minikube/ - - $HOME/.kube/:$HOME/.kube/ - - /var/lib/libvirt/:/var/lib/libvirt/ - - /var/run/libvirt/:/var/run/libvirt/ - - /tmp:/tmp/ - - /var/lib/libvirt/dnsmasq/:/var/lib/libvirt/dnsmasq/ - - $HOME/.cache/libvirt/:$HOME/.cache/libvirt/ + - $HOME/.cache/go-build/:/go/pkg/mod/ + - $HOME/.minikube/:$HOME/.minikube/ + - $HOME/.kube/:$HOME/.kube/ + - /var/lib/libvirt/:/var/lib/libvirt/ + - /var/run/libvirt/:/var/run/libvirt/ + - /tmp:/tmp/ + - /var/lib/libvirt/dnsmasq/:/var/lib/libvirt/dnsmasq/ + - $HOME/.cache/libvirt/:$HOME/.cache/libvirt/ env: - PULL_SECRET: $PULL_SECRET - NUM_WORKERS: $NUM_WORKERS - SSH_PUB_KEY: $SSH_PUB_KEY - CLUSTER_NAME: $CLUSTER_NAME - BASE_DOMAIN: $BASE_DOMAIN - NETWORK_CIDR: $NETWORK_CIDR - NETWORK_NAME: $NETWORK_NAME - SERVICE: $SERVICE - OBJEXP: $OBJEXP - INSTALLER_IMAGE: $INSTALLER_IMAGE - NETWORK_BRIDGE: $NETWORK_BRIDGE - OPENSHIFT_VERSION: $OPENSHIFT_VERSION - KUBECONFIG: $KUBECONFIG - PROXY_URL: $PROXY_URL - INVENTORY_URL: $INVENTORY_URL - INVENTORY_PORT: $INVENTORY_PORT - AGENT_DOCKER_IMAGE: $AGENT_DOCKER_IMAGE - IMAGE: $IMAGE - BMI_BRANCH: $BMI_BRANCH - RUN_WITH_VIPS: $RUN_WITH_VIPS - KUBECONFIG_GENERATE_IMAGE: $KUBECONFIG_GENERATE_IMAGE - REMOTE_INVENTORY_URL: $REMOTE_INVENTORY_URL - CLUSTER_ID: $CLUSTER_ID - NUM_MASTERS: $NUM_MASTERS \ No newline at end of file + PULL_SECRET: $PULL_SECRET + NUM_WORKERS: $NUM_WORKERS + SSH_PUB_KEY: $SSH_PUB_KEY + CLUSTER_NAME: $CLUSTER_NAME + BASE_DOMAIN: $BASE_DOMAIN + NETWORK_CIDR: $NETWORK_CIDR + NETWORK_NAME: $NETWORK_NAME + SERVICE: $SERVICE + OBJEXP: $OBJEXP + INSTALLER_IMAGE: $INSTALLER_IMAGE + NETWORK_BRIDGE: $NETWORK_BRIDGE + OPENSHIFT_VERSION: $OPENSHIFT_VERSION + KUBECONFIG: $KUBECONFIG + PROXY_URL: $PROXY_URL + INVENTORY_URL: $INVENTORY_URL + INVENTORY_PORT: $INVENTORY_PORT + AGENT_DOCKER_IMAGE: $AGENT_DOCKER_IMAGE + IMAGE: $IMAGE + BMI_BRANCH: $BMI_BRANCH + RUN_WITH_VIPS: $RUN_WITH_VIPS + KUBECONFIG_GENERATE_IMAGE: $KUBECONFIG_GENERATE_IMAGE + REMOTE_INVENTORY_URL: $REMOTE_INVENTORY_URL + CLUSTER_ID: $CLUSTER_ID + NUM_MASTERS: $NUM_MASTERS diff --git a/terraform_files/terraform.tfvars.json b/terraform_files/terraform.tfvars.json index e73ac4690eb..b327b0ea3d6 100644 --- a/terraform_files/terraform.tfvars.json +++ b/terraform_files/terraform.tfvars.json @@ -7,14 +7,8 @@ "image_path": "tmp", "libvirt_uri": "qemu+tcp://192.168.122.1/system", "libvirt_network_if": "tt0", - "libvirt_master_ips": [ - "192.168.126.10", - "192.168.126.11", - "192.168.126.12" - ], - "libvirt_worker_ips": [ - "192.168.126.13" - ], + "libvirt_master_ips": ["192.168.126.10", "192.168.126.11", "192.168.126.12"], + "libvirt_worker_ips": ["192.168.126.13"], "libvirt_master_memory": "8192", "libvirt_master_vcpu": "4", "libvirt_worker_memory": "8192", @@ -22,5 +16,5 @@ "libvirt_storage_pool_path": "/var/lib/libvirt/openshift-images", "cluster_inventory_id": null, "libvirt_network_name": "test-infra-net", - "api_vip":"192.168.126.100" + "api_vip": "192.168.126.100" } diff --git a/terraform_files/variables-libvirt.tf b/terraform_files/variables-libvirt.tf index 639efc7f9f3..f29bd55267e 100644 --- 
a/terraform_files/variables-libvirt.tf +++ b/terraform_files/variables-libvirt.tf @@ -101,4 +101,3 @@ variable "libvirt_storage_pool_path" { type = string description = "storage pool path" } - diff --git a/terraform_files/volume/outputs.tf b/terraform_files/volume/outputs.tf index df945005305..0d558417752 100644 --- a/terraform_files/volume/outputs.tf +++ b/terraform_files/volume/outputs.tf @@ -1,4 +1,3 @@ output "coreos_base_volume_id" { value = libvirt_volume.coreos_base.id } -
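One pattern worth calling out from the reformatted scripts/utils.sh: spawn_port_forwarding_command writes the xinetd unit through an unquoted heredoc, so ${service_name} and ${external_port} are expanded at generation time, and shfmt renders the redirections tightly as `cat <<EOF >file`. A minimal standalone sketch of the same mechanism (the demo name, port, and body are hypothetical, not the repo's actual unit):

service_name=demo
external_port=6000
mkdir -p build
# unquoted EOF: shell variables inside the heredoc are expanded before writing
cat <<EOF >build/xinetd-${service_name}
service ${service_name}
{
    port = ${external_port}
}
EOF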