From caea47318ed29ecb0ecaad55547428f3a2f7c537 Mon Sep 17 00:00:00 2001
From: gursewak1997
Date: Fri, 12 Jul 2024 10:41:44 -0700
Subject: [PATCH] Remove cosa remote-prune

With the addition of cmd-cloud-prune, this command is mostly
deprecated, so delete it.
---
 src/cmd-remote-prune |  81 ---------------------------
 src/cosalib/prune.py | 140 ------------------------------------------
 2 files changed, 221 deletions(-)
 delete mode 100755 src/cmd-remote-prune
 delete mode 100644 src/cosalib/prune.py

diff --git a/src/cmd-remote-prune b/src/cmd-remote-prune
deleted file mode 100755
index ab7f9cc0b0..0000000000
--- a/src/cmd-remote-prune
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/python3 -u
-
-'''
-    This script removes unreferenced builds from s3 bucket
-'''
-
-import argparse
-import sys
-import os
-
-from cosalib.builds import Builds
-from cosalib.prune import fetch_build_meta, get_unreferenced_s3_builds, delete_build
-
-parser = argparse.ArgumentParser(prog="coreos-assembler remote-prune")
-parser.add_argument("--workdir", default='.', help="Path to workdir")
-parser.add_argument("--dry-run", help="Don't actually delete anything",
-                    action='store_true')
-parser.add_argument('--azure-credentials', help='Path to Azure credentials file',
-                    default=os.environ.get("AZURE_CREDENTIALS"))
-parser.add_argument('--azure-resource-group', help='Resource group',
-                    default=os.environ.get('AZURE_RESOURCE_GROUP'))
-parser.add_argument("--gcp-json-key", help="GCP Service Account JSON Auth",
-                    default=os.environ.get("GCP_JSON_AUTH"))
-parser.add_argument("--gcp-project", help="GCP Project name",
-                    default=os.environ.get("GCP_PROJECT_NAME"))
-
-subparsers = parser.add_subparsers(dest='cmd', title='subcommands')
-subparsers.required = True
-
-s3 = subparsers.add_parser('s3', help='Prune s3 buckets')
-s3.add_argument("--bucket", help="Bucket name")
-s3.add_argument("--prefix", help="Key prefix")
-s3.add_argument("--force", help="Wipe s3 key ignoring the errors",
-                action='store_true')
-
-args = parser.parse_args()
-
-builds = Builds(args.workdir)
-
-scanned_builds = []
-for build in builds.raw()["builds"]:
-    for arch in build['arches']:
-        build = fetch_build_meta(builds, build['id'], arch, args.bucket, args.prefix)
-        if build:
-            scanned_builds.append(build)
-
-new_builds = []
-builds_to_delete = []
-
-# Find unreferenced builds in the bucket and remove them
-buildids = [x.id for x in scanned_builds]
-unreferenced_s3_builds = get_unreferenced_s3_builds(buildids, args.bucket, args.prefix)
-
-if args.dry_run:
-    print("Not removing anything: in dry-run mode")
-    sys.exit(0)
-
-cloud_config = {
-    'azure': {
-        'credentials': args.azure_credentials,
-        'resource-group': args.azure_resource_group,
-    },
-    'gcp': {
-        'json-key': args.gcp_json_key,
-        'project': args.gcp_project,
-    }
-}
-
-error_during_pruning = False
-for unmatched_build_id in unreferenced_s3_builds:
-    # TODO: fetch arches from s3
-    build = fetch_build_meta(builds, unmatched_build_id, 'x86_64', args.bucket, args.prefix)
-    if build:
-        try:
-            delete_build(build, args.bucket, args.prefix, cloud_config, args.force)
-        except Exception as e:
-            error_during_pruning = True
-            print(f"{e}")
-
-if error_during_pruning:
-    sys.exit(1)
diff --git a/src/cosalib/prune.py b/src/cosalib/prune.py
deleted file mode 100644
index eee6c338b3..0000000000
--- a/src/cosalib/prune.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import collections
-import json
-import os
-
-from cosalib.s3 import S3
-
-from cosalib.aws import (
-    deregister_ami,
-    delete_snapshot
-)
-
-from cosalib.aliyun import remove_aliyun_image
-from cosalib.gcp import remove_gcp_image
-from cosalib.azure import remove_azure_image
-
-
-Build = collections.namedtuple('Build', ['id', 'timestamp', 'images', 'arches'])
-
-
-def get_unreferenced_s3_builds(active_build_set, bucket, prefix):
-    """
-    Scans s3 bucket and returns a list of build ID in the prefix
-
-    :param active_build_set: list of known builds
-    :type active_build_set: list
-    """
-    print(f"Looking for unreferenced builds in s3://{bucket}/{prefix}")
-    s3_subdirs = S3().list_objects(bucket, f"{prefix}/", result_key='CommonPrefixes')
-    s3_matched = set()
-    s3_unmatched = set()
-    for prefixKey in s3_subdirs:
-        subdir = prefixKey['Prefix']
-        buildid = subdir.replace(prefix, '').strip("/")
-        if buildid not in active_build_set:
-            s3_unmatched.add(buildid)
-        else:
-            s3_matched.add(buildid)
-    for buildid in active_build_set:
-        if buildid not in s3_matched:
-            print(f"WARNING: Failed to find build in S3: {buildid}")
-    print(f"Found {len(s3_unmatched)} builds")
-    return s3_unmatched
-
-
-def fetch_build_meta(builds, buildid, arch, bucket, prefix):
-    print(f"Looking for meta.json for '{buildid}'")
-    build_dir = builds.get_build_dir(buildid, arch)
-
-    # Fetch missing meta.json paths
-    meta_json_path = os.path.join(build_dir, "meta.json")
-    if not os.path.exists(meta_json_path):
-        # Fetch it from s3
-        os.makedirs(build_dir, exist_ok=True)
-        s3_key = f"{prefix}/{buildid}/{arch}/meta.json"
-        print(f"Fetching meta.json for '{buildid}' from s3://{bucket}/{prefix} to {meta_json_path}")
-        head_result = S3().head_object(bucket, s3_key)
-        if head_result:
-            print(f"Found s3 key at {s3_key}")
-            S3().download_file(bucket, s3_key, meta_json_path)
-        else:
-            print(f"Failed to find object at {s3_key}")
-            return None
-
-    buildmeta_path = os.path.join(meta_json_path)
-    with open(buildmeta_path) as f:
-        buildmeta = json.load(f)
-    images = {
-        'amis': buildmeta.get('amis') or [],
-        'azure': buildmeta.get('azure') or [],
-        'gcp': buildmeta.get('gcp') or [],
-    }
-    return Build(
-        id=buildid,
-        timestamp=buildmeta['coreos-assembler.build-timestamp'],
-        images=images,
-        arches=arch
-    )
-
-
-def delete_build(build, bucket, prefix, cloud_config, force=False):
-    print(f"Deleting build {build.id}")
-    errors = []
-    # Unregister AMIs and snapshots
-    for ami in build.images.get('amis', []):
-        region_name = ami.get('name')
-        ami_id = ami.get('hvm')
-        snapshot_id = ami.get('snapshot')
-        if ami_id and region_name:
-            try:
-                deregister_ami(ami_id, region=region_name)
-            except Exception as e:
-                errors.append(e)
-        if snapshot_id and region_name:
-            try:
-                delete_snapshot(snapshot_id, region=region_name)
-            except Exception as e:
-                errors.append(e)
-
-    aliyun = build.images.get('aliyun')
-    if aliyun:
-        region_name = aliyun.get('name')
-        aliyun_id = aliyun.get('hvm')
-        if region_name and aliyun_id:
-            try:
-                remove_aliyun_image(aliyun_id, region=region_name)
-            except Exception as e:
-                errors.append(e)
-
-    azure = build.images.get('azure')
-    if azure:
-        image = azure.get('image')
-        resource_group = cloud_config.get('azure', {}).get('resource-group')
-        credentials = cloud_config.get('azure', {}).get('credentials')
-        if image and resource_group and credentials:
-            try:
-                remove_azure_image(image, resource_group, credentials)
-            except Exception as e:
-                errors.append(e)
-
-    gcp = build.images.get('gcp')
-    if gcp:
-        gcp_image = gcp.get('image')
-        json_key = cloud_config.get('gcp', {}).get('json-key')
-        project = cloud_config.get('gcp', {}).get('project')
-        if gcp_image and json_key and project:
-            try:
-                remove_gcp_image(gcp_image, json_key, project)
-            except Exception as e:
-                errors.append(e)
-
-    if len(errors) != 0:
-        print(f"Found errors when removing build {build.id}:")
-        for e in errors:
-            print(e)
-        if not force:
-            raise Exception()
-
-    # Delete s3 bucket
-    print(f"Deleting key {prefix}{build.id} from bucket {bucket}")
-    S3().delete_object(bucket, f"{prefix}{str(build.id)}")