Adding garbage removal for cloud uploads
gursewak1997 committed Jul 12, 2024
1 parent 3823521 commit 0c1b891
Showing 13 changed files with 519 additions and 96 deletions.
1 change: 1 addition & 0 deletions Makefile
@@ -91,6 +91,7 @@ schema-check:
# Is the generated Go code synced with the schema?
grep -q "$(DIGEST)" pkg/builds/cosa_v1.go
grep -q "$(DIGEST)" pkg/builds/schema_doc.go
grep -q "$(DIGEST)" src/cmd-cloud-prune

install:
install -d $(DESTDIR)$(PREFIX)/lib/coreos-assembler
2 changes: 1 addition & 1 deletion cmd/coreos-assembler.go
@@ -16,7 +16,7 @@ var buildCommands = []string{"init", "fetch", "build", "run", "prune", "clean",
var advancedBuildCommands = []string{"buildfetch", "buildupload", "oc-adm-release", "push-container"}
var buildextendCommands = []string{"aliyun", "applehv", "aws", "azure", "digitalocean", "exoscale", "extensions-container", "gcp", "hashlist-experimental", "hyperv", "ibmcloud", "kubevirt", "live", "metal", "metal4k", "nutanix", "openstack", "qemu", "secex", "virtualbox", "vmware", "vultr"}

var utilityCommands = []string{"aws-replicate", "compress", "copy-container", "koji-upload", "kola", "push-container-manifest", "remote-build-container", "remote-prune", "remote-session", "sign", "tag", "update-variant"}
var utilityCommands = []string{"aws-replicate", "compress", "copy-container", "koji-upload", "kola", "push-container-manifest", "remote-build-container", "cloud-prune", "remote-session", "sign", "tag", "update-variant"}
var otherCommands = []string{"shell", "meta"}

func init() {
68 changes: 68 additions & 0 deletions mantle/cmd/ore/aws/delete-image.go
@@ -0,0 +1,68 @@
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aws

import (
    "fmt"
    "os"

    "github.com/spf13/cobra"
)

var (
    cmdDeleteImage = &cobra.Command{
        Use:   "delete-image --ami <ami_id> --snapshot <snapshot_id> ...",
        Short: "Delete AMI and/or snapshot",
        Run:   runDeleteImage,
    }
    amiID        string
    snapshotID   string
    allowMissing bool
)

func init() {
    // Initialize the command and its flags
    AWS.AddCommand(cmdDeleteImage)
    cmdDeleteImage.Flags().StringVar(&amiID, "ami", "", "AWS ami tag")
    cmdDeleteImage.Flags().StringVar(&snapshotID, "snapshot", "", "AWS snapshot tag")
    cmdDeleteImage.Flags().BoolVar(&allowMissing, "allow-missing", false, "Do not error out on the resource not existing")
}

func runDeleteImage(cmd *cobra.Command, args []string) {
    // Check if either amiID or snapshotID is provided
    if amiID == "" && snapshotID == "" {
        fmt.Fprintf(os.Stderr, "Provide --ami or --snapshot to delete\n")
        os.Exit(1)
    }

    // Remove resources based on provided flags
    if amiID != "" {
        err := API.RemoveByAmiTag(amiID, allowMissing)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Could not delete %v: %v\n", amiID, err)
            os.Exit(1)
        }
    }

    if snapshotID != "" {
        err := API.RemoveBySnapshotTag(snapshotID, allowMissing)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Could not delete %v: %v\n", snapshotID, err)
            os.Exit(1)
        }
    }

    os.Exit(0)
}
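The new ore subcommand gives the pruning side a single entry point that tears down an AMI together with its backing snapshot, and --allow-missing keeps repeat runs from failing on resources that are already gone. As a rough sketch, not part of this diff, of how a Python caller such as the new src/cmd-cloud-prune (one of the 13 changed files, not shown in this excerpt) might drive it; the helper name, the plain "ore" invocation, and the omission of region and credential flags are all assumptions:

# Hypothetical sketch, not from this commit: driving the new
# "ore aws delete-image" subcommand from a Python pruning script.
import subprocess


def deregister_aws_image(ami_id, snapshot_id):
    # --allow-missing makes a repeated prune pass idempotent: resources that
    # were already deleted are logged and skipped instead of failing the run.
    subprocess.run(['ore', 'aws', 'delete-image',
                    '--ami', ami_id,
                    '--snapshot', snapshot_id,
                    '--allow-missing'],
                   check=True)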
12 changes: 11 additions & 1 deletion mantle/cmd/ore/gcloud/delete-images.go
@@ -19,6 +19,7 @@ import (
"os"

"github.com/spf13/cobra"
"google.golang.org/api/googleapi"

"github.com/coreos/coreos-assembler/mantle/platform/api/gcloud"
)
@@ -29,10 +30,12 @@ var (
Short: "Delete GCP images",
Run: runDeleteImage,
}
allowMissing bool
)

func init() {
    GCloud.AddCommand(cmdDeleteImage)
    cmdDeleteImage.Flags().BoolVar(&allowMissing, "allow-missing", false, "Do not error out on the resource not existing")
}

func runDeleteImage(cmd *cobra.Command, args []string) {
@@ -46,7 +49,14 @@ func runDeleteImage(cmd *cobra.Command, args []string) {
    for _, name := range args {
        pending, err := api.DeleteImage(name)
        if err != nil {
            fmt.Fprintf(os.Stderr, "%v\n", err)
            if gErr, ok := err.(*googleapi.Error); ok {
                // Skip on NotFound error only if allowMissing flag is set to True
                if gErr.Code == 404 && allowMissing {
                    plog.Infof("%v\n", err)
                    continue
                }
            }
            fmt.Fprintf(os.Stderr, "Deleting %q failed: %v\n", name, err)
            exit = 1
            continue
        }
43 changes: 43 additions & 0 deletions mantle/platform/api/aws/images.go
@@ -733,6 +733,49 @@ func (a *API) FindImage(name string) (string, error) {
return "", nil
}

// Deregisters the ami.
func (a *API) RemoveByAmiTag(imageID string, allowMissing bool) error {
_, err := a.ec2.DeregisterImage(&ec2.DeregisterImageInput{ImageId: &imageID})
if err != nil {
if allowMissing {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidAMIID.NotFound" {
plog.Infof("%s does not exist.", imageID)
return nil
}
if awsErr.Code() == "InvalidAMIID.Unavailable" {
plog.Infof("%s is no longer available.", imageID)
return nil
}
}
}
return err
}
plog.Infof("Deregistered existing AMI %s", imageID)
return nil
}

func (a *API) RemoveBySnapshotTag(snapshotID string, allowMissing bool) error {
_, err := a.ec2.DeleteSnapshot(&ec2.DeleteSnapshotInput{SnapshotId: &snapshotID})
if err != nil {
if allowMissing {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidSnapshot.NotFound" {
plog.Infof("%s does not exist.", snapshotID)
return nil
}
if awsErr.Code() == "InvalidSnapshot.Unavailable" {
plog.Infof("%s is no longer available.", snapshotID)
return nil
}
}
}
return err
}
plog.Infof("Deregistered existing snapshot %s", snapshotID)
return nil
}

func (a *API) describeImage(imageID string) (*ec2.Image, error) {
describeRes, err := a.ec2.DescribeImages(&ec2.DescribeImagesInput{
ImageIds: aws.StringSlice([]string{imageID}),
2 changes: 1 addition & 1 deletion mantle/platform/api/gcloud/image.go
@@ -223,7 +223,7 @@ func (a *API) DeprecateImage(name string, state DeprecationState, replacement st
func (a *API) DeleteImage(name string) (*Pending, error) {
    op, err := a.compute.Images.Delete(a.options.Project, name).Do()
    if err != nil {
        return nil, fmt.Errorf("Deleting %s failed: %v", name, err)
        return nil, err
    }
    opReq := a.compute.GlobalOperations.Get(a.options.Project, op.Name)
    return a.NewPending(op.Name, opReq), nil
63 changes: 2 additions & 61 deletions src/cmd-buildupload
@@ -10,8 +10,7 @@ import sys
import tempfile
import subprocess
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
from tenacity import retry
from cosalib.s3 import s3_copy, s3_check_exists

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

Expand All @@ -22,13 +21,7 @@ CACHE_MAX_AGE_ARTIFACT = 60 * 60 * 24 * 365
# set metadata caching to 5m
CACHE_MAX_AGE_METADATA = 60 * 5
from cosalib.builds import Builds, BUILDFILES
from cosalib.cmdlib import (
    load_json,
    retry_stop_long,
    retry_wait_long,
    retry_boto_exception,
    retry_callback
)
from cosalib.cmdlib import load_json


def main():
@@ -194,57 +187,5 @@ def s3_upload_build(s3_client, args, builddir, bucket, prefix):
dry_run=args.dry_run)


@retry(stop=retry_stop_long, wait=retry_wait_long,
       retry=retry_boto_exception, before_sleep=retry_callback)
def s3_check_exists(s3_client, bucket, key, dry_run=False):
    print(f"Checking if bucket '{bucket}' has key '{key}'")
    try:
        s3_client.head_object(Bucket=bucket, Key=key)
    except ClientError as e:
        if e.response['Error']['Code'] == '404':
            return False
        raise e
    except NoCredentialsError as e:
        # It's reasonable to run without creds if doing a dry-run
        if dry_run:
            return False
        raise e
    return True


@retry(stop=retry_stop_long, wait=retry_wait_long,
       retry=retry_boto_exception, retry_error_callback=retry_callback)
def s3_copy(s3_client, src, bucket, key, max_age, acl, extra_args={}, dry_run=False):
    extra_args = dict(extra_args)
    if 'ContentType' not in extra_args:
        if key.endswith('.json'):
            extra_args['ContentType'] = 'application/json'
        elif key.endswith('.tar'):
            extra_args['ContentType'] = 'application/x-tar'
        elif key.endswith('.xz'):
            extra_args['ContentType'] = 'application/x-xz'
        elif key.endswith('.gz'):
            extra_args['ContentType'] = 'application/gzip'
        elif key.endswith('.iso'):
            extra_args['ContentType'] = 'application/x-iso9660-image'
        else:
            # use a standard MIME type for "binary blob" instead of the default
            # 'binary/octet-stream' AWS slaps on
            extra_args['ContentType'] = 'application/octet-stream'
    upload_args = {
        'CacheControl': f'max-age={max_age}',
        'ACL': acl
    }
    upload_args.update(extra_args)

    print((f"{'Would upload' if dry_run else 'Uploading'} {src} to "
           f"s3://{bucket}/{key} with args {upload_args}"))

    if dry_run:
        return

    s3_client.upload_file(Filename=src, Bucket=bucket, Key=key, ExtraArgs=upload_args)


if __name__ == '__main__':
    sys.exit(main())
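Most of the 61 lines deleted above are the two S3 helpers moving out of cmd-buildupload so they can be shared with the new cloud-prune command. The relocated module itself is not shown in this excerpt; below is a minimal sketch of the shape src/cosalib/s3.py presumably has after the move, assuming the signatures and retry decorators carried over unchanged, with the stubbed bodies standing in for the code deleted from cmd-buildupload:

# Hypothetical sketch of cosalib/s3.py: not the verbatim file from this
# commit, just the interface implied by the new import in cmd-buildupload.
from tenacity import retry

from cosalib.cmdlib import (
    retry_stop_long,
    retry_wait_long,
    retry_boto_exception,
    retry_callback
)


@retry(stop=retry_stop_long, wait=retry_wait_long,
       retry=retry_boto_exception, before_sleep=retry_callback)
def s3_check_exists(s3_client, bucket, key, dry_run=False):
    # Assumed identical to the helper deleted above: HEAD the object, report
    # a 404 as "missing", and tolerate absent credentials when dry_run is set.
    ...


@retry(stop=retry_stop_long, wait=retry_wait_long,
       retry=retry_boto_exception, retry_error_callback=retry_callback)
def s3_copy(s3_client, src, bucket, key, max_age, acl, extra_args={}, dry_run=False):
    # Assumed identical to the helper deleted above: pick a ContentType from
    # the key suffix, set CacheControl and the ACL, and upload unless dry_run.
    ...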