Skip to content
This repository has been archived by the owner on Jun 20, 2024. It is now read-only.

Commit

Permalink
feat: cleanup buckets
Browse files Browse the repository at this point in the history
  • Loading branch information
folkzb authored and fczuardi committed Nov 24, 2023
1 parent edba3e5 commit 8c19261
Show file tree
Hide file tree
Showing 2 changed files with 42 additions and 0 deletions.
4 changes: 4 additions & 0 deletions justfile
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,10 @@ list-tests:
--ignore utils \
| sed "s/\.js//g"

# clean test buckets
clean-buckets profile:
./src/k6/utils/clean-buckets.py {{profile}}

# Test a S3-compatible provider with k6
test remote test_name folder=date: _setup
@just _test-k6 {{remote}} {{test_name}} {{folder}}
Expand Down
38 changes: 38 additions & 0 deletions src/k6/utils/clean-buckets.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
#!/usr/bin/env python
import boto3
import sys
import yaml


def delete_bucket_contents(s3, bucket_name):
    """Delete every object in *bucket_name*.

    Pages through the listing: ``list_objects_v2`` returns at most 1000 keys
    per call, so a single call would leave larger buckets partially emptied
    and the caller's ``delete_bucket`` would then fail with BucketNotEmpty.

    :param s3: a boto3 S3 client (anything exposing ``list_objects_v2`` /
        ``delete_object`` with the same response shape works).
    :param bucket_name: name of the bucket to empty.
    """
    kwargs = {'Bucket': bucket_name}
    while True:
        page = s3.list_objects_v2(**kwargs)
        # 'Contents' is absent on an empty page.
        for obj in page.get('Contents', []):
            s3.delete_object(Bucket=bucket_name, Key=obj['Key'])
        if not page.get('IsTruncated'):
            break
        # Resume listing where the previous page stopped.
        kwargs['ContinuationToken'] = page['NextContinuationToken']

def delete_buckets(profile):
    """Empty and delete every bucket whose name starts with 'test'.

    Reads the S3 endpoint and region for *profile* from ``config.yaml``
    (``remotes.<profile>.s3.{endpoint,region}``), then deletes each matching
    bucket's contents followed by the bucket itself.

    :param profile: AWS credentials profile name, also the key under
        ``remotes`` in config.yaml.
    Exits with status 1 when config.yaml cannot be parsed or lacks the
    expected keys — the original code printed the error and then crashed
    with a NameError on the undefined ``endpoint``/``region``.
    """
    with open("config.yaml", "r") as stream:
        try:
            data = yaml.safe_load(stream)
            endpoint = data['remotes'][profile]['s3']['endpoint']
            region = data['remotes'][profile]['s3']['region']
        except (yaml.YAMLError, KeyError, TypeError) as exc:
            # KeyError/TypeError cover a well-formed YAML file that is
            # missing the remotes.<profile>.s3 section.
            print(exc)
            sys.exit(1)
    session = boto3.Session(profile_name=profile)
    s3 = session.client('s3', endpoint_url=endpoint, region_name=region)
    response = s3.list_buckets()
    for bucket in response['Buckets']:
        # Only disposable test buckets are touched; everything else is kept.
        if bucket['Name'].startswith('test'):
            print(f"Deleting bucket: {bucket['Name']}...")
            delete_bucket_contents(s3, bucket['Name'])
            s3.delete_bucket(Bucket=bucket['Name'])

def main():
    """CLI entry point: ``clean-buckets.py <profile>``.

    Validates the argument count, then delegates to :func:`delete_buckets`.
    Exits with status 1 (and a usage message) when misused.
    """
    if len(sys.argv) != 2:
        # Use argv[0] so the usage line always matches the actual script
        # name (the hard-coded "clean-all-buckets.py" was wrong — the file
        # is clean-buckets.py).
        print(f"Use: python {sys.argv[0]} <profile>")
        sys.exit(1)
    profile = sys.argv[1]
    delete_buckets(profile)

if __name__ == "__main__":
    main()

0 comments on commit 8c19261

Please sign in to comment.