Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use external mcg endpoint in bucket utils #9733

Open
wants to merge 8 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions ocs_ci/ocs/bucket_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ def craft_s3_command(cmd, mcg_obj=None, api=False, signed_request_creds=None):
f"AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} "
f"{region}"
f"aws s3{api} "
f"--endpoint={mcg_obj.s3_internal_endpoint} "
f"--endpoint={mcg_obj.s3_endpoint} "
f"{no_ssl} "
)
string_wrapper = '"'
Expand Down Expand Up @@ -112,8 +112,8 @@ def craft_s3cmd_command(cmd, mcg_obj=None, signed_request_creds=None):
f"s3cmd --access_key={mcg_obj.access_key_id} "
f"--secret_key={mcg_obj.access_key} "
f"{region}"
f"--host={mcg_obj.s3_external_endpoint} "
f"--host-bucket={mcg_obj.s3_external_endpoint} "
f"--host={mcg_obj.s3_endpoint} "
f"--host-bucket={mcg_obj.s3_endpoint} "
f"{no_ssl} "
)
elif signed_request_creds:
Expand Down Expand Up @@ -291,7 +291,7 @@ def list_objects_from_bucket(
retrieve_cmd += " --recursive"

if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
Expand Down Expand Up @@ -354,7 +354,7 @@ def copy_objects(
else:
retrieve_cmd = f"cp {src_obj} {target} {no_ssl}"
if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
Expand Down Expand Up @@ -419,7 +419,7 @@ def upload_objects_with_javasdk(javas3_pod, s3_obj, bucket_name, is_multipart=Fa

access_key = s3_obj.access_key_id
secret_key = s3_obj.access_key
endpoint = s3_obj.s3_internal_endpoint
endpoint = s3_obj.s3_endpoint

# compile the src code
javas3_pod.exec_cmd_on_pod(command="mvn clean compile", out_yaml_format=False)
Expand Down Expand Up @@ -457,7 +457,7 @@ def sync_object_directory(
logger.info(f"Syncing all objects and directories from {src} to {target}")
retrieve_cmd = f"sync {src} {target}"
if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
Expand Down Expand Up @@ -505,7 +505,7 @@ def download_objects_using_s3cmd(
else:
retrieve_cmd = f"get {src} {target}"
if s3_obj:
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_internal_endpoint]
secrets = [s3_obj.access_key_id, s3_obj.access_key, s3_obj.s3_endpoint]
elif signed_request_creds:
secrets = [
signed_request_creds.get("access_key_id"),
Expand Down Expand Up @@ -544,7 +544,7 @@ def rm_object_recursive(podobj, target, mcg_obj, option=""):
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
mcg_obj.s3_endpoint,
],
)

Expand Down Expand Up @@ -591,7 +591,7 @@ def write_individual_s3_objects(
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
mcg_obj.s3_endpoint,
],
)

Expand All @@ -616,7 +616,7 @@ def upload_parts(

"""
parts = []
secrets = [mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_internal_endpoint]
secrets = [mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
for count, part in enumerate(uploaded_parts, 1):
upload_cmd = (
f"upload-part --bucket {bucketname} --key {object_key}"
Expand Down Expand Up @@ -1453,7 +1453,7 @@ def del_objects(uploaded_objects_paths, awscli_pod, mcg_obj):
secrets=[
mcg_obj.access_key_id,
mcg_obj.access_key,
mcg_obj.s3_internal_endpoint,
mcg_obj.s3_endpoint,
],
)

Expand Down
23 changes: 21 additions & 2 deletions ocs_ci/ocs/resources/mcg.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ class MCG:
(
s3_resource,
s3_endpoint,
s3_external_endpoint,
s3_internal_endpoint,
ocp_resource,
mgmt_endpoint,
Expand All @@ -66,7 +67,7 @@ class MCG:
noobaa_password,
noobaa_token,
data_to_mask,
) = (None,) * 13
) = (None,) * 14

def __init__(self, *args, **kwargs):
"""
Expand Down Expand Up @@ -105,7 +106,7 @@ def __init__(self, *args, **kwargs):

get_noobaa = OCP(kind="noobaa", namespace=self.namespace).get()

self.s3_endpoint = (
self.s3_external_endpoint = (
get_noobaa.get("items")[0]
.get("status")
.get("services")
Expand All @@ -119,6 +120,7 @@ def __init__(self, *args, **kwargs):
.get("serviceS3")
.get("internalDNS")[0]
)
self.s3_endpoint = self.determine_s3_endpoint()
self.mgmt_endpoint = (
get_noobaa.get("items")[0]
.get("status")
Expand Down Expand Up @@ -379,6 +381,23 @@ def _retrieve_reduction_data():
f"{total_size - total_reduced} bytes reduced out of {expected_reduction_in_bytes}."
)

def determine_s3_endpoint(self):
    """
    Determine which S3 endpoint the MCG object should use by default.

    Returns the external S3 endpoint when the cluster platform is one of
    the HCI provider/client platforms (where workloads reach NooBaa from
    outside the cluster network); otherwise returns the internal S3
    endpoint (in-cluster DNS), which is reachable from pods such as the
    awscli pod.

    Returns:
        str: S3 endpoint URI (external for HCI provider/client
            platforms, internal otherwise)

    """
    # NOTE(review): defaulting most platforms to the internal endpoint is
    # a behavior change for callers that run S3 operations from outside
    # the cluster (e.g. boto3 from a CI agent) — confirm those paths.
    return (
        self.s3_external_endpoint
        if (
            config.ENV_DATA.get("platform", "").lower()
            in constants.HCI_PROVIDER_CLIENT_PLATFORMS
        )
        else self.s3_internal_endpoint
    )

Comment on lines +384 to +400
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. Please update the Docstring
  2. 0faef55 changes the self.s3_endpoint class member to be the internal endpoint on most cases, which is a big change that we need to verify.

While this endpoint should work on S3 operations that are made via the awscli pod which is within the cluster, some test cases are making s3 operations from the Jenkins agent via boto3 from outside of it. I fear this change will fail those kind of test cases.

def request_aws_credentials(self):
"""
Uses a CredentialsRequest CR to create an AWS IAM that allows the program
Expand Down
Loading