diff --git a/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py b/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py index 3d411a0e932c27..169682d9db9b76 100644 --- a/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py +++ b/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py @@ -130,17 +130,13 @@ def test_expiration(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", }, { "ID": "rule2", "Expiration": {"Days": 5}, - "Filter": { - "Prefix": "expire3/" - }, + "Filter": {"Prefix": "expire3/"}, "Status": "Enabled", }, ] @@ -200,9 +196,7 @@ def test_lifecycle_versioning_enabled(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", } ] @@ -212,7 +206,7 @@ def test_lifecycle_versioning_enabled(self): ) # give enough time to expire. - # 4 cycles because: + # 3 cycles because: # 1st cycle won't be expired yet (not still 1 day) # 2nd cycle rgw considers the bucket at processed # today and skips it @@ -238,9 +232,7 @@ def test_expiration_multiple_buckets(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", } ] diff --git a/qa/rgw/store/sfs/tests/test-sfs-object-locking.py b/qa/rgw/store/sfs/tests/test-sfs-object-locking.py index 7ef3496fee5fa1..7b2b6b00498458 100644 --- a/qa/rgw/store/sfs/tests/test-sfs-object-locking.py +++ b/qa/rgw/store/sfs/tests/test-sfs-object-locking.py @@ -462,6 +462,7 @@ def test_object_locking_legal_hold(self): self.assertTrue(response["ResponseMetadata"]["HTTPStatusCode"] == 204) + if __name__ == "__main__": if len(sys.argv) == 2: address_port = sys.argv.pop() diff --git a/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py b/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py index d9dfbaf2810709..c01c814f95829a 100644 --- 
a/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py +++ b/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py @@ -19,24 +19,40 @@ import tempfile import os import filecmp +import threading + + +def _do_create_object(client, bucket_name, key, i): + body = "data {i}".format(i=i) + client.put_object(Bucket=bucket_name, Key=key, Body=body) + + +def _do_wait_completion(t): + for thr in t: + thr.join() + class VersioningSmokeTests(unittest.TestCase): - ACCESS_KEY='test' - SECRET_KEY='test' - URL='http://127.0.0.1:7480' - BUCKET_NAME_LENGTH=8 - OBJECT_NAME_LENGTH=10 + ACCESS_KEY = "test" + SECRET_KEY = "test" + URL = "http://127.0.0.1:7480" + BUCKET_NAME_LENGTH = 8 + OBJECT_NAME_LENGTH = 10 def setUp(self): - self.s3_client = boto3.client('s3', - endpoint_url=VersioningSmokeTests.URL, - aws_access_key_id="test", - aws_secret_access_key="test") - - self.s3 = boto3.resource('s3', - endpoint_url=VersioningSmokeTests.URL, - aws_access_key_id="test", - aws_secret_access_key="test") + self.s3_client = boto3.client( + "s3", + endpoint_url=VersioningSmokeTests.URL, + aws_access_key_id="test", + aws_secret_access_key="test", + ) + + self.s3 = boto3.resource( + "s3", + endpoint_url=VersioningSmokeTests.URL, + aws_access_key_id="test", + aws_secret_access_key="test", + ) self.test_dir = tempfile.TemporaryDirectory() @@ -46,7 +62,7 @@ def tearDown(self): def get_random_name(self, length) -> str: letters = string.ascii_lowercase - result_str = ''.join(random.choice(letters) for i in range(length)) + result_str = "".join(random.choice(letters) for i in range(length)) return result_str def get_random_bucket_name(self) -> str: @@ -58,221 +74,277 @@ def get_random_object_name(self) -> str: def generate_random_file(self, path, size=4): # size passed is in mb size = size * 1024 * 1024 - with open(path, 'wb') as fout: + with open(path, "wb") as fout: fout.write(os.urandom(size)) def assert_bucket_exists(self, bucket_name): response = self.s3_client.list_buckets() found = False - for bucket 
in response['Buckets']: - if (bucket["Name"] == bucket_name): + for bucket in response["Buckets"]: + if bucket["Name"] == bucket_name: found = True self.assertTrue(found) + def _do_create_versioned_obj_concurrent(self, bucket_name, key, num): + t = [] + for i in range(num): + thr = threading.Thread( + target=_do_create_object, args=(self.s3_client, bucket_name, key, i) + ) + thr.start() + t.append(thr) + return t + def test_create_bucket_enable_versioning(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) # ensure versioning is disabled (default) response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertFalse('Status' in response) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertFalse("Status" in response) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) - self.assertTrue('Status' in response) - self.assertEqual('Enabled', response['Status']) + self.assertTrue("Status" in response) + self.assertEqual("Enabled", response["Status"]) def test_put_objects_versioning_enabled(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) object_name = 
self.get_random_object_name() - test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin') + test_file_path_1 = os.path.join(self.test_dir.name, "test_file_1.bin") self.generate_random_file(test_file_path_1) # upload the file self.s3_client.upload_file(test_file_path_1, bucket_name, object_name) # get the file and compare with the original - test_file_path_1_check = os.path.join(self.test_dir.name, 'test_file_1_check.bin') + test_file_path_1_check = os.path.join( + self.test_dir.name, "test_file_1_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_1_check) - self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False) + ) # now upload again with different content - test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin') + test_file_path_2 = os.path.join(self.test_dir.name, "test_file_2.bin") self.generate_random_file(test_file_path_2) self.s3_client.upload_file(test_file_path_2, bucket_name, object_name) - test_file_path_2_check = os.path.join(self.test_dir.name, 'test_file_2_check.bin') + test_file_path_2_check = os.path.join( + self.test_dir.name, "test_file_2_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_2_check) - self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False) + ) # get etag of object response = self.s3_client.head_object(Bucket=bucket_name, Key=object_name) - self.assertTrue('ETag' in response) - etag = response['ETag'] + self.assertTrue("ETag" in response) + etag = response["ETag"] # check that we have 2 versions # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(2, 
len(response['Versions'])) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(2, len(response["Versions"])) num_latest = 0 - last_version_id = '' - previous_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertNotEqual('null', version['VersionId']) - if (version['IsLatest']): + last_version_id = "" + previous_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. Tester", "ID": "testid"}, version["Owner"] + ) + self.assertNotEqual("null", version["VersionId"]) + if version["IsLatest"]: num_latest += 1 - last_version_id = version['VersionId'] - self.assertEqual(etag, version['ETag']) + last_version_id = version["VersionId"] + self.assertEqual(etag, version["ETag"]) else: - previous_version_id = version['VersionId'] + previous_version_id = version["VersionId"] # check that all etags differ - for version in response['Versions']: - etag = version['ETag'] - version_id = version['VersionId'] - for version2 in response['Versions']: - version_id2 = version2['VersionId'] - if (version_id2 != version_id): - etag2 = version2['ETag'] + for version in response["Versions"]: + etag = version["ETag"] + version_id = version["VersionId"] + for version2 in response["Versions"]: + version_id2 = version2["VersionId"] + if version_id2 != version_id: + etag2 = version2["ETag"] self.assertNotEqual(etag, etag2) - self.assertEqual(1, num_latest) - self.assertNotEqual('', last_version_id) - 
self.assertNotEqual('', previous_version_id) + self.assertNotEqual("", last_version_id) + self.assertNotEqual("", previous_version_id) # download by version_id # download the last version - check_version_file = os.path.join(self.test_dir.name, 'check_version.bin') + check_version_file = os.path.join(self.test_dir.name, "check_version.bin") bucket = self.s3.Bucket(bucket_name) bucket.download_file( - object_name, - check_version_file, - ExtraArgs={"VersionId": last_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file, shallow=False)) + object_name, check_version_file, ExtraArgs={"VersionId": last_version_id} + ) + self.assertTrue( + filecmp.cmp(test_file_path_2, check_version_file, shallow=False) + ) # download the previous version - check_version_file_2 = os.path.join(self.test_dir.name, 'check_version2.bin') + check_version_file_2 = os.path.join(self.test_dir.name, "check_version2.bin") bucket.download_file( object_name, check_version_file_2, - ExtraArgs={"VersionId": previous_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_1, check_version_file_2, shallow=False)) + ExtraArgs={"VersionId": previous_version_id}, + ) + self.assertTrue( + filecmp.cmp(test_file_path_1, check_version_file_2, shallow=False) + ) # delete the object self.s3_client.delete_object(Bucket=bucket_name, Key=object_name) # check that we have 2 versions plus 1 DeleteMarker # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(2, len(response['Versions'])) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(2, len(response["Versions"])) num_latest = 0 - deleted_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, 
version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertNotEqual('null', version['VersionId']) - self.assertFalse(version['IsLatest']) - - self.assertEqual(1, len(response['DeleteMarkers'])) + deleted_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. Tester", "ID": "testid"}, version["Owner"] + ) + self.assertNotEqual("null", version["VersionId"]) + self.assertFalse(version["IsLatest"]) + + self.assertEqual(1, len(response["DeleteMarkers"])) # try to download the file, a 404 error should be returned - check_deleted_file = os.path.join(self.test_dir.name, 'check_deleted.bin') + check_deleted_file = os.path.join(self.test_dir.name, "check_deleted.bin") with self.assertRaises(botocore.exceptions.ClientError) as context: - response = self.s3_client.download_file(bucket_name, object_name, check_deleted_file) - self.assertTrue('404' in str(context.exception)) + response = self.s3_client.download_file( + bucket_name, object_name, check_deleted_file + ) + self.assertTrue("404" in str(context.exception)) # download the previous version, it should still be reacheable - check_version_file_2 = os.path.join(self.test_dir.name, 'check_version2.bin') + check_version_file_2 = os.path.join(self.test_dir.name, "check_version2.bin") bucket.download_file( - object_name, - check_version_file_2, - ExtraArgs={"VersionId": last_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file_2, shallow=False)) + object_name, check_version_file_2, ExtraArgs={"VersionId": last_version_id} + ) + self.assertTrue( + filecmp.cmp(test_file_path_2, check_version_file_2, shallow=False) + ) + + # delete the first version. 
(in this case version should be deleted + # permanently) + version_id_to_delete = response["Versions"][0]["VersionId"] + # delete the specific version + self.s3_client.delete_object( + Bucket=bucket_name, Key=object_name, VersionId=version_id_to_delete + ) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(1, len(response["Versions"])) + self.assertNotEqual(version_id_to_delete, response["Versions"][0]["VersionId"]) + self.assertTrue("DeleteMarkers" in response) + self.assertEqual(1, len(response["DeleteMarkers"])) def test_put_objects_no_versioning(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) object_name = self.get_random_object_name() - test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin') + test_file_path_1 = os.path.join(self.test_dir.name, "test_file_1.bin") self.generate_random_file(test_file_path_1) # upload the file self.s3_client.upload_file(test_file_path_1, bucket_name, object_name) # get the file and compare with the original - test_file_path_1_check = os.path.join(self.test_dir.name, 'test_file_1_check.bin') + test_file_path_1_check = os.path.join( + self.test_dir.name, "test_file_1_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_1_check) - self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False) + ) # now upload again with different content - test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin') + test_file_path_2 = os.path.join(self.test_dir.name, "test_file_2.bin") self.generate_random_file(test_file_path_2) self.s3_client.upload_file(test_file_path_2, bucket_name, object_name) - test_file_path_2_check = os.path.join(self.test_dir.name, 'test_file_2_check.bin') + 
test_file_path_2_check = os.path.join( + self.test_dir.name, "test_file_2_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_2_check) - self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False) + ) # get etag of object response = self.s3_client.head_object(Bucket=bucket_name, Key=object_name) - self.assertTrue('ETag' in response) - etag = response['ETag'] + self.assertTrue("ETag" in response) + etag = response["ETag"] # check that we have 1 version only # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(1, len(response['Versions'])) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(1, len(response["Versions"])) num_latest = 0 - last_version_id = '' - previous_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertEqual(etag, version['ETag']) - self.assertEqual('null', version['VersionId']) - self.assertTrue(version['IsLatest']) + last_version_id = "" + previous_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. 
Tester", "ID": "testid"}, version["Owner"] + ) + self.assertEqual(etag, version["ETag"]) + self.assertEqual("null", version["VersionId"]) + self.assertTrue(version["IsLatest"]) # delete the object self.s3_client.delete_object(Bucket=bucket_name, Key=object_name) - # we should still have 0 versions and 1 delete marker - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('DeleteMarkers' in response) - self.assertFalse('Versions' in response) - self.assertEqual(1, len(response['DeleteMarkers'])) - - num_latest = 0 - deleted_version_id = '' - for version in response['DeleteMarkers']: - self.assertEqual(object_name, version['Key']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertEqual('null', version['VersionId']) - self.assertTrue(version['IsLatest']) + # we should still have 0 versions and no delete markers + # non-versioned bucket don't create delete-markers + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertFalse("DeleteMarkers" in response) + self.assertFalse("Versions" in response) # try to download the file, a 404 error should be returned - check_deleted_file = os.path.join(self.test_dir.name, 'check_deleted.bin') + check_deleted_file = os.path.join(self.test_dir.name, "check_deleted.bin") with self.assertRaises(botocore.exceptions.ClientError) as context: - response = self.s3_client.download_file(bucket_name, object_name, check_deleted_file) - self.assertTrue('404' in str(context.exception)) + response = self.s3_client.download_file( + bucket_name, object_name, check_deleted_file + ) + self.assertTrue("404" in str(context.exception)) def upload_object_with_versions(self, bucket_name, object_name, number_of_versions): for i in range(number_of_versions): @@ -285,52 +357,89 @@ def test_list_objects_versioning_enabled_with_prefix(self): bucket_name = self.get_random_bucket_name() 
self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) - self.upload_object_with_versions(bucket_name, 'prefix_file_1.bin', 2) - self.upload_object_with_versions(bucket_name, 'prefix_file_2.bin', 2) - self.upload_object_with_versions(bucket_name, 'test_file.bin', 3) + self.upload_object_with_versions(bucket_name, "prefix_file_1.bin", 2) + self.upload_object_with_versions(bucket_name, "prefix_file_2.bin", 2) + self.upload_object_with_versions(bucket_name, "test_file.bin", 3) # get the list of version with prefix = 'prefix' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='prefix') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="prefix" + ) + self.assertTrue("Versions" in response) # we should have 4 versions (2 per each file) - self.assertEqual(4, len(response['Versions'])) + self.assertEqual(4, len(response["Versions"])) # check that the results are the expected ones - for version in response['Versions']: - self.assertTrue(version['Key'].startswith('prefix')) + for version in response["Versions"]: + self.assertTrue(version["Key"].startswith("prefix")) # get the list of version with prefix = 'test' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='test') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="test" + ) + self.assertTrue("Versions" in response) # we should have 3 versions - self.assertEqual(3, len(response['Versions'])) + self.assertEqual(3, len(response["Versions"])) # check that the results are the expected ones - for version in 
response['Versions']: - self.assertTrue(version['Key'].startswith('test')) + for version in response["Versions"]: + self.assertTrue(version["Key"].startswith("test")) # delete the prefix_file_1.bin object - self.s3_client.delete_object(Bucket=bucket_name, Key='prefix_file_1.bin') + self.s3_client.delete_object(Bucket=bucket_name, Key="prefix_file_1.bin") # get the list of version with prefix = 'prefix' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='prefix') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="prefix" + ) + self.assertTrue("Versions" in response) # we should have still have 4 versions (2 per each file) - self.assertEqual(4, len(response['Versions'])) + self.assertEqual(4, len(response["Versions"])) # and we should have 1 delete marker - self.assertTrue('DeleteMarkers' in response) - self.assertEqual(1, len(response['DeleteMarkers'])) + self.assertTrue("DeleteMarkers" in response) + self.assertEqual(1, len(response["DeleteMarkers"])) # ensure that it's object we deleted - self.assertEqual('prefix_file_1.bin', response['DeleteMarkers'][0]['Key']) + self.assertEqual("prefix_file_1.bin", response["DeleteMarkers"][0]["Key"]) + + def test_create_concurrent(self): + bucket_name = self.get_random_bucket_name() + self.s3_client.create_bucket(Bucket=bucket_name) + self.assert_bucket_exists(bucket_name) + # ensure versioning is disabled (default) + response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertFalse("Status" in response) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) + response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) + self.assertTrue("Status" in response) + self.assertEqual("Enabled", response["Status"]) + + key = "myobj" + num_versions 
= 5 + repeat = 25 + + for i in range(repeat): + key_obj = "%s-%s" % (key, i) + t = self._do_create_versioned_obj_concurrent( + bucket_name, key_obj, num_versions + ) + _do_wait_completion(t) + response = self.s3_client.list_object_versions(Bucket=bucket_name) + versions = response["Versions"] + self.assertEqual(num_versions * repeat, len(versions)) + print("Num versions: %s" % len(versions)) if __name__ == "__main__": if len(sys.argv) == 2: address_port = sys.argv.pop() - VersioningSmokeTests.URL = 'http://{0}'.format(address_port) + VersioningSmokeTests.URL = "http://{0}".format(address_port) unittest.main() else: - print ("usage: {0} ADDRESS:PORT".format(sys.argv[0])) + print("usage: {0} ADDRESS:PORT".format(sys.argv[0])) diff --git a/src/rgw/driver/sfs/CMakeLists.txt b/src/rgw/driver/sfs/CMakeLists.txt index fe01936692a975..19bd476925be9f 100644 --- a/src/rgw/driver/sfs/CMakeLists.txt +++ b/src/rgw/driver/sfs/CMakeLists.txt @@ -24,7 +24,6 @@ set(sfs_srcs sqlite/sqlite_lifecycle.cc sqlite/users/users_conversions.cc sqlite/buckets/bucket_conversions.cc - sqlite/versioned_object/versioned_object_conversions.cc sqlite/dbconn.cc bucket.cc multipart.cc diff --git a/src/rgw/driver/sfs/bucket.cc b/src/rgw/driver/sfs/bucket.cc index 28e1e43ef2a63f..d52b4aa2d443d5 100644 --- a/src/rgw/driver/sfs/bucket.cc +++ b/src/rgw/driver/sfs/bucket.cc @@ -43,18 +43,22 @@ void SFSBucket::write_meta(const DoutPrefixProvider* dpp) { } std::unique_ptr SFSBucket::_get_object(sfs::ObjectRef obj) { - rgw_obj_key key(obj->name); + rgw_obj_key key(obj->name, obj->instance); return make_unique(this->store, key, this, bucket); } std::unique_ptr SFSBucket::get_object(const rgw_obj_key& key) { - // note: the current code is completely ignoring the versionID in the key. - // please see to 'rgw_rest_s3.cc' RGWHandler_REST_S3::init_from_header(). 
- ldout(store->ceph_context(), 10) - << "bucket::" << __func__ << ": key" << key << dendl; + << "bucket::" << __func__ << ": key : " << key << dendl; try { - auto objref = bucket->get(key.name); + auto objref = bucket->get(key); + // bucket->get retrieves all the information from the db + // (incling the version_id for the last version) + // But in cases like delete operations we don't want to update the + // instance. That could convert a "delete marker" operation into a "delete + // specific version" operation. + // Return the object with the same key as it was requested. + objref->instance = key.instance; return _get_object(objref); } catch (const sfs::UnknownObjectException& _) { ldout(store->ceph_context(), 10) @@ -77,15 +81,12 @@ int SFSBucket::list( if (params.list_versions) { return list_versions(dpp, params, max, results, y); } - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); auto use_prefix = !params.prefix.empty(); + // get_all returns the last version of all objects that are not deleted for (const auto& objref : bucket->get_all()) { if (use_prefix && objref->name.rfind(params.prefix, 0) != 0) continue; lsfs_dout(dpp, 10) << "object: " << objref->name << dendl; - - auto last_version = - db_versioned_objects.get_last_versioned_object(objref->path.get_uuid()); - if (last_version->object_state == rgw::sal::sfs::ObjectState::COMMITTED) { + if (!objref->deleted) { // check for delimiter if (check_add_common_prefix(dpp, objref->name, params, 0, results, y)) { continue; @@ -111,6 +112,8 @@ int SFSBucket::list_versions( ListResults& results, optional_yield y ) { auto use_prefix = !params.prefix.empty(); + sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); + // get_all returns the last version of all objects that are COMMITTED for (const auto& objref : bucket->get_all()) { if (use_prefix && objref->name.rfind(params.prefix, 0) != 0) continue; lsfs_dout(dpp, 10) << "object: " << objref->name << dendl; @@ -118,25 
+121,40 @@ int SFSBucket::list_versions( if (check_add_common_prefix(dpp, objref->name, params, 0, results, y)) { continue; } - // get all available versions from db - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); - auto last_version = - db_versioned_objects.get_last_versioned_object(objref->path.get_uuid()); - auto object_versions = - db_versioned_objects.get_versioned_objects(objref->path.get_uuid()); - for (const auto& object_version : object_versions) { + if (get_info().versioning_enabled()) { + auto object_versions = + db_versioned_objects.get_versioned_objects(objref->path.get_uuid()); + for (const auto& object_version : object_versions) { + if (object_version.object_state != + rgw::sal::sfs::ObjectState::COMMITTED) { + continue; + } + rgw_bucket_dir_entry dirent; + dirent.key = cls_rgw_obj_key(objref->name, object_version.version_id); + dirent.meta.accounted_size = object_version.size; + dirent.meta.mtime = object_version.create_time; + dirent.meta.etag = object_version.etag; + dirent.flags = rgw_bucket_dir_entry::FLAG_VER; + if (objref->version_id == object_version.id) { + dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; + } + if (object_version.version_type == + rgw::sal::sfs::VersionType::DELETE_MARKER) { + dirent.flags |= rgw_bucket_dir_entry::FLAG_DELETE_MARKER; + } + dirent.meta.owner_display_name = bucket->get_owner().display_name; + dirent.meta.owner = bucket->get_owner().user_id.id; + results.objs.push_back(dirent); + } + } else { // non-versioned bucket rgw_bucket_dir_entry dirent; - dirent.key = cls_rgw_obj_key(objref->name, object_version.version_id); - dirent.meta.accounted_size = object_version.size; - dirent.meta.mtime = object_version.create_time; - dirent.meta.etag = object_version.etag; + // for non-versioned buckets we don't return the versionId + dirent.key = cls_rgw_obj_key(objref->name, ""); + dirent.meta.accounted_size = objref->get_meta().size; + dirent.meta.mtime = objref->get_meta().mtime; + 
dirent.meta.etag = objref->get_meta().etag; dirent.flags = rgw_bucket_dir_entry::FLAG_VER; - if (last_version.has_value() && last_version->id == object_version.id) { - dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; - } - if (object_version.object_state == rgw::sal::sfs::ObjectState::DELETED) { - dirent.flags |= rgw_bucket_dir_entry::FLAG_DELETE_MARKER; - } + dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; dirent.meta.owner_display_name = bucket->get_owner().display_name; dirent.meta.owner = bucket->get_owner().user_id.id; results.objs.push_back(dirent); @@ -238,12 +256,10 @@ bool SFSBucket::is_owner(User* user) { int SFSBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y) { /** Check in the backing store if this bucket is empty */ // check if there are still objects owned by the bucket - sfs::sqlite::SQLiteObjects db_objects(store->db_conn); - auto objects = db_objects.get_object_ids(get_name()); - sfs::sqlite::SQLiteVersionedObjects db_versions(store->db_conn); + sfs::sqlite::SQLiteVersionedObjects db_ver_objects(store->db_conn); + auto objects = db_ver_objects.list_last_versioned_objects(get_name()); for (const auto& obj : objects) { - auto last_version = db_versions.get_last_versioned_object(obj); - if (last_version->object_state != rgw::sal::sfs::ObjectState::DELETED) { + if (sfs::sqlite::get_version_type(obj) != sfs::VersionType::DELETE_MARKER) { ldpp_dout(dpp, -1) << __func__ << ": Bucket Not Empty.." 
<< dendl; return -ENOTEMPTY; } diff --git a/src/rgw/driver/sfs/multipart.cc b/src/rgw/driver/sfs/multipart.cc index b1f0d5c4530a5f..4288ef74d6a226 100644 --- a/src/rgw/driver/sfs/multipart.cc +++ b/src/rgw/driver/sfs/multipart.cc @@ -134,7 +134,7 @@ int SFSMultipartUpload::complete( ceph_assert(target_obj); ceph_assert(target_obj->get_name() == mp->objref->name); - sfs::ObjectRef outobj = bucketref->get_or_create(target_obj->get_key()); + sfs::ObjectRef outobj = bucketref->create_version(target_obj->get_key()); std::filesystem::path outpath = store->get_data_path() / outobj->get_storage_path(); // ensure directory structure exists diff --git a/src/rgw/driver/sfs/object.cc b/src/rgw/driver/sfs/object.cc index 84012601fd9dba..ffafd0ef5a5780 100644 --- a/src/rgw/driver/sfs/object.cc +++ b/src/rgw/driver/sfs/object.cc @@ -33,7 +33,8 @@ SFSObject::SFSReadOp::SFSReadOp(SFSObject* _source) : source(_source) { In those cases the SFSReadOp is not properly initialized and those calls are going to fail. 
*/ - source->refresh_meta(); + // read op needs to retrieve also the version_id from the db + source->refresh_meta(true); objref = source->get_object_ref(); } @@ -54,7 +55,8 @@ int SFSObject::SFSReadOp::prepare( lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() << ", obj: " << source->get_name() - << ", size: " << source->get_obj_size() << dendl; + << ", size: " << source->get_obj_size() + << ", versionId: " << source->get_instance() << dendl; if (params.lastmod) { *params.lastmod = source->get_mtime(); } @@ -150,7 +152,7 @@ int SFSObject::SFSDeleteOp::delete_obj( const DoutPrefixProvider* dpp, optional_yield y ) { lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() - << "bucket versioning: " + << " bucket versioning: " << source->bucket->versioning_enabled() << ", object: " << source->get_name() << ", instance: " << source->get_instance() << dendl; @@ -162,12 +164,16 @@ int SFSObject::SFSDeleteOp::delete_obj( } auto version_id = source->get_instance(); + std::string delete_marker_version_id; if (source->objref) { - bucketref->delete_object(source->objref, source->get_key()); - } else if (source->bucket->versioning_enabled()) { + bucketref->delete_object( + source->objref, source->get_key(), source->bucket->versioning_enabled(), + delete_marker_version_id + ); + } else if (source->bucket->versioning_enabled() && source->get_instance().empty()) { // create delete marker // even the object does not exist AWS creates a delete marker for it - // if versioning is enabled + // if versioning is enabled and an specific version was not specified version_id = bucketref->create_non_existing_object_delete_marker(source->get_key()); } @@ -176,6 +182,12 @@ int SFSObject::SFSDeleteOp::delete_obj( // and return the version id if (source->bucket->versioning_enabled()) { result.version_id = version_id; + if (!delete_marker_version_id.empty()) { + // a new delete marker was created. + // Return the version id generated for it. 
+ result.version_id = delete_marker_version_id; + } + source->delete_marker = true; // needed for multiobject delete result.delete_marker = true; } return 0; @@ -220,7 +232,7 @@ int SFSObject::copy_object( std::filesystem::path srcpath = store->get_data_path() / objref->get_storage_path(); - sfs::ObjectRef dstref = dst_bucket_ref->get_or_create(dst_object->get_key()); + sfs::ObjectRef dstref = dst_bucket_ref->create_version(dst_object->get_key()); std::filesystem::path dstpath = store->get_data_path() / dstref->get_storage_path(); @@ -247,7 +259,9 @@ int SFSObject::copy_object( dest_meta.mtime = ceph::real_clock::now(); dstref->update_attrs(objref->get_attrs()); dstref->update_meta(dest_meta); - dstref->metadata_finish(store); + dstref->metadata_finish( + store, dst_bucket_ref->get_info().versioning_enabled() + ); return 0; } @@ -438,42 +452,30 @@ std::unique_ptr SFSObject::get_delete_op() { return std::make_unique(this, ref); } -void SFSObject::refresh_meta() { +void SFSObject::refresh_meta(bool update_version_id_from_metadata) { if (!bucketref) { bucketref = store->get_bucket_ref(bucket->get_name()); } try { - objref = bucketref->get(get_name()); + objref = bucketref->get(rgw_obj_key(get_name(), get_instance())); } catch (sfs::UnknownObjectException& e) { // object probably not created yet? 
return; } - _refresh_meta_from_object(); + _refresh_meta_from_object(objref, update_version_id_from_metadata); } -void SFSObject::_refresh_meta_from_object() { +void SFSObject::_refresh_meta_from_object( + sfs::ObjectRef objref, bool update_version_id_from_metadata +) { ceph_assert(objref); - if (!get_instance().empty() && get_instance() != objref->instance) { - // object specific version requested and it's not the last one - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); - auto db_version = db_versioned_objects.get_versioned_object(get_instance()); - if (db_version.has_value()) { - auto uuid = objref->path.get_uuid(); - auto deleted = db_version->object_state == sfs::ObjectState::DELETED; - objref.reset(sfs::Object::create_for_query( - get_name(), uuid, deleted, db_version->id - )); - set_obj_size(db_version->size); - objref->update_attrs(db_version->attrs); - auto meta = objref->get_meta(); - meta.etag = db_version->etag; - objref->update_meta(meta); - } - } else { - set_obj_size(objref->get_meta().size); - } + // fill values from objref + set_obj_size(objref->get_meta().size); set_attrs(objref->get_attrs()); state.mtime = objref->get_meta().mtime; + if (update_version_id_from_metadata) { + set_instance(objref->instance); + } } } // namespace rgw::sal diff --git a/src/rgw/driver/sfs/object.h b/src/rgw/driver/sfs/object.h index db201f1bb5fa78..0f072b75c2c70b 100644 --- a/src/rgw/driver/sfs/object.h +++ b/src/rgw/driver/sfs/object.h @@ -36,7 +36,9 @@ class SFSObject : public StoreObject { protected: SFSObject(SFSObject&) = default; - void _refresh_meta_from_object(); + void _refresh_meta_from_object( + sfs::ObjectRef objref, bool update_version_id_from_metadata = false + ); public: /** @@ -109,7 +111,7 @@ class SFSObject : public StoreObject { store(_st), bucketref(_bucketref), objref(_objref) { - _refresh_meta_from_object(); + _refresh_meta_from_object(objref); } virtual std::unique_ptr clone() override { @@ -224,7 +226,13 @@ class 
SFSObject : public StoreObject { void set_object_ref(sfs::ObjectRef objref) { this->objref = objref; } - void refresh_meta(); + // Refresh metadata from db. + // Also retrieves version_id when specified. + // There are situations (like delete operations) in which we don't want to + // update the version_id passed in the S3 call. + // Doing so could convert an "add delete marker" call to a "delete a specific + // version" call. + void refresh_meta(bool update_version_id_from_metadata = false); const std::string get_cls_name() { return "object"; } }; diff --git a/src/rgw/driver/sfs/object_state.h b/src/rgw/driver/sfs/object_state.h index 91e4de53887db6..7361d0d6dea4a5 100644 --- a/src/rgw/driver/sfs/object_state.h +++ b/src/rgw/driver/sfs/object_state.h @@ -16,13 +16,7 @@ namespace rgw::sal::sfs { -enum class ObjectState { - OPEN = 0, - COMMITTED, - LOCKED, - DELETED, - LAST_VALUE = DELETED -}; +enum class ObjectState { OPEN = 0, COMMITTED, DELETED, LAST_VALUE = DELETED }; } // namespace rgw::sal::sfs diff --git a/src/rgw/driver/sfs/sfs_gc.cc b/src/rgw/driver/sfs/sfs_gc.cc index 71ad1c6965fc4e..ccc499587fc63b 100644 --- a/src/rgw/driver/sfs/sfs_gc.cc +++ b/src/rgw/driver/sfs/sfs_gc.cc @@ -96,13 +96,16 @@ void SFSGC::delete_objects(const std::string& bucket_id) { } auto obj_instance = std::unique_ptr(Object::create_for_immediate_deletion(object)); + lsfs_dout(this, 30) << "Deleting object: " << object.uuid << dendl; delete_object(*obj_instance.get()); } } void SFSGC::delete_versioned_objects(const Object& object) { sqlite::SQLiteVersionedObjects db_ver_objs(store->db_conn); - auto versions = db_ver_objs.get_versioned_objects(object.path.get_uuid()); + // get all versions. 
Including deleted ones + auto versions = + db_ver_objs.get_versioned_objects(object.path.get_uuid(), false); for (auto const& version : versions) { if (max_objects <= 0) { break; @@ -116,6 +119,7 @@ void SFSGC::delete_versioned_objects(const Object& object) { void SFSGC::delete_bucket(const std::string& bucket_id) { // delete the objects of the bucket first + lsfs_dout(this, 30) << "Deleting bucket: " << bucket_id << dendl; delete_objects(bucket_id); if (max_objects > 0) { sqlite::SQLiteBuckets db_buckets(store->db_conn); diff --git a/src/rgw/driver/sfs/sqlite/bindings/blob.h b/src/rgw/driver/sfs/sqlite/bindings/blob.h new file mode 100644 index 00000000000000..eb4eabd64daccb --- /dev/null +++ b/src/rgw/driver/sfs/sqlite/bindings/blob.h @@ -0,0 +1,69 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t +// vim: ts=8 sw=2 smarttab ft=cpp +/* + * Ceph - scalable distributed file system + * SFS SAL implementation + * + * Copyright (C) 2023 SUSE LLC + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ */ +#pragma once + +#include + +#include "rgw/driver/sfs/sqlite/conversion_utils.h" +#include "rgw/driver/sfs/sqlite/sqlite_orm.h" +#include "rgw_common.h" + +namespace sqlite_orm { + +template +struct type_printer< + T, typename std::enable_if< + std::is_same::value, void>::type> + : public blob_printer {}; + +template +struct statement_binder< + T, typename std::enable_if< + std::is_same::value, void>::type> { + int bind(sqlite3_stmt* stmt, int index, const T& value) { + std::vector blobValue; + rgw::sal::sfs::sqlite::encode_blob(value, blobValue); + return statement_binder>().bind(stmt, index, blobValue); + } +}; + +template +struct field_printer< + T, typename std::enable_if< + std::is_same::value, void>::type> { + std::string operator()(const T& value) const { return "ENCODED BLOB"; } +}; + +template +struct row_extractor< + T, typename std::enable_if< + std::is_same::value, void>::type> { + T extract(sqlite3_stmt* stmt, int columnIndex) { + auto blob_data = sqlite3_column_blob(stmt, columnIndex); + auto blob_size = sqlite3_column_bytes(stmt, columnIndex); + if (blob_data == nullptr || blob_size < 0) { + throw(std::system_error( + ERANGE, std::system_category(), + "Invalid blob at column : (" + std::to_string(columnIndex) + ")" + )); + } + T ret; + rgw::sal::sfs::sqlite::decode_blob( + reinterpret_cast(blob_data), + static_cast(blob_size), ret + ); + return ret; + } +}; +} // namespace sqlite_orm \ No newline at end of file diff --git a/src/rgw/driver/sfs/sqlite/conversion_utils.h b/src/rgw/driver/sfs/sqlite/conversion_utils.h index 8c55bdd70424ed..33d01e1ca7127c 100644 --- a/src/rgw/driver/sfs/sqlite/conversion_utils.h +++ b/src/rgw/driver/sfs/sqlite/conversion_utils.h @@ -25,6 +25,13 @@ void decode_blob(const BLOB_HOLDER& blob_holder, DEST& dest) { ceph::decode(dest, buffer); } +template +void decode_blob(const char* data, size_t data_size, DEST& dest) { + bufferlist buffer; + buffer.append(data, data_size); + ceph::decode(dest, buffer); +} + template void 
encode_blob(const ORIGIN& origin, BLOB_HOLDER& dest) { bufferlist buffer; diff --git a/src/rgw/driver/sfs/sqlite/dbconn.h b/src/rgw/driver/sfs/sqlite/dbconn.h index 3af9ade23d3450..d6a436780512b6 100644 --- a/src/rgw/driver/sfs/sqlite/dbconn.h +++ b/src/rgw/driver/sfs/sqlite/dbconn.h @@ -52,8 +52,9 @@ inline auto _make_storage(const std::string& path) { return sqlite_orm::make_storage( path, sqlite_orm::make_unique_index( - "versioned_object_objid_vid_unique", &DBVersionedObject::object_id, - &DBVersionedObject::version_id + "versioned_object_objid_vid_unique", + &DBOPVersionedObjectInfo::object_id, + &DBOPVersionedObjectInfo::version_id ), sqlite_orm::make_index("bucket_ownerid_idx", &DBBucket::owner_id), sqlite_orm::make_index("bucket_name_idx", &DBBucket::bucket_name), @@ -61,10 +62,10 @@ inline auto _make_storage(const std::string& path) { "objects_bucketid_idx", &DBOPObjectInfo::bucket_id ), sqlite_orm::make_index( - "vobjs_versionid_idx", &DBVersionedObject::version_id + "vobjs_versionid_idx", &DBOPVersionedObjectInfo::version_id ), sqlite_orm::make_index( - "vobjs_object_id_idx", &DBVersionedObject::object_id + "vobjs_object_id_idx", &DBOPVersionedObjectInfo::object_id ), sqlite_orm::make_table( std::string(USERS_TABLE), @@ -137,32 +138,38 @@ inline auto _make_storage(const std::string& path) { sqlite_orm::make_table( std::string(VERSIONED_OBJECTS_TABLE), sqlite_orm::make_column( - "id", &DBVersionedObject::id, sqlite_orm::autoincrement(), + "id", &DBOPVersionedObjectInfo::id, sqlite_orm::autoincrement(), sqlite_orm::primary_key() ), - sqlite_orm::make_column("object_id", &DBVersionedObject::object_id), - sqlite_orm::make_column("checksum", &DBVersionedObject::checksum), - sqlite_orm::make_column("size", &DBVersionedObject::size), sqlite_orm::make_column( - "create_time", &DBVersionedObject::create_time + "object_id", &DBOPVersionedObjectInfo::object_id ), sqlite_orm::make_column( - "delete_time", &DBVersionedObject::delete_time + "checksum", 
&DBOPVersionedObjectInfo::checksum ), + sqlite_orm::make_column("size", &DBOPVersionedObjectInfo::size), sqlite_orm::make_column( - "commit_time", &DBVersionedObject::commit_time + "create_time", &DBOPVersionedObjectInfo::create_time ), - sqlite_orm::make_column("mtime", &DBVersionedObject::mtime), sqlite_orm::make_column( - "object_state", &DBVersionedObject::object_state + "delete_time", &DBOPVersionedObjectInfo::delete_time ), - sqlite_orm::make_column("version_id", &DBVersionedObject::version_id), - sqlite_orm::make_column("etag", &DBVersionedObject::etag), - sqlite_orm::make_column("attrs", &DBVersionedObject::attrs), sqlite_orm::make_column( - "version_type", &DBVersionedObject::version_type + "commit_time", &DBOPVersionedObjectInfo::commit_time ), - sqlite_orm::foreign_key(&DBVersionedObject::object_id) + sqlite_orm::make_column("mtime", &DBOPVersionedObjectInfo::mtime), + sqlite_orm::make_column( + "object_state", &DBOPVersionedObjectInfo::object_state + ), + sqlite_orm::make_column( + "version_id", &DBOPVersionedObjectInfo::version_id + ), + sqlite_orm::make_column("etag", &DBOPVersionedObjectInfo::etag), + sqlite_orm::make_column("attrs", &DBOPVersionedObjectInfo::attrs), + sqlite_orm::make_column( + "version_type", &DBOPVersionedObjectInfo::version_type + ), + sqlite_orm::foreign_key(&DBOPVersionedObjectInfo::object_id) .references(&DBOPObjectInfo::uuid) ), sqlite_orm::make_table( diff --git a/src/rgw/driver/sfs/sqlite/sqlite_objects.cc b/src/rgw/driver/sfs/sqlite/sqlite_objects.cc index 8df3e32e5fef14..67d16e109fc57d 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_objects.cc +++ b/src/rgw/driver/sfs/sqlite/sqlite_objects.cc @@ -13,6 +13,8 @@ */ #include "sqlite_objects.h" +#include "sqlite_versioned_objects.h" + using namespace sqlite_orm; namespace rgw::sal::sfs::sqlite { @@ -47,6 +49,7 @@ std::optional SQLiteObjects::get_object( is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and is_equal(&DBOPObjectInfo::name, object_name) )); + std::optional 
ret_value; // value must be unique if (objects.size() == 1) { @@ -65,17 +68,4 @@ void SQLiteObjects::remove_object(const uuid_d& uuid) const { storage.remove(uuid); } -std::vector SQLiteObjects::get_object_ids() const { - auto storage = conn->get_storage(); - return storage.select(&DBOPObjectInfo::uuid); -} - -std::vector SQLiteObjects::get_object_ids(const std::string& bucket_id -) const { - auto storage = conn->get_storage(); - return storage.select( - &DBOPObjectInfo::uuid, where(c(&DBOPObjectInfo::bucket_id) = bucket_id) - ); -} - } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/sqlite_objects.h b/src/rgw/driver/sfs/sqlite/sqlite_objects.h index 5ce7bfe3310640..c04c451cdaee73 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_objects.h +++ b/src/rgw/driver/sfs/sqlite/sqlite_objects.h @@ -28,16 +28,15 @@ class SQLiteObjects { SQLiteObjects& operator=(const SQLiteObjects&) = delete; std::vector get_objects(const std::string& bucket_id) const; + std::optional get_object(const uuid_d& uuid) const; + std::optional get_object( const std::string& bucket_id, const std::string& object_name ) const; void store_object(const DBOPObjectInfo& object) const; void remove_object(const uuid_d& uuid) const; - - std::vector get_object_ids() const; - std::vector get_object_ids(const std::string& bucket_id) const; }; } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc index 14c172304a2768..c2f40099d1584a 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc +++ b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc @@ -13,110 +13,416 @@ */ #include "sqlite_versioned_objects.h" +#include "rgw/driver/sfs/uuid_path.h" + using namespace sqlite_orm; namespace rgw::sal::sfs::sqlite { -std::vector get_rgw_versioned_objects( - const std::vector& db_versioned_objects -) { - std::vector ret_objs; - for (const auto& db_obj : db_versioned_objects) { - 
auto rgw_obj = get_rgw_versioned_object(db_obj); - ret_objs.push_back(rgw_obj); - } - return ret_objs; -} - SQLiteVersionedObjects::SQLiteVersionedObjects(DBConnRef _conn) : conn(_conn) {} std::optional -SQLiteVersionedObjects::get_versioned_object(uint id) const { +SQLiteVersionedObjects::get_versioned_object(uint id, bool filter_deleted) + const { auto storage = conn->get_storage(); - auto object = storage.get_pointer(id); + auto object = storage.get_pointer(id); std::optional ret_value; if (object) { - ret_value = get_rgw_versioned_object(*object); + if (!filter_deleted || object->object_state != ObjectState::DELETED) { + ret_value = *object; + } } return ret_value; } std::optional -SQLiteVersionedObjects::get_versioned_object(const std::string& version_id +SQLiteVersionedObjects::get_versioned_object( + const std::string& version_id, bool filter_deleted ) const { auto storage = conn->get_storage(); - auto versioned_objects = storage.get_all( - where(c(&DBVersionedObject::version_id) = version_id) + auto versioned_objects = storage.get_all( + where(c(&DBOPVersionedObjectInfo::version_id) = version_id) ); ceph_assert(versioned_objects.size() <= 1); std::optional ret_value; if (versioned_objects.size()) { - ret_value = get_rgw_versioned_object(versioned_objects[0]); + if (!filter_deleted || + versioned_objects[0].object_state != ObjectState::DELETED) { + ret_value = versioned_objects[0]; + } } return ret_value; } +std::optional +SQLiteVersionedObjects::get_versioned_object( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id +) const { + if (version_id.empty()) { + return _get_versioned_object(bucket_id, object_name); + } + return _get_versioned_object(bucket_id, object_name, version_id); +} + +DBOPObjectsListItems SQLiteVersionedObjects::list_last_versioned_objects( + const std::string& bucket_id +) const { + auto storage = conn->get_storage(); + auto results = storage.select( + columns( + &DBOPObjectInfo::uuid, 
&DBOPObjectInfo::name, + &DBOPVersionedObjectInfo::version_id, + max(&DBOPVersionedObjectInfo::commit_time), + max(&DBOPVersionedObjectInfo::id), &DBOPVersionedObjectInfo::size, + &DBOPVersionedObjectInfo::etag, &DBOPVersionedObjectInfo::mtime, + &DBOPVersionedObjectInfo::delete_time, + &DBOPVersionedObjectInfo::attrs, + &DBOPVersionedObjectInfo::version_type, + &DBOPVersionedObjectInfo::object_state + ), + inner_join(on( + is_equal(&DBOPObjectInfo::uuid, &DBOPVersionedObjectInfo::object_id) + )), + where( + is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ), + group_by(&DBOPObjectInfo::uuid), + order_by(&DBOPVersionedObjectInfo::create_time).asc() + ); + return results; +} + uint SQLiteVersionedObjects::insert_versioned_object( const DBOPVersionedObjectInfo& object ) const { auto storage = conn->get_storage(); - auto db_object = get_db_versioned_object(object); - return storage.insert(db_object); + auto id = storage.insert(object); + return id; } void SQLiteVersionedObjects::store_versioned_object( const DBOPVersionedObjectInfo& object ) const { auto storage = conn->get_storage(); - auto db_object = get_db_versioned_object(object); - storage.update(db_object); + storage.update(object); +} + +void SQLiteVersionedObjects::store_versioned_object_delete_rest_transact( + const DBOPVersionedObjectInfo& object +) const { + try { + auto storage = conn->get_storage(); + auto transaction = storage.transaction_guard(); + storage.update(object); + // soft delete the rest of this object + storage.update_all( + set(c(&DBOPVersionedObjectInfo::object_state) = ObjectState::DELETED), + where( + is_equal(&DBOPVersionedObjectInfo::object_id, object.object_id) and + is_not_equal(&DBOPVersionedObjectInfo::id, object.id) + ) + ); + transaction.commit(); + } catch (const std::system_error& e) { + // throw exception (will be caught later in the sfs logic) + // TODO revisit this when error handling is 
defined + throw(e); + } } void SQLiteVersionedObjects::remove_versioned_object(uint id) const { auto storage = conn->get_storage(); - storage.remove(id); + storage.remove(id); } -std::vector SQLiteVersionedObjects::get_versioned_object_ids() const { +std::vector SQLiteVersionedObjects::get_versioned_object_ids( + bool filter_deleted +) const { auto storage = conn->get_storage(); - return storage.select(&DBVersionedObject::id); + if (filter_deleted) { + return storage.select( + &DBOPVersionedObjectInfo::id, + where(is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + )) + ); + } + return storage.select(&DBOPVersionedObjectInfo::id); } std::vector SQLiteVersionedObjects::get_versioned_object_ids( - const uuid_d& object_id + const uuid_d& object_id, bool filter_deleted ) const { auto storage = conn->get_storage(); auto uuid = object_id.to_string(); + if (filter_deleted) { + return storage.select( + &DBOPVersionedObjectInfo::id, + where( + is_equal(&DBOPVersionedObjectInfo::object_id, uuid) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ) + ); + } return storage.select( - &DBVersionedObject::id, where(c(&DBVersionedObject::object_id) = uuid) + &DBOPVersionedObjectInfo::id, + where(c(&DBOPVersionedObjectInfo::object_id) = uuid) ); } std::vector -SQLiteVersionedObjects::get_versioned_objects(const uuid_d& object_id) const { +SQLiteVersionedObjects::get_versioned_objects( + const uuid_d& object_id, bool filter_deleted +) const { auto storage = conn->get_storage(); auto uuid = object_id.to_string(); - auto versioned_objects = storage.get_all( - where(c(&DBVersionedObject::object_id) = uuid) + if (filter_deleted) { + return storage.get_all( + where( + is_equal(&DBOPVersionedObjectInfo::object_id, uuid) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ), + order_by(&DBOPVersionedObjectInfo::commit_time).desc() + ); + } + return storage.get_all( + 
where(c(&DBOPVersionedObjectInfo::object_id) = uuid) + ); +} + +std::optional +SQLiteVersionedObjects::get_last_versioned_object( + const uuid_d& object_id, bool filter_deleted +) const { + auto storage = conn->get_storage(); + std::vector>> + max_commit_time_ids; + // we are looking for the ids that match the object_id with the highest + // commit_time and we want to get the highest id. + if (filter_deleted) { + max_commit_time_ids = storage.select( + columns( + &DBOPVersionedObjectInfo::id, + max(&DBOPVersionedObjectInfo::commit_time) + ), + where( + is_equal(&DBOPVersionedObjectInfo::object_id, object_id) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ), + group_by(&DBOPVersionedObjectInfo::id), + order_by(&DBOPVersionedObjectInfo::id).desc() + ); + } else { + max_commit_time_ids = storage.select( + columns( + &DBOPVersionedObjectInfo::id, + max(&DBOPVersionedObjectInfo::commit_time) + ), + where(is_equal(&DBOPVersionedObjectInfo::object_id, object_id)), + group_by(&DBOPVersionedObjectInfo::id), + order_by(&DBOPVersionedObjectInfo::id).desc() + ); + } + + // if found, value we are looking for is in the first position of the results + // because we ordered descending in the query + auto found_value = max_commit_time_ids.size() && + std::get<1>(max_commit_time_ids[0]) != nullptr; + + std::optional ret_value; + if (found_value) { + auto last_version_id = std::get<0>(max_commit_time_ids[0]); + auto last_version = + storage.get_pointer(last_version_id); + if (last_version) { + ret_value = *last_version; + } + } + return ret_value; +} + +std::optional +SQLiteVersionedObjects::delete_version_and_get_previous_transact(uint id) { + try { + auto storage = conn->get_storage(); + auto transaction = storage.transaction_guard(); + auto version = storage.get_pointer(id); + std::optional ret_value; + if (version != nullptr) { + auto object_id = version->object_id; + storage.remove(id); + // get the last version of the object now + auto 
max_commit_time_ids = storage.select( + columns( + &DBOPVersionedObjectInfo::id, + max(&DBOPVersionedObjectInfo::commit_time) + ), + where( + is_equal(&DBOPVersionedObjectInfo::object_id, object_id) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ), + group_by(&DBOPVersionedObjectInfo::id), + order_by(&DBOPVersionedObjectInfo::id).desc() + ); + auto found_value = max_commit_time_ids.size() && + std::get<1>(max_commit_time_ids[0]) != nullptr; + if (found_value) { + // if no value is found it could be, for example, because lifecycle + // deleted all non-current versions before. + auto last_version_id = std::get<0>(max_commit_time_ids[0]); + auto last_version = + storage.get_pointer(last_version_id); + if (last_version) { + ret_value = *last_version; + } + } + transaction.commit(); + } + return ret_value; + } catch (const std::system_error& e) { + // throw exception (will be caught later in the sfs logic) + // TODO revisit this when error handling is defined + throw(e); + } +} + +std::optional +SQLiteVersionedObjects::_get_versioned_object( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id +) const { + auto storage = conn->get_storage(); + auto ids = storage.select( + &DBOPVersionedObjectInfo::id, + inner_join(on( + is_equal(&DBOPObjectInfo::uuid, &DBOPVersionedObjectInfo::object_id) + )), + where( + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) and + is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and + is_equal(&DBOPObjectInfo::name, object_name) and + is_equal(&DBOPVersionedObjectInfo::version_id, version_id) + ) ); - return get_rgw_versioned_objects(versioned_objects); + // TODO return an error if this returns more than 1 version?
+ // Only 1 object with no deleted versions should be present + std::optional ret_value; + if (ids.size() > 0) { + auto version = storage.get_pointer(ids[0]); + if (version != nullptr) { + ret_value = *version; + } + } + return ret_value; } std::optional -SQLiteVersionedObjects::get_last_versioned_object(const uuid_d& object_id +SQLiteVersionedObjects::_get_versioned_object( + const std::string& bucket_id, const std::string& object_name ) const { + // we don't have a version_id, so return the last available one that is + // committed auto storage = conn->get_storage(); - auto last_version_id = storage.max( - &DBVersionedObject::id, - where(c(&DBVersionedObject::object_id) = object_id.to_string()) + auto max_commit_time_ids = storage.select( + columns( + &DBOPVersionedObjectInfo::id, + max(&DBOPVersionedObjectInfo::commit_time) + ), + inner_join(on( + is_equal(&DBOPObjectInfo::uuid, &DBOPVersionedObjectInfo::object_id) + )), + where( + is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and + is_equal(&DBOPObjectInfo::name, object_name) and + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) + ), + group_by(&DBOPVersionedObjectInfo::id), + order_by(&DBOPVersionedObjectInfo::id).desc() + ); + auto found_value = max_commit_time_ids.size() && + std::get<1>(max_commit_time_ids[0]) != nullptr; std::optional ret_value; - if (last_version_id) { + if (found_value) { + // if no value is found it could be, for example, because lifecycle + // deleted all non-current versions before.
+ auto last_version_id = std::get<0>(max_commit_time_ids[0]); auto last_version = - storage.get_pointer(*last_version_id); + storage.get_pointer(last_version_id); if (last_version) { - ret_value = get_rgw_versioned_object(*last_version); + ret_value = *last_version; + } + } + return ret_value; +} + +std::optional +SQLiteVersionedObjects::create_new_versioned_object_transact( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id +) const { + std::optional ret_value; + try { + auto storage = conn->get_storage(); + auto transaction = storage.transaction_guard(); + auto objs = storage.select( + columns(&DBOPObjectInfo::uuid), + inner_join(on( + is_equal(&DBOPObjectInfo::uuid, &DBOPVersionedObjectInfo::object_id) + )), + where( + is_not_equal( + &DBOPVersionedObjectInfo::object_state, ObjectState::DELETED + ) and + is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and + is_equal(&DBOPObjectInfo::name, object_name) + ), + group_by(&DBOPObjectInfo::uuid) + ); + // should return none or 1 + // TODO throw error if it returns more than 1 when error handling is + // defined? 
(only 1 object with the same name and not deleted should be + // found) + DBOPObjectInfo obj; + obj.name = object_name; + obj.bucket_id = bucket_id; + if (objs.size() == 0) { + // object does not exist + // create it + obj.uuid.generate_random(); + storage.replace(obj); + } else { + obj.uuid = std::get<0>(objs[0]); } + // create the version now + DBOPVersionedObjectInfo version; + version.object_id = obj.uuid; + version.object_state = ObjectState::OPEN; + version.version_type = VersionType::REGULAR; + version.version_id = version_id; + version.create_time = ceph::real_clock::now(); + version.id = storage.insert(version); + transaction.commit(); + ret_value = version; + } catch (const std::system_error& e) { + // throw exception (will be caught later in the sfs logic) + // TODO revisit this when error handling is defined + throw(e); } return ret_value; } diff --git a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h index caacff524fc5c9..d6b7aed886f443 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h +++ b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h @@ -14,7 +14,7 @@ #pragma once #include "dbconn.h" -#include "versioned_object/versioned_object_conversions.h" +#include "versioned_object/versioned_object_definitions.h" namespace rgw::sal::sfs::sqlite { @@ -28,23 +28,54 @@ class SQLiteVersionedObjects { SQLiteVersionedObjects(const SQLiteVersionedObjects&) = delete; SQLiteVersionedObjects& operator=(const SQLiteVersionedObjects&) = delete; - std::optional get_versioned_object(uint id) const; std::optional get_versioned_object( + uint id, bool filter_deleted = true + ) const; + std::optional get_versioned_object( + const std::string& version_id, bool filter_deleted = true + ) const; + std::optional get_versioned_object( + const std::string& bucket_id, const std::string& object_name, const std::string& version_id ) const; + DBOPObjectsListItems list_last_versioned_objects(const 
std::string& bucket_id + ) const; uint insert_versioned_object(const DBOPVersionedObjectInfo& object) const; void store_versioned_object(const DBOPVersionedObjectInfo& object) const; void remove_versioned_object(uint id) const; + void store_versioned_object_delete_rest_transact( + const DBOPVersionedObjectInfo& object + ) const; - std::vector get_versioned_object_ids() const; - std::vector get_versioned_object_ids(const uuid_d& object_id) const; + std::vector get_versioned_object_ids(bool filter_deleted = true) const; + std::vector get_versioned_object_ids( + const uuid_d& object_id, bool filter_deleted = true + ) const; std::vector get_versioned_objects( - const uuid_d& object_id + const uuid_d& object_id, bool filter_deleted = true ) const; std::optional get_last_versioned_object( - const uuid_d& object_id + const uuid_d& object_id, bool filter_deleted = true + ) const; + + std::optional + delete_version_and_get_previous_transact(uint id); + + std::optional create_new_versioned_object_transact( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id + ) const; + + private: + std::optional _get_versioned_object( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id + ) const; + + std::optional _get_versioned_object( + const std::string& bucket_id, const std::string& object_name ) const; }; diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc deleted file mode 100644 index 9bcc4346c04921..00000000000000 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc +++ /dev/null @@ -1,70 +0,0 @@ -// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -// vim: ts=8 sw=2 smarttab ft=cpp -/* - * Ceph - scalable distributed file system - * SFS SAL implementation - * - * Copyright (C) 2022 SUSE LLC - * - * This is free software; you can redistribute 
it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software - * Foundation. See file COPYING. - */ -#include "versioned_object_conversions.h" - -#include "../conversion_utils.h" - -namespace rgw::sal::sfs::sqlite { - -ObjectState get_object_state(uint state) { - if (state > static_cast(ObjectState::LAST_VALUE)) { - throw(std::runtime_error( - "incorrect state found (" + std::to_string(state) + ")" - )); - } - return static_cast(state); -} - -uint get_uint_object_state(ObjectState state) { - return static_cast(state); -} - -DBOPVersionedObjectInfo get_rgw_versioned_object(const DBVersionedObject& object -) { - DBOPVersionedObjectInfo rgw_object; - rgw_object.id = object.id; - rgw_object.object_id = object.object_id; - rgw_object.checksum = object.checksum; - rgw_object.size = object.size; - rgw_object.create_time = object.create_time; - rgw_object.delete_time = object.delete_time; - rgw_object.commit_time = object.commit_time; - rgw_object.mtime = object.mtime; - rgw_object.object_state = object.object_state; - rgw_object.version_id = object.version_id; - rgw_object.etag = object.etag; - assign_optional_value(object.attrs, rgw_object.attrs); - rgw_object.version_type = object.version_type; - return rgw_object; -} - -DBVersionedObject get_db_versioned_object(const DBOPVersionedObjectInfo& object -) { - DBVersionedObject db_object; - db_object.id = object.id; - db_object.object_id = object.object_id; - db_object.checksum = object.checksum; - db_object.size = object.size; - db_object.create_time = object.create_time; - db_object.delete_time = object.delete_time; - db_object.commit_time = object.commit_time; - db_object.mtime = object.mtime; - db_object.object_state = object.object_state; - db_object.version_id = object.version_id; - db_object.etag = object.etag; - assign_db_value(object.attrs, db_object.attrs); - db_object.version_type = object.version_type; - return db_object; -} -} // namespace 
rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h deleted file mode 100644 index bfaa913a612b07..00000000000000 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -// vim: ts=8 sw=2 smarttab ft=cpp -/* - * Ceph - scalable distributed file system - * SFS SAL implementation - * - * Copyright (C) 2022 SUSE LLC - * - * This is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software - * Foundation. See file COPYING. - */ -#pragma once - -#include "versioned_object_definitions.h" - -namespace rgw::sal::sfs::sqlite { - -// Functions that convert DB type to RGW type (and vice-versa) -DBOPVersionedObjectInfo get_rgw_versioned_object(const DBVersionedObject& object -); -DBVersionedObject get_db_versioned_object(const DBOPVersionedObjectInfo& object -); - -} // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h index b41a60a9659f8e..f02dcf2742edde 100644 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h +++ b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h @@ -16,6 +16,7 @@ #include #include "rgw/driver/sfs/object_state.h" +#include "rgw/driver/sfs/sqlite/bindings/blob.h" #include "rgw/driver/sfs/sqlite/bindings/enum.h" #include "rgw/driver/sfs/sqlite/bindings/real_time.h" #include "rgw/driver/sfs/version_type.h" @@ -24,24 +25,6 @@ namespace rgw::sal::sfs::sqlite { -using BLOB = std::vector; - -struct DBVersionedObject { - uint id; - uuid_d object_id; - std::string checksum; - size_t size; - ceph::real_time 
create_time; - ceph::real_time delete_time; - ceph::real_time commit_time; - ceph::real_time mtime; - ObjectState object_state; - std::string version_id; - std::string etag; - std::optional attrs; - VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; -}; - struct DBOPVersionedObjectInfo { uint id; uuid_d object_id; @@ -58,4 +41,84 @@ struct DBOPVersionedObjectInfo { VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; }; +using DBOPObjectsListItem = std::tuple< + decltype(DBOPObjectInfo::uuid), decltype(DBOPObjectInfo::name), + decltype(DBOPVersionedObjectInfo::version_id), + std::unique_ptr, + std::unique_ptr, + decltype(DBOPVersionedObjectInfo::size), + decltype(DBOPVersionedObjectInfo::etag), + decltype(DBOPVersionedObjectInfo::mtime), + decltype(DBOPVersionedObjectInfo::delete_time), + decltype(DBOPVersionedObjectInfo::attrs), + decltype(DBOPVersionedObjectInfo::version_type), + decltype(DBOPVersionedObjectInfo::object_state)>; + +using DBOPObjectsListItems = std::vector; + +/// DBOPObjectsListItem helpers +inline decltype(DBOPObjectInfo::uuid) get_uuid(const DBOPObjectsListItem& item +) { + return std::get<0>(item); +} + +inline decltype(DBOPObjectInfo::name) get_name(const DBOPObjectsListItem& item +) { + return std::get<1>(item); +} + +inline decltype(DBOPVersionedObjectInfo::version_id) get_version_id( + const DBOPObjectsListItem& item +) { + return std::get<2>(item); +} + +inline decltype(DBOPVersionedObjectInfo::id) get_id( + const DBOPObjectsListItem& item +) { + return *(std::get<4>(item)); +} + +inline decltype(DBOPVersionedObjectInfo::size) get_size( + const DBOPObjectsListItem& item +) { + return std::get<5>(item); +} + +inline decltype(DBOPVersionedObjectInfo::etag) get_etag( + const DBOPObjectsListItem& item +) { + return std::get<6>(item); +} + +inline decltype(DBOPVersionedObjectInfo::mtime) get_mtime( + const DBOPObjectsListItem& item +) { + return std::get<7>(item); +} + +inline 
decltype(DBOPVersionedObjectInfo::delete_time) get_delete_time( + const DBOPObjectsListItem& item +) { + return std::get<8>(item); +} + +inline decltype(DBOPVersionedObjectInfo::attrs) get_attrs( + const DBOPObjectsListItem& item +) { + return std::get<9>(item); +} + +inline decltype(DBOPVersionedObjectInfo::version_type) get_version_type( + const DBOPObjectsListItem& item +) { + return std::get<10>(item); +} + +inline decltype(DBOPVersionedObjectInfo::object_state) get_object_state( + const DBOPObjectsListItem& item +) { + return std::get<11>(item); +} + } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/types.cc b/src/rgw/driver/sfs/types.cc index 525a6030032b18..81d220840573a2 100644 --- a/src/rgw/driver/sfs/types.cc +++ b/src/rgw/driver/sfs/types.cc @@ -25,10 +25,18 @@ #include "rgw/rgw_sal_sfs.h" #include "rgw_sal_sfs.h" +#define dout_subsys ceph_subsys_rgw namespace rgw::sal::sfs { -Object::Object(const std::string& _name, const uuid_d& _uuid) - : name(_name), path(_uuid), deleted(false) {} +std::string generate_new_version_id(CephContext* ceph_context) { +#define OBJ_INSTANCE_LEN 32 + char buf[OBJ_INSTANCE_LEN + 1]; + gen_rand_alphanumeric_no_underscore(ceph_context, buf, OBJ_INSTANCE_LEN); + return std::string(buf); +} + +Object::Object(const rgw_obj_key& _key, const uuid_d& _uuid) + : name(_key.name), instance(_key.instance), path(_uuid), deleted(false) {} Object* Object::create_for_immediate_deletion( const sqlite::DBOPObjectInfo& object @@ -57,103 +65,79 @@ Object* Object::create_from_obj_key(const rgw_obj_key& key) { return result; } -Object* Object::create_for_multipart(const std::string& name) { - Object* result = new Object(name, UUIDPath::create().get_uuid()); +Object* Object::create_from_db_version( + const std::string& object_name, + const sqlite::DBOPVersionedObjectInfo& version +) { + Object* result = new Object( + rgw_obj_key(object_name, version.version_id), version.object_id + ); + result->deleted = (version.version_type == 
VersionType::DELETE_MARKER); + result->version_id = version.id; + result->meta = { + .size = version.size, + .etag = version.etag, + .mtime = version.mtime, + .delete_at = version.delete_time}; + result->attrs = version.attrs; return result; } -Object* Object::create_commit_delete_marker( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id +Object* Object::create_from_db_version( + const std::string& object_name, const sqlite::DBOPObjectsListItem& version ) { - Object* result = new Object(key); - result->deleted = true; - - sqlite::DBOPObjectInfo oinfo; - oinfo.uuid = result->path.get_uuid(); - oinfo.bucket_id = bucket_id; - oinfo.name = result->name; + Object* result = new Object( + rgw_obj_key(object_name, sqlite::get_version_id(version)), + sqlite::get_uuid(version) + ); + result->deleted = + (sqlite::get_version_type(version) == VersionType::DELETE_MARKER); + result->version_id = sqlite::get_id(version); + result->meta = { + .size = sqlite::get_size(version), + .etag = sqlite::get_etag(version), + .mtime = sqlite::get_mtime(version), + .delete_at = sqlite::get_delete_time(version)}; + result->attrs = sqlite::get_attrs(version); + return result; +} - sqlite::SQLiteObjects dbobjs(store->db_conn); - dbobjs.store_object(oinfo); +Object* Object::create_for_multipart(const std::string& name) { + Object* result = new Object(name, UUIDPath::create().get_uuid()); return result; } -Object* Object::create_commit_new_object( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id, - const std::string* version_id +Object* Object::create_commit_delete_marker( + const rgw_obj_key& key, SFStore* store, const std::string& bucket_id ) { Object* result = new Object(key); - - if (version_id != nullptr) { - result->instance = *version_id; - } + result->deleted = true; sqlite::DBOPObjectInfo oinfo; oinfo.uuid = result->path.get_uuid(); oinfo.bucket_id = bucket_id; oinfo.name = result->name; - // TODO(https://github.com/aquarist-labs/s3gw/issues/378) 
make - // object and version insert a transaction sqlite::SQLiteObjects dbobjs(store->db_conn); dbobjs.store_object(oinfo); - - sqlite::DBOPVersionedObjectInfo version_info; - version_info.object_id = result->path.get_uuid(); - version_info.object_state = ObjectState::OPEN; - version_info.version_id = result->instance; - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - result->version_id = db_versioned_objs.insert_versioned_object(version_info); - return result; -} - -Object* Object::try_create_with_last_version_fetch_from_database( - SFStore* store, const std::string& name, const std::string& bucket_id -) { - sqlite::SQLiteObjects objs(store->db_conn); - auto obj = objs.get_object(bucket_id, name); - if (!obj) { - return nullptr; - } - - sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); - auto last_version = objs_versions.get_last_versioned_object(obj->uuid); - if (!last_version.has_value()) { - return nullptr; - } - - Object* result = new Object(name, obj->uuid); - result->deleted = (last_version->object_state == ObjectState::DELETED); - result->version_id = last_version->id; - result->meta = { - .size = last_version->size, - .etag = last_version->etag, - .mtime = last_version->mtime, - .delete_at = last_version->delete_time}; - result->attrs = last_version->attrs; - result->instance = last_version->version_id; - return result; } -Object* Object::try_create_fetch_from_database( +Object* Object::try_fetch_from_database( SFStore* store, const std::string& name, const std::string& bucket_id, const std::string& version_id ) { - sqlite::SQLiteObjects objs(store->db_conn); - auto obj = objs.get_object(bucket_id, name); - if (!obj) { - return nullptr; - } - sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); - auto version = objs_versions.get_versioned_object(version_id); + // if version_id is empty it will get the last version for that object + auto version = + objs_versions.get_versioned_object(bucket_id, name, version_id); if 
(!version.has_value()) { return nullptr; } - Object* result = new Object(name, obj->uuid); - result->deleted = (version->object_state == ObjectState::DELETED); + auto result = + new Object(rgw_obj_key(name, version->version_id), version->object_id); + result->deleted = (version->version_type == VersionType::DELETE_MARKER); result->version_id = version->id; result->meta = { .size = version->size, @@ -161,7 +145,7 @@ Object* Object::try_create_fetch_from_database( .mtime = version->mtime, .delete_at = version->delete_time}; result->attrs = version->attrs; - result->instance = version->version_id; + return result; } @@ -206,30 +190,6 @@ void Object::update_attrs(const Attrs& update) { attrs = update; } -void Object::update_commit_new_version( - SFStore* store, const std::string& new_version -) { - sqlite::DBOPVersionedObjectInfo version_info; - version_info.object_id = path.get_uuid(); - version_info.object_state = ObjectState::OPEN; - version_info.version_id = new_version; - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - version_id = db_versioned_objs.insert_versioned_object(version_info); - instance = new_version; -} - -void Object::metadata_change_version_state(SFStore* store, ObjectState state) { - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - auto versioned_object = db_versioned_objs.get_versioned_object(version_id); - ceph_assert(versioned_object.has_value()); - versioned_object->object_state = state; - if (state == ObjectState::DELETED) { - deleted = true; - versioned_object->delete_time = ceph::real_clock::now(); - } - db_versioned_objs.store_versioned_object(*versioned_object); -} - void Object::metadata_flush_attrs(SFStore* store) { sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); auto versioned_object = db_versioned_objs.get_versioned_object(version_id); @@ -238,7 +198,7 @@ void Object::metadata_flush_attrs(SFStore* store) { db_versioned_objs.store_versioned_object(*versioned_object); } -void 
Object::metadata_finish(SFStore* store) { +void Object::metadata_finish(SFStore* store, bool versioning_enabled) { sqlite::SQLiteObjects dbobjs(store->db_conn); auto db_object = dbobjs.get_object(path.get_uuid()); ceph_assert(db_object.has_value()); @@ -246,7 +206,11 @@ void Object::metadata_finish(SFStore* store) { dbobjs.store_object(*db_object); sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - auto db_versioned_object = db_versioned_objs.get_versioned_object(version_id); + // get the object, even if it was deleted. + // 2 threads could be creating and deleting the object in parallel. + // last one finishing wins + auto db_versioned_object = + db_versioned_objs.get_versioned_object(version_id, false); ceph_assert(db_versioned_object.has_value()); // TODO calculate checksum. Is it already calculated while writing? db_versioned_object->size = meta.size; @@ -254,9 +218,16 @@ void Object::metadata_finish(SFStore* store) { db_versioned_object->delete_time = meta.delete_at; db_versioned_object->mtime = meta.mtime; db_versioned_object->object_state = ObjectState::COMMITTED; + db_versioned_object->commit_time = ceph::real_clock::now(); db_versioned_object->etag = meta.etag; db_versioned_object->attrs = get_attrs(); - db_versioned_objs.store_versioned_object(*db_versioned_object); + if (versioning_enabled) { + db_versioned_objs.store_versioned_object(*db_versioned_object); + } else { + db_versioned_objs.store_versioned_object_delete_rest_transact( + *db_versioned_object + ); + } } int Object::delete_object_version(SFStore* store) const { @@ -327,97 +298,83 @@ void MultipartUpload::abort(const DoutPrefixProvider* dpp) { objref.reset(); } -ObjectRef Bucket::get_or_create(const rgw_obj_key& key) { - const bool wants_specific_version = !key.instance.empty(); +ObjectRef Bucket::create_version(const rgw_obj_key& key) { + // even if a specific version was not asked we generate one + // non-versioned bucket objects will also have a version_id + auto version_id 
= key.instance; + if (version_id.empty()) { + version_id = generate_new_version_id(store->ceph_context()); + } ObjectRef result; - - auto maybe_result = Object::try_create_with_last_version_fetch_from_database( - store, key.name, info.bucket.bucket_id + sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); + // create objects in a transaction. + // That way threads trying to create the same object in parallel will be + // synchronised by the database without using extra mutexes. + auto new_version = objs_versions.create_new_versioned_object_transact( + info.bucket.bucket_id, key.name, version_id ); - - if (maybe_result == nullptr) { // new object - result.reset(Object::create_commit_new_object( - key, store, info.bucket.bucket_id, &key.instance - )); - return result; + if (new_version.has_value()) { + result.reset(Object::create_from_db_version(key.name, *new_version)); } - - // an object exists with at least 1 version - if (wants_specific_version && maybe_result->instance == key.instance) { - // requested version happens to be the last version - result.reset(maybe_result); - } else if (wants_specific_version && maybe_result->instance != key.instance) { - // requested version is not last - - auto specific_version_object = Object::try_create_fetch_from_database( - store, key.name, info.bucket.bucket_id, key.instance - ); - - if (specific_version_object == nullptr) { - // requested version does not exist -> create it from last - // version object - result.reset(maybe_result); - result->update_commit_new_version(store, key.instance); - } else { - // requested version does exist -> return it - result.reset(specific_version_object); - } - } else { - // no specific version requested - return last - result.reset(maybe_result); - } - - ceph_assert(result); return result; } -ObjectRef Bucket::get(const std::string& name) { - auto maybe_result = Object::try_create_with_last_version_fetch_from_database( - store, name, info.bucket.bucket_id +ObjectRef Bucket::get(const 
rgw_obj_key& key) { + auto maybe_result = Object::try_fetch_from_database( + store, key.name, info.bucket.bucket_id, key.instance ); if (maybe_result == nullptr) { throw UnknownObjectException(); } + return std::shared_ptr(maybe_result); } std::vector Bucket::get_all() { std::vector result; - sqlite::SQLiteObjects dbobjs(store->db_conn); - for (const auto& db_obj : dbobjs.get_objects(info.bucket.bucket_id)) { - result.push_back(get(db_obj.name)); + sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); + // get the list of objects and its last version (filters deleted versions) + // if an object has all versions deleted it is also filtered + auto objects = + db_versioned_objs.list_last_versioned_objects(info.bucket.bucket_id); + for (const auto& db_obj : objects) { + if (sqlite::get_object_state(db_obj) == ObjectState::COMMITTED) { + result.push_back(std::shared_ptr( + Object::create_from_db_version(sqlite::get_name(db_obj), db_obj) + )); + } } return result; } -void Bucket::delete_object(ObjectRef objref, const rgw_obj_key& key) { +void Bucket::delete_object( + ObjectRef objref, const rgw_obj_key& key, bool versioned_bucket, + std::string& delete_marker_version_id +) { + delete_marker_version_id = ""; sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - // get the last available version to make a copy changing the object state to DELETED - auto last_version = - db_versioned_objs.get_last_versioned_object(objref->path.get_uuid()); - ceph_assert(last_version.has_value()); - if (last_version->object_state == ObjectState::DELETED) { - _undelete_object(objref, key, db_versioned_objs, *last_version); - } else { - last_version->object_state = ObjectState::DELETED; - last_version->delete_time = ceph::real_clock::now(); - if (last_version->version_id != "") { -// generate a new version id -#define OBJ_INSTANCE_LEN 32 - char buf[OBJ_INSTANCE_LEN + 1]; - gen_rand_alphanumeric_no_underscore( - store->ceph_context(), buf, OBJ_INSTANCE_LEN - ); - 
last_version->version_id = std::string(buf); - objref->instance = last_version->version_id; - // insert a new deleted version - db_versioned_objs.insert_versioned_object(*last_version); + if (!versioned_bucket) { + _delete_object_non_versioned(objref, key, db_versioned_objs); + } else { + if (key.instance.empty()) { + delete_marker_version_id = + _add_delete_marker(objref, key, db_versioned_objs); } else { - db_versioned_objs.store_versioned_object(*last_version); + // we have a version id (instance) + auto version_to_delete = + db_versioned_objs.get_versioned_object(key.instance); + if (version_to_delete.has_value()) { + if (version_to_delete->version_type == VersionType::DELETE_MARKER) { + _undelete_object(objref, key, db_versioned_objs, *version_to_delete); + } else { + _delete_object_version( + objref, key, db_versioned_objs, *version_to_delete + ); + } + } } - objref->deleted = true; } } @@ -427,17 +384,13 @@ std::string Bucket::create_non_existing_object_delete_marker( auto obj = std::shared_ptr( Object::create_commit_delete_marker(key, store, info.bucket.bucket_id) ); -// create the delete marker -// generate a new version id -#define OBJ_INSTANCE_LEN 32 - char buf[OBJ_INSTANCE_LEN + 1]; - gen_rand_alphanumeric_no_underscore( - store->ceph_context(), buf, OBJ_INSTANCE_LEN - ); - auto new_version_id = std::string(buf); + // create the delete marker + // generate a new version id + auto new_version_id = generate_new_version_id(store->ceph_context()); sqlite::DBOPVersionedObjectInfo version_info; version_info.object_id = obj->path.get_uuid(); - version_info.object_state = ObjectState::DELETED; + version_info.object_state = ObjectState::COMMITTED; + version_info.version_type = VersionType::DELETE_MARKER; version_info.version_id = new_version_id; version_info.delete_time = ceph::real_clock::now(); sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); @@ -455,12 +408,10 @@ void Bucket::_undelete_object( // versioned object // only remove the delete 
marker if the requested version id is the last one if (!key.instance.empty() && (key.instance == last_version.version_id)) { - // remove the delete marker - sqlite_versioned_objects.remove_versioned_object(last_version.id); - // get the previous id + // remove the delete marker and get the previous version in a transaction auto previous_version = - sqlite_versioned_objects.get_last_versioned_object( - objref->path.get_uuid() + sqlite_versioned_objects.delete_version_and_get_previous_transact( + last_version.id ); if (previous_version.has_value()) { objref->instance = previous_version->version_id; @@ -469,14 +420,50 @@ void Bucket::_undelete_object( // all versions were removed for this object } } - } else { - // non-versioned object - // just remove the delete marker in the version and store - last_version.object_state = ObjectState::COMMITTED; - last_version.delete_time = ceph::real_clock::now(); - sqlite_versioned_objects.store_versioned_object(last_version); - objref->deleted = false; } } +void Bucket::_delete_object_non_versioned( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs +) { + auto version_to_delete = + db_versioned_objs.get_last_versioned_object(objref->path.get_uuid()); + _delete_object_version(objref, key, db_versioned_objs, *version_to_delete); +} + +void Bucket::_delete_object_version( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs, + sqlite::DBOPVersionedObjectInfo& version +) { + auto now = ceph::real_clock::now(); + version.delete_time = now; + version.mtime = now; + version.object_state = ObjectState::DELETED; + db_versioned_objs.store_versioned_object(version); + objref->deleted = true; +} + +std::string Bucket::_add_delete_marker( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs +) { + std::string delete_marker_id; + auto now = ceph::real_clock::now(); + auto last_version = + 
db_versioned_objs.get_last_versioned_object(objref->path.get_uuid()); + if (last_version.has_value() && + last_version->object_state == ObjectState::COMMITTED) { + last_version->version_type = VersionType::DELETE_MARKER; + last_version->delete_time = now; + last_version->mtime = now; + last_version->version_id = generate_new_version_id(store->ceph_context()); + objref->instance = last_version->version_id; + db_versioned_objs.insert_versioned_object(*last_version); + objref->deleted = true; + delete_marker_id = last_version->version_id; + } + return delete_marker_id; +} } // namespace rgw::sal::sfs diff --git a/src/rgw/driver/sfs/types.h b/src/rgw/driver/sfs/types.h index b3cdfcfdc6b998..e6f21d82f8a6c8 100644 --- a/src/rgw/driver/sfs/types.h +++ b/src/rgw/driver/sfs/types.h @@ -58,7 +58,7 @@ class Object { std::map attrs; protected: - Object(const std::string& _name, const uuid_d& _uuid); + Object(const rgw_obj_key& _key, const uuid_d& _uuid); Object(const rgw_obj_key& key) : name(key.name), @@ -66,6 +66,10 @@ class Object { path(UUIDPath::create()), deleted(false) {} + static Object* _get_object( + SFStore* store, const std::string& bucket_id, const rgw_obj_key& key + ); + public: static Object* create_for_immediate_deletion( const sqlite::DBOPObjectInfo& object @@ -75,20 +79,23 @@ class Object { ); static Object* create_for_testing(const std::string& name); static Object* create_from_obj_key(const rgw_obj_key& key); + static Object* create_from_db_version( + const std::string& object_name, + const sqlite::DBOPVersionedObjectInfo& version + ); + static Object* create_from_db_version( + const std::string& object_name, const sqlite::DBOPObjectsListItem& version + ); static Object* create_for_multipart(const std::string& name); static Object* create_commit_delete_marker( const rgw_obj_key& key, SFStore* store, const std::string& bucket_id ); - static Object* create_commit_new_object( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id, - const 
std::string* version_id - ); - static Object* try_create_with_last_version_fetch_from_database( - SFStore* store, const std::string& name, const std::string& bucket_id - ); - static Object* try_create_fetch_from_database( + // static Object* try_create_with_last_version_fetch_from_database( + // SFStore* store, const std::string& name, const std::string& bucket_id + // ); + static Object* try_fetch_from_database( SFStore* store, const std::string& name, const std::string& bucket_id, const std::string& version_id ); @@ -105,20 +112,11 @@ class Object { std::filesystem::path get_storage_path() const; - /// Update version and commit to database - void update_commit_new_version(SFStore* store, const std::string& version_id); - - /// Change obj version state. - // Use this for example to update objs to in flight states like - // WRITING. - // Special case: DELETED sets this to deleted - // and commits a deletion time - void metadata_change_version_state(SFStore* store, ObjectState state); - /// Commit all object state to database // Including meta and attrs // Sets obj version state to COMMITTED - void metadata_finish(SFStore* store); + // For unversioned buckets it sets the other versions' state to DELETED + void metadata_finish(SFStore* store, bool versioning_enabled); /// Commit attrs to database void metadata_flush_attrs(SFStore* store); @@ -316,6 +314,22 @@ class Bucket { sqlite::DBOPVersionedObjectInfo& last_version ); + void _delete_object_non_versioned( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects + ); + + void _delete_object_version( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects, + sqlite::DBOPVersionedObjectInfo& version + ); + + std::string _add_delete_marker( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects + ); + + public: Bucket( CephContext* _cct, SFStore* _store, const RGWBucketInfo&
_bucket_info, @@ -354,17 +368,21 @@ class Bucket { uint32_t get_flags() const { return info.flags; } public: - /// Return object for key. Do everything necessary to retrieve or - // create this object including object version. - ObjectRef get_or_create(const rgw_obj_key& key); + /// Create object version for key + ObjectRef create_version(const rgw_obj_key& key); - /// Get existing object by name. Throws if it doesn't exist. - ObjectRef get(const std::string& name); - /// Get copy of all objects + /// Get existing object by key. Throws if it doesn't exist. + ObjectRef get(const rgw_obj_key& key); + /// Get copy of all objects that are committed and not deleted std::vector get_all(); /// S3 delete object operation: delete version or create tombstone. - void delete_object(ObjectRef objref, const rgw_obj_key& key); + /// If a delete marker was added, it returns the new version id generated for + /// it + void delete_object( + ObjectRef objref, const rgw_obj_key& key, bool versioned_bucket, + std::string& delete_marker_version_id + ); /// Delete a non-existing object. Creates object with toumbstone // version in database.
@@ -400,7 +418,7 @@ class Bucket { mp->finish(); multiparts.erase(it); - objref->metadata_finish(store); + objref->metadata_finish(store, get_info().versioning_enabled()); } std::string gen_multipart_upload_id() { diff --git a/src/rgw/driver/sfs/user.cc b/src/rgw/driver/sfs/user.cc index 04c67307769e84..bf518f306b4f57 100644 --- a/src/rgw/driver/sfs/user.cc +++ b/src/rgw/driver/sfs/user.cc @@ -159,7 +159,10 @@ int SFSUser::create_bucket( if (store->bucket_exists(b)) { *existed = true; - return -EEXIST; + // placement_rule.inherit_from(bucket->get_info().placement_rule); + // rgw_placement_rule selected_placement_rule; + // return -EEXIST; + return 0; } *existed = false; diff --git a/src/rgw/driver/sfs/writer.cc b/src/rgw/driver/sfs/writer.cc index 3801f41ecdac7b..abe199479343e5 100644 --- a/src/rgw/driver/sfs/writer.cc +++ b/src/rgw/driver/sfs/writer.cc @@ -190,7 +190,7 @@ int SFSAtomicWriter::prepare(optional_yield y) { } try { - objref = bucketref->get_or_create(obj.get_key()); + objref = bucketref->create_version(obj.get_key()); } catch (const std::system_error& e) { lsfs_dout(dpp, -1) << fmt::format( @@ -325,7 +325,7 @@ int SFSAtomicWriter::complete( *mtime = now; } try { - objref->metadata_finish(store); + objref->metadata_finish(store, bucketref->get_info().versioning_enabled()); } catch (const std::system_error& e) { lsfs_dout(dpp, -1) << fmt::format( "failed to update db object {}: {}. 
" diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc index 618ff9024a7e3a..80e976432832e9 100644 --- a/src/rgw/rgw_lc.cc +++ b/src/rgw/rgw_lc.cc @@ -1,59 +1,52 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp +#include "rgw_lc.h" + +#include #include -#include -#include -#include -#include -#include -#include +#include #include #include +#include #include +#include +#include +#include +#include -#include "include/scope_guard.h" -#include "include/function2.hpp" +#include "cls/lock/cls_lock_client.h" #include "common/Formatter.h" #include "common/containers.h" #include "common/split.h" -#include +#include "fmt/format.h" +#include "include/function2.hpp" #include "include/random.h" -#include "cls/lock/cls_lock_client.h" -#include "rgw_perf_counters.h" -#include "rgw_common.h" +#include "include/scope_guard.h" #include "rgw_bucket.h" -#include "rgw_lc.h" -#include "rgw_zone.h" -#include "rgw_string.h" -#include "rgw_multi.h" -#include "rgw_sal.h" +#include "rgw_common.h" #include "rgw_lc_tier.h" +#include "rgw_multi.h" #include "rgw_notify.h" - -#include "fmt/format.h" - +#include "rgw_perf_counters.h" +#include "rgw_sal.h" +#include "rgw_string.h" +#include "rgw_zone.h" #include "services/svc_sys_obj.h" -#include "services/svc_zone.h" #include "services/svc_tier_rados.h" +#include "services/svc_zone.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw using namespace std; -const char* LC_STATUS[] = { - "UNINITIAL", - "PROCESSING", - "FAILED", - "COMPLETE" -}; +const char* LC_STATUS[] = {"UNINITIAL", "PROCESSING", "FAILED", "COMPLETE"}; using namespace librados; -bool LCRule::valid() const -{ +bool LCRule::valid() const { if (id.length() > MAX_ID_LEN) { return false; } @@ -61,9 +54,7 @@ bool LCRule::valid() const mp_expiration.empty() && !dm_expiration && transitions.empty() && noncur_transitions.empty()) { return false; - } - else if (!expiration.valid() || !noncur_expiration.valid() || 
- !mp_expiration.valid()) { + } else if (!expiration.valid() || !noncur_expiration.valid() || !mp_expiration.valid()) { return false; } if (!transitions.empty()) { @@ -89,9 +80,9 @@ bool LCRule::valid() const return true; } -void LCRule::init_simple_days_rule(std::string_view _id, - std::string_view _prefix, int num_days) -{ +void LCRule::init_simple_days_rule( + std::string_view _id, std::string_view _prefix, int num_days +) { id = _id; prefix = _prefix; char buf[32]; @@ -100,14 +91,13 @@ void LCRule::init_simple_days_rule(std::string_view _id, set_enabled(true); } -void RGWLifecycleConfiguration::add_rule(const LCRule& rule) -{ - auto& id = rule.get_id(); // note that this will return false for groups, but that's ok, we won't search groups +void RGWLifecycleConfiguration::add_rule(const LCRule& rule) { + auto& id = rule.get_id( + ); // note that this will return false for groups, but that's ok, we won't search groups rule_map.insert(pair(id, rule)); } -bool RGWLifecycleConfiguration::_add_rule(const LCRule& rule) -{ +bool RGWLifecycleConfiguration::_add_rule(const LCRule& rule) { lc_op op(rule.get_id()); op.status = rule.is_enabled(); if (rule.get_expiration().has_days()) { @@ -123,32 +113,32 @@ bool RGWLifecycleConfiguration::_add_rule(const LCRule& rule) op.mp_expiration = rule.get_mp_expiration().get_days(); } op.dm_expiration = rule.get_dm_expiration(); - for (const auto &elem : rule.get_transitions()) { + for (const auto& elem : rule.get_transitions()) { transition_action action; if (elem.second.has_days()) { action.days = elem.second.get_days(); } else { action.date = ceph::from_iso_8601(elem.second.get_date()); } - action.storage_class - = rgw_placement_rule::get_canonical_storage_class(elem.first); + action.storage_class = + rgw_placement_rule::get_canonical_storage_class(elem.first); op.transitions.emplace(elem.first, std::move(action)); } - for (const auto &elem : rule.get_noncur_transitions()) { + for (const auto& elem : rule.get_noncur_transitions()) 
{ transition_action action; action.days = elem.second.get_days(); action.date = ceph::from_iso_8601(elem.second.get_date()); - action.storage_class - = rgw_placement_rule::get_canonical_storage_class(elem.first); + action.storage_class = + rgw_placement_rule::get_canonical_storage_class(elem.first); op.noncur_transitions.emplace(elem.first, std::move(action)); } std::string prefix; - if (rule.get_filter().has_prefix()){ + if (rule.get_filter().has_prefix()) { prefix = rule.get_filter().get_prefix(); } else { prefix = rule.get_prefix(); } - if (rule.get_filter().has_tags()){ + if (rule.get_filter().has_tags()) { op.obj_tags = rule.get_filter().get_tags(); } op.rule_flags = rule.get_filter().get_flags(); @@ -156,17 +146,16 @@ bool RGWLifecycleConfiguration::_add_rule(const LCRule& rule) return true; } -int RGWLifecycleConfiguration::check_and_add_rule(const LCRule& rule) -{ +int RGWLifecycleConfiguration::check_and_add_rule(const LCRule& rule) { if (!rule.valid()) { return -EINVAL; } auto& id = rule.get_id(); - if (rule_map.find(id) != rule_map.end()) { //id shouldn't be the same + if (rule_map.find(id) != rule_map.end()) { //id shouldn't be the same return -EINVAL; } - if (rule.get_filter().has_tags() && (rule.get_dm_expiration() || - !rule.get_mp_expiration().empty())) { + if (rule.get_filter().has_tags() && + (rule.get_dm_expiration() || !rule.get_mp_expiration().empty())) { return -ERR_INVALID_REQUEST; } rule_map.insert(pair(id, rule)); @@ -177,26 +166,26 @@ int RGWLifecycleConfiguration::check_and_add_rule(const LCRule& rule) return 0; } -bool RGWLifecycleConfiguration::has_same_action(const lc_op& first, - const lc_op& second) { - if ((first.expiration > 0 || first.expiration_date != boost::none) && - (second.expiration > 0 || second.expiration_date != boost::none)) { +bool RGWLifecycleConfiguration::has_same_action( + const lc_op& first, const lc_op& second +) { + if ((first.expiration > 0 || first.expiration_date != boost::none) && + (second.expiration > 0 || 
second.expiration_date != boost::none)) { return true; } else if (first.noncur_expiration > 0 && second.noncur_expiration > 0) { return true; } else if (first.mp_expiration > 0 && second.mp_expiration > 0) { return true; } else if (!first.transitions.empty() && !second.transitions.empty()) { - for (auto &elem : first.transitions) { + for (auto& elem : first.transitions) { if (second.transitions.find(elem.first) != second.transitions.end()) { return true; } } - } else if (!first.noncur_transitions.empty() && - !second.noncur_transitions.empty()) { - for (auto &elem : first.noncur_transitions) { + } else if (!first.noncur_transitions.empty() && !second.noncur_transitions.empty()) { + for (auto& elem : first.noncur_transitions) { if (second.noncur_transitions.find(elem.first) != - second.noncur_transitions.end()) { + second.noncur_transitions.end()) { return true; } } @@ -206,27 +195,25 @@ bool RGWLifecycleConfiguration::has_same_action(const lc_op& first, /* Formerly, this method checked for duplicate rules using an invalid * method (prefix uniqueness). 
*/ -bool RGWLifecycleConfiguration::valid() -{ +bool RGWLifecycleConfiguration::valid() { return true; } -void *RGWLC::LCWorker::entry() { +void* RGWLC::LCWorker::entry() { do { - std::unique_ptr all_buckets; // empty restriction + std::unique_ptr all_buckets; // empty restriction utime_t start = ceph_clock_now(); if (should_work(start)) { ldpp_dout(dpp, 2) << "life cycle: start" << dendl; int r = lc->process(this, all_buckets, false /* once */); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: do life cycle process() returned error r=" - << r << dendl; + << r << dendl; } ldpp_dout(dpp, 2) << "life cycle: stop" << dendl; - cloud_targets.clear(); // clear cloud targets + cloud_targets.clear(); // clear cloud targets } - if (lc->going_down()) - break; + if (lc->going_down()) break; utime_t end = ceph_clock_now(); int secs = schedule_next_start_time(start, end); @@ -234,7 +221,7 @@ void *RGWLC::LCWorker::entry() { next.set_from_double(end + secs); ldpp_dout(dpp, 5) << "schedule life cycle next start time: " - << rgw_to_asctime(next) << dendl; + << rgw_to_asctime(next) << dendl; std::unique_lock l{lock}; cond.wait_for(l, std::chrono::seconds(secs)); @@ -243,13 +230,12 @@ void *RGWLC::LCWorker::entry() { return NULL; } -void RGWLC::initialize(CephContext *_cct, rgw::sal::Driver* _driver) { +void RGWLC::initialize(CephContext* _cct, rgw::sal::Driver* _driver) { cct = _cct; driver = _driver; sal_lc = driver->get_lifecycle(); max_objs = cct->_conf->rgw_lc_max_objs; - if (max_objs > HASH_PRIME) - max_objs = HASH_PRIME; + if (max_objs > HASH_PRIME) max_objs = HASH_PRIME; obj_names = new string[max_objs]; @@ -266,12 +252,13 @@ void RGWLC::initialize(CephContext *_cct, rgw::sal::Driver* _driver) { cookie = cookie_buf; } -void RGWLC::finalize() -{ +void RGWLC::finalize() { delete[] obj_names; } -static inline std::ostream& operator<<(std::ostream &os, rgw::sal::Lifecycle::LCEntry& ent) { +static inline std::ostream& operator<<( + std::ostream& os, rgw::sal::Lifecycle::LCEntry& ent +) 
{ os << " - ceph_clock_now()) { + ceph_clock_now()) { return false; } } @@ -349,7 +337,7 @@ static bool pass_object_lock_check(rgw::sal::Driver* driver, rgw::sal::Object* o decode(obj_legal_hold, iter->second); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "ERROR: failed to decode RGWObjectLegalHold" - << dendl; + << dendl; return false; } if (obj_legal_hold.is_enabled()) { @@ -370,9 +358,9 @@ class LCObjsLister { rgw_bucket_dir_entry pre_obj; int64_t delay_ms; -public: - LCObjsLister(rgw::sal::Driver* _driver, rgw::sal::Bucket* _bucket) : - driver(_driver), bucket(_bucket) { + public: + LCObjsLister(rgw::sal::Driver* _driver, rgw::sal::Bucket* _bucket) + : driver(_driver), bucket(_bucket) { list_params.list_versions = bucket->versioned(); list_params.allow_unordered = true; delay_ms = driver->ctx()->_conf.get_val("rgw_lc_thread_delay"); @@ -383,11 +371,9 @@ class LCObjsLister { list_params.prefix = prefix; } - int init(const DoutPrefixProvider *dpp) { - return fetch(dpp); - } + int init(const DoutPrefixProvider* dpp) { return fetch(dpp); } - int fetch(const DoutPrefixProvider *dpp) { + int fetch(const DoutPrefixProvider* dpp) { int ret = bucket->list(dpp, list_params, 1000, list_results, null_yield); if (ret < 0) { return ret; @@ -402,20 +388,20 @@ class LCObjsLister { std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms)); } - bool get_obj(const DoutPrefixProvider *dpp, rgw_bucket_dir_entry **obj, - std::function fetch_barrier - = []() { /* nada */}) { + bool get_obj( + const DoutPrefixProvider* dpp, rgw_bucket_dir_entry** obj, + std::function fetch_barrier = []() { /* nada */ } + ) { if (obj_iter == list_results.objs.end()) { if (!list_results.is_truncated) { delay(); return false; } else { - fetch_barrier(); + fetch_barrier(); list_params.marker = pre_obj.key; int ret = fetch(dpp); if (ret < 0) { - ldpp_dout(dpp, 0) << "ERROR: list_op returned ret=" << ret - << dendl; + ldpp_dout(dpp, 0) << "ERROR: list_op returned ret=" << ret << dendl; return 
false; } } @@ -426,9 +412,7 @@ class LCObjsLister { return obj_iter != list_results.objs.end(); } - rgw_bucket_dir_entry get_prev_obj() { - return pre_obj; - } + rgw_bucket_dir_entry get_prev_obj() { return pre_obj; } void next() { pre_obj = *obj_iter; @@ -437,7 +421,7 @@ class LCObjsLister { boost::optional next_key_name() { if (obj_iter == list_results.objs.end() || - (obj_iter + 1) == list_results.objs.end()) { + (obj_iter + 1) == list_results.objs.end()) { /* this should have been called after get_obj() was called, so this should * only happen if is_truncated is false */ return boost::none; @@ -449,7 +433,6 @@ class LCObjsLister { }; /* LCObjsLister */ struct op_env { - using LCWorker = RGWLC::LCWorker; lc_op op; @@ -458,17 +441,18 @@ struct op_env { rgw::sal::Bucket* bucket; LCObjsLister& ol; - op_env(lc_op& _op, rgw::sal::Driver* _driver, LCWorker* _worker, - rgw::sal::Bucket* _bucket, LCObjsLister& _ol) - : op(_op), driver(_driver), worker(_worker), bucket(_bucket), - ol(_ol) {} + op_env( + lc_op& _op, rgw::sal::Driver* _driver, LCWorker* _worker, + rgw::sal::Bucket* _bucket, LCObjsLister& _ol + ) + : op(_op), driver(_driver), worker(_worker), bucket(_bucket), ol(_ol) {} }; /* op_env */ class LCRuleOp; class WorkQ; struct lc_op_ctx { - CephContext *cct; + CephContext* cct; op_env env; rgw_bucket_dir_entry o; boost::optional next_key_name; @@ -476,43 +460,52 @@ struct lc_op_ctx { rgw::sal::Driver* driver; rgw::sal::Bucket* bucket; - lc_op& op; // ok--refers to expanded env.op + lc_op& op; // ok--refers to expanded env.op LCObjsLister& ol; std::unique_ptr obj; RGWObjectCtx rctx; - const DoutPrefixProvider *dpp; + const DoutPrefixProvider* dpp; WorkQ* wq; std::unique_ptr tier; - lc_op_ctx(op_env& env, rgw_bucket_dir_entry& o, - boost::optional next_key_name, - ceph::real_time effective_mtime, - const DoutPrefixProvider *dpp, WorkQ* wq) - : cct(env.driver->ctx()), env(env), o(o), next_key_name(next_key_name), - effective_mtime(effective_mtime), - 
driver(env.driver), bucket(env.bucket), op(env.op), ol(env.ol), - rctx(env.driver), dpp(dpp), wq(wq) - { - obj = bucket->get_object(o.key); - } + lc_op_ctx( + op_env& env, rgw_bucket_dir_entry& o, + boost::optional next_key_name, + ceph::real_time effective_mtime, const DoutPrefixProvider* dpp, WorkQ* wq + ) + : cct(env.driver->ctx()), + env(env), + o(o), + next_key_name(next_key_name), + effective_mtime(effective_mtime), + driver(env.driver), + bucket(env.bucket), + op(env.op), + ol(env.ol), + rctx(env.driver), + dpp(dpp), + wq(wq) { + obj = bucket->get_object(o.key); + } bool next_has_same_name(const std::string& key_name) { - return (next_key_name && key_name.compare( - boost::get(next_key_name)) == 0); + return ( + next_key_name && + key_name.compare(boost::get(next_key_name)) == 0 + ); } }; /* lc_op_ctx */ - static std::string lc_id = "rgw lifecycle"; static std::string lc_req_id = "0"; static int remove_expired_obj( - const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool remove_indeed, - rgw::notify::EventType event_type) -{ + const DoutPrefixProvider* dpp, lc_op_ctx& oc, bool remove_indeed, + rgw::notify::EventType event_type +) { auto& driver = oc.driver; auto& bucket_info = oc.bucket->get_info(); auto& o = oc.o; @@ -522,11 +515,14 @@ static int remove_expired_obj( std::string version_id; std::unique_ptr notify; + ldpp_dout(dpp, 1) << "XGM---------------------- remove_expired_obj: [" + << obj_key.name << ", " << obj_key.instance << "]" << dendl; if (!remove_indeed) { obj_key.instance.clear(); } else if (obj_key.instance.empty()) { obj_key.instance = "null"; } + ldpp_dout(dpp, 1) << "XGM ----- remove_indeed: " << remove_indeed << dendl; std::unique_ptr user; std::unique_ptr bucket; @@ -540,46 +536,56 @@ static int remove_expired_obj( obj = bucket->get_object(obj_key); + if (obj) { + ldpp_dout( + dpp, 1 + ) << "XGM---------------------- remove_expired_obj: after get_object: " + << obj->get_key() << dendl; + } else { + ldpp_dout(dpp, 1) << 
"XGM---------------------- remove_expired_obj: after " + "get_object: nullptr" + << dendl; + } + RGWObjState* obj_state{nullptr}; ret = obj->get_obj_state(dpp, &obj_state, null_yield, true); if (ret < 0) { return ret; } - std::unique_ptr del_op - = obj->get_delete_op(); - del_op->params.versioning_status - = obj->get_bucket()->get_info().versioning_status(); - del_op->params.obj_owner.set_id(rgw_user {meta.owner}); + std::unique_ptr del_op = obj->get_delete_op(); + del_op->params.versioning_status = + obj->get_bucket()->get_info().versioning_status(); + del_op->params.obj_owner.set_id(rgw_user{meta.owner}); del_op->params.obj_owner.set_name(meta.owner_display_name); del_op->params.bucket_owner.set_id(bucket_info.owner); del_op->params.unmod_since = meta.mtime; del_op->params.marker_version_id = version_id; // notification supported only for RADOS driver for now - notify = driver->get_notification(dpp, obj.get(), nullptr, event_type, - bucket.get(), lc_id, - const_cast(oc.bucket->get_tenant()), - lc_req_id, null_yield); + notify = driver->get_notification( + dpp, obj.get(), nullptr, event_type, bucket.get(), lc_id, + const_cast(oc.bucket->get_tenant()), lc_req_id, null_yield + ); ret = notify->publish_reserve(dpp, nullptr); - if ( ret < 0) { - ldpp_dout(dpp, 1) - << "ERROR: notify reservation failed, deferring delete of object k=" - << o.key - << dendl; + if (ret < 0) { + ldpp_dout( + dpp, 1 + ) << "ERROR: notify reservation failed, deferring delete of object k=" + << o.key << dendl; return ret; } - ret = del_op->delete_obj(dpp, null_yield); + ret = del_op->delete_obj(dpp, null_yield); if (ret < 0) { - ldpp_dout(dpp, 1) << - "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(dpp, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; } else { // send request to notification manager - (void) notify->publish_commit(dpp, obj_state->size, - ceph::real_clock::now(), - obj_state->attrset[RGW_ATTR_ETAG].to_str(), - 
version_id); + (void)notify->publish_commit( + dpp, obj_state->size, ceph::real_clock::now(), + obj_state->attrset[RGW_ATTR_ETAG].to_str(), version_id + ); } return ret; @@ -587,10 +593,12 @@ static int remove_expired_obj( } /* remove_expired_obj */ class LCOpAction { -public: + public: virtual ~LCOpAction() {} - virtual bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) { + virtual bool check( + lc_op_ctx& oc, ceph::real_time* exp_time, const DoutPrefixProvider* dpp + ) { return false; } @@ -605,21 +613,17 @@ class LCOpAction { * but should_process() if the action has already been applied. In object removal * it doesn't matter, but in object transition it does. */ - virtual bool should_process() { - return true; - } + virtual bool should_process() { return true; } - virtual int process(lc_op_ctx& oc) { - return 0; - } + virtual int process(lc_op_ctx& oc) { return 0; } friend class LCOpRule; }; /* LCOpAction */ class LCOpFilter { -public: -virtual ~LCOpFilter() {} - virtual bool check(const DoutPrefixProvider *dpp, lc_op_ctx& oc) { + public: + virtual ~LCOpFilter() {} + virtual bool check(const DoutPrefixProvider* dpp, lc_op_ctx& oc) { return false; } }; /* LCOpFilter */ @@ -631,47 +635,42 @@ class LCOpRule { boost::optional next_key_name; ceph::real_time effective_mtime; - std::vector > filters; // n.b., sharing ovhd - std::vector > actions; + std::vector> filters; // n.b., sharing ovhd + std::vector> actions; -public: + public: LCOpRule(op_env& _env) : env(_env) {} - boost::optional get_next_key_name() { - return next_key_name; - } + boost::optional get_next_key_name() { return next_key_name; } - std::vector>& get_actions() { - return actions; - } + std::vector>& get_actions() { return actions; } void build(); void update(); - int process(rgw_bucket_dir_entry& o, const DoutPrefixProvider *dpp, - WorkQ* wq); + int process( + rgw_bucket_dir_entry& o, const DoutPrefixProvider* dpp, WorkQ* wq + ); }; /* LCOpRule */ -using WorkItem = - 
boost::variant, - /* uncompleted MPU expiration */ - std::tuple, - rgw_bucket_dir_entry>; - -class WorkQ : public Thread -{ -public: +using WorkItem = boost::variant< + void*, + /* out-of-line delete */ + std::tuple, + /* uncompleted MPU expiration */ + std::tuple, rgw_bucket_dir_entry>; + +class WorkQ : public Thread { + public: using unique_lock = std::unique_lock; using work_f = std::function; using dequeue_result = boost::variant; - static constexpr uint32_t FLAG_NONE = 0x0000; - static constexpr uint32_t FLAG_EWAIT_SYNC = 0x0001; - static constexpr uint32_t FLAG_DWAIT_SYNC = 0x0002; + static constexpr uint32_t FLAG_NONE = 0x0000; + static constexpr uint32_t FLAG_EWAIT_SYNC = 0x0001; + static constexpr uint32_t FLAG_DWAIT_SYNC = 0x0002; static constexpr uint32_t FLAG_EDRAIN_SYNC = 0x0004; -private: + private: const work_f bsf = [](RGWLC::LCWorker* wk, WorkQ* wq, WorkItem& wi) {}; RGWLC::LCWorker* wk; uint32_t qmax; @@ -682,26 +681,22 @@ class WorkQ : public Thread vector items; work_f f; -public: + public: WorkQ(RGWLC::LCWorker* wk, uint32_t ix, uint32_t qmax) - : wk(wk), qmax(qmax), ix(ix), flags(FLAG_NONE), f(bsf) - { - create(thr_name().c_str()); - } + : wk(wk), qmax(qmax), ix(ix), flags(FLAG_NONE), f(bsf) { + create(thr_name().c_str()); + } std::string thr_name() { - return std::string{"wp_thrd: "} - + std::to_string(wk->ix) + ", " + std::to_string(ix); + return std::string{"wp_thrd: "} + std::to_string(wk->ix) + ", " + + std::to_string(ix); } - void setf(work_f _f) { - f = _f; - } + void setf(work_f _f) { f = _f; } void enqueue(WorkItem&& item) { unique_lock uniq(mtx); - while ((!wk->get_lc()->going_down()) && - (items.size() > qmax)) { + while ((!wk->get_lc()->going_down()) && (items.size() > qmax)) { flags |= FLAG_EWAIT_SYNC; cv.wait_for(uniq, 200ms); } @@ -720,14 +715,13 @@ class WorkQ : public Thread } } -private: + private: dequeue_result dequeue() { unique_lock uniq(mtx); - while ((!wk->get_lc()->going_down()) && - (items.size() == 0)) { + while 
((!wk->get_lc()->going_down()) && (items.size() == 0)) { /* clear drain state, as we are NOT doing work and qlen==0 */ if (flags & FLAG_EDRAIN_SYNC) { - flags &= ~FLAG_EDRAIN_SYNC; + flags &= ~FLAG_EDRAIN_SYNC; } flags |= FLAG_DWAIT_SYNC; cv.wait_for(uniq, 200ms); @@ -736,8 +730,8 @@ class WorkQ : public Thread auto item = items.back(); items.pop_back(); if (flags & FLAG_EWAIT_SYNC) { - flags &= ~FLAG_EWAIT_SYNC; - cv.notify_one(); + flags &= ~FLAG_EWAIT_SYNC; + cv.notify_one(); } return {item}; } @@ -748,8 +742,8 @@ class WorkQ : public Thread while (!wk->get_lc()->going_down()) { auto item = dequeue(); if (item.which() == 0) { - /* going down */ - break; + /* going down */ + break; } f(wk, this, boost::get(item)); } @@ -757,21 +751,17 @@ class WorkQ : public Thread } }; /* WorkQ */ -class RGWLC::WorkPool -{ +class RGWLC::WorkPool { using TVector = ceph::containers::tiny_vector; TVector wqs; uint64_t ix; -public: + public: WorkPool(RGWLC::LCWorker* wk, uint16_t n_threads, uint32_t qmax) - : wqs(TVector{ - n_threads, - [&](const size_t ix, auto emplacer) { - emplacer.emplace(wk, ix, qmax); - }}), - ix(0) - {} + : wqs(TVector{ + n_threads, [&](const size_t ix, + auto emplacer) { emplacer.emplace(wk, ix, qmax); }}), + ix(0) {} ~WorkPool() { for (auto& wq : wqs) { @@ -787,7 +777,7 @@ class RGWLC::WorkPool void enqueue(WorkItem item) { const auto tix = ix; - ix = (ix+1) % wqs.size(); + ix = (ix + 1) % wqs.size(); (wqs[tix]).enqueue(std::move(item)); } @@ -798,23 +788,22 @@ class RGWLC::WorkPool } }; /* WorkPool */ -RGWLC::LCWorker::LCWorker(const DoutPrefixProvider* dpp, CephContext *cct, - RGWLC *lc, int ix) - : dpp(dpp), cct(cct), lc(lc), ix(ix) -{ +RGWLC::LCWorker::LCWorker( + const DoutPrefixProvider* dpp, CephContext* cct, RGWLC* lc, int ix +) + : dpp(dpp), cct(cct), lc(lc), ix(ix) { auto wpw = cct->_conf.get_val("rgw_lc_max_wp_worker"); workpool = new WorkPool(this, wpw, 512); } -static inline bool worker_should_stop(time_t stop_at, bool once) -{ +static inline 
bool worker_should_stop(time_t stop_at, bool once) { return !once && stop_at < time(nullptr); } -int RGWLC::handle_multipart_expiration(rgw::sal::Bucket* target, - const multimap& prefix_map, - LCWorker* worker, time_t stop_at, bool once) -{ +int RGWLC::handle_multipart_expiration( + rgw::sal::Bucket* target, const multimap& prefix_map, + LCWorker* worker, time_t stop_at, bool once +) { MultipartMetaFilter mp_filter; int ret; rgw::sal::Bucket::ListParams params; @@ -833,39 +822,34 @@ int RGWLC::handle_multipart_expiration(rgw::sal::Bucket* target, auto& [rule, obj] = wt; if (obj_has_expired(this, cct, obj.meta.mtime, rule.mp_expiration)) { rgw_obj_key key(obj.key); - std::unique_ptr mpu = target->get_multipart_upload(key.name); + std::unique_ptr mpu = + target->get_multipart_upload(key.name); int ret = mpu->abort(this, cct); if (ret == 0) { if (perfcounter) { perfcounter->inc(l_rgw_lc_abort_mpu, 1); } } else { - if (ret == -ERR_NO_SUCH_UPLOAD) { - ldpp_dout(wk->get_lc(), 5) - << "ERROR: abort_multipart_upload failed, ret=" << ret - << ", thread:" << wq->thr_name() - << ", meta:" << obj.key - << dendl; - } else { - ldpp_dout(wk->get_lc(), 0) - << "ERROR: abort_multipart_upload failed, ret=" << ret - << ", thread:" << wq->thr_name() - << ", meta:" << obj.key - << dendl; - } + if (ret == -ERR_NO_SUCH_UPLOAD) { + ldpp_dout(wk->get_lc(), 5) + << "ERROR: abort_multipart_upload failed, ret=" << ret + << ", thread:" << wq->thr_name() << ", meta:" << obj.key << dendl; + } else { + ldpp_dout(wk->get_lc(), 0) + << "ERROR: abort_multipart_upload failed, ret=" << ret + << ", thread:" << wq->thr_name() << ", meta:" << obj.key << dendl; + } } /* abort failed */ - } /* expired */ + } /* expired */ }; worker->workpool->setf(pf); for (auto prefix_iter = prefix_map.begin(); prefix_iter != prefix_map.end(); ++prefix_iter) { - if (worker_should_stop(stop_at, once)) { ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " - << worker->ix - << dendl; + << worker->ix << 
dendl; return 0; } @@ -878,90 +862,82 @@ int RGWLC::handle_multipart_expiration(rgw::sal::Bucket* target, results.objs.clear(); ret = target->list(this, params, 1000, results, null_yield); if (ret < 0) { - if (ret == (-ENOENT)) - return 0; - ldpp_dout(this, 0) << "ERROR: driver->list_objects():" <list_objects():" << dendl; + return ret; } - for (auto obj_iter = results.objs.begin(); obj_iter != results.objs.end(); ++obj_iter, ++offset) { - std::tuple t1 = - {prefix_iter->second, *obj_iter}; - worker->workpool->enqueue(WorkItem{t1}); - if (going_down()) { - return 0; - } + for (auto obj_iter = results.objs.begin(); obj_iter != results.objs.end(); + ++obj_iter, ++offset) { + std::tuple t1 = { + prefix_iter->second, *obj_iter}; + worker->workpool->enqueue(WorkItem{t1}); + if (going_down()) { + return 0; + } } /* for objs */ if ((offset % 100) == 0) { - if (worker_should_stop(stop_at, once)) { - ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " - << worker->ix - << dendl; - return 0; - } + if (worker_should_stop(stop_at, once)) { + ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " + << worker->ix << dendl; + return 0; + } } std::this_thread::sleep_for(std::chrono::milliseconds(delay_ms)); - } while(results.is_truncated); + } while (results.is_truncated); } /* for prefix_map */ worker->workpool->drain(); return 0; } /* RGWLC::handle_multipart_expiration */ -static int read_obj_tags(const DoutPrefixProvider *dpp, rgw::sal::Object* obj, bufferlist& tags_bl) -{ +static int read_obj_tags( + const DoutPrefixProvider* dpp, rgw::sal::Object* obj, bufferlist& tags_bl +) { std::unique_ptr rop = obj->get_read_op(); return rop->get_attr(dpp, RGW_ATTR_TAGS, tags_bl, null_yield); } -static bool is_valid_op(const lc_op& op) -{ - return (op.status && - (op.expiration > 0 - || op.expiration_date != boost::none - || op.noncur_expiration > 0 - || op.dm_expiration - || !op.transitions.empty() - || !op.noncur_transitions.empty())); +static bool 
is_valid_op(const lc_op& op) { + return ( + op.status && (op.expiration > 0 || op.expiration_date != boost::none || + op.noncur_expiration > 0 || op.dm_expiration || + !op.transitions.empty() || !op.noncur_transitions.empty()) + ); } -static bool zone_check(const lc_op& op, rgw::sal::Zone* zone) -{ - +static bool zone_check(const lc_op& op, rgw::sal::Zone* zone) { if (zone->get_tier_type() == "archive") { return (op.rule_flags & uint32_t(LCFlagType::ArchiveZone)); } else { - return (! (op.rule_flags & uint32_t(LCFlagType::ArchiveZone))); + return (!(op.rule_flags & uint32_t(LCFlagType::ArchiveZone))); } } -static inline bool has_all_tags(const lc_op& rule_action, - const RGWObjTags& object_tags) -{ - if(! rule_action.obj_tags) - return false; - if(object_tags.count() < rule_action.obj_tags->count()) - return false; +static inline bool has_all_tags( + const lc_op& rule_action, const RGWObjTags& object_tags +) { + if (!rule_action.obj_tags) return false; + if (object_tags.count() < rule_action.obj_tags->count()) return false; size_t tag_count = 0; for (const auto& tag : object_tags.get_tags()) { const auto& rule_tags = rule_action.obj_tags->get_tags(); const auto& iter = rule_tags.find(tag.first); - if(iter == rule_tags.end()) - continue; - if(iter->second == tag.second) - { + if (iter == rule_tags.end()) continue; + if (iter->second == tag.second) { tag_count++; } - /* all tags in the rule appear in obj tags */ + /* all tags in the rule appear in obj tags */ } return tag_count == rule_action.obj_tags->count(); } -static int check_tags(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool *skip) -{ +static int check_tags( + const DoutPrefixProvider* dpp, lc_op_ctx& oc, bool* skip +) { auto& op = oc.op; if (op.obj_tags != boost::none) { @@ -971,8 +947,8 @@ static int check_tags(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool *skip) int ret = read_obj_tags(dpp, oc.obj.get(), tags_bl); if (ret < 0) { if (ret != -ENODATA) { - ldpp_dout(oc.dpp, 5) << "ERROR: read_obj_tags 
returned r=" - << ret << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 5) << "ERROR: read_obj_tags returned r=" << ret << " " + << oc.wq->thr_name() << dendl; } return 0; } @@ -981,16 +957,17 @@ static int check_tags(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool *skip) auto iter = tags_bl.cbegin(); dest_obj_tags.decode(iter); } catch (buffer::error& err) { - ldpp_dout(oc.dpp,0) << "ERROR: caught buffer::error, couldn't decode TagSet " - << oc.wq->thr_name() << dendl; + ldpp_dout( + oc.dpp, 0 + ) << "ERROR: caught buffer::error, couldn't decode TagSet " + << oc.wq->thr_name() << dendl; return -EIO; } - if (! has_all_tags(op, dest_obj_tags)) { + if (!has_all_tags(op, dest_obj_tags)) { ldpp_dout(oc.dpp, 20) << __func__ << "() skipping obj " << oc.obj - << " as tags do not match in rule: " - << op.id << " " - << oc.wq->thr_name() << dendl; + << " as tags do not match in rule: " << op.id << " " + << oc.wq->thr_name() << dendl; return 0; } } @@ -999,8 +976,8 @@ static int check_tags(const DoutPrefixProvider *dpp, lc_op_ctx& oc, bool *skip) } class LCOpFilter_Tags : public LCOpFilter { -public: - bool check(const DoutPrefixProvider *dpp, lc_op_ctx& oc) override { + public: + bool check(const DoutPrefixProvider* dpp, lc_op_ctx& oc) override { auto& o = oc.o; if (o.is_delete_marker()) { @@ -1015,8 +992,8 @@ class LCOpFilter_Tags : public LCOpFilter { return false; } ldpp_dout(oc.dpp, 0) << "ERROR: check_tags on obj=" << oc.obj - << " returned ret=" << ret << " " - << oc.wq->thr_name() << dendl; + << " returned ret=" << ret << " " + << oc.wq->thr_name() << dendl; return false; } @@ -1025,32 +1002,34 @@ class LCOpFilter_Tags : public LCOpFilter { }; class LCOpAction_CurrentExpiration : public LCOpAction { -public: + public: LCOpAction_CurrentExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { + bool check( + lc_op_ctx& oc, ceph::real_time* exp_time, const DoutPrefixProvider* dpp + ) override 
{ auto& o = oc.o; if (!o.is_current()) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": not current, skipping " - << oc.wq->thr_name() << dendl; + << ": not current, skipping " << oc.wq->thr_name() + << dendl; return false; } if (o.is_delete_marker()) { if (oc.next_key_name) { - std::string nkn = *oc.next_key_name; - if (oc.next_has_same_name(o.key.name)) { - ldpp_dout(dpp, 7) << __func__ << "(): dm-check SAME: key=" << o.key - << " next_key_name: %%" << nkn << "%% " - << oc.wq->thr_name() << dendl; - return false; - } else { - ldpp_dout(dpp, 7) << __func__ << "(): dm-check DELE: key=" << o.key - << " next_key_name: %%" << nkn << "%% " - << oc.wq->thr_name() << dendl; - *exp_time = real_clock::now(); - return true; - } + std::string nkn = *oc.next_key_name; + if (oc.next_has_same_name(o.key.name)) { + ldpp_dout(dpp, 7) << __func__ << "(): dm-check SAME: key=" << o.key + << " next_key_name: %%" << nkn << "%% " + << oc.wq->thr_name() << dendl; + return false; + } else { + ldpp_dout(dpp, 7) << __func__ << "(): dm-check DELE: key=" << o.key + << " next_key_name: %%" << nkn << "%% " + << oc.wq->thr_name() << dendl; + *exp_time = real_clock::now(); + return true; + } } return false; } @@ -1061,20 +1040,20 @@ class LCOpAction_CurrentExpiration : public LCOpAction { if (op.expiration <= 0) { if (op.expiration_date == boost::none) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": no expiration set in rule, skipping " - << oc.wq->thr_name() << dendl; + << ": no expiration set in rule, skipping " + << oc.wq->thr_name() << dendl; return false; } - is_expired = ceph_clock_now() >= - ceph::real_clock::to_time_t(*op.expiration_date); + is_expired = + ceph_clock_now() >= ceph::real_clock::to_time_t(*op.expiration_date); *exp_time = *op.expiration_date; } else { is_expired = obj_has_expired(dpp, oc.cct, mtime, op.expiration, exp_time); } - ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired=" - << (int)is_expired << " " - << 
oc.wq->thr_name() << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key + << ": is_expired=" << (int)is_expired << " " + << oc.wq->thr_name() << dendl; return is_expired; } @@ -1082,103 +1061,108 @@ class LCOpAction_CurrentExpiration : public LCOpAction { auto& o = oc.o; int r; if (o.is_delete_marker()) { - r = remove_expired_obj(oc.dpp, oc, true, - rgw::notify::ObjectExpirationDeleteMarker); + r = remove_expired_obj( + oc.dpp, oc, true, rgw::notify::ObjectExpirationDeleteMarker + ); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: current is-dm remove_expired_obj " - << oc.bucket << ":" << o.key - << " " << cpp_strerror(r) << " " - << oc.wq->thr_name() << dendl; - return r; + ldpp_dout(oc.dpp, 0) + << "ERROR: current is-dm remove_expired_obj " << oc.bucket << ":" + << o.key << " " << cpp_strerror(r) << " " << oc.wq->thr_name() + << dendl; + return r; } - ldpp_dout(oc.dpp, 2) << "DELETED: current is-dm " - << oc.bucket << ":" << o.key - << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 2) << "DELETED: current is-dm " << oc.bucket << ":" + << o.key << " " << oc.wq->thr_name() << dendl; } else { /* ! 
o.is_delete_marker() */ - r = remove_expired_obj(oc.dpp, oc, !oc.bucket->versioned(), - rgw::notify::ObjectExpirationCurrent); + r = remove_expired_obj( + oc.dpp, oc, !oc.bucket->versioned(), + rgw::notify::ObjectExpirationCurrent + ); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj " - << oc.bucket << ":" << o.key - << " " << cpp_strerror(r) << " " - << oc.wq->thr_name() << dendl; - return r; + ldpp_dout(oc.dpp, 0) + << "ERROR: remove_expired_obj " << oc.bucket << ":" << o.key << " " + << cpp_strerror(r) << " " << oc.wq->thr_name() << dendl; + return r; } if (perfcounter) { perfcounter->inc(l_rgw_lc_expire_current, 1); } - ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key - << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key << " " + << oc.wq->thr_name() << dendl; } return 0; } }; class LCOpAction_NonCurrentExpiration : public LCOpAction { -protected: -public: - LCOpAction_NonCurrentExpiration(op_env& env) - {} + protected: + public: + LCOpAction_NonCurrentExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { + bool check( + lc_op_ctx& oc, ceph::real_time* exp_time, const DoutPrefixProvider* dpp + ) override { auto& o = oc.o; if (o.is_current()) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": current version, skipping " - << oc.wq->thr_name() << dendl; + << ": current version, skipping " << oc.wq->thr_name() + << dendl; return false; } int expiration = oc.op.noncur_expiration; - bool is_expired = obj_has_expired(dpp, oc.cct, oc.effective_mtime, expiration, - exp_time); + bool is_expired = + obj_has_expired(dpp, oc.cct, oc.effective_mtime, expiration, exp_time); - ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired=" - << is_expired << " " - << oc.wq->thr_name() << dendl; + ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key + << ": is_expired=" << is_expired << " " + << 
oc.wq->thr_name() << dendl; - return is_expired && - pass_object_lock_check(oc.driver, oc.obj.get(), dpp); + return is_expired && pass_object_lock_check(oc.driver, oc.obj.get(), dpp); } int process(lc_op_ctx& oc) { auto& o = oc.o; - int r = remove_expired_obj(oc.dpp, oc, true, - rgw::notify::ObjectExpirationNoncurrent); + int r = remove_expired_obj( + oc.dpp, oc, true, rgw::notify::ObjectExpirationNoncurrent + ); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj (non-current expiration) " - << oc.bucket << ":" << o.key - << " " << cpp_strerror(r) - << " " << oc.wq->thr_name() << dendl; + ldpp_dout( + oc.dpp, 0 + ) << "ERROR: remove_expired_obj (non-current expiration) " + << oc.bucket << ":" << o.key << " " << cpp_strerror(r) << " " + << oc.wq->thr_name() << dendl; return r; } if (perfcounter) { perfcounter->inc(l_rgw_lc_expire_noncurrent, 1); } ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key - << " (non-current expiration) " - << oc.wq->thr_name() << dendl; + << " (non-current expiration) " << oc.wq->thr_name() + << dendl; return 0; } }; class LCOpAction_DMExpiration : public LCOpAction { -public: + public: LCOpAction_DMExpiration(op_env& env) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { + bool check( + lc_op_ctx& oc, ceph::real_time* exp_time, const DoutPrefixProvider* dpp + ) override { auto& o = oc.o; if (!o.is_delete_marker()) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": not a delete marker, skipping " - << oc.wq->thr_name() << dendl; + << ": not a delete marker, skipping " + << oc.wq->thr_name() << dendl; return false; } if (oc.next_has_same_name(o.key.name)) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": next is same object, skipping " - << oc.wq->thr_name() << dendl; + << ": next is same object, skipping " + << oc.wq->thr_name() << dendl; return false; } @@ -1189,22 +1173,23 @@ class LCOpAction_DMExpiration : public LCOpAction { int 
process(lc_op_ctx& oc) { auto& o = oc.o; - int r = remove_expired_obj(oc.dpp, oc, true, - rgw::notify::ObjectExpirationDeleteMarker); + int r = remove_expired_obj( + oc.dpp, oc, true, rgw::notify::ObjectExpirationDeleteMarker + ); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj (delete marker expiration) " - << oc.bucket << ":" << o.key - << " " << cpp_strerror(r) - << " " << oc.wq->thr_name() - << dendl; + ldpp_dout( + oc.dpp, 0 + ) << "ERROR: remove_expired_obj (delete marker expiration) " + << oc.bucket << ":" << o.key << " " << cpp_strerror(r) << " " + << oc.wq->thr_name() << dendl; return r; } if (perfcounter) { perfcounter->inc(l_rgw_lc_expire_dm, 1); } ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key - << " (delete marker expiration) " - << oc.wq->thr_name() << dendl; + << " (delete marker expiration) " << oc.wq->thr_name() + << dendl; return 0; } }; @@ -1213,14 +1198,17 @@ class LCOpAction_Transition : public LCOpAction { const transition_action& transition; bool need_to_process{false}; -protected: + protected: virtual bool check_current_state(bool is_current) = 0; virtual ceph::real_time get_effective_mtime(lc_op_ctx& oc) = 0; -public: + + public: LCOpAction_Transition(const transition_action& _transition) - : transition(_transition) {} + : transition(_transition) {} - bool check(lc_op_ctx& oc, ceph::real_time *exp_time, const DoutPrefixProvider *dpp) override { + bool check( + lc_op_ctx& oc, ceph::real_time* exp_time, const DoutPrefixProvider* dpp + ) override { auto& o = oc.o; if (o.is_delete_marker()) { @@ -1236,55 +1224,64 @@ class LCOpAction_Transition : public LCOpAction { if (transition.days < 0) { if (transition.date == boost::none) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": no transition day/date set in rule, skipping " - << oc.wq->thr_name() << dendl; + << ": no transition day/date set in rule, skipping " + << oc.wq->thr_name() << dendl; return false; } - is_expired = ceph_clock_now() >= - 
ceph::real_clock::to_time_t(*transition.date); + is_expired = + ceph_clock_now() >= ceph::real_clock::to_time_t(*transition.date); *exp_time = *transition.date; } else { - is_expired = obj_has_expired(dpp, oc.cct, mtime, transition.days, exp_time); + is_expired = + obj_has_expired(dpp, oc.cct, mtime, transition.days, exp_time); } - ldpp_dout(oc.dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired=" - << is_expired << " " - << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 20) << __func__ << "(): key=" << o.key + << ": is_expired=" << is_expired << " " + << oc.wq->thr_name() << dendl; need_to_process = - (rgw_placement_rule::get_canonical_storage_class(o.meta.storage_class) != - transition.storage_class); + (rgw_placement_rule::get_canonical_storage_class(o.meta.storage_class + ) != transition.storage_class); return is_expired; } - bool should_process() override { - return need_to_process; - } + bool should_process() override { return need_to_process; } int delete_tier_obj(lc_op_ctx& oc) { int ret = 0; /* If bucket is versioned, create delete_marker for current version */ - if (oc.bucket->versioned() && oc.o.is_current() && !oc.o.is_delete_marker()) { - ret = remove_expired_obj(oc.dpp, oc, false, rgw::notify::ObjectExpiration); - ldpp_dout(oc.dpp, 20) << "delete_tier_obj Object(key:" << oc.o.key << ") current & not delete_marker" << " versioned_epoch: " << oc.o.versioned_epoch << "flags: " << oc.o.flags << dendl; + if (oc.bucket->versioned() && oc.o.is_current() && + !oc.o.is_delete_marker()) { + ret = + remove_expired_obj(oc.dpp, oc, false, rgw::notify::ObjectExpiration); + ldpp_dout(oc.dpp, 20) << "delete_tier_obj Object(key:" << oc.o.key + << ") current & not delete_marker" + << " versioned_epoch: " << oc.o.versioned_epoch + << "flags: " << oc.o.flags << dendl; } else { ret = remove_expired_obj(oc.dpp, oc, true, rgw::notify::ObjectExpiration); - ldpp_dout(oc.dpp, 20) << "delete_tier_obj Object(key:" << oc.o.key << ") not current " << "versioned_epoch: " 
<< oc.o.versioned_epoch << "flags: " << oc.o.flags << dendl; + ldpp_dout(oc.dpp, 20) + << "delete_tier_obj Object(key:" << oc.o.key << ") not current " + << "versioned_epoch: " << oc.o.versioned_epoch + << "flags: " << oc.o.flags << dendl; } return ret; } int transition_obj_to_cloud(lc_op_ctx& oc) { /* If CurrentVersion object, remove it & create delete marker */ - bool delete_object = (!oc.tier->retain_head_object() || - (oc.o.is_current() && oc.bucket->versioned())); + bool delete_object = + (!oc.tier->retain_head_object() || + (oc.o.is_current() && oc.bucket->versioned())); - int ret = oc.obj->transition_to_cloud(oc.bucket, oc.tier.get(), oc.o, - oc.env.worker->get_cloud_targets(), oc.cct, - !delete_object, oc.dpp, null_yield); + int ret = oc.obj->transition_to_cloud( + oc.bucket, oc.tier.get(), oc.o, oc.env.worker->get_cloud_targets(), + oc.cct, !delete_object, oc.dpp, null_yield + ); if (ret < 0) { return ret; } @@ -1292,7 +1289,8 @@ class LCOpAction_Transition : public LCOpAction { if (delete_object) { ret = delete_tier_obj(oc); if (ret < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: Deleting tier object(" << oc.o.key << ") failed ret=" << ret << dendl; + ldpp_dout(oc.dpp, 0) << "ERROR: Deleting tier object(" << oc.o.key + << ") failed ret=" << ret << dendl; return ret; } } @@ -1306,11 +1304,13 @@ class LCOpAction_Transition : public LCOpAction { if (oc.o.meta.category == RGWObjCategory::CloudTiered) { /* Skip objects which are already cloud tiered. 
*/ - ldpp_dout(oc.dpp, 30) << "Object(key:" << oc.o.key << ") is already cloud tiered to cloud-s3 tier: " << oc.o.meta.storage_class << dendl; + ldpp_dout(oc.dpp, 30) << "Object(key:" << oc.o.key + << ") is already cloud tiered to cloud-s3 tier: " + << oc.o.meta.storage_class << dendl; return 0; } - std::string tier_type = ""; + std::string tier_type = ""; rgw::sal::ZoneGroup& zonegroup = oc.driver->get_zone()->get_zonegroup(); rgw_placement_rule target_placement; @@ -1320,105 +1320,105 @@ class LCOpAction_Transition : public LCOpAction { r = zonegroup.get_placement_tier(target_placement, &oc.tier); if (!r && oc.tier->get_tier_type() == "cloud-s3") { - ldpp_dout(oc.dpp, 30) << "Found cloud s3 tier: " << target_placement.storage_class << dendl; + ldpp_dout(oc.dpp, 30) + << "Found cloud s3 tier: " << target_placement.storage_class << dendl; if (!oc.o.is_current() && !pass_object_lock_check(oc.driver, oc.obj.get(), oc.dpp)) { /* Skip objects which has object lock enabled. */ - ldpp_dout(oc.dpp, 10) << "Object(key:" << oc.o.key << ") is locked. Skipping transition to cloud-s3 tier: " << target_placement.storage_class << dendl; + ldpp_dout(oc.dpp, 10) + << "Object(key:" << oc.o.key + << ") is locked. 
Skipping transition to cloud-s3 tier: " + << target_placement.storage_class << dendl; return 0; } r = transition_obj_to_cloud(oc); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: failed to transition obj(key:" << oc.o.key << ") to cloud (r=" << r << ")" - << dendl; + ldpp_dout(oc.dpp, 0) + << "ERROR: failed to transition obj(key:" << oc.o.key + << ") to cloud (r=" << r << ")" << dendl; return r; } } else { if (!oc.driver->valid_placement(target_placement)) { - ldpp_dout(oc.dpp, 0) << "ERROR: non existent dest placement: " - << target_placement - << " bucket="<< oc.bucket - << " rule_id=" << oc.op.id - << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 0) + << "ERROR: non existent dest placement: " << target_placement + << " bucket=" << oc.bucket << " rule_id=" << oc.op.id << " " + << oc.wq->thr_name() << dendl; return -EINVAL; } - int r = oc.obj->transition(oc.bucket, target_placement, o.meta.mtime, - o.versioned_epoch, oc.dpp, null_yield); + int r = oc.obj->transition( + oc.bucket, target_placement, o.meta.mtime, o.versioned_epoch, oc.dpp, + null_yield + ); if (r < 0) { - ldpp_dout(oc.dpp, 0) << "ERROR: failed to transition obj " - << oc.bucket << ":" << o.key - << " -> " << transition.storage_class - << " " << cpp_strerror(r) - << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 0) + << "ERROR: failed to transition obj " << oc.bucket << ":" << o.key + << " -> " << transition.storage_class << " " << cpp_strerror(r) + << " " << oc.wq->thr_name() << dendl; return r; } } - ldpp_dout(oc.dpp, 2) << "TRANSITIONED:" << oc.bucket - << ":" << o.key << " -> " - << transition.storage_class - << " " << oc.wq->thr_name() << dendl; + ldpp_dout(oc.dpp, 2) << "TRANSITIONED:" << oc.bucket << ":" << o.key + << " -> " << transition.storage_class << " " + << oc.wq->thr_name() << dendl; return 0; } }; class LCOpAction_CurrentTransition : public LCOpAction_Transition { -protected: - bool check_current_state(bool is_current) override { - return is_current; - } + protected: + 
bool check_current_state(bool is_current) override { return is_current; } ceph::real_time get_effective_mtime(lc_op_ctx& oc) override { return oc.o.meta.mtime; } -public: + + public: LCOpAction_CurrentTransition(const transition_action& _transition) - : LCOpAction_Transition(_transition) {} - int process(lc_op_ctx& oc) { - int r = LCOpAction_Transition::process(oc); - if (r == 0) { - if (perfcounter) { - perfcounter->inc(l_rgw_lc_transition_current, 1); - } + : LCOpAction_Transition(_transition) {} + int process(lc_op_ctx& oc) { + int r = LCOpAction_Transition::process(oc); + if (r == 0) { + if (perfcounter) { + perfcounter->inc(l_rgw_lc_transition_current, 1); } - return r; } + return r; + } }; class LCOpAction_NonCurrentTransition : public LCOpAction_Transition { -protected: - bool check_current_state(bool is_current) override { - return !is_current; - } + protected: + bool check_current_state(bool is_current) override { return !is_current; } ceph::real_time get_effective_mtime(lc_op_ctx& oc) override { return oc.effective_mtime; } -public: - LCOpAction_NonCurrentTransition(op_env& env, - const transition_action& _transition) - : LCOpAction_Transition(_transition) - {} - int process(lc_op_ctx& oc) { - int r = LCOpAction_Transition::process(oc); - if (r == 0) { - if (perfcounter) { - perfcounter->inc(l_rgw_lc_transition_noncurrent, 1); - } + + public: + LCOpAction_NonCurrentTransition( + op_env& env, const transition_action& _transition + ) + : LCOpAction_Transition(_transition) {} + int process(lc_op_ctx& oc) { + int r = LCOpAction_Transition::process(oc); + if (r == 0) { + if (perfcounter) { + perfcounter->inc(l_rgw_lc_transition_noncurrent, 1); } - return r; } + return r; + } }; -void LCOpRule::build() -{ +void LCOpRule::build() { filters.emplace_back(new LCOpFilter_Tags); auto& op = env.op; - if (op.expiration > 0 || - op.expiration_date != boost::none) { + if (op.expiration > 0 || op.expiration_date != boost::none) { actions.emplace_back(new 
LCOpAction_CurrentExpiration(env)); } @@ -1439,18 +1439,16 @@ void LCOpRule::build() } } -void LCOpRule::update() -{ +void LCOpRule::update() { next_key_name = env.ol.next_key_name(); effective_mtime = env.ol.get_prev_obj().meta.mtime; } -int LCOpRule::process(rgw_bucket_dir_entry& o, - const DoutPrefixProvider *dpp, - WorkQ* wq) -{ +int LCOpRule::process( + rgw_bucket_dir_entry& o, const DoutPrefixProvider* dpp, WorkQ* wq +) { lc_op_ctx ctx(env, o, next_key_name, effective_mtime, dpp, wq); - shared_ptr *selected = nullptr; // n.b., req'd by sharing + shared_ptr* selected = nullptr; // n.b., req'd by sharing real_time exp; for (auto& a : actions) { @@ -1464,9 +1462,7 @@ int LCOpRule::process(rgw_bucket_dir_entry& o, } } - if (selected && - (*selected)->should_process()) { - + if (selected && (*selected)->should_process()) { /* * Calling filter checks after action checks because * all action checks (as they are implemented now) do @@ -1486,31 +1482,29 @@ int LCOpRule::process(rgw_bucket_dir_entry& o, if (!cont) { ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key - << ": no rule match, skipping " - << wq->thr_name() << dendl; + << ": no rule match, skipping " << wq->thr_name() + << dendl; return 0; } int r = (*selected)->process(ctx); if (r < 0) { - ldpp_dout(dpp, 0) << "ERROR: remove_expired_obj " - << env.bucket << ":" << o.key - << " " << cpp_strerror(r) - << " " << wq->thr_name() << dendl; + ldpp_dout(dpp, 0) << "ERROR: remove_expired_obj " << env.bucket << ":" + << o.key << " " << cpp_strerror(r) << " " + << wq->thr_name() << dendl; return r; } - ldpp_dout(dpp, 20) << "processed:" << env.bucket << ":" - << o.key << " " << wq->thr_name() << dendl; + ldpp_dout(dpp, 20) << "processed:" << env.bucket << ":" << o.key << " " + << wq->thr_name() << dendl; } return 0; - } -int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, - time_t stop_at, bool once) -{ - RGWLifecycleConfiguration config(cct); +int RGWLC::bucket_lc_process( + string& shard_id, 
LCWorker* worker, time_t stop_at, bool once +) { + RGWLifecycleConfiguration config(cct); std::unique_ptr bucket; string no_ns, list_versions; vector objs; @@ -1520,93 +1514,85 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, string bucket_name = result[1]; string bucket_marker = result[2]; - ldpp_dout(this, 5) << "RGWLC::bucket_lc_process ENTER " << bucket_name << dendl; + ldpp_dout(this, 5) << "RGWLC::bucket_lc_process ENTER " << bucket_name + << dendl; if (unlikely(cct->_conf->rgwlc_skip_bucket_step)) { return 0; } - int ret = driver->get_bucket(this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield); + int ret = driver->get_bucket( + this, nullptr, bucket_tenant, bucket_name, &bucket, null_yield + ); if (ret < 0) { - ldpp_dout(this, 0) << "LC:get_bucket for " << bucket_name - << " failed" << dendl; + ldpp_dout(this, 0) << "LC:get_bucket for " << bucket_name << " failed" + << dendl; return ret; } ret = bucket->load_bucket(this, null_yield); if (ret < 0) { - ldpp_dout(this, 0) << "LC:load_bucket for " << bucket_name - << " failed" << dendl; + ldpp_dout(this, 0) << "LC:load_bucket for " << bucket_name << " failed" + << dendl; return ret; } - auto stack_guard = make_scope_guard( - [&worker] - { - worker->workpool->drain(); - } - ); + auto stack_guard = make_scope_guard([&worker] { worker->workpool->drain(); }); if (bucket->get_marker() != bucket_marker) { ldpp_dout(this, 1) << "LC: deleting stale entry found for bucket=" - << bucket_tenant << ":" << bucket_name - << " cur_marker=" << bucket->get_marker() + << bucket_tenant << ":" << bucket_name + << " cur_marker=" << bucket->get_marker() << " orig_marker=" << bucket_marker << dendl; return -ENOENT; } - map::iterator aiter - = bucket->get_attrs().find(RGW_ATTR_LC); + map::iterator aiter = + bucket->get_attrs().find(RGW_ATTR_LC); if (aiter == bucket->get_attrs().end()) { ldpp_dout(this, 0) << "WARNING: bucket_attrs.find(RGW_ATTR_LC) failed for " - << bucket_name << " (terminates 
bucket_lc_process(...))" - << dendl; + << bucket_name << " (terminates bucket_lc_process(...))" + << dendl; return 0; } bufferlist::const_iterator iter{&aiter->second}; try { - config.decode(iter); - } catch (const buffer::error& e) { - ldpp_dout(this, 0) << __func__ << "() decode life cycle config failed" - << dendl; - return -1; - } + config.decode(iter); + } catch (const buffer::error& e) { + ldpp_dout(this, 0) << __func__ << "() decode life cycle config failed" + << dendl; + return -1; + } /* fetch information for zone checks */ rgw::sal::Zone* zone = driver->get_zone(); auto pf = [](RGWLC::LCWorker* wk, WorkQ* wq, WorkItem& wi) { - auto wt = - boost::get>(wi); + auto wt = boost::get>(wi); auto& [op_rule, o] = wt; ldpp_dout(wk->get_lc(), 20) - << __func__ << "(): key=" << o.key << wq->thr_name() - << dendl; + << __func__ << "(): key=" << o.key << wq->thr_name() << dendl; int ret = op_rule.process(o, wk->dpp, wq); if (ret < 0) { ldpp_dout(wk->get_lc(), 20) - << "ERROR: orule.process() returned ret=" << ret - << "thread:" << wq->thr_name() - << dendl; + << "ERROR: orule.process() returned ret=" << ret + << "thread:" << wq->thr_name() << dendl; } }; worker->workpool->setf(pf); multimap& prefix_map = config.get_prefix_map(); - ldpp_dout(this, 10) << __func__ << "() prefix_map size=" - << prefix_map.size() - << dendl; + ldpp_dout(this, 10) << __func__ << "() prefix_map size=" << prefix_map.size() + << dendl; rgw_obj_key pre_marker; rgw_obj_key next_marker; - for(auto prefix_iter = prefix_map.begin(); prefix_iter != prefix_map.end(); - ++prefix_iter) { - + for (auto prefix_iter = prefix_map.begin(); prefix_iter != prefix_map.end(); + ++prefix_iter) { if (worker_should_stop(stop_at, once)) { ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " - << worker->ix - << dendl; + << worker->ix << dendl; return 0; } @@ -1615,10 +1601,11 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, continue; } ldpp_dout(this, 20) << __func__ << "(): 
prefix=" << prefix_iter->first - << dendl; - if (prefix_iter != prefix_map.begin() && - (prefix_iter->first.compare(0, prev(prefix_iter)->first.length(), - prev(prefix_iter)->first) == 0)) { + << dendl; + if (prefix_iter != prefix_map.begin() && + (prefix_iter->first.compare( + 0, prev(prefix_iter)->first.length(), prev(prefix_iter)->first + ) == 0)) { next_marker = pre_marker; } else { pre_marker = next_marker; @@ -1627,70 +1614,66 @@ int RGWLC::bucket_lc_process(string& shard_id, LCWorker* worker, LCObjsLister ol(driver, bucket.get()); ol.set_prefix(prefix_iter->first); - if (! zone_check(op, zone)) { - ldpp_dout(this, 7) << "LC rule not executable in " << zone->get_tier_type() - << " zone, skipping" << dendl; + if (!zone_check(op, zone)) { + ldpp_dout(this, 7) << "LC rule not executable in " + << zone->get_tier_type() << " zone, skipping" << dendl; continue; } ret = ol.init(this); if (ret < 0) { - if (ret == (-ENOENT)) - return 0; + if (ret == (-ENOENT)) return 0; ldpp_dout(this, 0) << "ERROR: driver->list_objects():" << dendl; return ret; } op_env oenv(op, driver, worker, bucket.get(), ol); LCOpRule orule(oenv); - orule.build(); // why can't ctor do it? + orule.build(); // why can't ctor do it? 
rgw_bucket_dir_entry* o{nullptr}; - for (auto offset = 0; ol.get_obj(this, &o /* , fetch_barrier */); ++offset, ol.next()) { + for (auto offset = 0; ol.get_obj(this, &o /* , fetch_barrier */); + ++offset, ol.next()) { orule.update(); std::tuple t1 = {orule, *o}; worker->workpool->enqueue(WorkItem{t1}); if ((offset % 100) == 0) { - if (worker_should_stop(stop_at, once)) { - ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " - << worker->ix - << dendl; - return 0; - } + if (worker_should_stop(stop_at, once)) { + ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker " + << worker->ix << dendl; + return 0; + } } } worker->workpool->drain(); } - ret = handle_multipart_expiration(bucket.get(), prefix_map, worker, stop_at, once); + ret = handle_multipart_expiration( + bucket.get(), prefix_map, worker, stop_at, once + ); return ret; } -class SimpleBackoff -{ +class SimpleBackoff { const int max_retries; std::chrono::milliseconds sleep_ms; int retries{0}; -public: + + public: SimpleBackoff(int max_retries, std::chrono::milliseconds initial_sleep_ms) - : max_retries(max_retries), sleep_ms(initial_sleep_ms) - {} + : max_retries(max_retries), sleep_ms(initial_sleep_ms) {} SimpleBackoff(const SimpleBackoff&) = delete; SimpleBackoff& operator=(const SimpleBackoff&) = delete; - int get_retries() const { - return retries; - } + int get_retries() const { return retries; } - void reset() { - retries = 0; - } + void reset() { retries = 0; } bool wait_backoff(const fu2::unique_function& barrier) { reset(); while (retries < max_retries) { auto r = barrier(); if (r) { - return r; + return r; } std::this_thread::sleep_for(sleep_ms * 2 * retries++); } @@ -1698,42 +1681,42 @@ class SimpleBackoff } }; -int RGWLC::bucket_lc_post(int index, int max_lock_sec, - rgw::sal::Lifecycle::LCEntry& entry, int& result, - LCWorker* worker) -{ +int RGWLC::bucket_lc_post( + int index, int max_lock_sec, rgw::sal::Lifecycle::LCEntry& entry, + int& result, LCWorker* worker +) { 
utime_t lock_duration(cct->_conf->rgw_lc_lock_max_time, 0); std::unique_ptr lock = - sal_lc->get_serializer(lc_index_lock_name, obj_names[index], cookie); + sal_lc->get_serializer(lc_index_lock_name, obj_names[index], cookie); ldpp_dout(this, 5) << "RGWLC::bucket_lc_post(): POST " << entry - << " index: " << index << " worker ix: " << worker->ix - << dendl; + << " index: " << index << " worker ix: " << worker->ix + << dendl; do { int ret = lock->try_lock(this, lock_duration, null_yield); if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to acquire lock on " - << obj_names[index] << ", sleep 5, try again " << dendl; + << obj_names[index] << ", sleep 5, try again " + << dendl; sleep(5); continue; } - if (ret < 0) - return 0; + if (ret < 0) return 0; ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() lock " << obj_names[index] - << dendl; + << dendl; - if (result == -ENOENT) { + if (result == -ENOENT) { /* XXXX are we SURE the only way result could == ENOENT is when * there is no such bucket? It is currently the value returned * from bucket_lc_process(...) 
*/ - ret = sal_lc->rm_entry(obj_names[index], entry); + ret = sal_lc->rm_entry(obj_names[index], entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::bucket_lc_post() failed to remove entry " - << obj_names[index] << dendl; + << obj_names[index] << dendl; } goto clean; } else if (result < 0) { @@ -1742,31 +1725,34 @@ int RGWLC::bucket_lc_post(int index, int max_lock_sec, entry.set_status(lc_complete); } - ret = sal_lc->set_entry(obj_names[index], entry); + ret = sal_lc->set_entry(obj_names[index], entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to set entry on " - << obj_names[index] << dendl; + << obj_names[index] << dendl; } -clean: + clean: lock->unlock(); - ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() unlock " - << obj_names[index] << dendl; + ldpp_dout(this, 20) << "RGWLC::bucket_lc_post() unlock " << obj_names[index] + << dendl; return 0; } while (true); } /* RGWLC::bucket_lc_post */ -int RGWLC::list_lc_progress(string& marker, uint32_t max_entries, - vector>& progress_map, - int& index) -{ +int RGWLC::list_lc_progress( + string& marker, uint32_t max_entries, + vector>& progress_map, + int& index +) { progress_map.clear(); - for(; index < max_objs; index++, marker="") { + for (; index < max_objs; index++, marker = "") { vector> entries; - int ret = sal_lc->list_entries(obj_names[index], marker, max_entries, entries); + int ret = + sal_lc->list_entries(obj_names[index], marker, max_entries, entries); if (ret < 0) { if (ret == -ENOENT) { - ldpp_dout(this, 10) << __func__ << "() ignoring unfound lc object=" - << obj_names[index] << dendl; + ldpp_dout(this, 10) + << __func__ << "() ignoring unfound lc object=" << obj_names[index] + << dendl; continue; } else { return ret; @@ -1777,43 +1763,35 @@ int RGWLC::list_lc_progress(string& marker, uint32_t max_entries, //progress_map.insert(progress_map.end(), entries.begin(), entries.end()); /* update index, marker tuple */ - if (progress_map.size() > 0) - marker = 
progress_map.back()->get_bucket(); + if (progress_map.size() > 0) marker = progress_map.back()->get_bucket(); - if (progress_map.size() >= max_entries) - break; + if (progress_map.size() >= max_entries) break; } return 0; } -static inline vector random_sequence(uint32_t n) -{ +static inline vector random_sequence(uint32_t n) { vector v(n, 0); - std::generate(v.begin(), v.end(), - [ix = 0]() mutable { - return ix++; - }); + std::generate(v.begin(), v.end(), [ix = 0]() mutable { return ix++; }); std::random_device rd; std::default_random_engine rng{rd()}; std::shuffle(v.begin(), v.end(), rng); return v; } -static inline int get_lc_index(CephContext *cct, - const std::string& shard_id) -{ +static inline int get_lc_index(CephContext* cct, const std::string& shard_id) { int max_objs = - (cct->_conf->rgw_lc_max_objs > HASH_PRIME ? HASH_PRIME : - cct->_conf->rgw_lc_max_objs); + (cct->_conf->rgw_lc_max_objs > HASH_PRIME ? HASH_PRIME + : cct->_conf->rgw_lc_max_objs); /* n.b. review hash algo */ - int index = ceph_str_hash_linux(shard_id.c_str(), - shard_id.size()) % HASH_PRIME % max_objs; + int index = ceph_str_hash_linux(shard_id.c_str(), shard_id.size()) % + HASH_PRIME % max_objs; return index; } -static inline void get_lc_oid(CephContext *cct, - const std::string& shard_id, string *oid) -{ +static inline void get_lc_oid( + CephContext* cct, const std::string& shard_id, string* oid +) { /* n.b. 
review hash algo */ int index = get_lc_index(cct, shard_id); *oid = lc_oid_prefix; @@ -1823,14 +1801,14 @@ static inline void get_lc_oid(CephContext *cct, return; } -static std::string get_bucket_lc_key(const rgw_bucket& bucket){ +static std::string get_bucket_lc_key(const rgw_bucket& bucket) { return string_join_reserve(':', bucket.tenant, bucket.name, bucket.marker); } -int RGWLC::process(LCWorker* worker, - const std::unique_ptr& optional_bucket, - bool once = false) -{ +int RGWLC::process( + LCWorker* worker, const std::unique_ptr& optional_bucket, + bool once = false +) { int ret = 0; int max_secs = cct->_conf->rgw_lc_lock_max_time; @@ -1850,56 +1828,52 @@ int RGWLC::process(LCWorker* worker, vector shard_seq = random_sequence(max_objs); for (auto index : shard_seq) { ret = process(index, max_secs, worker, once); - if (ret < 0) - return ret; + if (ret < 0) return ret; } } return 0; } -bool RGWLC::expired_session(time_t started) -{ - if (! cct->_conf->rgwlc_auto_session_clear) { +bool RGWLC::expired_session(time_t started) { + if (!cct->_conf->rgwlc_auto_session_clear) { return false; } time_t interval = (cct->_conf->rgw_lc_debug_interval > 0) - ? cct->_conf->rgw_lc_debug_interval - : 24*60*60; + ? cct->_conf->rgw_lc_debug_interval + : 24 * 60 * 60; auto now = time(nullptr); ldpp_dout(this, 16) << "RGWLC::expired_session" - << " started: " << started - << " interval: " << interval << "(*2==" << 2*interval << ")" - << " now: " << now - << dendl; + << " started: " << started << " interval: " << interval + << "(*2==" << 2 * interval << ")" + << " now: " << now << dendl; - return (started + 2*interval < now); + return (started + 2 * interval < now); } -time_t RGWLC::thread_stop_at() -{ +time_t RGWLC::thread_stop_at() { uint64_t interval = (cct->_conf->rgw_lc_debug_interval > 0) - ? cct->_conf->rgw_lc_debug_interval - : 24*60*60; + ? 
cct->_conf->rgw_lc_debug_interval + : 24 * 60 * 60; return time(nullptr) + interval; } -int RGWLC::process_bucket(int index, int max_lock_secs, LCWorker* worker, - const std::string& bucket_entry_marker, - bool once = false) -{ +int RGWLC::process_bucket( + int index, int max_lock_secs, LCWorker* worker, + const std::string& bucket_entry_marker, bool once = false +) { ldpp_dout(this, 5) << "RGWLC::process_bucket(): ENTER: " - << "index: " << index << " worker ix: " << worker->ix - << dendl; + << "index: " << index << " worker ix: " << worker->ix + << dendl; int ret = 0; - std::unique_ptr serializer = - sal_lc->get_serializer(lc_index_lock_name, obj_names[index], - worker->thr_name()); + std::unique_ptr serializer = sal_lc->get_serializer( + lc_index_lock_name, obj_names[index], worker->thr_name() + ); std::unique_ptr entry; if (max_lock_secs <= 0) { return -EAGAIN; @@ -1910,30 +1884,29 @@ int RGWLC::process_bucket(int index, int max_lock_secs, LCWorker* worker, if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ ldpp_dout(this, 0) << "RGWLC::process() failed to acquire lock on " - << obj_names[index] << dendl; + << obj_names[index] << dendl; return -EBUSY; } - if (ret < 0) - return 0; + if (ret < 0) return 0; std::unique_lock lock( - *(serializer.get()), std::adopt_lock); + *(serializer.get()), std::adopt_lock + ); ret = sal_lc->get_entry(obj_names[index], bucket_entry_marker, &entry); if (ret >= 0) { if (entry->get_status() == lc_processing) { if (expired_session(entry->get_start_time())) { - ldpp_dout(this, 5) << "RGWLC::process_bucket(): STALE lc session found for: " << entry - << " index: " << index << " worker ix: " << worker->ix - << " (clearing)" - << dendl; + ldpp_dout( + this, 5 + ) << "RGWLC::process_bucket(): STALE lc session found for: " + << entry << " index: " << index << " worker ix: " << worker->ix + << " (clearing)" << dendl; } else { - ldpp_dout(this, 5) << "RGWLC::process_bucket(): ACTIVE entry: " - << entry - << " 
index: " << index - << " worker ix: " << worker->ix - << dendl; - return ret; + ldpp_dout(this, 5) << "RGWLC::process_bucket(): ACTIVE entry: " << entry + << " index: " << index + << " worker ix: " << worker->ix << dendl; + return ret; } } } @@ -1944,21 +1917,21 @@ int RGWLC::process_bucket(int index, int max_lock_secs, LCWorker* worker, } ldpp_dout(this, 5) << "RGWLC::process_bucket(): START entry 1: " << entry - << " index: " << index << " worker ix: " << worker->ix - << dendl; + << " index: " << index << " worker ix: " << worker->ix + << dendl; entry->set_status(lc_processing); ret = sal_lc->set_entry(obj_names[index], *entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process_bucket() failed to set obj entry " - << obj_names[index] << entry->get_bucket() << entry->get_status() - << dendl; + << obj_names[index] << entry->get_bucket() + << entry->get_status() << dendl; return ret; } ldpp_dout(this, 5) << "RGWLC::process_bucket(): START entry 2: " << entry - << " index: " << index << " worker ix: " << worker->ix - << dendl; + << " index: " << index << " worker ix: " << worker->ix + << dendl; lock.unlock(); ret = bucket_lc_process(entry->get_bucket(), worker, thread_stop_at(), once); @@ -1967,23 +1940,24 @@ int RGWLC::process_bucket(int index, int max_lock_secs, LCWorker* worker, return ret; } /* RGWLC::process_bucket */ -static inline bool allow_shard_rollover(CephContext* cct, time_t now, time_t shard_rollover_date) -{ +static inline bool allow_shard_rollover( + CephContext* cct, time_t now, time_t shard_rollover_date +) { /* return true iff: * - non-debug scheduling is in effect, and * - the current shard has not rolled over in the last 24 hours */ - if (((shard_rollover_date < now) && - (now - shard_rollover_date > 24*60*60)) || - (! 
shard_rollover_date /* no rollover date stored */) || - (cct->_conf->rgw_lc_debug_interval > 0 /* defaults to -1 == disabled */)) { + if (((shard_rollover_date < now) && (now - shard_rollover_date > 24 * 60 * 60) + ) || + (!shard_rollover_date /* no rollover date stored */) || + (cct->_conf->rgw_lc_debug_interval > + 0 /* defaults to -1 == disabled */)) { return true; } return false; } /* allow_shard_rollover */ -static inline bool already_run_today(CephContext* cct, time_t start_date) -{ +static inline bool already_run_today(CephContext* cct, time_t start_date) { struct tm bdt; time_t begin_of_day; utime_t now = ceph_clock_now(); @@ -2000,24 +1974,23 @@ static inline bool already_run_today(CephContext* cct, time_t start_date) bdt.tm_min = 0; bdt.tm_sec = 0; begin_of_day = mktime(&bdt); - if (now - begin_of_day < 24*60*60) + if (now - begin_of_day < 24 * 60 * 60) return true; else return false; } /* already_run_today */ -inline int RGWLC::advance_head(const std::string& lc_shard, - rgw::sal::Lifecycle::LCHead& head, - rgw::sal::Lifecycle::LCEntry& entry, - time_t start_date) -{ +inline int RGWLC::advance_head( + const std::string& lc_shard, rgw::sal::Lifecycle::LCHead& head, + rgw::sal::Lifecycle::LCEntry& entry, time_t start_date +) { int ret{0}; std::unique_ptr next_entry; ret = sal_lc->get_next_entry(lc_shard, entry.get_bucket(), &next_entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to get obj entry " - << lc_shard << dendl; + << lc_shard << dendl; goto exit; } @@ -2027,30 +2000,30 @@ inline int RGWLC::advance_head(const std::string& lc_shard, ret = sal_lc->put_head(lc_shard, head); if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() failed to put head " - << lc_shard - << dendl; + ldpp_dout(this, 0) << "RGWLC::process() failed to put head " << lc_shard + << dendl; goto exit; } exit: return ret; } /* advance head */ -int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, - bool once = false) -{ +int RGWLC::process( + int 
index, int max_lock_secs, LCWorker* worker, bool once = false +) { int ret{0}; const auto& lc_shard = obj_names[index]; std::unique_ptr head; - std::unique_ptr entry; //string = bucket_name:bucket_id, start_time, int = LC_BUCKET_STATUS + std::unique_ptr + entry; //string = bucket_name:bucket_id, start_time, int = LC_BUCKET_STATUS ldpp_dout(this, 5) << "RGWLC::process(): ENTER: " - << "index: " << index << " worker ix: " << worker->ix - << dendl; + << "index: " << index << " worker ix: " << worker->ix + << dendl; std::unique_ptr lock = - sal_lc->get_serializer(lc_index_lock_name, lc_shard, worker->thr_name()); + sal_lc->get_serializer(lc_index_lock_name, lc_shard, worker->thr_name()); utime_t lock_for_s(max_lock_secs, 0); const auto& lock_lambda = [&]() { @@ -2061,15 +2034,15 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, if (ret == -EBUSY || ret == -EEXIST) { /* already locked by another lc processor */ return false; - } + } return false; }; SimpleBackoff shard_lock(5 /* max retries */, 50ms); - if (! shard_lock.wait_backoff(lock_lambda)) { + if (!shard_lock.wait_backoff(lock_lambda)) { ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on " - << lc_shard << " after " << shard_lock.get_retries() - << dendl; + << lc_shard << " after " << shard_lock.get_retries() + << dendl; return 0; } @@ -2080,7 +2053,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, ret = sal_lc->get_head(lc_shard, &head); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to get obj head " - << lc_shard << ", ret=" << ret << dendl; + << lc_shard << ", ret=" << ret << dendl; goto exit; } @@ -2089,37 +2062,41 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, if (head->get_marker().empty() && allow_shard_rollover(cct, now, head->get_shard_rollover_date()) /* prevent multiple passes by diff. 
* rgws,in same cycle */) { - - ldpp_dout(this, 5) << "RGWLC::process() process shard rollover lc_shard=" << lc_shard - << " head.marker=" << head->get_marker() - << " head.shard_rollover_date=" << head->get_shard_rollover_date() - << dendl; + ldpp_dout(this, 5) << "RGWLC::process() process shard rollover lc_shard=" + << lc_shard << " head.marker=" << head->get_marker() + << " head.shard_rollover_date=" + << head->get_shard_rollover_date() << dendl; vector> entries; int ret = sal_lc->list_entries(lc_shard, head->get_marker(), 1, entries); if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() sal_lc->list_entries(lc_shard, head.marker, 1, " - << "entries) returned error ret==" << ret << dendl; - goto exit; + ldpp_dout( + this, 0 + ) << "RGWLC::process() sal_lc->list_entries(lc_shard, head.marker, 1, " + << "entries) returned error ret==" << ret << dendl; + goto exit; } if (entries.size() > 0) { - entry = std::move(entries.front()); - head->set_marker(entry->get_bucket()); - head->set_start_date(now); - head->set_shard_rollover_date(0); + entry = std::move(entries.front()); + head->set_marker(entry->get_bucket()); + head->set_start_date(now); + head->set_shard_rollover_date(0); } } else { - ldpp_dout(this, 0) << "RGWLC::process() head.marker !empty() at START for shard==" - << lc_shard << " head last stored at " - << rgw_to_asctime(utime_t(time_t(head->get_start_date()), 0)) - << dendl; + ldpp_dout( + this, 0 + ) << "RGWLC::process() head.marker !empty() at START for shard==" + << lc_shard << " head last stored at " + << rgw_to_asctime(utime_t(time_t(head->get_start_date()), 0)) << dendl; /* fetches the entry pointed to by head.bucket */ ret = sal_lc->get_entry(lc_shard, head->get_marker(), &entry); if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() sal_lc->get_entry(lc_shard, head.marker, entry) " - << "returned error ret==" << ret << dendl; - goto exit; + ldpp_dout( + this, 0 + ) << "RGWLC::process() sal_lc->get_entry(lc_shard, head.marker, entry) " + << 
"returned error ret==" << ret << dendl; + goto exit; } } @@ -2134,60 +2111,57 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, ldpp_dout(this, 5) << "RGWLC::process(): ACTIVE entry: " << entry << " index: " << index << " worker ix: " << worker->ix << dendl; - /* skip to next entry */ - if (advance_head(lc_shard, *head.get(), *entry.get(), now) < 0) { - goto exit; - } - /* done with this shard */ - if (head->get_marker().empty()) { - ldpp_dout(this, 5) << - "RGWLC::process() cycle finished lc_shard=" - << lc_shard - << dendl; - head->set_shard_rollover_date(ceph_clock_now()); - ret = sal_lc->put_head(lc_shard, *head.get()); - if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() failed to put head " - << lc_shard - << dendl; - } - goto exit; - } + /* skip to next entry */ + if (advance_head(lc_shard, *head.get(), *entry.get(), now) < 0) { + goto exit; + } + /* done with this shard */ + if (head->get_marker().empty()) { + ldpp_dout(this, 5) + << "RGWLC::process() cycle finished lc_shard=" << lc_shard + << dendl; + head->set_shard_rollover_date(ceph_clock_now()); + ret = sal_lc->put_head(lc_shard, *head.get()); + if (ret < 0) { + ldpp_dout(this, 0) << "RGWLC::process() failed to put head " + << lc_shard << dendl; + } + goto exit; + } continue; } } else { - if ((entry->get_status() == lc_complete) && - already_run_today(cct, entry->get_start_time())) { - /* skip to next entry */ - if (advance_head(lc_shard, *head.get(), *entry.get(), now) < 0) { - goto exit; - } - ldpp_dout(this, 5) << "RGWLC::process() worker ix; " << worker->ix - << " SKIP processing for already-processed bucket " << entry->get_bucket() - << dendl; - /* done with this shard */ - if (head->get_marker().empty()) { - ldpp_dout(this, 5) << - "RGWLC::process() cycle finished lc_shard=" - << lc_shard - << dendl; - head->set_shard_rollover_date(ceph_clock_now()); - ret = sal_lc->put_head(lc_shard, *head.get()); - if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() failed to put 
head " - << lc_shard - << dendl; - } - goto exit; - } - continue; - } + if ((entry->get_status() == lc_complete) && + already_run_today(cct, entry->get_start_time())) { + /* skip to next entry */ + if (advance_head(lc_shard, *head.get(), *entry.get(), now) < 0) { + goto exit; + } + ldpp_dout(this, 5) << "RGWLC::process() worker ix; " << worker->ix + << " SKIP processing for already-processed bucket " + << entry->get_bucket() << dendl; + /* done with this shard */ + if (head->get_marker().empty()) { + ldpp_dout(this, 5) + << "RGWLC::process() cycle finished lc_shard=" << lc_shard + << dendl; + head->set_shard_rollover_date(ceph_clock_now()); + ret = sal_lc->put_head(lc_shard, *head.get()); + if (ret < 0) { + ldpp_dout(this, 0) << "RGWLC::process() failed to put head " + << lc_shard << dendl; + } + goto exit; + } + continue; + } } } else { - ldpp_dout(this, 5) << "RGWLC::process() entry.bucket.empty() == true at START 1" - << " (this is possible mainly before any lc policy has been stored" - << " or after removal of an lc_shard object)" - << dendl; + ldpp_dout( + this, 5 + ) << "RGWLC::process() entry.bucket.empty() == true at START 1" + << " (this is possible mainly before any lc policy has been stored" + << " or after removal of an lc_shard object)" << dendl; goto exit; } @@ -2196,8 +2170,8 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, * processing for the shard automatically when processing is * finished for the shard */ ldpp_dout(this, 5) << "RGWLC::process(): START entry 1: " << entry - << " index: " << index << " worker ix: " << worker->ix - << dendl; + << " index: " << index << " worker ix: " << worker->ix + << dendl; entry->set_status(lc_processing); entry->set_start_time(now); @@ -2205,7 +2179,8 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, ret = sal_lc->set_entry(lc_shard, *entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to set obj entry " - << lc_shard << entry->get_bucket() << 
entry->get_status() << dendl; + << lc_shard << entry->get_bucket() + << entry->get_status() << dendl; goto exit; } @@ -2215,20 +2190,21 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, } ldpp_dout(this, 5) << "RGWLC::process(): START entry 2: " << entry - << " index: " << index << " worker ix: " << worker->ix - << dendl; + << " index: " << index << " worker ix: " << worker->ix + << dendl; /* drop lock so other instances can make progress while this * bucket is being processed */ lock->unlock(); - ret = bucket_lc_process(entry->get_bucket(), worker, thread_stop_at(), once); + ret = + bucket_lc_process(entry->get_bucket(), worker, thread_stop_at(), once); /* postamble */ //bucket_lc_post(index, max_lock_secs, entry, ret, worker); - if (! shard_lock.wait_backoff(lock_lambda)) { + if (!shard_lock.wait_backoff(lock_lambda)) { ldpp_dout(this, 0) << "RGWLC::process(): failed to aquire lock on " - << lc_shard << " after " << shard_lock.get_retries() - << dendl; + << lc_shard << " after " << shard_lock.get_retries() + << dendl; return 0; } @@ -2236,12 +2212,11 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, /* XXXX are we SURE the only way result could == ENOENT is when * there is no such bucket? It is currently the value returned * from bucket_lc_process(...) 
*/ - ret = sal_lc->rm_entry(lc_shard, *entry); + ret = sal_lc->rm_entry(lc_shard, *entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to remove entry " - << lc_shard << " (nonfatal)" - << dendl; - /* not fatal, could result from a race */ + << lc_shard << " (nonfatal)" << dendl; + /* not fatal, could result from a race */ } } else { if (ret < 0) { @@ -2252,8 +2227,7 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, ret = sal_lc->set_entry(lc_shard, *entry); if (ret < 0) { ldpp_dout(this, 0) << "RGWLC::process() failed to set entry on " - << lc_shard - << dendl; + << lc_shard << dendl; /* fatal, locked */ goto exit; } @@ -2261,40 +2235,35 @@ int RGWLC::process(int index, int max_lock_secs, LCWorker* worker, /* done with this shard */ if (head->get_marker().empty()) { - ldpp_dout(this, 5) << - "RGWLC::process() cycle finished lc_shard=" - << lc_shard - << dendl; + ldpp_dout(this, 5) << "RGWLC::process() cycle finished lc_shard=" + << lc_shard << dendl; head->set_shard_rollover_date(ceph_clock_now()); - ret = sal_lc->put_head(lc_shard, *head.get()); + ret = sal_lc->put_head(lc_shard, *head.get()); if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::process() failed to put head " - << lc_shard - << dendl; + ldpp_dout(this, 0) << "RGWLC::process() failed to put head " << lc_shard + << dendl; } goto exit; } - } while(1 && !once && !going_down()); + } while (1 && !once && !going_down()); exit: lock->unlock(); return 0; } -void RGWLC::start_processor() -{ +void RGWLC::start_processor() { auto maxw = cct->_conf->rgw_lc_max_worker; workers.reserve(maxw); for (int ix = 0; ix < maxw; ++ix) { - auto worker = - std::make_unique(this /* dpp */, cct, this, ix); + auto worker = + std::make_unique(this /* dpp */, cct, this, ix); worker->create((string{"lifecycle_thr_"} + to_string(ix)).c_str()); workers.emplace_back(std::move(worker)); } } -void RGWLC::stop_processor() -{ +void RGWLC::stop_processor() { down_flag = true; for (auto& worker : workers) 
{ worker->stop(); @@ -2303,61 +2272,54 @@ void RGWLC::stop_processor() workers.clear(); } -unsigned RGWLC::get_subsys() const -{ +unsigned RGWLC::get_subsys() const { return dout_subsys; } -std::ostream& RGWLC::gen_prefix(std::ostream& out) const -{ +std::ostream& RGWLC::gen_prefix(std::ostream& out) const { return out << "lifecycle: "; } -void RGWLC::LCWorker::stop() -{ +void RGWLC::LCWorker::stop() { std::lock_guard l{lock}; cond.notify_all(); } -bool RGWLC::going_down() -{ +bool RGWLC::going_down() { return down_flag; } -bool RGWLC::LCWorker::should_work(utime_t& now) -{ +bool RGWLC::LCWorker::should_work(utime_t& now) { int start_hour; int start_minute; int end_hour; int end_minute; string worktime = cct->_conf->rgw_lifecycle_work_time; - sscanf(worktime.c_str(),"%d:%d-%d:%d",&start_hour, &start_minute, - &end_hour, &end_minute); + sscanf( + worktime.c_str(), "%d:%d-%d:%d", &start_hour, &start_minute, &end_hour, + &end_minute + ); struct tm bdt; time_t tt = now.sec(); localtime_r(&tt, &bdt); if (cct->_conf->rgw_lc_debug_interval > 0) { - /* We're debugging, so say we can run */ - return true; - } else if ((bdt.tm_hour*60 + bdt.tm_min >= start_hour*60 + start_minute) && - (bdt.tm_hour*60 + bdt.tm_min <= end_hour*60 + end_minute)) { - return true; + /* We're debugging, so say we can run */ + return true; + } else if ((bdt.tm_hour * 60 + bdt.tm_min >= start_hour * 60 + start_minute) && (bdt.tm_hour * 60 + bdt.tm_min <= end_hour * 60 + end_minute)) { + return true; } else { - return false; + return false; } - } -int RGWLC::LCWorker::schedule_next_start_time(utime_t &start, utime_t& now) -{ +int RGWLC::LCWorker::schedule_next_start_time(utime_t& start, utime_t& now) { int secs; if (cct->_conf->rgw_lc_debug_interval > 0) { - secs = start + cct->_conf->rgw_lc_debug_interval - now; - if (secs < 0) - secs = 0; - return (secs); + secs = start + cct->_conf->rgw_lc_debug_interval - now; + if (secs < 0) secs = 0; + return (secs); } int start_hour; @@ -2365,8 +2327,10 @@ int 
RGWLC::LCWorker::schedule_next_start_time(utime_t &start, utime_t& now) int end_hour; int end_minute; string worktime = cct->_conf->rgw_lifecycle_work_time; - sscanf(worktime.c_str(),"%d:%d-%d:%d",&start_hour, &start_minute, &end_hour, - &end_minute); + sscanf( + worktime.c_str(), "%d:%d-%d:%d", &start_hour, &start_minute, &end_hour, + &end_minute + ); struct tm bdt; time_t tt = now.sec(); time_t nt; @@ -2377,30 +2341,29 @@ int RGWLC::LCWorker::schedule_next_start_time(utime_t &start, utime_t& now) nt = mktime(&bdt); secs = nt - tt; - return secs>0 ? secs : secs+24*60*60; + return secs > 0 ? secs : secs + 24 * 60 * 60; } -RGWLC::LCWorker::~LCWorker() -{ +RGWLC::LCWorker::~LCWorker() { delete workpool; } /* ~LCWorker */ void RGWLifecycleConfiguration::generate_test_instances( - list& o) -{ + list& o +) { o.push_back(new RGWLifecycleConfiguration); } -template -static int guard_lc_modify(const DoutPrefixProvider *dpp, - rgw::sal::Driver* driver, - rgw::sal::Lifecycle* sal_lc, - const rgw_bucket& bucket, const string& cookie, - const F& f) { - CephContext *cct = driver->ctx(); +template +static int guard_lc_modify( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, + rgw::sal::Lifecycle* sal_lc, const rgw_bucket& bucket, const string& cookie, + const F& f +) { + CephContext* cct = driver->ctx(); auto bucket_lc_key = get_bucket_lc_key(bucket); - string oid; + string oid; get_lc_oid(cct, bucket_lc_key, &oid); /* XXX it makes sense to take shard_id for a bucket_id? 
*/ @@ -2410,7 +2373,7 @@ static int guard_lc_modify(const DoutPrefixProvider *dpp, int max_lock_secs = cct->_conf->rgw_lc_lock_max_time; std::unique_ptr lock = - sal_lc->get_serializer(lc_index_lock_name, oid, cookie); + sal_lc->get_serializer(lc_index_lock_name, oid, cookie); utime_t time(max_lock_secs, 0); int ret; @@ -2420,34 +2383,34 @@ static int guard_lc_modify(const DoutPrefixProvider *dpp, do { ret = lock->try_lock(dpp, time, null_yield); if (ret == -EBUSY || ret == -EEXIST) { - ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " - << oid << ", retry in 100ms, ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid + << ", retry in 100ms, ret=" << ret << dendl; std::this_thread::sleep_for(std::chrono::milliseconds(100)); // the typical S3 client will time out in 60s - if(retries++ < 500) { - continue; + if (retries++ < 500) { + continue; } } if (ret < 0) { - ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " - << oid << ", ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid + << ", ret=" << ret << dendl; break; } ret = f(sal_lc, oid, *entry.get()); if (ret < 0) { - ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to set entry on " - << oid << ", ret=" << ret << dendl; + ldpp_dout(dpp, 0) << "RGWLC::RGWPutLC() failed to set entry on " << oid + << ", ret=" << ret << dendl; } break; - } while(true); + } while (true); lock->unlock(); return ret; } -int RGWLC::set_bucket_config(rgw::sal::Bucket* bucket, - const rgw::sal::Attrs& bucket_attrs, - RGWLifecycleConfiguration *config) -{ +int RGWLC::set_bucket_config( + rgw::sal::Bucket* bucket, const rgw::sal::Attrs& bucket_attrs, + RGWLifecycleConfiguration* config +) { int ret{0}; rgw::sal::Attrs attrs = bucket_attrs; if (config) { @@ -2457,8 +2420,7 @@ int RGWLC::set_bucket_config(rgw::sal::Bucket* bucket, config->encode(lc_bl); attrs[RGW_ATTR_LC] = std::move(lc_bl); - ret = - 
bucket->merge_and_store_attrs(this, attrs, null_yield); + ret = bucket->merge_and_store_attrs(this, attrs, null_yield); if (ret < 0) { return ret; } @@ -2466,20 +2428,21 @@ int RGWLC::set_bucket_config(rgw::sal::Bucket* bucket, rgw_bucket& b = bucket->get_key(); - - ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie, - [&](rgw::sal::Lifecycle* sal_lc, const string& oid, - rgw::sal::Lifecycle::LCEntry& entry) { - return sal_lc->set_entry(oid, entry); - }); + ret = guard_lc_modify( + this, driver, sal_lc.get(), b, cookie, + [&](rgw::sal::Lifecycle* sal_lc, const string& oid, + rgw::sal::Lifecycle::LCEntry& entry) { + return sal_lc->set_entry(oid, entry); + } + ); return ret; } -int RGWLC::remove_bucket_config(rgw::sal::Bucket* bucket, - const rgw::sal::Attrs& bucket_attrs, - bool merge_attrs) -{ +int RGWLC::remove_bucket_config( + rgw::sal::Bucket* bucket, const rgw::sal::Attrs& bucket_attrs, + bool merge_attrs +) { rgw::sal::Attrs attrs = bucket_attrs; rgw_bucket& b = bucket->get_key(); int ret{0}; @@ -2489,37 +2452,39 @@ int RGWLC::remove_bucket_config(rgw::sal::Bucket* bucket, ret = bucket->merge_and_store_attrs(this, attrs, null_yield); if (ret < 0) { - ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" - << b.name << " returned err=" << ret << dendl; + ldpp_dout( + this, 0 + ) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" + << b.name << " returned err=" << ret << dendl; return ret; } } - ret = guard_lc_modify(this, driver, sal_lc.get(), b, cookie, - [&](rgw::sal::Lifecycle* sal_lc, const string& oid, - rgw::sal::Lifecycle::LCEntry& entry) { - return sal_lc->rm_entry(oid, entry); - }); + ret = guard_lc_modify( + this, driver, sal_lc.get(), b, cookie, + [&](rgw::sal::Lifecycle* sal_lc, const string& oid, + rgw::sal::Lifecycle::LCEntry& entry) { + return sal_lc->rm_entry(oid, entry); + } + ); return ret; } /* RGWLC::remove_bucket_config */ -RGWLC::~RGWLC() -{ +RGWLC::~RGWLC() { stop_processor(); finalize(); } /* ~RGWLC() 
*/ namespace rgw::lc { -int fix_lc_shard_entry(const DoutPrefixProvider *dpp, - rgw::sal::Driver* driver, - rgw::sal::Lifecycle* sal_lc, - rgw::sal::Bucket* bucket) -{ +int fix_lc_shard_entry( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, + rgw::sal::Lifecycle* sal_lc, rgw::sal::Bucket* bucket +) { if (auto aiter = bucket->get_attrs().find(RGW_ATTR_LC); aiter == bucket->get_attrs().end()) { - return 0; // No entry, nothing to fix + return 0; // No entry, nothing to fix } auto bucket_lc_key = get_bucket_lc_key(bucket->get_key()); @@ -2535,62 +2500,56 @@ int fix_lc_shard_entry(const DoutPrefixProvider *dpp, int ret = sal_lc->get_entry(lc_oid, bucket_lc_key, &entry); if (ret == 0) { ldpp_dout(dpp, 5) << "Entry already exists, nothing to do" << dendl; - return ret; // entry is already existing correctly set to marker + return ret; // entry is already existing correctly set to marker } ldpp_dout(dpp, 5) << "lc_get_entry errored ret code=" << ret << dendl; if (ret == -ENOENT) { - ldpp_dout(dpp, 1) << "No entry for bucket=" << bucket - << " creating " << dendl; + ldpp_dout(dpp, 1) << "No entry for bucket=" << bucket << " creating " + << dendl; // TODO: we have too many ppl making cookies like this! 
char cookie_buf[COOKIE_LEN + 1]; gen_rand_alphanumeric(driver->ctx(), cookie_buf, sizeof(cookie_buf) - 1); std::string cookie = cookie_buf; - ret = guard_lc_modify(dpp, - driver, sal_lc, bucket->get_key(), cookie, - [&lc_oid](rgw::sal::Lifecycle* slc, - const string& oid, - rgw::sal::Lifecycle::LCEntry& entry) { - return slc->set_entry(lc_oid, entry); - }); - + ret = guard_lc_modify( + dpp, driver, sal_lc, bucket->get_key(), cookie, + [&lc_oid]( + rgw::sal::Lifecycle* slc, const string& oid, + rgw::sal::Lifecycle::LCEntry& entry + ) { return slc->set_entry(lc_oid, entry); } + ); } return ret; } std::string s3_expiration_header( - DoutPrefixProvider* dpp, - const rgw_obj_key& obj_key, - const RGWObjTags& obj_tagset, - const ceph::real_time& mtime, - const std::map& bucket_attrs) -{ + DoutPrefixProvider* dpp, const rgw_obj_key& obj_key, + const RGWObjTags& obj_tagset, const ceph::real_time& mtime, + const std::map& bucket_attrs +) { CephContext* cct = dpp->get_cct(); RGWLifecycleConfiguration config(cct); std::string hdr{""}; const auto& aiter = bucket_attrs.find(RGW_ATTR_LC); - if (aiter == bucket_attrs.end()) - return hdr; + if (aiter == bucket_attrs.end()) return hdr; bufferlist::const_iterator iter{&aiter->second}; try { - config.decode(iter); + config.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(dpp, 0) << __func__ - << "() decode life cycle config failed" - << dendl; - return hdr; + ldpp_dout(dpp, 0) << __func__ << "() decode life cycle config failed" + << dendl; + return hdr; } /* catch */ /* dump tags at debug level 16 */ RGWObjTags::tag_map_t obj_tag_map = obj_tagset.get_tags(); if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 16)) { for (const auto& elt : obj_tag_map) { - ldpp_dout(dpp, 16) << __func__ - << "() key=" << elt.first << " val=" << elt.second - << dendl; + ldpp_dout(dpp, 16) << __func__ << "() key=" << elt.first + << " val=" << elt.second << dendl; } } @@ -2602,19 +2561,18 @@ std::string s3_expiration_header( const auto& 
rule = ri.second; auto& id = rule.get_id(); auto& filter = rule.get_filter(); - auto& prefix = filter.has_prefix() ? filter.get_prefix(): rule.get_prefix(); + auto& prefix = + filter.has_prefix() ? filter.get_prefix() : rule.get_prefix(); auto& expiration = rule.get_expiration(); auto& noncur_expiration = rule.get_noncur_expiration(); - ldpp_dout(dpp, 10) << "rule: " << ri.first - << " prefix: " << prefix - << " expiration: " - << " date: " << expiration.get_date() - << " days: " << expiration.get_days() - << " noncur_expiration: " - << " date: " << noncur_expiration.get_date() - << " days: " << noncur_expiration.get_days() - << dendl; + ldpp_dout(dpp, 10) << "rule: " << ri.first << " prefix: " << prefix + << " expiration: " + << " date: " << expiration.get_date() + << " days: " << expiration.get_days() + << " noncur_expiration: " + << " date: " << noncur_expiration.get_date() + << " days: " << noncur_expiration.get_days() << dendl; /* skip if rule !enabled * if rule has prefix, skip iff object !match prefix @@ -2627,58 +2585,56 @@ std::string s3_expiration_header( * if the date accum has a value, format it into hdr */ - if (! rule.is_enabled()) - continue; + if (!rule.is_enabled()) continue; - if(! prefix.empty()) { - if (! 
boost::starts_with(obj_key.name, prefix)) - continue; + if (!prefix.empty()) { + if (!boost::starts_with(obj_key.name, prefix)) continue; } if (filter.has_tags()) { bool tag_match = false; const RGWObjTags& rule_tagset = filter.get_tags(); for (auto& tag : rule_tagset.get_tags()) { - /* remember, S3 tags are {key,value} tuples */ + /* remember, S3 tags are {key,value} tuples */ tag_match = true; auto obj_tag = obj_tag_map.find(tag.first); if (obj_tag == obj_tag_map.end() || obj_tag->second != tag.second) { - ldpp_dout(dpp, 10) << "tag does not match obj_key=" << obj_key - << " rule_id=" << id - << " tag=" << tag - << dendl; - tag_match = false; - break; - } + ldpp_dout(dpp, 10) << "tag does not match obj_key=" << obj_key + << " rule_id=" << id << " tag=" << tag << dendl; + tag_match = false; + break; + } } - if (! tag_match) - continue; + if (!tag_match) continue; } // compute a uniform expiration date boost::optional rule_expiration_date; const LCExpiration& rule_expiration = - (obj_key.instance.empty()) ? expiration : noncur_expiration; + (obj_key.instance.empty()) ? expiration : noncur_expiration; if (rule_expiration.has_date()) { - rule_expiration_date = - boost::optional( - ceph::from_iso_8601(rule.get_expiration().get_date())); + rule_expiration_date = boost::optional( + ceph::from_iso_8601(rule.get_expiration().get_date()) + ); } else { if (rule_expiration.has_days()) { - rule_expiration_date = - boost::optional( - mtime + make_timespan(double(rule_expiration.get_days())*24*60*60 - ceph::real_clock::to_time_t(mtime)%(24*60*60) + 24*60*60)); + rule_expiration_date = boost::optional( + mtime + make_timespan( + double(rule_expiration.get_days()) * 24 * 60 * 60 - + ceph::real_clock::to_time_t(mtime) % (24 * 60 * 60) + + 24 * 60 * 60 + ) + ); } } // update earliest expiration if (rule_expiration_date) { - if ((! 
expiration_date) || - (*expiration_date > *rule_expiration_date)) { - expiration_date = - boost::optional(rule_expiration_date); - rule_id = boost::optional(id); + if ((!expiration_date) || (*expiration_date > *rule_expiration_date)) { + expiration_date = + boost::optional(rule_expiration_date); + rule_id = boost::optional(id); } } } @@ -2688,14 +2644,16 @@ std::string s3_expiration_header( // Fri, 23 Dec 2012 00:00:00 GMT char exp_buf[100]; time_t exp = ceph::real_clock::to_time_t(*expiration_date); - if (std::strftime(exp_buf, sizeof(exp_buf), - "%a, %d %b %Y %T %Z", std::gmtime(&exp))) { - hdr = fmt::format("expiry-date=\"{0}\", rule-id=\"{1}\"", exp_buf, - *rule_id); + if (std::strftime( + exp_buf, sizeof(exp_buf), "%a, %d %b %Y %T %Z", std::gmtime(&exp) + )) { + hdr = fmt::format( + "expiry-date=\"{0}\", rule-id=\"{1}\"", exp_buf, *rule_id + ); } else { - ldpp_dout(dpp, 0) << __func__ << - "() strftime of life cycle expiration header failed" - << dendl; + ldpp_dout(dpp, 0) << __func__ + << "() strftime of life cycle expiration header failed" + << dendl; } } @@ -2704,26 +2662,22 @@ std::string s3_expiration_header( } /* rgwlc_s3_expiration_header */ bool s3_multipart_abort_header( - DoutPrefixProvider* dpp, - const rgw_obj_key& obj_key, - const ceph::real_time& mtime, - const std::map& bucket_attrs, - ceph::real_time& abort_date, - std::string& rule_id) -{ + DoutPrefixProvider* dpp, const rgw_obj_key& obj_key, + const ceph::real_time& mtime, + const std::map& bucket_attrs, + ceph::real_time& abort_date, std::string& rule_id +) { CephContext* cct = dpp->get_cct(); RGWLifecycleConfiguration config(cct); const auto& aiter = bucket_attrs.find(RGW_ATTR_LC); - if (aiter == bucket_attrs.end()) - return false; + if (aiter == bucket_attrs.end()) return false; bufferlist::const_iterator iter{&aiter->second}; try { config.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(dpp, 0) << __func__ - << "() decode life cycle config failed" + ldpp_dout(dpp, 0) << 
__func__ << "() decode life cycle config failed" << dendl; return false; } /* catch */ @@ -2735,27 +2689,31 @@ bool s3_multipart_abort_header( const auto& rule = ri.second; const auto& id = rule.get_id(); const auto& filter = rule.get_filter(); - const auto& prefix = filter.has_prefix()?filter.get_prefix():rule.get_prefix(); + const auto& prefix = + filter.has_prefix() ? filter.get_prefix() : rule.get_prefix(); const auto& mp_expiration = rule.get_mp_expiration(); if (!rule.is_enabled()) { continue; } - if(!prefix.empty() && !boost::starts_with(obj_key.name, prefix)) { + if (!prefix.empty() && !boost::starts_with(obj_key.name, prefix)) { continue; } std::optional rule_abort_date; if (mp_expiration.has_days()) { rule_abort_date = std::optional( - mtime + make_timespan(mp_expiration.get_days()*24*60*60 - ceph::real_clock::to_time_t(mtime)%(24*60*60) + 24*60*60)); + mtime + + make_timespan( + mp_expiration.get_days() * 24 * 60 * 60 - + ceph::real_clock::to_time_t(mtime) % (24 * 60 * 60) + 24 * 60 * 60 + ) + ); } // update earliest abort date if (rule_abort_date) { - if ((! 
abort_date_tmp) || - (*abort_date_tmp > *rule_abort_date)) { - abort_date_tmp = - std::optional(rule_abort_date); + if ((!abort_date_tmp) || (*abort_date_tmp > *rule_abort_date)) { + abort_date_tmp = std::optional(rule_abort_date); rule_id_tmp = std::optional(id); } } @@ -2771,8 +2729,7 @@ bool s3_multipart_abort_header( } /* namespace rgw::lc */ -void lc_op::dump(Formatter *f) const -{ +void lc_op::dump(Formatter* f) const { f->dump_bool("status", status); f->dump_bool("dm_expiration", dm_expiration); @@ -2787,7 +2744,7 @@ void lc_op::dump(Formatter *f) const f->dump_object("obj_tags", *obj_tags); } f->open_object_section("transitions"); - for(auto& [storage_class, transition] : transitions) { + for (auto& [storage_class, transition] : transitions) { f->dump_object(storage_class, transition); } f->close_section(); @@ -2799,8 +2756,7 @@ void lc_op::dump(Formatter *f) const f->close_section(); } -void LCFilter::dump(Formatter *f) const -{ +void LCFilter::dump(Formatter* f) const { f->dump_string("prefix", prefix); f->dump_object("obj_tags", obj_tags); if (have_flag(LCFlagType::ArchiveZone)) { @@ -2808,14 +2764,12 @@ void LCFilter::dump(Formatter *f) const } } -void LCExpiration::dump(Formatter *f) const -{ +void LCExpiration::dump(Formatter* f) const { f->dump_string("days", days); f->dump_string("date", date); } -void LCRule::dump(Formatter *f) const -{ +void LCRule::dump(Formatter* f) const { f->dump_string("id", id); f->dump_string("prefix", prefix); f->dump_string("status", status); @@ -2837,9 +2791,7 @@ void LCRule::dump(Formatter *f) const f->dump_bool("dm_expiration", dm_expiration); } - -void RGWLifecycleConfiguration::dump(Formatter *f) const -{ +void RGWLifecycleConfiguration::dump(Formatter* f) const { f->open_object_section("prefix_map"); for (auto& prefix : prefix_map) { f->dump_object(prefix.first.c_str(), prefix.second); @@ -2857,4 +2809,3 @@ void RGWLifecycleConfiguration::dump(Formatter *f) const } f->close_section(); } - diff --git 
a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 513cb20bc69a23..1c17a358c9a5dd 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -1,72 +1,66 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp +#include "rgw_op.h" + #include #include -#include #include -#include -#include - #include #include #include +#include +#include +#include -#include "include/scope_guard.h" +#include "cls/lock/cls_lock_client.h" +#include "cls/rgw/cls_rgw_client.h" #include "common/Clock.h" #include "common/armor.h" +#include "common/ceph_json.h" #include "common/errno.h" #include "common/mime.h" -#include "common/utf8.h" -#include "common/ceph_json.h" #include "common/static_ptr.h" -#include "rgw_tracer.h" - -#include "rgw_rados.h" -#include "rgw_zone.h" -#include "rgw_op.h" -#include "rgw_rest.h" +#include "common/utf8.h" +#include "compressor/Compressor.h" +#include "include/ceph_assert.h" +#include "include/scope_guard.h" #include "rgw_acl.h" #include "rgw_acl_s3.h" #include "rgw_acl_swift.h" -#include "rgw_user.h" #include "rgw_bucket.h" +#include "rgw_client_io.h" +#include "rgw_compression.h" +#include "rgw_cors.h" +#include "rgw_cors_s3.h" +#include "rgw_crypt.h" #include "rgw_log.h" +#include "rgw_lua.h" +#include "rgw_lua_data_filter.h" #include "rgw_multi.h" #include "rgw_multi_del.h" -#include "rgw_cors.h" -#include "rgw_cors_s3.h" +#include "rgw_notify.h" +#include "rgw_notify_event_type.h" +#include "rgw_perf_counters.h" +#include "rgw_process_env.h" +#include "rgw_putobj_processor.h" +#include "rgw_rados.h" +#include "rgw_rest.h" #include "rgw_rest_conn.h" #include "rgw_rest_s3.h" -#include "rgw_tar.h" -#include "rgw_client_io.h" -#include "rgw_compression.h" #include "rgw_role.h" -#include "rgw_tag_s3.h" -#include "rgw_putobj_processor.h" -#include "rgw_crypt.h" -#include "rgw_perf_counters.h" -#include "rgw_process_env.h" -#include "rgw_notify.h" -#include "rgw_notify_event_type.h" #include "rgw_sal.h" #include 
"rgw_sal_rados.h" +#include "rgw_tag_s3.h" +#include "rgw_tar.h" #include "rgw_torrent.h" -#include "rgw_lua_data_filter.h" -#include "rgw_lua.h" - -#include "services/svc_zone.h" +#include "rgw_tracer.h" +#include "rgw_user.h" +#include "rgw_zone.h" #include "services/svc_quota.h" #include "services/svc_sys_obj.h" - -#include "cls/lock/cls_lock_client.h" -#include "cls/rgw/cls_rgw_client.h" - - -#include "include/ceph_assert.h" - -#include "compressor/Compressor.h" +#include "services/svc_zone.h" #ifdef WITH_ARROW_FLIGHT #include "rgw_flight.h" @@ -88,9 +82,9 @@ using namespace std; using namespace librados; -using ceph::crypto::MD5; -using boost::optional; using boost::none; +using boost::optional; +using ceph::crypto::MD5; using rgw::ARN; using rgw::IAM::Effect; @@ -99,7 +93,10 @@ using rgw::IAM::Policy; static string mp_ns = RGW_OBJ_NS_MULTIPART; static string shadow_ns = RGW_OBJ_NS_SHADOW; -static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name); +static void forward_req_info( + const DoutPrefixProvider* dpp, CephContext* cct, req_info& info, + const std::string& bucket_name +); static MultipartMetaFilter mp_filter; @@ -109,8 +106,7 @@ static constexpr auto S3_EXISTING_OBJTAG = "s3:ExistingObjectTag"; static constexpr auto S3_RESOURCE_TAG = "s3:ResourceTag"; static constexpr auto S3_RUNTIME_RESOURCE_VAL = "${s3:ResourceTag"; -int RGWGetObj::parse_range(void) -{ +int RGWGetObj::parse_range(void) { int r = -ERANGE; string rs(range_str); string ofs_str; @@ -122,24 +118,18 @@ int RGWGetObj::parse_range(void) size_t pos = rs.find("bytes="); if (pos == string::npos) { pos = 0; - while (isspace(rs[pos])) - pos++; + while (isspace(rs[pos])) pos++; int end = pos; - while (isalpha(rs[end])) - end++; - if (strncasecmp(rs.c_str(), "bytes", end - pos) != 0) - return 0; - while (isspace(rs[end])) - end++; - if (rs[end] != '=') - return 0; + while (isalpha(rs[end])) end++; + if (strncasecmp(rs.c_str(), 
"bytes", end - pos) != 0) return 0; + while (isspace(rs[end])) end++; + if (rs[end] != '=') return 0; rs = rs.substr(end + 1); } else { rs = rs.substr(pos + 6); /* size of("bytes=") */ } pos = rs.find('-'); - if (pos == string::npos) - goto done; + if (pos == string::npos) goto done; partial_content = true; @@ -147,19 +137,17 @@ int RGWGetObj::parse_range(void) end_str = rs.substr(pos + 1); if (end_str.length()) { end = atoll(end_str.c_str()); - if (end < 0) - goto done; + if (end < 0) goto done; } if (ofs_str.length()) { ofs = atoll(ofs_str.c_str()); - } else { // RFC2616 suffix-byte-range-spec + } else { // RFC2616 suffix-byte-range-spec ofs = -end; end = -1; } - if (end >= 0 && end < ofs) - goto done; + if (end >= 0 && end < ofs) goto done; range_parsed = true; return 0; @@ -169,40 +157,39 @@ int RGWGetObj::parse_range(void) partial_content = false; ofs = 0; end = -1; - range_parsed = false; // allow retry + range_parsed = false; // allow retry r = 0; } return r; } -static int decode_policy(const DoutPrefixProvider *dpp, - CephContext *cct, - bufferlist& bl, - RGWAccessControlPolicy *policy) -{ +static int decode_policy( + const DoutPrefixProvider* dpp, CephContext* cct, bufferlist& bl, + RGWAccessControlPolicy* policy +) { auto iter = bl.cbegin(); try { policy->decode(iter); } catch (buffer::error& err) { - ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" + << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; - RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); + RGWAccessControlPolicy_S3* s3policy = + static_cast(policy); s3policy->to_xml(*_dout); *_dout << dendl; } return 0; } - -static int get_user_policy_from_attr(const DoutPrefixProvider *dpp, - CephContext * const cct, - map& attrs, - RGWAccessControlPolicy& policy /* out */) -{ +static int 
get_user_policy_from_attr( + const DoutPrefixProvider* dpp, CephContext* const cct, + map& attrs, RGWAccessControlPolicy& policy /* out */ +) { auto aiter = attrs.find(RGW_ATTR_ACL); if (aiter != attrs.end()) { int ret = decode_policy(dpp, cct, aiter->second, &policy); @@ -223,43 +210,37 @@ static int get_user_policy_from_attr(const DoutPrefixProvider *dpp, * object: name of the object to get the ACL for. * Returns: 0 on success, -ERR# otherwise. */ -int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp, - CephContext *cct, - rgw::sal::Driver* driver, - RGWBucketInfo& bucket_info, - map& bucket_attrs, - RGWAccessControlPolicy *policy, - optional_yield y) -{ +int rgw_op_get_bucket_policy_from_attr( + const DoutPrefixProvider* dpp, CephContext* cct, rgw::sal::Driver* driver, + RGWBucketInfo& bucket_info, map& bucket_attrs, + RGWAccessControlPolicy* policy, optional_yield y +) { map::iterator aiter = bucket_attrs.find(RGW_ATTR_ACL); if (aiter != bucket_attrs.end()) { int ret = decode_policy(dpp, cct, aiter->second, policy); - if (ret < 0) - return ret; + if (ret < 0) return ret; } else { - ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl; + ldpp_dout( + dpp, 0 + ) << "WARNING: couldn't find acl header for bucket, generating default" + << dendl; std::unique_ptr user = driver->get_user(bucket_info.owner); /* object exists, but policy is broken */ int r = user->load_user(dpp, y); - if (r < 0) - return r; + if (r < 0) return r; policy->create_default(bucket_info.owner, user->get_display_name()); } return 0; } -static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, - CephContext *cct, - rgw::sal::Driver* driver, - RGWBucketInfo& bucket_info, - map& bucket_attrs, - RGWAccessControlPolicy *policy, - string *storage_class, - rgw::sal::Object* obj, - optional_yield y) -{ +static int get_obj_policy_from_attr( + const DoutPrefixProvider* dpp, CephContext* cct, rgw::sal::Driver* driver, + RGWBucketInfo& 
bucket_info, map& bucket_attrs, + RGWAccessControlPolicy* policy, string* storage_class, + rgw::sal::Object* obj, optional_yield y +) { bufferlist bl; int ret = 0; @@ -268,15 +249,16 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, ret = rop->get_attr(dpp, RGW_ATTR_ACL, bl, y); if (ret >= 0) { ret = decode_policy(dpp, cct, bl, policy); - if (ret < 0) - return ret; + if (ret < 0) return ret; } else if (ret == -ENODATA) { /* object exists, but policy is broken */ - ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl; + ldpp_dout( + dpp, 0 + ) << "WARNING: couldn't find acl header for object, generating default" + << dendl; std::unique_ptr user = driver->get_user(bucket_info.owner); ret = user->load_user(dpp, y); - if (ret < 0) - return ret; + if (ret < 0) return ret; policy->create_default(bucket_info.owner, user->get_display_name()); } @@ -294,10 +276,9 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp, return ret; } - -static boost::optional get_iam_policy_from_attr(CephContext* cct, - map& attrs, - const string& tenant) { +static boost::optional get_iam_policy_from_attr( + CephContext* cct, map& attrs, const string& tenant +) { auto i = attrs.find(RGW_ATTR_IAM_POLICY); if (i != attrs.end()) { return Policy(cct, tenant, i->second, false); @@ -307,10 +288,8 @@ static boost::optional get_iam_policy_from_attr(CephContext* cct, } static boost::optional -get_public_access_conf_from_attr(const map& attrs) -{ - if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS); - aiter != attrs.end()) { +get_public_access_conf_from_attr(const map& attrs) { + if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS); aiter != attrs.end()) { bufferlist::const_iterator iter{&aiter->second}; PublicAccessBlockConfiguration access_conf; try { @@ -323,35 +302,31 @@ get_public_access_conf_from_attr(const map& attrs) return boost::none; } -vector get_iam_user_policy_from_attr(CephContext* cct, - map& attrs, - const 
string& tenant) { +vector get_iam_user_policy_from_attr( + CephContext* cct, map& attrs, const string& tenant +) { vector policies; if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) { - bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY]; - map policy_map; - decode(policy_map, out_bl); - for (auto& it : policy_map) { - bufferlist bl = bufferlist::static_from_string(it.second); - Policy p(cct, tenant, bl, false); - policies.push_back(std::move(p)); - } + bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY]; + map policy_map; + decode(policy_map, out_bl); + for (auto& it : policy_map) { + bufferlist bl = bufferlist::static_from_string(it.second); + Policy p(cct, tenant, bl, false); + policies.push_back(std::move(p)); + } } return policies; } -static int read_bucket_policy(const DoutPrefixProvider *dpp, - rgw::sal::Driver* driver, - req_state *s, - RGWBucketInfo& bucket_info, - map& bucket_attrs, - RGWAccessControlPolicy *policy, - rgw_bucket& bucket, - optional_yield y) -{ +static int read_bucket_policy( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, req_state* s, + RGWBucketInfo& bucket_info, map& bucket_attrs, + RGWAccessControlPolicy* policy, rgw_bucket& bucket, optional_yield y +) { if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name - << " is suspended" << dendl; + << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -359,27 +334,23 @@ static int read_bucket_policy(const DoutPrefixProvider *dpp, return 0; } - int ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, policy, y); + int ret = rgw_op_get_bucket_policy_from_attr( + dpp, s->cct, driver, bucket_info, bucket_attrs, policy, y + ); if (ret == -ENOENT) { - ret = -ERR_NO_SUCH_BUCKET; + ret = -ERR_NO_SUCH_BUCKET; } return ret; } -static int read_obj_policy(const DoutPrefixProvider *dpp, - rgw::sal::Driver* driver, - req_state *s, - RGWBucketInfo& bucket_info, - map& 
bucket_attrs, - RGWAccessControlPolicy* acl, - string *storage_class, - boost::optional& policy, - rgw::sal::Bucket* bucket, - rgw::sal::Object* object, - optional_yield y, - bool copy_src=false) -{ +static int read_obj_policy( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, req_state* s, + RGWBucketInfo& bucket_info, map& bucket_attrs, + RGWAccessControlPolicy* acl, string* storage_class, + boost::optional& policy, rgw::sal::Bucket* bucket, + rgw::sal::Object* object, optional_yield y, bool copy_src = false +) { string upload_id; upload_id = s->info.args.get("uploadId"); std::unique_ptr mpobj; @@ -387,7 +358,7 @@ static int read_obj_policy(const DoutPrefixProvider *dpp, if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { ldpp_dout(dpp, 0) << "NOTICE: bucket " << bucket_info.bucket.name - << " is suspended" << dendl; + << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -403,43 +374,48 @@ static int read_obj_policy(const DoutPrefixProvider *dpp, } policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant()); - int ret = get_obj_policy_from_attr(dpp, s->cct, driver, bucket_info, - bucket_attrs, acl, storage_class, object, - s->yield); + int ret = get_obj_policy_from_attr( + dpp, s->cct, driver, bucket_info, bucket_attrs, acl, storage_class, + object, s->yield + ); if (ret == -ENOENT) { /* object does not exist checking the bucket's ACL to make sure that we send a proper error code */ RGWAccessControlPolicy bucket_policy(s->cct); - ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info, bucket_attrs, &bucket_policy, y); + ret = rgw_op_get_bucket_policy_from_attr( + dpp, s->cct, driver, bucket_info, bucket_attrs, &bucket_policy, y + ); if (ret < 0) { return ret; } const rgw_user& bucket_owner = bucket_policy.get_owner().get_id(); if (bucket_owner.compare(s->user->get_id()) != 0 && - ! 
s->auth.identity->is_admin_of(bucket_owner)) { - auto r = eval_identity_or_session_policies(dpp, s->iam_user_policies, s->env, - rgw::IAM::s3ListBucket, ARN(bucket->get_key())); - if (r == Effect::Allow) - return -ENOENT; - if (r == Effect::Deny) - return -EACCES; + !s->auth.identity->is_admin_of(bucket_owner)) { + auto r = eval_identity_or_session_policies( + dpp, s->iam_user_policies, s->env, rgw::IAM::s3ListBucket, + ARN(bucket->get_key()) + ); + if (r == Effect::Allow) return -ENOENT; + if (r == Effect::Deny) return -EACCES; if (policy) { ARN b_arn(bucket->get_key()); - r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, b_arn); - if (r == Effect::Allow) - return -ENOENT; - if (r == Effect::Deny) - return -EACCES; + r = policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3ListBucket, b_arn + ); + if (r == Effect::Allow) return -ENOENT; + if (r == Effect::Deny) return -EACCES; } - if (! s->session_policies.empty()) { - r = eval_identity_or_session_policies(dpp, s->session_policies, s->env, - rgw::IAM::s3ListBucket, ARN(bucket->get_key())); - if (r == Effect::Allow) - return -ENOENT; - if (r == Effect::Deny) - return -EACCES; + if (!s->session_policies.empty()) { + r = eval_identity_or_session_policies( + dpp, s->session_policies, s->env, rgw::IAM::s3ListBucket, + ARN(bucket->get_key()) + ); + if (r == Effect::Allow) return -ENOENT; + if (r == Effect::Deny) return -EACCES; } - if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ)) + if (!bucket_policy.verify_permission( + s, *s->auth.identity, s->perm_mask, RGW_PERM_READ + )) ret = -EACCES; else ret = -ENOENT; @@ -457,22 +433,27 @@ static int read_obj_policy(const DoutPrefixProvider *dpp, * only_bucket: If true, reads the user and bucket ACLs rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. 
*/ -int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, req_state* s, optional_yield y) -{ +int rgw_build_bucket_policies( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, req_state* s, + optional_yield y +) { int ret = 0; string bi = s->info.args.get(RGW_SYS_PARAM_PREFIX "bucket-instance"); if (!bi.empty()) { // note: overwrites s->bucket_name, may include a tenant/ - ret = rgw_bucket_parse_bucket_instance(bi, &s->bucket_name, &s->bucket_instance_id, &s->bucket_instance_shard_id); + ret = rgw_bucket_parse_bucket_instance( + bi, &s->bucket_name, &s->bucket_instance_id, + &s->bucket_instance_shard_id + ); if (ret < 0) { return ret; } } - if(s->dialect.compare("s3") == 0) { + if (s->dialect.compare("s3") == 0) { s->bucket_acl = std::make_unique(s->cct); - } else if(s->dialect.compare("swift") == 0) { + } else if (s->dialect.compare("swift") == 0) { /* We aren't allocating the account policy for those operations using * the Swift's infrastructure that don't really need req_state::user. * Typical example here is the implementation of /info. 
*/ @@ -487,11 +468,13 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d /* check if copy source is within the current domain */ if (!s->src_bucket_name.empty()) { std::unique_ptr src_bucket; - ret = driver->get_bucket(dpp, nullptr, - rgw_bucket(s->src_tenant_name, - s->src_bucket_name, - s->bucket_instance_id), - &src_bucket, y); + ret = driver->get_bucket( + dpp, nullptr, + rgw_bucket( + s->src_tenant_name, s->src_bucket_name, s->bucket_instance_id + ), + &src_bucket, y + ); if (ret == 0) { string& zonegroup = src_bucket->get_info().zonegroup; s->local_source = driver->get_zone()->get_zonegroup().equals(zonegroup); @@ -502,8 +485,8 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d rgw_user uid; std::string display_name; } acct_acl_user = { - s->user->get_id(), - s->user->get_display_name(), + s->user->get_id(), + s->user->get_display_name(), }; if (!s->bucket_name.empty()) { @@ -511,14 +494,23 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d /* This is the only place that s->bucket is created. It should never be * overwritten. 
*/ - ret = driver->get_bucket(dpp, s->user.get(), rgw_bucket(rgw_bucket_key(s->bucket_tenant, s->bucket_name, s->bucket_instance_id)), &s->bucket, y); + ret = driver->get_bucket( + dpp, s->user.get(), + rgw_bucket(rgw_bucket_key( + s->bucket_tenant, s->bucket_name, s->bucket_instance_id + )), + &s->bucket, y + ); if (ret < 0) { if (ret != -ENOENT) { - string bucket_log; - bucket_log = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); - ldpp_dout(dpp, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" - << bucket_log << ")" << dendl; - return ret; + string bucket_log; + bucket_log = + rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); + ldpp_dout( + dpp, 0 + ) << "NOTICE: couldn't get bucket from bucket_name (name=" + << bucket_log << ")" << dendl; + return ret; } s->bucket_exists = false; return -ERR_NO_SUCH_BUCKET; @@ -526,15 +518,16 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d if (!rgw::sal::Object::empty(s->object.get())) { s->object->set_bucket(s->bucket.get()); } - + s->bucket_mtime = s->bucket->get_modification_time(); s->bucket_attrs = s->bucket->get_attrs(); - ret = read_bucket_policy(dpp, driver, s, s->bucket->get_info(), - s->bucket->get_attrs(), - s->bucket_acl.get(), s->bucket->get_key(), y); + ret = read_bucket_policy( + dpp, driver, s, s->bucket->get_info(), s->bucket->get_attrs(), + s->bucket_acl.get(), s->bucket->get_key(), y + ); acct_acl_user = { - s->bucket->get_info().owner, - s->bucket_acl->get_owner().get_display_name(), + s->bucket->get_info().owner, + s->bucket_acl->get_owner().get_display_name(), }; s->bucket_owner = s->bucket_acl->get_owner(); @@ -549,16 +542,20 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d ret = r; } - if (!driver->get_zone()->get_zonegroup().equals(s->bucket->get_info().zonegroup)) { + if (!driver->get_zone()->get_zonegroup().equals( + s->bucket->get_info().zonegroup + )) { ldpp_dout(dpp, 0) << "NOTICE: request 
for data in a different zonegroup (" - << s->bucket->get_info().zonegroup << " != " - << driver->get_zone()->get_zonegroup().get_id() << ")" << dendl; + << s->bucket->get_info().zonegroup << " != " + << driver->get_zone()->get_zonegroup().get_id() << ")" + << dendl; /* we now need to make sure that the operation actually requires copy source, that is * it's a copy operation */ - if (driver->get_zone()->get_zonegroup().is_master_zonegroup() && s->system_request) { + if (driver->get_zone()->get_zonegroup().is_master_zonegroup() && + s->system_request) { /*If this is the master, don't redirect*/ - } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) { + } else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION) { /* If op is get bucket location, don't redirect */ } else if (!s->local_source || (s->op != OP_PUT && s->op != OP_COPY) || @@ -572,20 +569,25 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d s->dest_placement.inherit_from(s->bucket->get_placement_rule()); if (!driver->valid_placement(s->dest_placement)) { - ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " << s->dest_placement.to_str() << dendl; + ldpp_dout(dpp, 0) << "NOTICE: invalid dest placement: " + << s->dest_placement.to_str() << dendl; return -EINVAL; } - s->bucket_access_conf = get_public_access_conf_from_attr(s->bucket->get_attrs()); + s->bucket_access_conf = + get_public_access_conf_from_attr(s->bucket->get_attrs()); } /* handle user ACL only for those APIs which support it */ if (s->user_acl) { - std::unique_ptr acl_user = driver->get_user(acct_acl_user.uid); + std::unique_ptr acl_user = + driver->get_user(acct_acl_user.uid); ret = acl_user->read_attrs(dpp, y); if (!ret) { - ret = get_user_policy_from_attr(dpp, s->cct, acl_user->get_attrs(), *s->user_acl); + ret = get_user_policy_from_attr( + dpp, s->cct, acl_user->get_attrs(), *s->user_acl + ); } if (-ENOENT == ret) { /* In already existing clusters users won't have ACL. 
In such case @@ -595,40 +597,48 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d * 1. if we try to reach an existing bucket, its owner is considered * as account owner. * 2. otherwise account owner is identity stored in s->user->user_id. */ - s->user_acl->create_default(acct_acl_user.uid, - acct_acl_user.display_name); + s->user_acl->create_default( + acct_acl_user.uid, acct_acl_user.display_name + ); ret = 0; } else if (ret < 0) { ldpp_dout(dpp, 0) << "NOTICE: couldn't get user attrs for handling ACL " - "(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl; + "(user_id=" + << s->user->get_id() << ", ret=" << ret << ")" << dendl; return ret; } } // We don't need user policies in case of STS token returned by AssumeRole, // hence the check for user type - if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) { + if (!s->user->get_id().empty() && + s->auth.identity->get_identity_type() != TYPE_ROLE) { try { ret = s->user->read_attrs(dpp, y); if (ret == 0) { - auto user_policies = get_iam_user_policy_from_attr(s->cct, - s->user->get_attrs(), - s->user->get_tenant()); - s->iam_user_policies.insert(s->iam_user_policies.end(), - std::make_move_iterator(user_policies.begin()), - std::make_move_iterator(user_policies.end())); + auto user_policies = get_iam_user_policy_from_attr( + s->cct, s->user->get_attrs(), s->user->get_tenant() + ); + s->iam_user_policies.insert( + s->iam_user_policies.end(), + std::make_move_iterator(user_policies.begin()), + std::make_move_iterator(user_policies.end()) + ); } else { if (ret == -ENOENT) ret = 0; - else ret = -EACCES; + else + ret = -EACCES; } } catch (const std::exception& e) { - ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl; + ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() + << dendl; ret = -EACCES; } } try { - s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant); + 
s->iam_policy = + get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant); } catch (const std::exception& e) { // Really this is a can't happen condition. We parse the policy // when it's given to us, so perhaps we should abort or otherwise @@ -637,9 +647,11 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d ret = -EACCES; } - bool success = driver->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint); + bool success = + driver->get_zone()->get_redirect_endpoint(&s->redirect_zone_endpoint); if (success) { - ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; + ldpp_dout(dpp, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint + << dendl; } return ret; @@ -651,9 +663,10 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d * only_bucket: If true, reads the bucket ACL rather than the object ACL. * Returns: 0 on success, -ERR# otherwise. */ -int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, - req_state *s, bool prefetch_data, optional_yield y) -{ +int rgw_build_object_policies( + const DoutPrefixProvider* dpp, rgw::sal::Driver* driver, req_state* s, + bool prefetch_data, optional_yield y +) { int ret = 0; if (!rgw::sal::Object::empty(s->object.get())) { @@ -666,19 +679,23 @@ int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d if (prefetch_data) { s->object->set_prefetch_data(); } - ret = read_obj_policy(dpp, driver, s, s->bucket->get_info(), s->bucket_attrs, - s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(), - s->object.get(), y); + ret = read_obj_policy( + dpp, driver, s, s->bucket->get_info(), s->bucket_attrs, + s->object_acl.get(), nullptr, s->iam_policy, s->bucket.get(), + s->object.get(), y + ); } return ret; } -static int rgw_iam_remove_objtags(const DoutPrefixProvider *dpp, req_state* s, rgw::sal::Object* object, bool has_existing_obj_tag, bool 
has_resource_tag) { +static int rgw_iam_remove_objtags( + const DoutPrefixProvider* dpp, req_state* s, rgw::sal::Object* object, + bool has_existing_obj_tag, bool has_resource_tag +) { object->set_atomic(); int op_ret = object->get_obj_attrs(s->yield, dpp); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; rgw::sal::Attrs attrs = object->get_attrs(); auto tags = attrs.find(RGW_ATTR_TAGS); if (tags != attrs.end()) { @@ -687,16 +704,16 @@ static int rgw_iam_remove_objtags(const DoutPrefixProvider *dpp, req_state* s, r auto bliter = tags->second.cbegin(); tagset.decode(bliter); } catch (buffer::error& err) { - ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" + << dendl; return -EIO; } - for (auto& tag: tagset.get_tags()) { + for (auto& tag : tagset.get_tags()) { if (has_existing_obj_tag) { vector::iterator> iters; string key = "s3:ExistingObjectTag/" + tag.first; auto result = s->env.equal_range(key); - for (auto& it = result.first; it != result.second; ++it) - { + for (auto& it = result.first; it != result.second; ++it) { if (tag.second == it->second) { iters.emplace_back(it); } @@ -704,13 +721,12 @@ static int rgw_iam_remove_objtags(const DoutPrefixProvider *dpp, req_state* s, r for (auto& it : iters) { s->env.erase(it); } - }//end if has_existing_obj_tag + } //end if has_existing_obj_tag if (has_resource_tag) { vector::iterator> iters; string key = "s3:ResourceTag/" + tag.first; auto result = s->env.equal_range(key); - for (auto& it = result.first; it != result.second; ++it) - { + for (auto& it = result.first; it != result.second; ++it) { if (tag.second == it->second) { iters.emplace_back(it); } @@ -718,59 +734,79 @@ static int rgw_iam_remove_objtags(const DoutPrefixProvider *dpp, req_state* s, r for (auto& it : iters) { s->env.erase(it); } - }//end if has_resource_tag + } //end if has_resource_tag } } return 0; } -void 
rgw_add_to_iam_environment(rgw::IAM::Environment& e, std::string_view key, std::string_view val){ +void rgw_add_to_iam_environment( + rgw::IAM::Environment& e, std::string_view key, std::string_view val +) { // This variant just adds non empty key pairs to IAM env., values can be empty // in certain cases like tagging - if (!key.empty()) - e.emplace(key,val); + if (!key.empty()) e.emplace(key, val); } -static int rgw_iam_add_tags_from_bl(req_state* s, bufferlist& bl, bool has_existing_obj_tag=false, bool has_resource_tag=false){ +static int rgw_iam_add_tags_from_bl( + req_state* s, bufferlist& bl, bool has_existing_obj_tag = false, + bool has_resource_tag = false +) { RGWObjTags& tagset = s->tagset; try { auto bliter = bl.cbegin(); tagset.decode(bliter); } catch (buffer::error& err) { - ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" + << dendl; return -EIO; } - for (const auto& tag: tagset.get_tags()){ + for (const auto& tag : tagset.get_tags()) { if (has_existing_obj_tag) - rgw_add_to_iam_environment(s->env, "s3:ExistingObjectTag/" + tag.first, tag.second); + rgw_add_to_iam_environment( + s->env, "s3:ExistingObjectTag/" + tag.first, tag.second + ); if (has_resource_tag) - rgw_add_to_iam_environment(s->env, "s3:ResourceTag/" + tag.first, tag.second); + rgw_add_to_iam_environment( + s->env, "s3:ResourceTag/" + tag.first, tag.second + ); } return 0; } -static int rgw_iam_add_objtags(const DoutPrefixProvider *dpp, req_state* s, rgw::sal::Object* object, bool has_existing_obj_tag, bool has_resource_tag) { +static int rgw_iam_add_objtags( + const DoutPrefixProvider* dpp, req_state* s, rgw::sal::Object* object, + bool has_existing_obj_tag, bool has_resource_tag +) { object->set_atomic(); int op_ret = object->get_obj_attrs(s->yield, dpp); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; rgw::sal::Attrs attrs = object->get_attrs(); auto tags = 
attrs.find(RGW_ATTR_TAGS); - if (tags != attrs.end()){ - return rgw_iam_add_tags_from_bl(s, tags->second, has_existing_obj_tag, has_resource_tag); + if (tags != attrs.end()) { + return rgw_iam_add_tags_from_bl( + s, tags->second, has_existing_obj_tag, has_resource_tag + ); } return 0; } -static int rgw_iam_add_objtags(const DoutPrefixProvider *dpp, req_state* s, bool has_existing_obj_tag, bool has_resource_tag) { +static int rgw_iam_add_objtags( + const DoutPrefixProvider* dpp, req_state* s, bool has_existing_obj_tag, + bool has_resource_tag +) { if (!rgw::sal::Object::empty(s->object.get())) { - return rgw_iam_add_objtags(dpp, s, s->object.get(), has_existing_obj_tag, has_resource_tag); + return rgw_iam_add_objtags( + dpp, s, s->object.get(), has_existing_obj_tag, has_resource_tag + ); } return 0; } -static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s, rgw::sal::Bucket* bucket) { +static int rgw_iam_add_buckettags( + const DoutPrefixProvider* dpp, req_state* s, rgw::sal::Bucket* bucket +) { rgw::sal::Attrs attrs = bucket->get_attrs(); auto tags = attrs.find(RGW_ATTR_TAGS); if (tags != attrs.end()) { @@ -779,88 +815,106 @@ static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s, r return 0; } -static int rgw_iam_add_buckettags(const DoutPrefixProvider *dpp, req_state* s) { +static int rgw_iam_add_buckettags(const DoutPrefixProvider* dpp, req_state* s) { return rgw_iam_add_buckettags(dpp, s, s->bucket.get()); } -static std::tuple rgw_check_policy_condition(const DoutPrefixProvider *dpp, - boost::optional iam_policy, - boost::optional> identity_policies, - boost::optional> session_policies, - bool check_obj_exist_tag=true) { +static std::tuple rgw_check_policy_condition( + const DoutPrefixProvider* dpp, boost::optional iam_policy, + boost::optional> identity_policies, + boost::optional> session_policies, + bool check_obj_exist_tag = true +) { bool has_existing_obj_tag = false, has_resource_tag = false; bool 
iam_policy_s3_exist_tag = false, iam_policy_s3_resource_tag = false; if (iam_policy) { if (check_obj_exist_tag) { - iam_policy_s3_exist_tag = iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG); + iam_policy_s3_exist_tag = + iam_policy->has_partial_conditional(S3_EXISTING_OBJTAG); } - iam_policy_s3_resource_tag = iam_policy->has_partial_conditional(S3_RESOURCE_TAG) || iam_policy->has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL); + iam_policy_s3_resource_tag = + iam_policy->has_partial_conditional(S3_RESOURCE_TAG) || + iam_policy->has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL); } - bool identity_policy_s3_exist_tag = false, identity_policy_s3_resource_tag = false; + bool identity_policy_s3_exist_tag = false, + identity_policy_s3_resource_tag = false; if (identity_policies) { for (auto& identity_policy : identity_policies.get()) { if (check_obj_exist_tag) { if (identity_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) identity_policy_s3_exist_tag = true; } - if (identity_policy.has_partial_conditional(S3_RESOURCE_TAG) || identity_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL)) + if (identity_policy.has_partial_conditional(S3_RESOURCE_TAG) || + identity_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL + )) identity_policy_s3_resource_tag = true; - if (identity_policy_s3_exist_tag && identity_policy_s3_resource_tag) // check all policies till both are set to true + if (identity_policy_s3_exist_tag && + identity_policy_s3_resource_tag) // check all policies till both are set to true break; } } - bool session_policy_s3_exist_tag = false, session_policy_s3_resource_flag = false; + bool session_policy_s3_exist_tag = false, + session_policy_s3_resource_flag = false; if (session_policies) { for (auto& session_policy : session_policies.get()) { if (check_obj_exist_tag) { if (session_policy.has_partial_conditional(S3_EXISTING_OBJTAG)) session_policy_s3_exist_tag = true; } - if 
(session_policy.has_partial_conditional(S3_RESOURCE_TAG) || session_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL)) + if (session_policy.has_partial_conditional(S3_RESOURCE_TAG) || + session_policy.has_partial_conditional_value(S3_RUNTIME_RESOURCE_VAL)) session_policy_s3_resource_flag = true; - if (session_policy_s3_exist_tag && session_policy_s3_resource_flag) - break; + if (session_policy_s3_exist_tag && session_policy_s3_resource_flag) break; } } - has_existing_obj_tag = iam_policy_s3_exist_tag || identity_policy_s3_exist_tag || session_policy_s3_exist_tag; - has_resource_tag = iam_policy_s3_resource_tag || identity_policy_s3_resource_tag || session_policy_s3_resource_flag; + has_existing_obj_tag = iam_policy_s3_exist_tag || + identity_policy_s3_exist_tag || + session_policy_s3_exist_tag; + has_resource_tag = iam_policy_s3_resource_tag || + identity_policy_s3_resource_tag || + session_policy_s3_resource_flag; return make_tuple(has_existing_obj_tag, has_resource_tag); } -static std::tuple rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag=true) { - return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_user_policies, s->session_policies, check_obj_exist_tag); -} - -static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, req_state *s){ - - using header_pair_t = std::pair ; - static const std::initializer_list acl_header_conditionals { - {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"}, - {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"}, - {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"}, - {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"}, - {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"} - }; - - if (s->has_acl_header){ - for (const auto& c: acl_header_conditionals){ +static std::tuple rgw_check_policy_condition( + const DoutPrefixProvider* dpp, req_state* s, bool check_obj_exist_tag = true +) { + return rgw_check_policy_condition( + dpp, 
s->iam_policy, s->iam_user_policies, s->session_policies, + check_obj_exist_tag + ); +} + +static void rgw_add_grant_to_iam_environment( + rgw::IAM::Environment& e, req_state* s +) { + using header_pair_t = std::pair; + static const std::initializer_list acl_header_conditionals{ + {"HTTP_X_AMZ_GRANT_READ", "s3:x-amz-grant-read"}, + {"HTTP_X_AMZ_GRANT_WRITE", "s3:x-amz-grant-write"}, + {"HTTP_X_AMZ_GRANT_READ_ACP", "s3:x-amz-grant-read-acp"}, + {"HTTP_X_AMZ_GRANT_WRITE_ACP", "s3:x-amz-grant-write-acp"}, + {"HTTP_X_AMZ_GRANT_FULL_CONTROL", "s3:x-amz-grant-full-control"}}; + + if (s->has_acl_header) { + for (const auto& c : acl_header_conditionals) { auto hdr = s->info.env->get(c.first); - if(hdr) { + if (hdr) { e.emplace(c.second, hdr); } } } } -void rgw_build_iam_environment(rgw::sal::Driver* driver, - req_state* s) -{ +void rgw_build_iam_environment(rgw::sal::Driver* driver, req_state* s) { const auto& m = s->info.env->get_map(); auto t = ceph::real_clock::now(); - s->env.emplace("aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t))); + s->env.emplace( + "aws:CurrentTime", std::to_string(ceph::real_clock::to_time_t(t)) + ); s->env.emplace("aws:EpochTime", ceph::to_iso_8601(t)); // TODO: This is fine for now, but once we have STS we'll need to // look and see. 
Also this won't work with the IdentityApplier @@ -888,16 +942,16 @@ void rgw_build_iam_environment(rgw::sal::Driver* driver, if (remote_addr_param == "HTTP_X_FORWARDED_FOR") { const auto comma = ip->find(','); if (comma != string::npos) { - temp.assign(*ip, 0, comma); - ip = &temp; + temp.assign(*ip, 0, comma); + ip = &temp; } } s->env.emplace("aws:SourceIp", *ip); } - i = m.find("HTTP_USER_AGENT"); { - if (i != m.end()) - s->env.emplace("aws:UserAgent", i->second); + i = m.find("HTTP_USER_AGENT"); + { + if (i != m.end()) s->env.emplace("aws:UserAgent", i->second); } if (s->user) { @@ -915,10 +969,8 @@ void rgw_build_iam_environment(rgw::sal::Driver* driver, } } -void rgw_bucket_object_pre_exec(req_state *s) -{ - if (s->expect_cont) - dump_continue(s); +void rgw_bucket_object_pre_exec(req_state* s) { + if (s->expect_cont) dump_continue(s); dump_bucket_from_state(s); } @@ -939,8 +991,10 @@ void rgw_bucket_object_pre_exec(req_state *s) // The called function must return an integer, negative on error. In // general, they should just return op_ret. 
namespace { -template -int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::Bucket* b, const F& f) { +template +int retry_raced_bucket_write( + const DoutPrefixProvider* dpp, rgw::sal::Bucket* b, const F& f +) { auto r = f(); for (auto i = 0u; i < 15u && r == -ECANCELED; ++i) { r = b->try_refresh_info(dpp, nullptr); @@ -950,20 +1004,19 @@ int retry_raced_bucket_write(const DoutPrefixProvider *dpp, rgw::sal::Bucket* b, } return r; } -} +} // namespace - -int RGWGetObj::verify_permission(optional_yield y) -{ +int RGWGetObj::verify_permission(optional_yield y) { s->object->set_atomic(); if (prefetch_data()) { s->object->set_prefetch_data(); } - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); if (get_torrent) { if (s->object->get_instance().empty()) { @@ -984,8 +1037,10 @@ int RGWGetObj::verify_permission(optional_yield y) } if (s->bucket->get_info().obj_lock_enabled()) { - get_retention = verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention); - get_legal_hold = verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold); + get_retention = + verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention); + get_legal_hold = + verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold); } return 0; @@ -993,58 +1048,57 @@ int RGWGetObj::verify_permission(optional_yield y) RGWOp::~RGWOp(){}; -int RGWOp::verify_op_mask() -{ +int RGWOp::verify_op_mask() { uint32_t required_mask = op_mask(); ldpp_dout(this, 20) << "required_mask= " << required_mask - << " user.op_mask=" << s->user->get_info().op_mask << dendl; + << " user.op_mask=" << s->user->get_info().op_mask 
+ << dendl; if ((s->user->get_info().op_mask & required_mask) != required_mask) { return -EPERM; } - if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !driver->get_zone()->is_writeable()) { + if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && + !driver->get_zone()->is_writeable()) { ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a " - "non-system user, permission denied" << dendl; + "non-system user, permission denied" + << dendl; return -EPERM; } return 0; } -int RGWGetObjTags::verify_permission(optional_yield y) -{ - auto iam_action = s->object->get_instance().empty()? - rgw::IAM::s3GetObjectTagging: - rgw::IAM::s3GetObjectVersionTagging; +int RGWGetObjTags::verify_permission(optional_yield y) { + auto iam_action = s->object->get_instance().empty() + ? rgw::IAM::s3GetObjectTagging + : rgw::IAM::s3GetObjectVersionTagging; - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (!verify_object_permission(this, s,iam_action)) - return -EACCES; + if (!verify_object_permission(this, s, iam_action)) return -EACCES; return 0; } -void RGWGetObjTags::pre_exec() -{ +void RGWGetObjTags::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetObjTags::execute(optional_yield y) -{ +void RGWGetObjTags::execute(optional_yield y) { rgw::sal::Attrs attrs; s->object->set_atomic(); op_ret = s->object->get_obj_attrs(y, this); - if (op_ret == 0){ + if (op_ret == 0) { attrs = s->object->get_attrs(); auto tags = attrs.find(RGW_ATTR_TAGS); - if(tags != attrs.end()){ + if (tags != attrs.end()) { has_tags = true; tags_bl.append(tags->second); } @@ -1052,76 +1106,65 @@ void RGWGetObjTags::execute(optional_yield y) send_response_data(tags_bl); } -int 
RGWPutObjTags::verify_permission(optional_yield y) -{ - auto iam_action = s->object->get_instance().empty() ? - rgw::IAM::s3PutObjectTagging: - rgw::IAM::s3PutObjectVersionTagging; +int RGWPutObjTags::verify_permission(optional_yield y) { + auto iam_action = s->object->get_instance().empty() + ? rgw::IAM::s3PutObjectTagging + : rgw::IAM::s3PutObjectVersionTagging; //Using buckets tags for authorization makes more sense. - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, true); - if (has_s3_existing_tag) - rgw_iam_add_objtags(this, s, true, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); - if (!verify_object_permission(this, s,iam_action)) - return -EACCES; + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, true); + if (has_s3_existing_tag) rgw_iam_add_objtags(this, s, true, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); + if (!verify_object_permission(this, s, iam_action)) return -EACCES; return 0; } -void RGWPutObjTags::execute(optional_yield y) -{ +void RGWPutObjTags::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - if (rgw::sal::Object::empty(s->object.get())){ - op_ret= -EINVAL; // we only support tagging on existing objects + if (rgw::sal::Object::empty(s->object.get())) { + op_ret = -EINVAL; // we only support tagging on existing objects return; } s->object->set_atomic(); op_ret = s->object->modify_obj_attrs(RGW_ATTR_TAGS, tags_bl, y, this); - if (op_ret == -ECANCELED){ + if (op_ret == -ECANCELED) { op_ret = -ERR_TAG_CONFLICT; } } -void RGWDeleteObjTags::pre_exec() -{ +void RGWDeleteObjTags::pre_exec() { rgw_bucket_object_pre_exec(s); } - -int RGWDeleteObjTags::verify_permission(optional_yield y) -{ +int RGWDeleteObjTags::verify_permission(optional_yield y) { if (!rgw::sal::Object::empty(s->object.get())) { - auto iam_action = s->object->get_instance().empty() ? 
- rgw::IAM::s3DeleteObjectTagging: - rgw::IAM::s3DeleteObjectVersionTagging; + auto iam_action = s->object->get_instance().empty() + ? rgw::IAM::s3DeleteObjectTagging + : rgw::IAM::s3DeleteObjectVersionTagging; - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (!verify_object_permission(this, s, iam_action)) - return -EACCES; + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + if (!verify_object_permission(this, s, iam_action)) return -EACCES; } return 0; } -void RGWDeleteObjTags::execute(optional_yield y) -{ - if (rgw::sal::Object::empty(s->object.get())) - return; +void RGWDeleteObjTags::execute(optional_yield y) { + if (rgw::sal::Object::empty(s->object.get())) return; op_ret = s->object->delete_obj_attrs(this, RGW_ATTR_TAGS, y); } -int RGWGetBucketTags::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketTags::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketTagging)) { return -EACCES; @@ -1130,13 +1173,11 @@ int RGWGetBucketTags::verify_permission(optional_yield y) return 0; } -void RGWGetBucketTags::pre_exec() -{ +void RGWGetBucketTags::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetBucketTags::execute(optional_yield y) -{ +void RGWGetBucketTags::execute(optional_yield y) { auto iter = s->bucket_attrs.find(RGW_ATTR_TAGS); if (iter != s->bucket_attrs.end()) { 
has_tags = true; @@ -1148,23 +1189,23 @@ void RGWGetBucketTags::execute(optional_yield y) } int RGWPutBucketTags::verify_permission(optional_yield y) { - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging); } -void RGWPutBucketTags::execute(optional_yield y) -{ - +void RGWPutBucketTags::execute(optional_yield y) { op_ret = get_params(this, y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, y] { @@ -1172,29 +1213,28 @@ void RGWPutBucketTags::execute(optional_yield y) attrs[RGW_ATTR_TAGS] = tags_bl; return s->bucket->merge_and_store_attrs(this, attrs, y); }); - } -void RGWDeleteBucketTags::pre_exec() -{ +void RGWDeleteBucketTags::pre_exec() { rgw_bucket_object_pre_exec(s); } -int RGWDeleteBucketTags::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucketTags::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, 
rgw::IAM::s3PutBucketTagging); } -void RGWDeleteBucketTags::execute(optional_yield y) -{ +void RGWDeleteBucketTags::execute(optional_yield y) { bufferlist in_data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } @@ -1203,58 +1243,63 @@ void RGWDeleteBucketTags::execute(optional_yield y) attrs.erase(RGW_ATTR_TAGS); op_ret = s->bucket->merge_and_store_attrs(this, attrs, y); if (op_ret < 0) { - ldpp_dout(this, 0) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket=" - << s->bucket->get_name() - << " returned err= " << op_ret << dendl; + ldpp_dout( + this, 0 + ) << "RGWDeleteBucketTags() failed to remove RGW_ATTR_TAGS on bucket=" + << s->bucket->get_name() << " returned err= " << op_ret << dendl; } return op_ret; }); } -int RGWGetBucketReplication::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketReplication::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - if (!verify_bucket_permission(this, s, rgw::IAM::s3GetReplicationConfiguration)) { + if (!verify_bucket_permission( + this, s, rgw::IAM::s3GetReplicationConfiguration + )) { return -EACCES; } return 0; } -void RGWGetBucketReplication::pre_exec() -{ +void RGWGetBucketReplication::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetBucketReplication::execute(optional_yield y) -{ +void 
RGWGetBucketReplication::execute(optional_yield y) { send_response_data(); } int RGWPutBucketReplication::verify_permission(optional_yield y) { - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); - return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutReplicationConfiguration); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); + return verify_bucket_owner_or_policy( + s, rgw::IAM::s3PutReplicationConfiguration + ); } void RGWPutBucketReplication::execute(optional_yield y) { - op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - auto sync_policy = (s->bucket->get_info().sync_policy ? *s->bucket->get_info().sync_policy : rgw_sync_policy_info()); + auto sync_policy = + (s->bucket->get_info().sync_policy ? 
*s->bucket->get_info().sync_policy + : rgw_sync_policy_info()); for (auto& group : sync_policy_groups) { sync_policy.groups[group.id] = group; @@ -1264,7 +1309,8 @@ void RGWPutBucketReplication::execute(optional_yield y) { int ret = s->bucket->put_info(this, false, real_time()); if (ret < 0) { - ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl; + ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" + << s->bucket << ") returned ret=" << ret << dendl; return ret; } @@ -1272,26 +1318,28 @@ void RGWPutBucketReplication::execute(optional_yield y) { }); } -void RGWDeleteBucketReplication::pre_exec() -{ +void RGWDeleteBucketReplication::pre_exec() { rgw_bucket_object_pre_exec(s); } -int RGWDeleteBucketReplication::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucketReplication::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteReplicationConfiguration); + return verify_bucket_owner_or_policy( + s, rgw::IAM::s3DeleteReplicationConfiguration + ); } -void RGWDeleteBucketReplication::execute(optional_yield y) -{ +void RGWDeleteBucketReplication::execute(optional_yield y) { bufferlist in_data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } @@ -1308,7 +1356,8 @@ void 
RGWDeleteBucketReplication::execute(optional_yield y) int ret = s->bucket->put_info(this, false, real_time()); if (ret < 0) { - ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" << s->bucket << ") returned ret=" << ret << dendl; + ldpp_dout(this, 0) << "ERROR: put_bucket_instance_info (bucket=" + << s->bucket << ") returned ret=" << ret << dendl; return ret; } @@ -1316,9 +1365,8 @@ void RGWDeleteBucketReplication::execute(optional_yield y) }); } -int RGWOp::do_aws4_auth_completion() -{ - ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl; +int RGWOp::do_aws4_auth_completion() { + ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl; if (s->auth.completer) { if (!s->auth.completer->complete()) { return -ERR_AMZ_CONTENT_SHA256_MISMATCH; @@ -1335,11 +1383,9 @@ int RGWOp::do_aws4_auth_completion() return 0; } -int RGWOp::init_quota() -{ +int RGWOp::init_quota() { /* no quota enforcement for system requests */ - if (s->system_request) - return 0; + if (s->system_request) return 0; /* init quota related stuff */ if (!(s->user->get_info().op_mask & RGW_OP_TYPE_MODIFY)) { @@ -1352,17 +1398,15 @@ int RGWOp::init_quota() } std::unique_ptr owner_user = - driver->get_user(s->bucket->get_info().owner); + driver->get_user(s->bucket->get_info().owner); rgw::sal::User* user; if (s->user->get_id() == s->bucket_owner.get_id()) { user = s->user.get(); } else { int r = owner_user->load_user(this, s->yield); - if (r < 0) - return r; + if (r < 0) return r; user = owner_user.get(); - } driver->get_quota(quota); @@ -1380,7 +1424,9 @@ int RGWOp::init_quota() return 0; } -static bool validate_cors_rule_method(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_meth) { +static bool validate_cors_rule_method( + const DoutPrefixProvider* dpp, RGWCORSRule* rule, const char* req_meth +) { uint8_t flags = 0; if (!req_meth) { @@ -1388,11 +1434,16 @@ static bool validate_cors_rule_method(const DoutPrefixProvider *dpp, 
RGWCORSRule return false; } - if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET; - else if (strcmp(req_meth, "POST") == 0) flags = RGW_CORS_POST; - else if (strcmp(req_meth, "PUT") == 0) flags = RGW_CORS_PUT; - else if (strcmp(req_meth, "DELETE") == 0) flags = RGW_CORS_DELETE; - else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD; + if (strcmp(req_meth, "GET") == 0) + flags = RGW_CORS_GET; + else if (strcmp(req_meth, "POST") == 0) + flags = RGW_CORS_POST; + else if (strcmp(req_meth, "PUT") == 0) + flags = RGW_CORS_PUT; + else if (strcmp(req_meth, "DELETE") == 0) + flags = RGW_CORS_DELETE; + else if (strcmp(req_meth, "HEAD") == 0) + flags = RGW_CORS_HEAD; if (rule->get_allowed_methods() & flags) { ldpp_dout(dpp, 10) << "Method " << req_meth << " is supported" << dendl; @@ -1404,13 +1455,16 @@ static bool validate_cors_rule_method(const DoutPrefixProvider *dpp, RGWCORSRule return true; } -static bool validate_cors_rule_header(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs) { +static bool validate_cors_rule_header( + const DoutPrefixProvider* dpp, RGWCORSRule* rule, const char* req_hdrs +) { if (req_hdrs) { vector hdrs; get_str_vec(req_hdrs, hdrs); for (const auto& hdr : hdrs) { if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) { - ldpp_dout(dpp, 5) << "Header " << hdr << " is not registered in this rule" << dendl; + ldpp_dout(dpp, 5) << "Header " << hdr + << " is not registered in this rule" << dendl; return false; } } @@ -1418,8 +1472,7 @@ static bool validate_cors_rule_header(const DoutPrefixProvider *dpp, RGWCORSRule return true; } -int RGWOp::read_bucket_cors() -{ +int RGWOp::read_bucket_cors() { bufferlist bl; map::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS); @@ -1437,11 +1490,13 @@ int RGWOp::read_bucket_cors() try { bucket_cors.decode(iter); } catch (buffer::error& err) { - ldpp_dout(this, 0) << "ERROR: could not decode CORS, caught buffer::error" << dendl; + ldpp_dout(this, 0) << "ERROR: could not decode 
CORS, caught buffer::error" + << dendl; return -EIO; } if (s->cct->_conf->subsys.should_gather()) { - RGWCORSConfiguration_S3 *s3cors = static_cast(&bucket_cors); + RGWCORSConfiguration_S3* s3cors = + static_cast(&bucket_cors); ldpp_dout(this, 15) << "Read RGWCORSConfiguration"; s3cors->to_xml(*_dout); *_dout << dendl; @@ -1454,13 +1509,17 @@ int RGWOp::read_bucket_cors() * any of the values in list of headers do not set any additional headers and * terminate this set of steps. * */ -static void get_cors_response_headers(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) { +static void get_cors_response_headers( + const DoutPrefixProvider* dpp, RGWCORSRule* rule, const char* req_hdrs, + string& hdrs, string& exp_hdrs, unsigned* max_age +) { if (req_hdrs) { list hl; get_str_list(req_hdrs, hl); - for(list::iterator it = hl.begin(); it != hl.end(); ++it) { + for (list::iterator it = hl.begin(); it != hl.end(); ++it) { if (!rule->is_header_allowed((*it).c_str(), (*it).length())) { - ldpp_dout(dpp, 5) << "Header " << (*it) << " is not registered in this rule" << dendl; + ldpp_dout(dpp, 5) << "Header " << (*it) + << " is not registered in this rule" << dendl; } else { if (hdrs.length() > 0) hdrs.append(","); hdrs.append((*it)); @@ -1476,10 +1535,12 @@ static void get_cors_response_headers(const DoutPrefixProvider *dpp, RGWCORSRule * * This is described in the CORS standard, section 6.2. */ -bool RGWOp::generate_cors_headers(string& origin, string& method, string& headers, string& exp_headers, unsigned *max_age) -{ +bool RGWOp::generate_cors_headers( + string& origin, string& method, string& headers, string& exp_headers, + unsigned* max_age +) { /* CORS 6.2.1. 
*/ - const char *orig = s->info.env->get("HTTP_ORIGIN"); + const char* orig = s->info.env->get("HTTP_ORIGIN"); if (!orig) { return false; } @@ -1493,14 +1554,14 @@ bool RGWOp::generate_cors_headers(string& origin, string& method, string& header } if (!cors_exist) { - ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" + << dendl; return false; } /* CORS 6.2.2. */ - RGWCORSRule *rule = bucket_cors.host_name_rule(orig); - if (!rule) - return false; + RGWCORSRule* rule = bucket_cors.host_name_rule(orig); + if (!rule) return false; /* * Set the Allowed-Origin header to a asterisk if this is allowed in the rule @@ -1510,12 +1571,11 @@ bool RGWOp::generate_cors_headers(string& origin, string& method, string& header * For requests without credentials, the server may specify "*" as a wildcard, * thereby allowing any origin to access the resource. */ - const char *authorization = s->info.env->get("HTTP_AUTHORIZATION"); - if (!authorization && rule->has_wildcard_origin()) - origin = "*"; + const char* authorization = s->info.env->get("HTTP_AUTHORIZATION"); + if (!authorization && rule->has_wildcard_origin()) origin = "*"; /* CORS 6.2.3. */ - const char *req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD"); + const char* req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD"); if (!req_meth) { req_meth = s->info.method; } @@ -1524,35 +1584,41 @@ bool RGWOp::generate_cors_headers(string& origin, string& method, string& header method = req_meth; /* CORS 6.2.5. */ if (!validate_cors_rule_method(this, rule, req_meth)) { - return false; + return false; } } /* CORS 6.2.4. */ - const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS"); + const char* req_hdrs = + s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS"); /* CORS 6.2.6. 
*/ - get_cors_response_headers(this, rule, req_hdrs, headers, exp_headers, max_age); + get_cors_response_headers( + this, rule, req_hdrs, headers, exp_headers, max_age + ); return true; } -int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map& attrset, RGWAccessControlPolicy *policy) -{ +int rgw_policy_from_attrset( + const DoutPrefixProvider* dpp, CephContext* cct, + map& attrset, RGWAccessControlPolicy* policy +) { map::iterator aiter = attrset.find(RGW_ATTR_ACL); - if (aiter == attrset.end()) - return -EIO; + if (aiter == attrset.end()) return -EIO; bufferlist& bl = aiter->second; auto iter = bl.cbegin(); try { policy->decode(iter); } catch (buffer::error& err) { - ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" + << dendl; return -EIO; } if (cct->_conf->subsys.should_gather()) { - RGWAccessControlPolicy_S3 *s3policy = static_cast(policy); + RGWAccessControlPolicy_S3* s3policy = + static_cast(policy); ldpp_dout(dpp, 15) << __func__ << " Read AccessControlPolicy"; s3policy->to_xml(*_dout); *_dout << dendl; @@ -1560,16 +1626,14 @@ int rgw_policy_from_attrset(const DoutPrefixProvider *dpp, CephContext *cct, map return 0; } -int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, - const rgw_bucket_dir_entry& ent, - RGWAccessControlPolicy * const bucket_acl, - const boost::optional& bucket_policy, - const off_t start_ofs, - const off_t end_ofs, - bool swift_slo) -{ - ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name - << "[" << ent.key.instance << "]" << dendl; +int RGWGetObj::read_user_manifest_part( + rgw::sal::Bucket* bucket, const rgw_bucket_dir_entry& ent, + RGWAccessControlPolicy* const bucket_acl, + const boost::optional& bucket_policy, const off_t start_ofs, + const off_t end_ofs, bool swift_slo +) { + ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name << "[" + << ent.key.instance << "]" << 
dendl; RGWGetObj_CB cb(this); RGWGetObj_Filter* filter = &cb; boost::optional decompress; @@ -1582,7 +1646,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, RGWAccessControlPolicy obj_policy(s->cct); ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs - << " end=" << cur_end << dendl; + << " end=" << cur_end << dendl; part->set_atomic(); part->set_prefetch_data(); @@ -1595,52 +1659,53 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, } op_ret = read_op->prepare(s->yield, this); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; op_ret = part->range_to_ofs(ent.meta.accounted_size, cur_ofs, cur_end); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; bool need_decompress; - op_ret = rgw_compression_info_from_attrset(part->get_attrs(), need_decompress, cs_info); + op_ret = rgw_compression_info_from_attrset( + part->get_attrs(), need_decompress, cs_info + ); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl; return -EIO; } - if (need_decompress) - { + if (need_decompress) { if (cs_info.orig_size != ent.meta.accounted_size) { // hmm.. something wrong, object not as expected, abort! - ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size - << ", actual read size=" << ent.meta.size << dendl; + ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" + << cs_info.orig_size + << ", actual read size=" << ent.meta.size << dendl; return -EIO; } decompress.emplace(s->cct, &cs_info, partial_content, filter); filter = &*decompress; - } - else - { + } else { if (part->get_obj_size() != ent.meta.size) { // hmm.. something wrong, object not as expected, abort! 
ldpp_dout(this, 0) << "ERROR: expected obj_size=" << part->get_obj_size() - << ", actual read size=" << ent.meta.size << dendl; + << ", actual read size=" << ent.meta.size << dendl; return -EIO; - } + } } op_ret = rgw_policy_from_attrset(s, s->cct, part->get_attrs(), &obj_policy); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; /* We can use global user_acl because LOs cannot have segments * stored inside different accounts. */ if (s->system_request) { - ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl; + ldpp_dout(this, 2) << "overriding permissions due to system operation" + << dendl; } else if (s->auth.identity->is_admin_of(s->user->get_id())) { - ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl; - } else if (!verify_object_permission(this, s, part->get_obj(), s->user_acl.get(), - bucket_acl, &obj_policy, bucket_policy, - s->iam_user_policies, s->session_policies, action)) { + ldpp_dout(this, 2) << "overriding permissions due to admin operation" + << dendl; + } else if (!verify_object_permission( + this, s, part->get_obj(), s->user_acl.get(), bucket_acl, + &obj_policy, bucket_policy, s->iam_user_policies, + s->session_policies, action + )) { return -EPERM; } if (ent.meta.size == 0) { @@ -1650,34 +1715,25 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket, perfcounter->inc(l_rgw_get_b, cur_end - cur_ofs); filter->fixup_range(cur_ofs, cur_end); op_ret = read_op->iterate(this, cur_ofs, cur_end, filter, s->yield); - if (op_ret >= 0) - op_ret = filter->flush(); + if (op_ret >= 0) op_ret = filter->flush(); return op_ret; } -static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp, - CephContext * const cct, - rgw::sal::Driver* const driver, - const off_t ofs, - const off_t end, - rgw::sal::Bucket* bucket, - const string& obj_prefix, - RGWAccessControlPolicy * const bucket_acl, - const boost::optional& bucket_policy, - uint64_t * const ptotal_len, - uint64_t * 
const pobj_size, - string * const pobj_sum, - int (*cb)(rgw::sal::Bucket* bucket, - const rgw_bucket_dir_entry& ent, - RGWAccessControlPolicy * const bucket_acl, - const boost::optional& bucket_policy, - off_t start_ofs, - off_t end_ofs, - void *param, - bool swift_slo), - void * const cb_param, - optional_yield y) -{ +static int iterate_user_manifest_parts( + const DoutPrefixProvider* dpp, CephContext* const cct, + rgw::sal::Driver* const driver, const off_t ofs, const off_t end, + rgw::sal::Bucket* bucket, const string& obj_prefix, + RGWAccessControlPolicy* const bucket_acl, + const boost::optional& bucket_policy, uint64_t* const ptotal_len, + uint64_t* const pobj_size, string* const pobj_sum, + int (*cb)( + rgw::sal::Bucket* bucket, const rgw_bucket_dir_entry& ent, + RGWAccessControlPolicy* const bucket_acl, + const boost::optional& bucket_policy, off_t start_ofs, + off_t end_ofs, void* param, bool swift_slo + ), + void* const cb_param, optional_yield y +) { uint64_t obj_ofs = 0, len_count = 0; bool found_start = false, found_end = false, handled_end = false; string delim; @@ -1704,31 +1760,33 @@ static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp, const uint64_t obj_size = ent.meta.accounted_size; uint64_t start_ofs = 0, end_ofs = obj_size; - if ((ptotal_len || cb) && !found_start && cur_total_len + obj_size > (uint64_t)ofs) { - start_ofs = ofs - obj_ofs; - found_start = true; + if ((ptotal_len || cb) && !found_start && + cur_total_len + obj_size > (uint64_t)ofs) { + start_ofs = ofs - obj_ofs; + found_start = true; } obj_ofs += obj_size; if (pobj_sum) { - etag_sum.Update((const unsigned char *)ent.meta.etag.c_str(), - ent.meta.etag.length()); + etag_sum.Update( + (const unsigned char*)ent.meta.etag.c_str(), ent.meta.etag.length() + ); } if ((ptotal_len || cb) && !found_end && obj_ofs > (uint64_t)end) { - end_ofs = end - cur_total_len + 1; - found_end = true; + end_ofs = end - cur_total_len + 1; + found_end = true; } - 
perfcounter->tinc(l_rgw_get_lat, - (ceph_clock_now() - start_time)); + perfcounter->tinc(l_rgw_get_lat, (ceph_clock_now() - start_time)); if (found_start && !handled_end) { len_count += end_ofs - start_ofs; if (cb) { - r = cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, - cb_param, false /* swift_slo */); + r = + cb(bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, + cb_param, false /* swift_slo */); if (r < 0) { return r; } @@ -1754,7 +1812,7 @@ static int iterate_user_manifest_parts(const DoutPrefixProvider *dpp, } struct rgw_slo_part { - RGWAccessControlPolicy *bucket_acl = nullptr; + RGWAccessControlPolicy* bucket_acl = nullptr; Policy* bucket_policy = nullptr; rgw::sal::Bucket* bucket; string obj_name; @@ -1762,22 +1820,17 @@ struct rgw_slo_part { string etag; }; -static int iterate_slo_parts(const DoutPrefixProvider *dpp, - CephContext *cct, - rgw::sal::Driver* driver, - off_t ofs, - off_t end, - map& slo_parts, - int (*cb)(rgw::sal::Bucket* bucket, - const rgw_bucket_dir_entry& ent, - RGWAccessControlPolicy *bucket_acl, - const boost::optional& bucket_policy, - off_t start_ofs, - off_t end_ofs, - void *param, - bool swift_slo), - void *cb_param) -{ +static int iterate_slo_parts( + const DoutPrefixProvider* dpp, CephContext* cct, rgw::sal::Driver* driver, + off_t ofs, off_t end, map& slo_parts, + int (*cb)( + rgw::sal::Bucket* bucket, const rgw_bucket_dir_entry& ent, + RGWAccessControlPolicy* bucket_acl, + const boost::optional& bucket_policy, off_t start_ofs, + off_t end_ofs, void* param, bool swift_slo + ), + void* cb_param +) { bool found_start = false, found_end = false; if (slo_parts.empty()) { @@ -1816,24 +1869,23 @@ static int iterate_slo_parts(const DoutPrefixProvider *dpp, found_end = true; } - perfcounter->tinc(l_rgw_get_lat, - (ceph_clock_now() - start_time)); + perfcounter->tinc(l_rgw_get_lat, (ceph_clock_now() - start_time)); if (found_start) { if (cb) { ldpp_dout(dpp, 20) << "iterate_slo_parts()" - << " obj=" << 
part.obj_name - << " start_ofs=" << start_ofs - << " end_ofs=" << end_ofs - << dendl; - - // SLO is a Swift thing, and Swift has no knowledge of S3 Policies. - int r = cb(part.bucket, ent, part.bucket_acl, - (part.bucket_policy ? - boost::optional(*part.bucket_policy) : none), - start_ofs, end_ofs, cb_param, true /* swift_slo */); - if (r < 0) - return r; + << " obj=" << part.obj_name + << " start_ofs=" << start_ofs + << " end_ofs=" << end_ofs << dendl; + + // SLO is a Swift thing, and Swift has no knowledge of S3 Policies. + int r = + cb(part.bucket, ent, part.bucket_acl, + (part.bucket_policy + ? boost::optional(*part.bucket_policy) + : none), + start_ofs, end_ofs, cb_param, true /* swift_slo */); + if (r < 0) return r; } } @@ -1843,25 +1895,22 @@ static int iterate_slo_parts(const DoutPrefixProvider *dpp, return 0; } -static int get_obj_user_manifest_iterate_cb(rgw::sal::Bucket* bucket, - const rgw_bucket_dir_entry& ent, - RGWAccessControlPolicy * const bucket_acl, - const boost::optional& bucket_policy, - const off_t start_ofs, - const off_t end_ofs, - void * const param, - bool swift_slo = false) -{ - RGWGetObj *op = static_cast(param); +static int get_obj_user_manifest_iterate_cb( + rgw::sal::Bucket* bucket, const rgw_bucket_dir_entry& ent, + RGWAccessControlPolicy* const bucket_acl, + const boost::optional& bucket_policy, const off_t start_ofs, + const off_t end_ofs, void* const param, bool swift_slo = false +) { + RGWGetObj* op = static_cast(param); return op->read_user_manifest_part( - bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo); + bucket, ent, bucket_acl, bucket_policy, start_ofs, end_ofs, swift_slo + ); } -int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) -{ +int RGWGetObj::handle_user_manifest(const char* prefix, optional_yield y) { const std::string_view prefix_view(prefix); ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix=" - << prefix_view << dendl; + << prefix_view << dendl; const 
size_t pos = prefix_view.find('/'); if (pos == string::npos) { @@ -1872,7 +1921,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1)); RGWAccessControlPolicy _bucket_acl(s->cct); - RGWAccessControlPolicy *bucket_acl; + RGWAccessControlPolicy* bucket_acl; boost::optional _bucket_policy; boost::optional* bucket_policy; RGWBucketInfo bucket_info; @@ -1882,19 +1931,25 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) if (bucket_name.compare(s->bucket->get_name()) != 0) { map bucket_attrs; - r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y); + r = driver->get_bucket( + this, s->user.get(), s->user->get_tenant(), bucket_name, &ubucket, y + ); if (r < 0) { ldpp_dout(this, 0) << "could not get bucket info for bucket=" - << bucket_name << dendl; + << bucket_name << dendl; return r; } bucket_acl = &_bucket_acl; - r = read_bucket_policy(this, driver, s, ubucket->get_info(), bucket_attrs, bucket_acl, ubucket->get_key(), y); + r = read_bucket_policy( + this, driver, s, ubucket->get_info(), bucket_attrs, bucket_acl, + ubucket->get_key(), y + ); if (r < 0) { ldpp_dout(this, 0) << "failed to read bucket policy" << dendl; return r; } - _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs, s->user->get_tenant()); + _bucket_policy = + get_iam_policy_from_attr(s->cct, bucket_attrs, s->user->get_tenant()); bucket_policy = &_bucket_policy; pbucket = ubucket.get(); } else { @@ -1907,10 +1962,11 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) * - total length (of the parts we are going to send to client), * - overall DLO's content size, * - md5 sum of overall DLO's content (for etag of Swift API). 
*/ - r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, - pbucket, obj_prefix, bucket_acl, *bucket_policy, - nullptr, &s->obj_size, &lo_etag, - nullptr /* cb */, nullptr /* cb arg */, y); + r = iterate_user_manifest_parts( + this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, + *bucket_policy, nullptr, &s->obj_size, &lo_etag, nullptr /* cb */, + nullptr /* cb arg */, y + ); if (r < 0) { return r; } @@ -1921,10 +1977,10 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return r; } - r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, - pbucket, obj_prefix, bucket_acl, *bucket_policy, - &total_len, nullptr, nullptr, - nullptr, nullptr, y); + r = iterate_user_manifest_parts( + this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, + *bucket_policy, &total_len, nullptr, nullptr, nullptr, nullptr, y + ); if (r < 0) { return r; } @@ -1935,10 +1991,11 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return 0; } - r = iterate_user_manifest_parts(this, s->cct, driver, ofs, end, - pbucket, obj_prefix, bucket_acl, *bucket_policy, - nullptr, nullptr, nullptr, - get_obj_user_manifest_iterate_cb, (void *)this, y); + r = iterate_user_manifest_parts( + this, s->cct, driver, ofs, end, pbucket, obj_prefix, bucket_acl, + *bucket_policy, nullptr, nullptr, nullptr, + get_obj_user_manifest_iterate_cb, (void*)this, y + ); if (r < 0) { return r; } @@ -1951,8 +2008,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y) return r; } -int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) -{ +int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) { RGWSLOInfo slo_info; auto bliter = bl.cbegin(); try { @@ -1964,7 +2020,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl; vector allocated_acls; - map>> policies; + map>> policies; map> buckets; map 
slo_parts; @@ -1998,7 +2054,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) string obj_name = path.substr(pos_sep + 1); rgw::sal::Bucket* bucket; - RGWAccessControlPolicy *bucket_acl; + RGWAccessControlPolicy* bucket_acl; Policy* bucket_policy; if (bucket_name.compare(s->bucket->get_name()) != 0) { @@ -2006,31 +2062,38 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) if (piter != policies.end()) { bucket_acl = piter->second.first; bucket_policy = piter->second.second.get_ptr(); - bucket = buckets[bucket_name].get(); + bucket = buckets[bucket_name].get(); } else { - allocated_acls.push_back(RGWAccessControlPolicy(s->cct)); - RGWAccessControlPolicy& _bucket_acl = allocated_acls.back(); - - std::unique_ptr tmp_bucket; - int r = driver->get_bucket(this, s->user.get(), s->user->get_tenant(), bucket_name, &tmp_bucket, y); + allocated_acls.push_back(RGWAccessControlPolicy(s->cct)); + RGWAccessControlPolicy& _bucket_acl = allocated_acls.back(); + + std::unique_ptr tmp_bucket; + int r = driver->get_bucket( + this, s->user.get(), s->user->get_tenant(), bucket_name, + &tmp_bucket, y + ); if (r < 0) { - ldpp_dout(this, 0) << "could not get bucket info for bucket=" - << bucket_name << dendl; + ldpp_dout(this, 0) + << "could not get bucket info for bucket=" << bucket_name + << dendl; return r; } bucket = tmp_bucket.get(); bucket_acl = &_bucket_acl; - r = read_bucket_policy(this, driver, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), bucket_acl, - tmp_bucket->get_key(), y); + r = read_bucket_policy( + this, driver, s, tmp_bucket->get_info(), tmp_bucket->get_attrs(), + bucket_acl, tmp_bucket->get_key(), y + ); if (r < 0) { - ldpp_dout(this, 0) << "failed to read bucket ACL for bucket " - << bucket << dendl; + ldpp_dout(this, 0) + << "failed to read bucket ACL for bucket " << bucket << dendl; return r; - } - auto _bucket_policy = get_iam_policy_from_attr( - s->cct, tmp_bucket->get_attrs(), tmp_bucket->get_tenant()); + } + auto 
_bucket_policy = get_iam_policy_from_attr( + s->cct, tmp_bucket->get_attrs(), tmp_bucket->get_tenant() + ); bucket_policy = _bucket_policy.get_ptr(); - buckets[bucket_name].swap(tmp_bucket); + buckets[bucket_name].swap(tmp_bucket); policies[bucket_name] = make_pair(bucket_acl, _bucket_policy); } } else { @@ -2047,13 +2110,12 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) part.size = entry.size_bytes; part.etag = entry.etag; ldpp_dout(this, 20) << "slo_part: bucket=" << part.bucket - << " obj=" << part.obj_name - << " size=" << part.size - << " etag=" << part.etag - << dendl; + << " obj=" << part.obj_name << " size=" << part.size + << " etag=" << part.etag << dendl; - etag_sum.Update((const unsigned char *)entry.etag.c_str(), - entry.etag.length()); + etag_sum.Update( + (const unsigned char*)entry.etag.c_str(), entry.etag.length() + ); slo_parts[total_len] = part; total_len += part.size; @@ -2071,13 +2133,13 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) } total_len = end - ofs + 1; - ldpp_dout(this, 20) << "Requested: ofs=" << ofs - << " end=" << end - << " total=" << total_len - << dendl; + ldpp_dout(this, 20) << "Requested: ofs=" << ofs << " end=" << end + << " total=" << total_len << dendl; - r = iterate_slo_parts(this, s->cct, driver, ofs, end, slo_parts, - get_obj_user_manifest_iterate_cb, (void *)this); + r = iterate_slo_parts( + this, s->cct, driver, ofs, end, slo_parts, + get_obj_user_manifest_iterate_cb, (void*)this + ); if (r < 0) { return r; } @@ -2085,29 +2147,33 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y) return 0; } -int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) -{ +int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) { /* garbage collection related handling: * defer_gc disabled for https://tracker.ceph.com/issues/47866 */ return send_response_data(bl, bl_ofs, bl_len); } -int RGWGetObj::get_lua_filter(std::unique_ptr* filter, 
RGWGetObj_Filter* cb) { +int RGWGetObj::get_lua_filter( + std::unique_ptr* filter, RGWGetObj_Filter* cb +) { std::string script; - const auto rc = rgw::lua::read_script(s, s->penv.lua.manager.get(), s->bucket_tenant, s->yield, rgw::lua::context::getData, script); + const auto rc = rgw::lua::read_script( + s, s->penv.lua.manager.get(), s->bucket_tenant, s->yield, + rgw::lua::context::getData, script + ); if (rc == -ENOENT) { // no script, nothing to do return 0; } else if (rc < 0) { - ldpp_dout(this, 5) << "WARNING: failed to read data script. error: " << rc << dendl; + ldpp_dout(this, 5) << "WARNING: failed to read data script. error: " << rc + << dendl; return rc; } filter->reset(new rgw::lua::RGWGetObjFilter(s, script, cb)); return 0; } -bool RGWGetObj::prefetch_data() -{ +bool RGWGetObj::prefetch_data() { /* HEAD request, stop prefetch*/ if (!get_data || s->info.env->exists("HTTP_X_RGW_AUTH")) { return false; @@ -2123,29 +2189,26 @@ bool RGWGetObj::prefetch_data() return get_data; } -void RGWGetObj::pre_exec() -{ +void RGWGetObj::pre_exec() { rgw_bucket_object_pre_exec(s); } static inline void rgw_cond_decode_objtags( - req_state *s, - const std::map &attrs) -{ + req_state* s, const std::map& attrs +) { const auto& tags = attrs.find(RGW_ATTR_TAGS); if (tags != attrs.end()) { try { bufferlist::const_iterator iter{&tags->second}; s->tagset.decode(iter); } catch (buffer::error& err) { - ldpp_dout(s, 0) - << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" + << dendl; } } } -void RGWGetObj::execute(optional_yield y) -{ +void RGWGetObj::execute(optional_yield y) { bufferlist bl; gc_invalidate_time = ceph_clock_now(); gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2); @@ -2154,7 +2217,7 @@ void RGWGetObj::execute(optional_yield y) int64_t ofs_x, end_x; RGWGetObj_CB cb(this); - RGWGetObj_Filter* filter = (RGWGetObj_Filter *)&cb; + RGWGetObj_Filter* filter = 
(RGWGetObj_Filter*)&cb; boost::optional decompress; #ifdef WITH_ARROW_FLIGHT boost::optional flight_filter; @@ -2168,16 +2231,15 @@ void RGWGetObj::execute(optional_yield y) std::unique_ptr read_op(s->object->get_read_op()); op_ret = get_params(y); - if (op_ret < 0) - goto done_err; + if (op_ret < 0) goto done_err; op_ret = init_common(); - if (op_ret < 0) - goto done_err; + if (op_ret < 0) goto done_err; read_op->params.mod_ptr = mod_ptr; read_op->params.unmod_ptr = unmod_ptr; - read_op->params.high_precision_time = s->system_request; /* system request need to use high precision time */ + read_op->params.high_precision_time = + s->system_request; /* system request need to use high precision time */ read_op->params.mod_zone_id = mod_zone_id; read_op->params.mod_pg_ver = mod_pg_ver; read_op->params.if_match = if_match; @@ -2185,8 +2247,7 @@ void RGWGetObj::execute(optional_yield y) read_op->params.lastmod = &lastmod; op_ret = read_op->prepare(s->yield, this); - if (op_ret < 0) - goto done_err; + if (op_ret < 0) goto done_err; version_id = s->object->get_instance(); s->obj_size = s->object->get_obj_size(); attrs = s->object->get_attrs(); @@ -2202,9 +2263,11 @@ void RGWGetObj::execute(optional_yield y) /* start gettorrent */ if (get_torrent) { attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE); - if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") { + if (attr_iter != attrs.end() && + attr_iter->second.to_str() == "SSE-C-AES256") { ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects " - "encrypted with SSE-C" << dendl; + "encrypted with SSE-C" + << dendl; op_ret = -EINVAL; goto done_err; } @@ -2213,12 +2276,13 @@ void RGWGetObj::execute(optional_yield y) op_ret = rgw_read_torrent_file(this, s->object.get(), torrentbl, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret - << dendl; + << dendl; goto done_err; } op_ret = send_response_data(torrentbl, 0, torrentbl.length()); if (op_ret < 0) { 
- ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " + << op_ret << dendl; goto done_err; } return; @@ -2242,20 +2306,24 @@ void RGWGetObj::execute(optional_yield y) filter = &*flight_filter; } } else { - ldpp_dout(this, 0) << "ERROR: flight_store not created in " << __func__ << dendl; + ldpp_dout(this, 0) << "ERROR: flight_store not created in " << __func__ + << dendl; } #endif op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl; + ldpp_dout( + this, 0 + ) << "ERROR: failed to decode compression info, cannot decompress" + << dendl; goto done_err; } if (need_decompress) { - s->obj_size = cs_info.orig_size; - s->object->set_obj_size(cs_info.orig_size); - decompress.emplace(s->cct, &cs_info, partial_content, filter); - filter = &*decompress; + s->obj_size = cs_info.orig_size; + s->object->set_obj_size(cs_info.orig_size); + decompress.emplace(s->cct, &cs_info, partial_content, filter); + filter = &*decompress; } attr_iter = attrs.find(RGW_ATTR_MANIFEST); @@ -2268,14 +2336,14 @@ void RGWGetObj::execute(optional_yield y) op_ret = -ERR_INVALID_OBJECT_STATE; s->err.message = "This object was transitioned to cloud-s3"; ldpp_dout(this, 4) << "Cannot get cloud tiered object. 
Failing with " - << op_ret << dendl; + << op_ret << dendl; goto done_err; } } catch (const buffer::end_of_buffer&) { // ignore empty manifest; it's not cloud-tiered } catch (const std::exception& e) { ldpp_dout(this, 1) << "WARNING: failed to decode object manifest for " - << *s->object << ": " << e.what() << dendl; + << *s->object << ": " << e.what() << dendl; } } @@ -2284,7 +2352,7 @@ void RGWGetObj::execute(optional_yield y) op_ret = handle_user_manifest(attr_iter->second.c_str(), y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret=" - << op_ret << dendl; + << op_ret << dendl; goto done_err; } return; @@ -2295,8 +2363,8 @@ void RGWGetObj::execute(optional_yield y) is_slo = true; op_ret = handle_slo_manifest(attr_iter->second, y); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret - << dendl; + ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" + << op_ret << dendl; goto done_err; } return; @@ -2310,8 +2378,7 @@ void RGWGetObj::execute(optional_yield y) } op_ret = s->object->range_to_ofs(s->obj_size, ofs, end); - if (op_ret < 0) - goto done_err; + if (op_ret < 0) goto done_err; total_len = (ofs <= end ? end + 1 - ofs : 0); ofs_x = ofs; @@ -2331,8 +2398,10 @@ void RGWGetObj::execute(optional_yield y) start = ofs; attr_iter = attrs.find(RGW_ATTR_MANIFEST); - op_ret = this->get_decrypt_filter(&decrypt, filter, - attr_iter != attrs.end() ? &(attr_iter->second) : nullptr); + op_ret = this->get_decrypt_filter( + &decrypt, filter, + attr_iter != attrs.end() ? 
&(attr_iter->second) : nullptr + ); if (decrypt != nullptr) { filter = decrypt.get(); filter->fixup_range(ofs_x, end_x); @@ -2341,7 +2410,6 @@ void RGWGetObj::execute(optional_yield y) goto done_err; } - if (!get_data || ofs > end) { send_response_data(bl, 0, 0); return; @@ -2351,8 +2419,7 @@ void RGWGetObj::execute(optional_yield y) op_ret = read_op->iterate(this, ofs_x, end_x, filter, s->yield); - if (op_ret >= 0) - op_ret = filter->flush(); + if (op_ret >= 0) op_ret = filter->flush(); perfcounter->tinc(l_rgw_get_lat, s->time_elapsed()); if (op_ret < 0) { @@ -2369,33 +2436,28 @@ void RGWGetObj::execute(optional_yield y) send_response_data_error(y); } -int RGWGetObj::init_common() -{ +int RGWGetObj::init_common() { if (range_str) { /* range parsed error when prefetch */ if (!range_parsed) { int r = parse_range(); - if (r < 0) - return r; + if (r < 0) return r; } } if (if_mod) { - if (parse_time(if_mod, &mod_time) < 0) - return -EINVAL; + if (parse_time(if_mod, &mod_time) < 0) return -EINVAL; mod_ptr = &mod_time; } if (if_unmod) { - if (parse_time(if_unmod, &unmod_time) < 0) - return -EINVAL; + if (parse_time(if_unmod, &unmod_time) < 0) return -EINVAL; unmod_ptr = &unmod_time; } return 0; } -int RGWListBuckets::verify_permission(optional_yield y) -{ +int RGWListBuckets::verify_permission(optional_yield y) { rgw::Partition partition = rgw::Partition::aws; rgw::Service service = rgw::Service::s3; @@ -2406,15 +2468,17 @@ int RGWListBuckets::verify_permission(optional_yield y) tenant = s->user->get_tenant(); } - if (!verify_user_permission(this, s, ARN(partition, service, "", tenant, "*"), rgw::IAM::s3ListAllMyBuckets, false)) { + if (!verify_user_permission( + this, s, ARN(partition, service, "", tenant, "*"), + rgw::IAM::s3ListAllMyBuckets, false + )) { return -EACCES; } return 0; } -int RGWGetUsage::verify_permission(optional_yield y) -{ +int RGWGetUsage::verify_permission(optional_yield y) { if (s->auth.identity->is_anonymous()) { return -EACCES; } @@ -2422,8 
+2486,7 @@ int RGWGetUsage::verify_permission(optional_yield y) return 0; } -void RGWListBuckets::execute(optional_yield y) -{ +void RGWListBuckets::execute(optional_yield y) { bool done; bool started = false; uint64_t total_count = 0; @@ -2452,13 +2515,15 @@ void RGWListBuckets::execute(optional_yield y) read_count = max_buckets; } - op_ret = s->user->list_buckets(this, marker, end_marker, read_count, should_get_stats(), buckets, y); + op_ret = s->user->list_buckets( + this, marker, end_marker, read_count, should_get_stats(), buckets, y + ); if (op_ret < 0) { /* hmm.. something wrong here.. the user was authenticated, so it should exist */ ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid=" - << s->user->get_id() << dendl; + << s->user->get_id() << dendl; break; } @@ -2468,13 +2533,15 @@ void RGWListBuckets::execute(optional_yield y) * isn't actually used in a given account. In such situation its usage * stats would be simply full of zeros. */ std::set targets; - if (driver->get_zone()->get_zonegroup().get_placement_target_names(targets)) { + if (driver->get_zone()->get_zonegroup().get_placement_target_names(targets + )) { for (const auto& policy : targets) { - policies_stats.emplace(policy, decltype(policies_stats)::mapped_type()); + policies_stats.emplace(policy, decltype(policies_stats)::mapped_type()); } } - std::map>& m = buckets.get_buckets(); + std::map>& m = + buckets.get_buckets(); for (const auto& kv : m) { const auto& bucket = kv.second; @@ -2484,7 +2551,8 @@ void RGWListBuckets::execute(optional_yield y) /* operator[] still can create a new entry for storage policy seen * for first time. 
*/ - auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()]; + auto& policy_stats = + policies_stats[bucket->get_placement_rule().to_str()]; policy_stats.bytes_used += bucket->get_size(); policy_stats.bytes_used_rounded += bucket->get_size_rounded(); policy_stats.buckets_count++; @@ -2493,15 +2561,16 @@ void RGWListBuckets::execute(optional_yield y) global_stats.buckets_count += m.size(); total_count += m.size(); - done = (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit)); + done = + (m.size() < read_count || (limit >= 0 && total_count >= (uint64_t)limit) + ); if (!started) { send_response_begin(buckets.count() > 0); started = true; } - if (read_count > 0 && - !m.empty()) { + if (read_count > 0 && !m.empty()) { auto riter = m.rbegin(); marker = riter->first; @@ -2516,14 +2585,12 @@ void RGWListBuckets::execute(optional_yield y) send_response_end(); } -void RGWGetUsage::execute(optional_yield y) -{ +void RGWGetUsage::execute(optional_yield y) { uint64_t start_epoch = 0; uint64_t end_epoch = (uint64_t)-1; op_ret = get_params(y); - if (op_ret < 0) - return; - + if (op_ret < 0) return; + if (!start_date.empty()) { op_ret = utime_t::parse_date(start_date, &start_epoch, NULL); if (op_ret < 0) { @@ -2531,7 +2598,7 @@ void RGWGetUsage::execute(optional_yield y) return; } } - + if (!end_date.empty()) { op_ret = utime_t::parse_date(end_date, &end_epoch, NULL); if (op_ret < 0) { @@ -2539,16 +2606,18 @@ void RGWGetUsage::execute(optional_yield y) return; } } - + uint32_t max_entries = 1000; bool is_truncated = true; RGWUsageIter usage_iter; - + while (s->bucket && is_truncated) { - op_ret = s->bucket->read_usage(this, start_epoch, end_epoch, max_entries, &is_truncated, - usage_iter, usage); + op_ret = s->bucket->read_usage( + this, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, + usage + ); if (op_ret == -ENOENT) { op_ret = 0; is_truncated = false; @@ -2556,7 +2625,7 @@ void RGWGetUsage::execute(optional_yield y) if 
(op_ret < 0) { return; - } + } } op_ret = rgw_user_sync_all_stats(this, driver, s->user.get(), y); @@ -2565,7 +2634,9 @@ void RGWGetUsage::execute(optional_yield y) return; } - op_ret = rgw_user_get_all_buckets_stats(this, driver, s->user.get(), buckets_usage, y); + op_ret = rgw_user_get_all_buckets_stats( + this, driver, s->user.get(), buckets_usage, y + ); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl; return; @@ -2573,15 +2644,14 @@ void RGWGetUsage::execute(optional_yield y) op_ret = s->user->read_stats(this, y, &stats); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl; + ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl; return; } - + return; } -int RGWStatAccount::verify_permission(optional_yield y) -{ +int RGWStatAccount::verify_permission(optional_yield y) { if (!verify_user_permission_no_policy(this, s, RGW_PERM_READ)) { return -EACCES; } @@ -2589,22 +2659,22 @@ int RGWStatAccount::verify_permission(optional_yield y) return 0; } -void RGWStatAccount::execute(optional_yield y) -{ +void RGWStatAccount::execute(optional_yield y) { string marker; rgw::sal::BucketList buckets; uint64_t max_buckets = s->cct->_conf->rgw_list_buckets_max_chunk; - const string *lastmarker; + const string* lastmarker; do { - lastmarker = nullptr; - op_ret = s->user->list_buckets(this, marker, string(), max_buckets, true, buckets, y); + op_ret = s->user->list_buckets( + this, marker, string(), max_buckets, true, buckets, y + ); if (op_ret < 0) { /* hmm.. something wrong here.. 
the user was authenticated, so it should exist */ ldpp_dout(this, 10) << "WARNING: failed on list_buckets uid=" - << s->user->get_id() << " ret=" << op_ret << dendl; + << s->user->get_id() << " ret=" << op_ret << dendl; break; } else { /* We need to have stats for all our policies - even if a given policy @@ -2616,10 +2686,11 @@ void RGWStatAccount::execute(optional_yield y) policies_stats.emplace(policy, decltype(policies_stats)::mapped_type()); } - std::map>& m = buckets.get_buckets(); + std::map>& m = + buckets.get_buckets(); for (const auto& kv : m) { const auto& bucket = kv.second; - lastmarker = &kv.first; + lastmarker = &kv.first; global_stats.bytes_used += bucket->get_size(); global_stats.bytes_used_rounded += bucket->get_size_rounded(); @@ -2627,41 +2698,38 @@ void RGWStatAccount::execute(optional_yield y) /* operator[] still can create a new entry for storage policy seen * for first time. */ - auto& policy_stats = policies_stats[bucket->get_placement_rule().to_str()]; + auto& policy_stats = + policies_stats[bucket->get_placement_rule().to_str()]; policy_stats.bytes_used += bucket->get_size(); policy_stats.bytes_used_rounded += bucket->get_size_rounded(); policy_stats.buckets_count++; policy_stats.objects_count += bucket->get_count(); } global_stats.buckets_count += m.size(); - } if (!lastmarker) { - ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker=" - << marker << " uid=" << s->user->get_id() << dendl; - break; + ldpp_dout(this, -1) << "ERROR: rgw_read_user_buckets, stasis at marker=" + << marker << " uid=" << s->user->get_id() << dendl; + break; } marker = *lastmarker; } while (buckets.is_truncated()); } -int RGWGetBucketVersioning::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketVersioning::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, 
has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning); } -void RGWGetBucketVersioning::pre_exec() -{ +void RGWGetBucketVersioning::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetBucketVersioning::execute(optional_yield y) -{ - if (! s->bucket_exists) { +void RGWGetBucketVersioning::execute(optional_yield y) { + if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; } @@ -2671,33 +2739,32 @@ void RGWGetBucketVersioning::execute(optional_yield y) mfa_enabled = s->bucket->get_info().mfa_enabled(); } -int RGWSetBucketVersioning::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWSetBucketVersioning::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning); } -void RGWSetBucketVersioning::pre_exec() -{ +void RGWSetBucketVersioning::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWSetBucketVersioning::execute(optional_yield y) -{ +void RGWSetBucketVersioning::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - if (! 
s->bucket_exists) { + if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; } - if (s->bucket->get_info().obj_lock_enabled() && versioning_status != VersioningEnabled) { - s->err.message = "bucket versioning cannot be disabled on buckets with object lock enabled"; + if (s->bucket->get_info().obj_lock_enabled() && + versioning_status != VersioningEnabled) { + s->err.message = + "bucket versioning cannot be disabled on buckets with object lock " + "enabled"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; op_ret = -ERR_INVALID_BUCKET_STATE; return; @@ -2707,8 +2774,7 @@ void RGWSetBucketVersioning::execute(optional_yield y) mfa_set_status &= (mfa_status != cur_mfa_status); - if (mfa_set_status && - !s->mfa_verified) { + if (mfa_set_status && !s->mfa_verified) { op_ret = -ERR_MFA_REQUIRED; return; } @@ -2717,9 +2783,11 @@ void RGWSetBucketVersioning::execute(optional_yield y) bool req_versioning_status = false; //if requested versioning status is not the same as the one set for the bucket, return error if (versioning_status == VersioningEnabled) { - req_versioning_status = (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) != 0; + req_versioning_status = + (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) != 0; } else if (versioning_status == VersioningSuspended) { - req_versioning_status = (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) == 0; + req_versioning_status = + (s->bucket->get_info().flags & BUCKET_VERSIONS_SUSPENDED) == 0; } if (req_versioning_status && !s->mfa_verified) { op_ret = -ERR_MFA_REQUIRED; @@ -2727,131 +2795,130 @@ void RGWSetBucketVersioning::execute(optional_yield y) } } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + 
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } bool modified = mfa_set_status; op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&] { - if (mfa_set_status) { - if (mfa_status) { - s->bucket->get_info().flags |= BUCKET_MFA_ENABLED; - } else { - s->bucket->get_info().flags &= ~BUCKET_MFA_ENABLED; - } - } - - if (versioning_status == VersioningEnabled) { - s->bucket->get_info().flags |= BUCKET_VERSIONED; - s->bucket->get_info().flags &= ~BUCKET_VERSIONS_SUSPENDED; - modified = true; - } else if (versioning_status == VersioningSuspended) { - s->bucket->get_info().flags |= (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED); - modified = true; + if (mfa_set_status) { + if (mfa_status) { + s->bucket->get_info().flags |= BUCKET_MFA_ENABLED; } else { - return op_ret; + s->bucket->get_info().flags &= ~BUCKET_MFA_ENABLED; } - s->bucket->set_attrs(rgw::sal::Attrs(s->bucket_attrs)); - return s->bucket->put_info(this, false, real_time()); - }); + } + + if (versioning_status == VersioningEnabled) { + s->bucket->get_info().flags |= BUCKET_VERSIONED; + s->bucket->get_info().flags &= ~BUCKET_VERSIONS_SUSPENDED; + modified = true; + } else if (versioning_status == VersioningSuspended) { + s->bucket->get_info().flags |= + (BUCKET_VERSIONED | BUCKET_VERSIONS_SUSPENDED); + modified = true; + } else { + return op_ret; + } + s->bucket->set_attrs(rgw::sal::Attrs(s->bucket_attrs)); + return s->bucket->put_info(this, false, real_time()); + }); if (!modified) { return; } if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret + << dendl; return; } } -int RGWGetBucketWebsite::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - 
rgw_iam_add_buckettags(this, s); +int RGWGetBucketWebsite::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite); } -void RGWGetBucketWebsite::pre_exec() -{ +void RGWGetBucketWebsite::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetBucketWebsite::execute(optional_yield y) -{ +void RGWGetBucketWebsite::execute(optional_yield y) { if (!s->bucket->get_info().has_website) { op_ret = -ERR_NO_SUCH_WEBSITE_CONFIGURATION; } } -int RGWSetBucketWebsite::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWSetBucketWebsite::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite); } -void RGWSetBucketWebsite::pre_exec() -{ +void RGWSetBucketWebsite::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWSetBucketWebsite::execute(optional_yield y) -{ +void RGWSetBucketWebsite::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, 
s->bucket.get(), [this] { - s->bucket->get_info().has_website = true; - s->bucket->get_info().website_conf = website_conf; - op_ret = s->bucket->put_info(this, false, real_time()); - return op_ret; - }); + s->bucket->get_info().has_website = true; + s->bucket->get_info().website_conf = website_conf; + op_ret = s->bucket->put_info(this, false, real_time()); + return op_ret; + }); if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret + << dendl; return; } } -int RGWDeleteBucketWebsite::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucketWebsite::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite); } -void RGWDeleteBucketWebsite::pre_exec() -{ +void RGWDeleteBucketWebsite::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWDeleteBucketWebsite::execute(optional_yield y) -{ +void RGWDeleteBucketWebsite::execute(optional_yield y) { if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; @@ -2859,30 +2926,32 @@ void RGWDeleteBucketWebsite::execute(optional_yield y) bufferlist in_data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name() - << "returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << 
"NOTICE: forward_to_master failed on bucket=" + << s->bucket->get_name() << "returned err=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - s->bucket->get_info().has_website = false; - s->bucket->get_info().website_conf = RGWBucketWebsiteConf(); - op_ret = s->bucket->put_info(this, false, real_time()); - return op_ret; - }); + s->bucket->get_info().has_website = false; + s->bucket->get_info().website_conf = RGWBucketWebsiteConf(); + op_ret = s->bucket->put_info(this, false, real_time()); + return op_ret; + }); if (op_ret < 0) { ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket - << " returned err=" << op_ret << dendl; + << " returned err=" << op_ret << dendl; return; } } -int RGWStatBucket::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWStatBucket::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); // This (a HEAD request on a bucket) is governed by the s3:ListBucket permission. 
if (!verify_bucket_permission(this, s, rgw::IAM::s3ListBucket)) { @@ -2892,80 +2961,76 @@ int RGWStatBucket::verify_permission(optional_yield y) return 0; } -void RGWStatBucket::pre_exec() -{ +void RGWStatBucket::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWStatBucket::execute(optional_yield y) -{ +void RGWStatBucket::execute(optional_yield y) { if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; } - op_ret = driver->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y); + op_ret = + driver->get_bucket(this, s->user.get(), s->bucket->get_key(), &bucket, y); if (op_ret) { return; } op_ret = bucket->update_container_stats(s); } -int RGWListBucket::verify_permission(optional_yield y) -{ +int RGWListBucket::verify_permission(optional_yield y) { op_ret = get_params(y); if (op_ret < 0) { return op_ret; } - if (!prefix.empty()) - s->env.emplace("s3:prefix", prefix); + if (!prefix.empty()) s->env.emplace("s3:prefix", prefix); - if (!delimiter.empty()) - s->env.emplace("s3:delimiter", delimiter); + if (!delimiter.empty()) s->env.emplace("s3:delimiter", delimiter); s->env.emplace("s3:max-keys", std::to_string(max)); - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - if (!verify_bucket_permission(this, - s, - list_versions ? - rgw::IAM::s3ListBucketVersions : - rgw::IAM::s3ListBucket)) { + if (!verify_bucket_permission( + this, s, + list_versions ? 
rgw::IAM::s3ListBucketVersions + : rgw::IAM::s3ListBucket + )) { return -EACCES; } return 0; } -int RGWListBucket::parse_max_keys() -{ +int RGWListBucket::parse_max_keys() { // Bound max value of max-keys to configured value for security // Bound min value of max-keys to '0' // Some S3 clients explicitly send max-keys=0 to detect if the bucket is // empty without listing any items. - return parse_value_and_bound(max_keys, max, 0, - g_conf().get_val("rgw_max_listing_results"), - default_max); + return parse_value_and_bound( + max_keys, max, 0, g_conf().get_val("rgw_max_listing_results"), + default_max + ); } -void RGWListBucket::pre_exec() -{ +void RGWListBucket::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWListBucket::execute(optional_yield y) -{ +void RGWListBucket::execute(optional_yield y) { if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; } if (allow_unordered && !delimiter.empty()) { - ldpp_dout(this, 0) << - "ERROR: unordered bucket listing requested with a delimiter" << dendl; + ldpp_dout( + this, 0 + ) << "ERROR: unordered bucket listing requested with a delimiter" + << dendl; op_ret = -EINVAL; return; } @@ -2994,26 +3059,23 @@ void RGWListBucket::execute(optional_yield y) } } -int RGWGetBucketLogging::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketLogging::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging); } -int RGWGetBucketLocation::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int 
RGWGetBucketLocation::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation); } -int RGWCreateBucket::verify_permission(optional_yield y) -{ +int RGWCreateBucket::verify_permission(optional_yield y) { /* This check is mostly needed for S3 that doesn't support account ACL. * Swift doesn't allow to delegate any permission to an anonymous user, * so it will become an early exit in such case. */ @@ -3033,9 +3095,8 @@ int RGWCreateBucket::verify_permission(optional_yield y) //AssumeRole is meant for cross account access if (s->auth.identity->get_identity_type() != TYPE_ROLE) { ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant" - << " (user_id.tenant=" << s->user->get_tenant() - << " requested=" << s->bucket_tenant << ")" - << dendl; + << " (user_id.tenant=" << s->user->get_tenant() + << " requested=" << s->bucket_tenant << ")" << dendl; return -EACCES; } } @@ -3047,8 +3108,9 @@ int RGWCreateBucket::verify_permission(optional_yield y) if (s->user->get_max_buckets()) { rgw::sal::BucketList buckets; string marker; - op_ret = s->user->list_buckets(this, marker, string(), s->user->get_max_buckets(), - false, buckets, y); + op_ret = s->user->list_buckets( + this, marker, string(), s->user->get_max_buckets(), false, buckets, y + ); if (op_ret < 0) { return op_ret; } @@ -3061,21 +3123,21 @@ int RGWCreateBucket::verify_permission(optional_yield y) return 0; } -void RGWCreateBucket::pre_exec() -{ +void RGWCreateBucket::pre_exec() { rgw_bucket_object_pre_exec(s); } -static void prepare_add_del_attrs(const map& orig_attrs, - map& out_attrs, - map& out_rmattrs) -{ +static void prepare_add_del_attrs( + const map& orig_attrs, + map& out_attrs, map& out_rmattrs +) { for (const auto& kv : orig_attrs) { const string& name = kv.first; /* Check if the attr 
is user-defined metadata item. */ - if (name.compare(0, sizeof(RGW_ATTR_META_PREFIX) - 1, - RGW_ATTR_META_PREFIX) == 0) { + if (name.compare( + 0, sizeof(RGW_ATTR_META_PREFIX) - 1, RGW_ATTR_META_PREFIX + ) == 0) { /* For the objects all existing meta attrs have to be removed. */ out_rmattrs[name] = kv.second; } else if (out_attrs.find(name) == std::end(out_attrs)) { @@ -3092,16 +3154,16 @@ static void prepare_add_del_attrs(const map& orig_attrs, * will be preserved without any change. Special attributes are those which * names start with RGW_ATTR_META_PREFIX. They're complement to custom ones * used for X-Account-Meta-*, X-Container-Meta-*, X-Amz-Meta and so on. */ -static void prepare_add_del_attrs(const map& orig_attrs, - const set& rmattr_names, - map& out_attrs) -{ +static void prepare_add_del_attrs( + const map& orig_attrs, const set& rmattr_names, + map& out_attrs +) { for (const auto& kv : orig_attrs) { const string& name = kv.first; /* Check if the attr is user-defined metadata item. */ - if (name.compare(0, strlen(RGW_ATTR_META_PREFIX), - RGW_ATTR_META_PREFIX) == 0) { + if (name.compare(0, strlen(RGW_ATTR_META_PREFIX), RGW_ATTR_META_PREFIX) == + 0) { /* For the buckets all existing meta attrs are preserved, except those that are listed in rmattr_names. 
*/ if (rmattr_names.find(name) != std::end(rmattr_names)) { @@ -3121,10 +3183,9 @@ static void prepare_add_del_attrs(const map& orig_attrs, } } - -static void populate_with_generic_attrs(const req_state * const s, - map& out_attrs) -{ +static void populate_with_generic_attrs( + const req_state* const s, map& out_attrs +) { for (const auto& kv : s->generic_attrs) { bufferlist& attrbl = out_attrs[kv.first]; const string& val = kv.second; @@ -3133,12 +3194,11 @@ static void populate_with_generic_attrs(const req_state * const s, } } - -static int filter_out_quota_info(std::map& add_attrs, - const std::set& rmattr_names, - RGWQuotaInfo& quota, - bool * quota_extracted = nullptr) -{ +static int filter_out_quota_info( + std::map& add_attrs, + const std::set& rmattr_names, RGWQuotaInfo& quota, + bool* quota_extracted = nullptr +) { bool extracted = false; /* Put new limit on max objects. */ @@ -3146,7 +3206,7 @@ static int filter_out_quota_info(std::map& add_attrs, std::string err; if (std::end(add_attrs) != iter) { quota.max_objects = - static_cast(strict_strtoll(iter->second.c_str(), 10, &err)); + static_cast(strict_strtoll(iter->second.c_str(), 10, &err)); if (!err.empty()) { return -EINVAL; } @@ -3158,7 +3218,7 @@ static int filter_out_quota_info(std::map& add_attrs, iter = add_attrs.find(RGW_ATTR_QUOTA_MSIZE); if (iter != add_attrs.end()) { quota.max_size = - static_cast(strict_strtoll(iter->second.c_str(), 10, &err)); + static_cast(strict_strtoll(iter->second.c_str(), 10, &err)); if (!err.empty()) { return -EINVAL; } @@ -3191,26 +3251,24 @@ static int filter_out_quota_info(std::map& add_attrs, return 0; } - -static void filter_out_website(std::map& add_attrs, - const std::set& rmattr_names, - RGWBucketWebsiteConf& ws_conf) -{ +static void filter_out_website( + std::map& add_attrs, + const std::set& rmattr_names, RGWBucketWebsiteConf& ws_conf +) { std::string lstval; /* Let's define a mapping between each custom attribute and the memory where * attribute's value 
should be stored. The memory location is expressed by * a non-const reference. */ - const auto mapping = { - std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)), - std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)), - std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)), - std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)), - std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker)) - }; + const auto mapping = { + std::make_pair(RGW_ATTR_WEB_INDEX, std::ref(ws_conf.index_doc_suffix)), + std::make_pair(RGW_ATTR_WEB_ERROR, std::ref(ws_conf.error_doc)), + std::make_pair(RGW_ATTR_WEB_LISTINGS, std::ref(lstval)), + std::make_pair(RGW_ATTR_WEB_LIST_CSS, std::ref(ws_conf.listing_css_doc)), + std::make_pair(RGW_ATTR_SUBDIR_MARKER, std::ref(ws_conf.subdir_marker))}; for (const auto& kv : mapping) { - const char * const key = kv.first; + const char* const key = kv.first; auto& target = kv.second; auto iter = add_attrs.find(key); @@ -3226,37 +3284,39 @@ static void filter_out_website(std::map& add_attr } } - if (! lstval.empty()) { + if (!lstval.empty()) { ws_conf.listing_enabled = boost::algorithm::iequals(lstval, "true"); } } - -void RGWCreateBucket::execute(optional_yield y) -{ +void RGWCreateBucket::execute(optional_yield y) { buffer::list aclbl; buffer::list corsbl; - string bucket_name = rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); + string bucket_name = + rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name); op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - if (!relaxed_region_enforcement && - !location_constraint.empty() && + if (!relaxed_region_enforcement && !location_constraint.empty() && !driver->get_zone()->has_zonegroup_api(location_constraint)) { - ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" + ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" << " can't be found." 
<< dendl; - op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; - s->err.message = "The specified location-constraint is not valid"; - return; + op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; + s->err.message = "The specified location-constraint is not valid"; + return; } - if (!relaxed_region_enforcement && !driver->get_zone()->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() && - driver->get_zone()->get_zonegroup().get_api_name() != location_constraint) { + if (!relaxed_region_enforcement && + !driver->get_zone()->get_zonegroup().is_master_zonegroup() && + !location_constraint.empty() && + driver->get_zone()->get_zonegroup().get_api_name() != + location_constraint) { ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" - << " doesn't match zonegroup" << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")" - << dendl; + << " doesn't match zonegroup" + << " (" + << driver->get_zone()->get_zonegroup().get_api_name() + << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; s->err.message = "The specified location-constraint is not valid"; return; @@ -3264,11 +3324,12 @@ void RGWCreateBucket::execute(optional_yield y) std::set names; driver->get_zone()->get_zonegroup().get_placement_target_names(names); - if (!placement_rule.name.empty() && - !names.count(placement_rule.name)) { + if (!placement_rule.name.empty() && !names.count(placement_rule.name)) { ldpp_dout(this, 0) << "placement target (" << placement_rule.name << ")" - << " doesn't exist in the placement targets of zonegroup" - << " (" << driver->get_zone()->get_zonegroup().get_api_name() << ")" << dendl; + << " doesn't exist in the placement targets of zonegroup" + << " (" + << driver->get_zone()->get_zonegroup().get_api_name() + << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; s->err.message = "The specified placement target does not exist"; return; @@ -3278,18 +3339,17 @@ void RGWCreateBucket::execute(optional_yield y) * specific request */ { 
std::unique_ptr tmp_bucket; - op_ret = driver->get_bucket(this, s->user.get(), s->bucket_tenant, - s->bucket_name, &tmp_bucket, y); - if (op_ret < 0 && op_ret != -ENOENT) - return; + op_ret = driver->get_bucket( + this, s->user.get(), s->bucket_tenant, s->bucket_name, &tmp_bucket, y + ); + if (op_ret < 0 && op_ret != -ENOENT) return; s->bucket_exists = (op_ret != -ENOENT); if (s->bucket_exists) { - if (!s->system_request && - driver->get_zone()->get_zonegroup().get_id() != - tmp_bucket->get_info().zonegroup) { - op_ret = -EEXIST; - return; + if (!s->system_request && driver->get_zone()->get_zonegroup().get_id() != + tmp_bucket->get_info().zonegroup) { + op_ret = -EEXIST; + return; } /* Initialize info from req_state */ info = tmp_bucket->get_info(); @@ -3322,7 +3382,7 @@ void RGWCreateBucket::execute(optional_yield y) } RGWQuotaInfo quota_info; - const RGWQuotaInfo * pquota_info = nullptr; + const RGWQuotaInfo* pquota_info = nullptr; if (need_metadata_upload()) { /* It's supposed that following functions WILL NOT change any special * attributes (like RGW_ATTR_ACL) if they are already present in attrs. */ @@ -3352,24 +3412,24 @@ void RGWCreateBucket::execute(optional_yield y) /* Handle updates of the metadata for Swift's object versioning. */ if (swift_ver_location) { info.swift_ver_location = *swift_ver_location; - info.swift_versioning = (! 
swift_ver_location->empty()); + info.swift_versioning = (!swift_ver_location->empty()); } /* We're replacing bucket with the newly created one */ - ldpp_dout(this, 10) << "user=" << s->user << " bucket=" << tmp_bucket << dendl; - op_ret = s->user->create_bucket(this, tmp_bucket, zonegroup_id, - placement_rule, - info.swift_ver_location, - pquota_info, policy, attrs, info, ep_objv, - true, obj_lock_enabled, &s->bucket_exists, s->info, - &s->bucket, y); + ldpp_dout(this, 10) << "user=" << s->user << " bucket=" << tmp_bucket + << dendl; + op_ret = s->user->create_bucket( + this, tmp_bucket, zonegroup_id, placement_rule, info.swift_ver_location, + pquota_info, policy, attrs, info, ep_objv, true, obj_lock_enabled, + &s->bucket_exists, s->info, &s->bucket, y + ); /* continue if EEXIST and create_bucket will fail below. this way we can * recover from a partial create by retrying it. */ - ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket.get() << dendl; + ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret + << " bucket=" << s->bucket.get() << dendl; - if (op_ret) - return; + if (op_ret) return; const bool existed = s->bucket_exists; if (need_metadata_upload() && existed) { @@ -3400,7 +3460,9 @@ void RGWCreateBucket::execute(optional_yield y) } prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs); populate_with_generic_attrs(s, attrs); - op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket->get_info().quota); + op_ret = filter_out_quota_info( + attrs, rmattr_names, s->bucket->get_info().quota + ); if (op_ret < 0) { return; } @@ -3408,12 +3470,15 @@ void RGWCreateBucket::execute(optional_yield y) /* Handle updates of the metadata for Swift's object versioning. */ if (swift_ver_location) { s->bucket->get_info().swift_ver_location = *swift_ver_location; - s->bucket->get_info().swift_versioning = (! 
swift_ver_location->empty()); + s->bucket->get_info().swift_versioning = (!swift_ver_location->empty()); } /* Web site of Swift API. */ - filter_out_website(attrs, rmattr_names, s->bucket->get_info().website_conf); - s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty(); + filter_out_website( + attrs, rmattr_names, s->bucket->get_info().website_conf + ); + s->bucket->get_info().has_website = + !s->bucket->get_info().website_conf.is_empty(); /* This will also set the quota on the bucket. */ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y); @@ -3426,11 +3491,10 @@ void RGWCreateBucket::execute(optional_yield y) } } -int RGWDeleteBucket::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucket::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucket)) { return -EACCES; @@ -3439,20 +3503,19 @@ int RGWDeleteBucket::verify_permission(optional_yield y) return 0; } -void RGWDeleteBucket::pre_exec() -{ +void RGWDeleteBucket::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWDeleteBucket::execute(optional_yield y) -{ +void RGWDeleteBucket::execute(optional_yield y) { if (s->bucket_name.empty()) { op_ret = -EINVAL; return; } if (!s->bucket_exists) { - ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl; + ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" + << dendl; op_ret = -ERR_NO_SUCH_BUCKET; return; } @@ -3477,8 +3540,11 @@ void RGWDeleteBucket::execute(optional_yield y) } op_ret = s->bucket->sync_user_stats(this, y); - if ( op_ret < 0) { - ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: 
op_ret= " << op_ret << dendl; + if (op_ret < 0) { + ldpp_dout( + this, 1 + ) << "WARNING: failed to sync user stats before bucket delete: op_ret= " + << op_ret << dendl; } op_ret = s->bucket->check_empty(this, y); @@ -3487,7 +3553,9 @@ void RGWDeleteBucket::execute(optional_yield y) } bufferlist in_data; - op_ret = driver->forward_request_to_master(this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), &ot.read_version, in_data, nullptr, s->info, y + ); if (op_ret < 0) { if (op_ret == -ENOENT) { /* adjust error, we want to return with NoSuchBucket and not @@ -3499,14 +3567,14 @@ void RGWDeleteBucket::execute(optional_yield y) op_ret = rgw_remove_sse_s3_bucket_key(s); if (op_ret != 0) { - // do nothing; it will already have been logged + // do nothing; it will already have been logged } op_ret = s->bucket->remove_bucket(this, false, false, nullptr, y); if (op_ret < 0 && op_ret == -ECANCELED) { - // lost a race, either with mdlog sync or another delete bucket operation. - // in either case, we've already called ctl.bucket->unlink_bucket() - op_ret = 0; + // lost a race, either with mdlog sync or another delete bucket operation. + // in either case, we've already called ctl.bucket->unlink_bucket() + op_ret = 0; } return; @@ -3520,9 +3588,8 @@ int RGWPutObj::init_processing(optional_yield y) { /* handle x-amz-copy-source */ std::string_view cs_view(copy_source); - if (! 
cs_view.empty()) { - if (cs_view[0] == '/') - cs_view.remove_prefix(1); + if (!cs_view.empty()) { + if (cs_view[0] == '/') cs_view.remove_prefix(1); copy_source_bucket_name = std::string(cs_view); pos = copy_source_bucket_name.find("/"); if (pos == std::string::npos) { @@ -3531,7 +3598,7 @@ int RGWPutObj::init_processing(optional_yield y) { return ret; } copy_source_object_name = - copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size()); + copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size()); copy_source_bucket_name = copy_source_bucket_name.substr(0, pos); #define VERSION_ID_STR "?versionId=" pos = copy_source_object_name.find(VERSION_ID_STR); @@ -3539,9 +3606,9 @@ int RGWPutObj::init_processing(optional_yield y) { copy_source_object_name = url_decode(copy_source_object_name); } else { copy_source_version_id = - copy_source_object_name.substr(pos + sizeof(VERSION_ID_STR) - 1); + copy_source_object_name.substr(pos + sizeof(VERSION_ID_STR) - 1); copy_source_object_name = - url_decode(copy_source_object_name.substr(0, pos)); + url_decode(copy_source_object_name.substr(0, pos)); } pos = copy_source_bucket_name.find(":"); if (pos == std::string::npos) { @@ -3549,7 +3616,9 @@ int RGWPutObj::init_processing(optional_yield y) { copy_source_tenant_name = s->user->get_tenant(); } else { copy_source_tenant_name = copy_source_bucket_name.substr(0, pos); - copy_source_bucket_name = copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size()); + copy_source_bucket_name = copy_source_bucket_name.substr( + pos + 1, copy_source_bucket_name.size() + ); if (copy_source_bucket_name.empty()) { ret = -EINVAL; ldpp_dout(this, 5) << "source bucket name is empty" << dendl; @@ -3557,10 +3626,13 @@ int RGWPutObj::init_processing(optional_yield y) { } } std::unique_ptr bucket; - ret = driver->get_bucket(this, s->user.get(), copy_source_tenant_name, copy_source_bucket_name, - &bucket, y); + ret = driver->get_bucket( + this, s->user.get(), 
copy_source_tenant_name, copy_source_bucket_name, + &bucket, y + ); if (ret < 0) { - ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret << dendl; + ldpp_dout(this, 5) << __func__ << "(): get_bucket() returned ret=" << ret + << dendl; if (ret == -ENOENT) { ret = -ERR_NO_SUCH_BUCKET; } @@ -3569,7 +3641,8 @@ int RGWPutObj::init_processing(optional_yield y) { ret = bucket->load_bucket(this, y); if (ret < 0) { - ldpp_dout(this, 5) << __func__ << "(): load_bucket() returned ret=" << ret << dendl; + ldpp_dout(this, 5) << __func__ << "(): load_bucket() returned ret=" << ret + << dendl; return ret; } copy_source_bucket_info = bucket->get_info(); @@ -3594,8 +3667,11 @@ int RGWPutObj::init_processing(optional_yield y) { string first = range.substr(0, pos); string last = range.substr(pos + 1); if (first.find_first_not_of("0123456789") != std::string::npos || - last.find_first_not_of("0123456789") != std::string::npos) { - ldpp_dout(this, 5) << "x-amz-copy-source-range bad format not an integer" << dendl; + last.find_first_not_of("0123456789") != std::string::npos) { + ldpp_dout( + this, 5 + ) << "x-amz-copy-source-range bad format not an integer" + << dendl; ret = -EINVAL; return ret; } @@ -3603,7 +3679,9 @@ int RGWPutObj::init_processing(optional_yield y) { copy_source_range_lst = strtoull(last.c_str(), NULL, 10); if (copy_source_range_fst > copy_source_range_lst) { ret = -ERANGE; - ldpp_dout(this, 5) << "x-amz-copy-source-range bad format first number bigger than second" << dendl; + ldpp_dout(this, 5) << "x-amz-copy-source-range bad format first number " + "bigger than second" + << dendl; return ret; } } @@ -3612,69 +3690,89 @@ int RGWPutObj::init_processing(optional_yield y) { return RGWOp::init_processing(y); } -int RGWPutObj::verify_permission(optional_yield y) -{ - if (! 
copy_source.empty()) { - +int RGWPutObj::verify_permission(optional_yield y) { + ldpp_dout(this, 5) << "===============================================" + << dendl; + if (!copy_source.empty()) { RGWAccessControlPolicy cs_acl(s->cct); boost::optional policy; map cs_attrs; std::unique_ptr cs_bucket; int ret = driver->get_bucket(NULL, copy_source_bucket_info, &cs_bucket); - if (ret < 0) - return ret; - - std::unique_ptr cs_object = - cs_bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id)); - + ldpp_dout(this, 5) << "XXX ===============================================" + << ret << dendl; + if (ret < 0) return ret; + ldpp_dout(this, 5) << "1 ===============================================" + << dendl; + std::unique_ptr cs_object = cs_bucket->get_object( + rgw_obj_key(copy_source_object_name, copy_source_version_id) + ); + ldpp_dout(this, 5) << "2 ===============================================" + << dendl; cs_object->set_atomic(); cs_object->set_prefetch_data(); /* check source object permissions */ - if (ret = read_obj_policy(this, driver, s, copy_source_bucket_info, cs_attrs, &cs_acl, nullptr, - policy, cs_bucket.get(), cs_object.get(), y, true); ret < 0) { + if (ret = read_obj_policy( + this, driver, s, copy_source_bucket_info, cs_attrs, &cs_acl, + nullptr, policy, cs_bucket.get(), cs_object.get(), y, true + ); + ret < 0) { return ret; } /* admin request overrides permission checks */ - if (! s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) { - if (policy || ! 
s->iam_user_policies.empty() || !s->session_policies.empty()) { + if (!s->auth.identity->is_admin_of(cs_acl.get_owner().get_id())) { + if (policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { //add source object tags for permission evaluation - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, policy, s->iam_user_policies, s->session_policies); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition( + this, policy, s->iam_user_policies, s->session_policies + ); if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag); + rgw_iam_add_objtags( + this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag + ); auto usr_policy_res = Effect::Pass; rgw::ARN obj_arn(cs_object->get_obj()); for (auto& user_policy : s->iam_user_policies) { - if (usr_policy_res = user_policy.eval(s->env, *s->auth.identity, - cs_object->get_instance().empty() ? - rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion, - obj_arn); usr_policy_res == Effect::Deny) + if (usr_policy_res = user_policy.eval( + s->env, *s->auth.identity, + cs_object->get_instance().empty() + ? rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion, + obj_arn + ); + usr_policy_res == Effect::Deny) return -EACCES; else if (usr_policy_res == Effect::Allow) break; } - rgw::IAM::Effect e = Effect::Pass; - if (policy) { - rgw::ARN obj_arn(cs_object->get_obj()); - e = policy->eval(s->env, *s->auth.identity, - cs_object->get_instance().empty() ? - rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion, - obj_arn); - } - if (e == Effect::Deny) { - return -EACCES; - } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && + rgw::IAM::Effect e = Effect::Pass; + if (policy) { + rgw::ARN obj_arn(cs_object->get_obj()); + e = policy->eval( + s->env, *s->auth.identity, + cs_object->get_instance().empty() ? 
rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion, + obj_arn + ); + } + if (e == Effect::Deny) { + return -EACCES; + } else if (usr_policy_res == Effect::Pass && e == Effect::Pass && !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask, RGW_PERM_READ)) { - return -EACCES; - } - rgw_iam_remove_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag); - } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask, - RGW_PERM_READ)) { - return -EACCES; + return -EACCES; + } + rgw_iam_remove_objtags( + this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag + ); + } else if (!cs_acl.verify_permission( + this, *s->auth.identity, s->perm_mask, RGW_PERM_READ + )) { + return -EACCES; } } } @@ -3692,74 +3790,82 @@ int RGWPutObj::verify_permission(optional_yield y) return op_ret; } - if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { rgw_add_grant_to_iam_environment(s->env, s); rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl); - if (obj_tags != nullptr && obj_tags->count() > 0){ + if (obj_tags != nullptr && obj_tags->count() > 0) { auto tags = obj_tags->get_tags(); - for (const auto& kv: tags){ - rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second); + for (const auto& kv : tags) { + rgw_add_to_iam_environment( + s->env, "s3:RequestObjectTag/" + kv.first, kv.second + ); } } constexpr auto encrypt_attr = "x-amz-server-side-encryption"; constexpr auto s3_encrypt_attr = "s3:x-amz-server-side-encryption"; auto enc_header = s->info.crypt_attribute_map.find(encrypt_attr); - if (enc_header != s->info.crypt_attribute_map.end()){ + if (enc_header != s->info.crypt_attribute_map.end()) { rgw_add_to_iam_environment(s->env, s3_encrypt_attr, enc_header->second); } constexpr auto kms_attr = "x-amz-server-side-encryption-aws-kms-key-id"; - constexpr auto s3_kms_attr 
= "s3:x-amz-server-side-encryption-aws-kms-key-id"; + constexpr auto s3_kms_attr = + "s3:x-amz-server-side-encryption-aws-kms-key-id"; auto kms_header = s->info.crypt_attribute_map.find(kms_attr); - if (kms_header != s->info.crypt_attribute_map.end()){ + if (kms_header != s->info.crypt_attribute_map.end()) { rgw_add_to_iam_environment(s->env, s3_kms_attr, kms_header->second); } // Add bucket tags for authorization - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); - - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); - if (identity_policy_res == Effect::Deny) - return -EACCES; + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); + + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); + if (identity_policy_res == Effect::Deny) return -EACCES; rgw::IAM::Effect e = Effect::Pass; rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; if (s->iam_policy) { ARN obj_arn(s->object->get_obj()); - e = s->iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, - obj_arn, - princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, princ_type + ); } if (e == Effect::Deny) { return -EACCES; } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if 
(princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) return 0; } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) return 0; - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) return 0; } return -EACCES; @@ -3776,17 +3882,15 @@ int RGWPutObj::verify_permission(optional_yield y) return 0; } - -void RGWPutObj::pre_exec() -{ +void RGWPutObj::pre_exec() { rgw_bucket_object_pre_exec(s); } -class RGWPutObj_CB : public RGWGetObj_Filter -{ - RGWPutObj *op; -public: - explicit RGWPutObj_CB(RGWPutObj *_op) : op(_op) {} +class RGWPutObj_CB : public RGWGetObj_Filter { + RGWPutObj* op; + + public: + explicit RGWPutObj_CB(RGWPutObj* _op) : op(_op) {} ~RGWPutObj_CB() override {} int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override { @@ -3794,8 +3898,7 @@ class RGWPutObj_CB : public RGWGetObj_Filter } }; -int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) -{ +int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) { bufferlist bl_tmp; bl.begin(bl_ofs).copy(bl_len, bl_tmp); @@ 
-3804,8 +3907,7 @@ int RGWPutObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) return bl_len; } -int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) -{ +int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) { RGWPutObj_CB cb(this); RGWGetObj_Filter* filter = &cb; boost::optional decompress; @@ -3822,38 +3924,39 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) std::unique_ptr bucket; ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket); - if (ret < 0) - return ret; + if (ret < 0) return ret; - std::unique_ptr obj = bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id)); + std::unique_ptr obj = bucket->get_object( + rgw_obj_key(copy_source_object_name, copy_source_version_id) + ); std::unique_ptr read_op(obj->get_read_op()); ret = read_op->prepare(s->yield, this); - if (ret < 0) - return ret; + if (ret < 0) return ret; obj_size = obj->get_obj_size(); bool need_decompress; - op_ret = rgw_compression_info_from_attrset(obj->get_attrs(), need_decompress, cs_info); + op_ret = rgw_compression_info_from_attrset( + obj->get_attrs(), need_decompress, cs_info + ); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl; return -EIO; } bool partial_content = true; - if (need_decompress) - { + if (need_decompress) { obj_size = cs_info.orig_size; decompress.emplace(s->cct, &cs_info, partial_content, filter); filter = &*decompress; } auto attr_iter = obj->get_attrs().find(RGW_ATTR_MANIFEST); - op_ret = this->get_decrypt_filter(&decrypt, - filter, - obj->get_attrs(), - attr_iter != obj->get_attrs().end() ? &(attr_iter->second) : nullptr); + op_ret = this->get_decrypt_filter( + &decrypt, filter, obj->get_attrs(), + attr_iter != obj->get_attrs().end() ? 
&(attr_iter->second) : nullptr + ); if (decrypt != nullptr) { filter = decrypt.get(); } @@ -3862,14 +3965,12 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) } ret = obj->range_to_ofs(obj_size, new_ofs, new_end); - if (ret < 0) - return ret; + if (ret < 0) return ret; filter->fixup_range(new_ofs, new_end); ret = read_op->iterate(this, new_ofs, new_end, filter, s->yield); - if (ret >= 0) - ret = filter->flush(); + if (ret >= 0) ret = filter->flush(); bl.claim_append(bl_aux); @@ -3877,9 +3978,9 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) } // special handling for compression type = "random" with multipart uploads -static CompressorRef get_compressor_plugin(const req_state *s, - const std::string& compression_type) -{ +static CompressorRef get_compressor_plugin( + const req_state* s, const std::string& compression_type +) { if (compression_type != "random") { return Compressor::create(s->cct, compression_type); } @@ -3892,7 +3993,8 @@ static CompressorRef get_compressor_plugin(const req_state *s, } // use a hash of the multipart upload id so all parts use the same plugin - const auto alg = std::hash{}(upload_id) % Compressor::COMP_ALG_LAST; + const auto alg = + std::hash{}(upload_id) % Compressor::COMP_ALG_LAST; if (alg == Compressor::COMP_ALG_NONE) { return nullptr; } @@ -3900,39 +4002,46 @@ static CompressorRef get_compressor_plugin(const req_state *s, } auto RGWPutObj::get_torrent_filter(rgw::sal::DataProcessor* cb) - -> std::optional -{ + -> std::optional { auto& conf = get_cct()->_conf; if (!conf->rgw_torrent_flag) { - return std::nullopt; // torrent generation disabled + return std::nullopt; // torrent generation disabled } const auto max_len = conf->rgw_torrent_max_size; const auto piece_len = conf->rgw_torrent_sha_unit; if (!max_len || !piece_len) { - return std::nullopt; // invalid configuration + return std::nullopt; // invalid configuration } - if 
(crypt_http_responses.count("x-amz-server-side-encryption-customer-algorithm")) { - return std::nullopt; // downloading the torrent would require customer keys + if (crypt_http_responses.count( + "x-amz-server-side-encryption-customer-algorithm" + )) { + return std::nullopt; // downloading the torrent would require customer keys } return RGWPutObj_Torrent{cb, max_len, piece_len}; } -int RGWPutObj::get_lua_filter(std::unique_ptr* filter, rgw::sal::DataProcessor* cb) { +int RGWPutObj::get_lua_filter( + std::unique_ptr* filter, + rgw::sal::DataProcessor* cb +) { std::string script; - const auto rc = rgw::lua::read_script(s, s->penv.lua.manager.get(), s->bucket_tenant, s->yield, rgw::lua::context::putData, script); + const auto rc = rgw::lua::read_script( + s, s->penv.lua.manager.get(), s->bucket_tenant, s->yield, + rgw::lua::context::putData, script + ); if (rc == -ENOENT) { // no script, nothing to do return 0; } else if (rc < 0) { - ldpp_dout(this, 5) << "WARNING: failed to read data script. error: " << rc << dendl; + ldpp_dout(this, 5) << "WARNING: failed to read data script. 
error: " << rc + << dendl; return rc; } filter->reset(new rgw::lua::RGWPutObjFilter(s, script, cb)); return 0; } -void RGWPutObj::execute(optional_yield y) -{ +void RGWPutObj::execute(optional_yield y) { char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1]; char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; @@ -3942,7 +4051,7 @@ void RGWPutObj::execute(optional_yield y) hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); bufferlist bl, aclbl, bs; int len; - + off_t fst; off_t lst; @@ -3950,8 +4059,8 @@ void RGWPutObj::execute(optional_yield y) perfcounter->inc(l_rgw_put); // report latency on return auto put_lat = make_scope_guard([&] { - perfcounter->tinc(l_rgw_put_lat, s->time_elapsed()); - }); + perfcounter->tinc(l_rgw_put_lat, s->time_elapsed()); + }); op_ret = -EINVAL; if (rgw::sal::Object::empty(s->object.get())) { @@ -3966,7 +4075,7 @@ void RGWPutObj::execute(optional_yield y) op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); if (op_ret < 0) { ldpp_dout(this, 20) << "get_system_versioning_params() returned ret=" - << op_ret << dendl; + << op_ret << dendl; return; } @@ -3974,15 +4083,20 @@ void RGWPutObj::execute(optional_yield y) need_calc_md5 = true; ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; - op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], - supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); + op_ret = ceph_unarmor( + supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], + supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64) + ); ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl; if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { op_ret = -ERR_INVALID_DIGEST; return; } - buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); + buf_to_hex( + (const unsigned char*)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, + supplied_md5 + ); 
ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl; } @@ -4003,7 +4117,7 @@ void RGWPutObj::execute(optional_yield y) const bool multipart = !multipart_upload_id.empty(); /* Handle object versioning of Swift API. */ - if (! multipart) { + if (!multipart) { op_ret = s->object->swift_versioning_copy(this, s->yield); if (op_ret < 0) { return; @@ -4011,11 +4125,10 @@ void RGWPutObj::execute(optional_yield y) } // make reservation for notification if needed - std::unique_ptr res - = driver->get_notification( - s->object.get(), s->src_object.get(), s, - rgw::notify::ObjectCreatedPut, y); - if(!multipart) { + std::unique_ptr res = driver->get_notification( + s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPut, y + ); + if (!multipart) { op_ret = res->publish_reserve(this, obj_tags.get()); if (op_ret < 0) { return; @@ -4025,98 +4138,112 @@ void RGWPutObj::execute(optional_yield y) // create the object processor std::unique_ptr processor; - rgw_placement_rule *pdest_placement = &s->dest_placement; + rgw_placement_rule* pdest_placement = &s->dest_placement; if (multipart) { std::unique_ptr upload; - upload = s->bucket->get_multipart_upload(s->object->get_name(), - multipart_upload_id); + upload = s->bucket->get_multipart_upload( + s->object->get_name(), multipart_upload_id + ); op_ret = upload->get_info(this, s->yield, &pdest_placement); s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, multipart_upload_id); - multipart_trace = tracing::rgw::tracer.add_span(name(), upload->get_trace()); + multipart_trace = + tracing::rgw::tracer.add_span(name(), upload->get_trace()); if (op_ret < 0) { if (op_ret != -ENOENT) { - ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret << ": " << cpp_strerror(-op_ret) << dendl; - } else {// -ENOENT: raced with upload complete/cancel, no need to spam log - ldpp_dout(this, 20) << "failed to get multipart info (returned " << op_ret << ": " << cpp_strerror(-op_ret) << "): probably raced with upload 
complete / cancel" << dendl; + ldpp_dout(this, 0) << "ERROR: get_multipart_info returned " << op_ret + << ": " << cpp_strerror(-op_ret) << dendl; + } else { // -ENOENT: raced with upload complete/cancel, no need to spam log + ldpp_dout(this, 20) + << "failed to get multipart info (returned " << op_ret << ": " + << cpp_strerror(-op_ret) + << "): probably raced with upload complete / cancel" << dendl; } return; } /* upload will go out of scope, so copy the dest placement for later use */ s->dest_placement = *pdest_placement; pdest_placement = &s->dest_placement; - ldpp_dout(this, 20) << "dest_placement for part=" << *pdest_placement << dendl; - processor = upload->get_writer(this, s->yield, s->object.get(), - s->user->get_id(), pdest_placement, - multipart_part_num, multipart_part_str); - } else if(append) { + ldpp_dout(this, 20) << "dest_placement for part=" << *pdest_placement + << dendl; + processor = upload->get_writer( + this, s->yield, s->object.get(), s->user->get_id(), pdest_placement, + multipart_part_num, multipart_part_str + ); + } else if (append) { if (s->bucket->versioned()) { op_ret = -ERR_INVALID_BUCKET_STATE; return; } - processor = driver->get_append_writer(this, s->yield, s->object.get(), - s->bucket_owner.get_id(), - pdest_placement, s->req_id, position, - &cur_accounted_size); + processor = driver->get_append_writer( + this, s->yield, s->object.get(), s->bucket_owner.get_id(), + pdest_placement, s->req_id, position, &cur_accounted_size + ); } else { if (s->bucket->versioning_enabled()) { if (!version_id.empty()) { s->object->set_instance(version_id); } else { - s->object->gen_rand_obj_instance_name(); + s->object->gen_rand_obj_instance_name(); version_id = s->object->get_instance(); } } - processor = driver->get_atomic_writer(this, s->yield, s->object.get(), - s->bucket_owner.get_id(), - pdest_placement, olh_epoch, s->req_id); + processor = driver->get_atomic_writer( + this, s->yield, s->object.get(), s->bucket_owner.get_id(), + pdest_placement, 
olh_epoch, s->req_id + ); } op_ret = processor->prepare(s->yield); if (op_ret < 0) { ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret - << dendl; + << dendl; return; } - if ((! copy_source.empty()) && !copy_source_range) { + if ((!copy_source.empty()) && !copy_source_range) { std::unique_ptr bucket; op_ret = driver->get_bucket(nullptr, copy_source_bucket_info, &bucket); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: failed to get bucket with error" << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: failed to get bucket with error" << op_ret + << dendl; return; } - std::unique_ptr obj = - bucket->get_object(rgw_obj_key(copy_source_object_name, copy_source_version_id)); + std::unique_ptr obj = bucket->get_object( + rgw_obj_key(copy_source_object_name, copy_source_version_id) + ); - RGWObjState *astate; + RGWObjState* astate; op_ret = obj->get_obj_state(this, &astate, s->yield); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl; + ldpp_dout( + this, 0 + ) << "ERROR: get copy source obj state returned with error" + << op_ret << dendl; return; } bufferlist bl; if (astate->get_attr(RGW_ATTR_MANIFEST, bl)) { RGWObjManifest m; - try{ + try { decode(m, bl); if (m.get_tier_type() == "cloud-s3") { op_ret = -ERR_INVALID_OBJECT_STATE; s->err.message = "This object was transitioned to cloud-s3"; ldpp_dout(this, 4) << "Cannot copy cloud tiered object. 
Failing with " - << op_ret << dendl; + << op_ret << dendl; return; } } catch (const buffer::end_of_buffer&) { // ignore empty manifest; it's not cloud-tiered } catch (const std::exception& e) { ldpp_dout(this, 1) << "WARNING: failed to decode object manifest for " - << *s->object << ": " << e.what() << dendl; + << *s->object << ": " << e.what() << dendl; } } - if (!astate->exists){ + if (!astate->exists) { op_ret = -ENOENT; return; } @@ -4127,7 +4254,7 @@ void RGWPutObj::execute(optional_yield y) fst = copy_source_range_fst; // no filters by default - rgw::sal::DataProcessor *filter = processor.get(); + rgw::sal::DataProcessor* filter = processor.get(); const auto& compression_type = driver->get_compression_type(*pdest_placement); CompressorRef plugin; @@ -4137,7 +4264,7 @@ void RGWPutObj::execute(optional_yield y) std::unique_ptr encrypt; std::unique_ptr run_lua; - if (!append) { // compression and encryption only apply to full object uploads + if (!append) { // compression and encryption only apply to full object uploads op_ret = get_encrypt_filter(&encrypt, filter); if (op_ret < 0) { return; @@ -4149,7 +4276,7 @@ void RGWPutObj::execute(optional_yield y) plugin = get_compressor_plugin(s, compression_type); if (!plugin) { ldpp_dout(this, 1) << "Cannot load plugin for compression type " - << compression_type << dendl; + << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); filter = &*compressor; @@ -4172,15 +4299,14 @@ void RGWPutObj::execute(optional_yield y) tracepoint(rgw_op, before_data_transfer, s->req_id.c_str()); do { bufferlist data; - if (fst > lst) - break; + if (fst > lst) break; if (copy_source.empty()) { len = get_data(data); } else { - off_t cur_lst = min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst); + off_t cur_lst = + min(fst + s->cct->_conf->rgw_max_chunk_size - 1, lst); op_ret = get_data(fst, cur_lst, data); - if (op_ret < 0) - return; + if (op_ret < 0) return; len = data.length(); s->content_length += len; fst 
+= len; @@ -4194,13 +4320,13 @@ void RGWPutObj::execute(optional_yield y) } if (need_calc_md5) { - hash.Update((const unsigned char *)data.c_str(), data.length()); + hash.Update((const unsigned char*)data.c_str(), data.length()); } op_ret = filter->process(std::move(data), ofs); if (op_ret < 0) { - ldpp_dout(this, 20) << "processor->process() returned ret=" - << op_ret << dendl; + ldpp_dout(this, 20) << "processor->process() returned ret=" << op_ret + << dendl; return; } @@ -4230,7 +4356,8 @@ void RGWPutObj::execute(optional_yield y) op_ret = s->bucket->check_quota(this, quota, s->obj_size, y); if (op_ret < 0) { - ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret + << dendl; return; } @@ -4246,15 +4373,16 @@ void RGWPutObj::execute(optional_yield y) encode(cs_info, tmp); attrs[RGW_ATTR_COMPRESSION] = tmp; ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION - << " with type=" << cs_info.compression_type - << ", orig_size=" << cs_info.orig_size - << ", blocks=" << cs_info.blocks.size() << dendl; + << " with type=" << cs_info.compression_type + << ", orig_size=" << cs_info.orig_size + << ", blocks=" << cs_info.blocks.size() << dendl; } if (torrent) { auto bl = torrent->bencode_torrent(s->object->get_name()); if (bl.length()) { ldpp_dout(this, 20) << "storing " << bl.length() - << " bytes of torrent info in " << RGW_ATTR_TORRENT << dendl; + << " bytes of torrent info in " << RGW_ATTR_TORRENT + << dendl; attrs[RGW_ATTR_TORRENT] = std::move(bl); } } @@ -4321,32 +4449,33 @@ void RGWPutObj::execute(optional_yield y) } tracepoint(rgw_op, processor_complete_enter, s->req_id.c_str()); - op_ret = processor->complete(s->obj_size, etag, &mtime, real_time(), attrs, - (delete_at ? *delete_at : real_time()), if_match, if_nomatch, - (user_data.empty() ? 
nullptr : &user_data), nullptr, nullptr, - s->yield); + op_ret = processor->complete( + s->obj_size, etag, &mtime, real_time(), attrs, + (delete_at ? *delete_at : real_time()), if_match, if_nomatch, + (user_data.empty() ? nullptr : &user_data), nullptr, nullptr, s->yield + ); tracepoint(rgw_op, processor_complete_exit, s->req_id.c_str()); // send request to notification manager - int ret = res->publish_commit(this, s->obj_size, mtime, etag, s->object->get_instance()); + int ret = res->publish_commit( + this, s->obj_size, mtime, etag, s->object->get_instance() + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } } -int RGWPostObj::verify_permission(optional_yield y) -{ +int RGWPostObj::verify_permission(optional_yield y) { return 0; } -void RGWPostObj::pre_exec() -{ +void RGWPostObj::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPostObj::execute(optional_yield y) -{ +void RGWPostObj::execute(optional_yield y) { boost::optional compressor; CompressorRef plugin; char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; @@ -4362,10 +4491,12 @@ void RGWPostObj::execute(optional_yield y) return; } - if (s->iam_policy || ! 
s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (identity_policy_res == Effect::Deny) { op_ret = -EACCES; return; @@ -4375,10 +4506,9 @@ void RGWPostObj::execute(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; if (s->iam_policy) { ARN obj_arn(s->object->get_obj()); - e = s->iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, - obj_arn, - princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, princ_type + ); } if (e == Effect::Deny) { op_ret = -EACCES; @@ -4386,28 +4516,33 @@ void RGWPostObj::execute(optional_yield y) } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (session_policy_res == Effect::Deny) { - op_ret = -EACCES; - return; + op_ret = -EACCES; + return; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) { op_ret = 0; return; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy 
and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) { op_ret = 0; return; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { op_ret = 0; return; } @@ -4415,7 +4550,8 @@ void RGWPostObj::execute(optional_yield y) op_ret = -EACCES; return; } - if (identity_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) { + if (identity_policy_res == Effect::Pass && e == Effect::Pass && + !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) { op_ret = -EACCES; return; } @@ -4425,8 +4561,9 @@ void RGWPostObj::execute(optional_yield y) } // make reservation for notification if needed - std::unique_ptr res - = driver->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost, y); + std::unique_ptr res = driver->get_notification( + s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost, y + ); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; @@ -4450,35 +4587,41 @@ void RGWPostObj::execute(optional_yield y) if (supplied_md5_b64) { char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1]; ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; - op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], - supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); + op_ret = ceph_unarmor( + supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], + 
supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64) + ); ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl; if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { op_ret = -ERR_INVALID_DIGEST; return; } - buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); + buf_to_hex( + (const unsigned char*)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, + supplied_md5 + ); ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl; } std::unique_ptr obj = - s->bucket->get_object(rgw_obj_key(get_current_filename())); + s->bucket->get_object(rgw_obj_key(get_current_filename())); if (s->bucket->versioning_enabled()) { obj->gen_rand_obj_instance_name(); } std::unique_ptr processor; - processor = driver->get_atomic_writer(this, s->yield, obj.get(), - s->bucket_owner.get_id(), - &s->dest_placement, 0, s->req_id); + processor = driver->get_atomic_writer( + this, s->yield, obj.get(), s->bucket_owner.get_id(), &s->dest_placement, + 0, s->req_id + ); op_ret = processor->prepare(s->yield); if (op_ret < 0) { return; } /* No filters by default. 
*/ - rgw::sal::DataProcessor *filter = processor.get(); + rgw::sal::DataProcessor* filter = processor.get(); std::unique_ptr encrypt; op_ret = get_encrypt_filter(&encrypt, filter); @@ -4488,12 +4631,13 @@ void RGWPostObj::execute(optional_yield y) if (encrypt != nullptr) { filter = encrypt.get(); } else { - const auto& compression_type = driver->get_compression_type(s->dest_placement); + const auto& compression_type = + driver->get_compression_type(s->dest_placement); if (compression_type != "none") { plugin = Compressor::create(s->cct, compression_type); if (!plugin) { ldpp_dout(this, 1) << "Cannot load plugin for compression type " - << compression_type << dendl; + << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); filter = &*compressor; @@ -4515,7 +4659,7 @@ void RGWPostObj::execute(optional_yield y) break; } - hash.Update((const unsigned char *)data.c_str(), data.length()); + hash.Update((const unsigned char*)data.c_str(), data.length()); op_ret = filter->process(std::move(data), ofs); if (op_ret < 0) { return; @@ -4543,7 +4687,6 @@ void RGWPostObj::execute(optional_yield y) s->obj_size = ofs; s->object->set_obj_size(ofs); - op_ret = s->bucket->check_quota(this, quota, s->obj_size, y); if (op_ret < 0) { return; @@ -4553,7 +4696,7 @@ void RGWPostObj::execute(optional_yield y) buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); etag = calc_md5; - + if (supplied_md5_b64 && strcmp(calc_md5, supplied_md5)) { op_ret = -ERR_BAD_DIGEST; return; @@ -4566,7 +4709,7 @@ void RGWPostObj::execute(optional_yield y) emplace_attr(RGW_ATTR_ACL, std::move(aclbl)); const std::string content_type = get_current_content_type(); - if (! 
content_type.empty()) { + if (!content_type.empty()) { ceph::bufferlist ct_bl; ct_bl.append(content_type.c_str(), content_type.size() + 1); emplace_attr(RGW_ATTR_CONTENT_TYPE, std::move(ct_bl)); @@ -4583,28 +4726,31 @@ void RGWPostObj::execute(optional_yield y) emplace_attr(RGW_ATTR_COMPRESSION, std::move(tmp)); } - op_ret = processor->complete(s->obj_size, etag, nullptr, real_time(), attrs, - (delete_at ? *delete_at : real_time()), - nullptr, nullptr, nullptr, nullptr, nullptr, - s->yield); + op_ret = processor->complete( + s->obj_size, etag, nullptr, real_time(), attrs, + (delete_at ? *delete_at : real_time()), nullptr, nullptr, nullptr, + nullptr, nullptr, s->yield + ); if (op_ret < 0) { return; } } while (is_next_file_to_upload()); // send request to notification manager - int ret = res->publish_commit(this, ofs, s->object->get_mtime(), etag, s->object->get_instance()); + int ret = res->publish_commit( + this, ofs, s->object->get_mtime(), etag, s->object->get_instance() + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } } - -void RGWPutMetadataAccount::filter_out_temp_url(map& add_attrs, - const set& rmattr_names, - map& temp_url_keys) -{ +void RGWPutMetadataAccount::filter_out_temp_url( + map& add_attrs, const set& rmattr_names, + map& temp_url_keys +) { map::iterator iter; iter = add_attrs.find(RGW_ATTR_TEMPURL_KEY1); @@ -4629,8 +4775,7 @@ void RGWPutMetadataAccount::filter_out_temp_url(map& add_att } } -int RGWPutMetadataAccount::init_processing(optional_yield y) -{ +int RGWPutMetadataAccount::init_processing(optional_yield y) { /* First, go to the base class. At the time of writing the method was * responsible only for initializing the quota. This isn't necessary * here as we are touching metadata only. 
I'm putting this call only @@ -4669,8 +4814,9 @@ int RGWPutMetadataAccount::init_processing(optional_yield y) filter_out_temp_url(attrs, rmattr_names, temp_url_keys); /* The same with quota except a client needs to be reseller admin. */ - op_ret = filter_out_quota_info(attrs, rmattr_names, new_quota, - &new_quota_extracted); + op_ret = filter_out_quota_info( + attrs, rmattr_names, new_quota, &new_quota_extracted + ); if (op_ret < 0) { return op_ret; } @@ -4678,8 +4824,7 @@ int RGWPutMetadataAccount::init_processing(optional_yield y) return 0; } -int RGWPutMetadataAccount::verify_permission(optional_yield y) -{ +int RGWPutMetadataAccount::verify_permission(optional_yield y) { if (s->auth.identity->is_anonymous()) { return -EACCES; } @@ -4703,8 +4848,7 @@ int RGWPutMetadataAccount::verify_permission(optional_yield y) return 0; } -void RGWPutMetadataAccount::execute(optional_yield y) -{ +void RGWPutMetadataAccount::execute(optional_yield y) { /* Params have been extracted earlier. See init_processing(). 
*/ op_ret = s->user->load_user(this, y); if (op_ret < 0) { @@ -4729,8 +4873,7 @@ void RGWPutMetadataAccount::execute(optional_yield y) op_ret = s->user->store_user(this, y, false, &s->user->get_info()); } -int RGWPutMetadataBucket::verify_permission(optional_yield y) -{ +int RGWPutMetadataBucket::verify_permission(optional_yield y) { if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) { return -EACCES; } @@ -4738,13 +4881,11 @@ int RGWPutMetadataBucket::verify_permission(optional_yield y) return 0; } -void RGWPutMetadataBucket::pre_exec() -{ +void RGWPutMetadataBucket::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPutMetadataBucket::execute(optional_yield y) -{ +void RGWPutMetadataBucket::execute(optional_yield y) { op_ret = get_params(y); if (op_ret < 0) { return; @@ -4762,63 +4903,64 @@ void RGWPutMetadataBucket::execute(optional_yield y) } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - /* Encode special metadata first as we're using std::map::emplace under + /* Encode special metadata first as we're using std::map::emplace under * the hood. This method will add the new items only if the map doesn't * contain such keys yet. 
*/ - if (has_policy) { - if (s->dialect.compare("swift") == 0) { - auto old_policy = \ - static_cast(s->bucket_acl.get()); - auto new_policy = static_cast(&policy); - new_policy->filter_merge(policy_rw_mask, old_policy); - policy = *new_policy; - } - buffer::list bl; - policy.encode(bl); - emplace_attr(RGW_ATTR_ACL, std::move(bl)); + if (has_policy) { + if (s->dialect.compare("swift") == 0) { + auto old_policy = + static_cast(s->bucket_acl.get()); + auto new_policy = static_cast(&policy); + new_policy->filter_merge(policy_rw_mask, old_policy); + policy = *new_policy; } + buffer::list bl; + policy.encode(bl); + emplace_attr(RGW_ATTR_ACL, std::move(bl)); + } - if (has_cors) { - buffer::list bl; - cors_config.encode(bl); - emplace_attr(RGW_ATTR_CORS, std::move(bl)); - } + if (has_cors) { + buffer::list bl; + cors_config.encode(bl); + emplace_attr(RGW_ATTR_CORS, std::move(bl)); + } - /* It's supposed that following functions WILL NOT change any + /* It's supposed that following functions WILL NOT change any * special attributes (like RGW_ATTR_ACL) if they are already * present in attrs. */ - prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs); - populate_with_generic_attrs(s, attrs); + prepare_add_del_attrs(s->bucket_attrs, rmattr_names, attrs); + populate_with_generic_attrs(s, attrs); - /* According to the Swift's behaviour and its container_quota + /* According to the Swift's behaviour and its container_quota * WSGI middleware implementation: anyone with write permissions * is able to set the bucket quota. This stays in contrast to * account quotas that can be set only by clients holding * reseller admin privileges. 
*/ - op_ret = filter_out_quota_info(attrs, rmattr_names, s->bucket->get_info().quota); - if (op_ret < 0) { - return op_ret; - } + op_ret = + filter_out_quota_info(attrs, rmattr_names, s->bucket->get_info().quota); + if (op_ret < 0) { + return op_ret; + } - if (swift_ver_location) { - s->bucket->get_info().swift_ver_location = *swift_ver_location; - s->bucket->get_info().swift_versioning = (!swift_ver_location->empty()); - } + if (swift_ver_location) { + s->bucket->get_info().swift_ver_location = *swift_ver_location; + s->bucket->get_info().swift_versioning = (!swift_ver_location->empty()); + } - /* Web site of Swift API. */ - filter_out_website(attrs, rmattr_names, s->bucket->get_info().website_conf); - s->bucket->get_info().has_website = !s->bucket->get_info().website_conf.is_empty(); + /* Web site of Swift API. */ + filter_out_website(attrs, rmattr_names, s->bucket->get_info().website_conf); + s->bucket->get_info().has_website = + !s->bucket->get_info().website_conf.is_empty(); - /* Setting attributes also stores the provided bucket info. Due + /* Setting attributes also stores the provided bucket info. Due * to this fact, the new quota settings can be serialized with * the same call. */ - op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); - return op_ret; - }); + op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + return op_ret; + }); } -int RGWPutMetadataObject::verify_permission(optional_yield y) -{ +int RGWPutMetadataObject::verify_permission(optional_yield y) { // This looks to be something specific to Swift. We could add // operations like swift:PutMetadataObject to the Policy Engine. 
if (!verify_object_permission_no_policy(this, s, RGW_PERM_WRITE)) { @@ -4828,13 +4970,11 @@ int RGWPutMetadataObject::verify_permission(optional_yield y) return 0; } -void RGWPutMetadataObject::pre_exec() -{ +void RGWPutMetadataObject::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPutMetadataObject::execute(optional_yield y) -{ +void RGWPutMetadataObject::execute(optional_yield y) { rgw_obj target_obj; rgw::sal::Attrs attrs, rmattrs; @@ -4879,8 +5019,7 @@ void RGWPutMetadataObject::execute(optional_yield y) op_ret = s->object->set_obj_attrs(this, &attrs, &rmattrs, s->yield); } -int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) -{ +int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) { RGWSLOInfo slo_info; auto bliter = bl.cbegin(); try { @@ -4891,8 +5030,9 @@ int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) } try { - deleter = std::unique_ptr(\ - new RGWBulkDelete::Deleter(this, driver, s)); + deleter = std::unique_ptr( + new RGWBulkDelete::Deleter(this, driver, s) + ); } catch (const std::bad_alloc&) { return -ENOMEM; } @@ -4928,42 +5068,53 @@ int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y) return 0; } -int RGWDeleteObj::verify_permission(optional_yield y) -{ +int RGWDeleteObj::verify_permission(optional_yield y) { int op_ret = get_params(y); if (op_ret) { return op_ret; } - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (s->iam_policy || ! s->iam_user_policies.empty() || ! 
s->session_policies.empty()) { + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) { - auto r = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name())); + auto r = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + rgw::IAM::s3BypassGovernanceRetention, + ARN(s->bucket->get_key(), s->object->get_name()) + ); if (r == Effect::Deny) { bypass_perm = false; } else if (r == Effect::Pass && s->iam_policy) { ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name())); - r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, obj_arn); + r = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, + obj_arn + ); if (r == Effect::Deny) { bypass_perm = false; } } else if (r == Effect::Pass && !s->session_policies.empty()) { - r = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name())); + r = eval_identity_or_session_policies( + this, s->session_policies, s->env, + rgw::IAM::s3BypassGovernanceRetention, + ARN(s->bucket->get_key(), s->object->get_name()) + ); if (r == Effect::Deny) { bypass_perm = false; } } } - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - s->object->get_instance().empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - ARN(s->bucket->get_key(), s->object->get_name())); + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + s->object->get_instance().empty() ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + ARN(s->bucket->get_key(), s->object->get_name()) + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } @@ -4972,45 +5123,48 @@ int RGWDeleteObj::verify_permission(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name())); if (s->iam_policy) { - r = s->iam_policy->eval(s->env, *s->auth.identity, - s->object->get_instance().empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - obj_arn, - princ_type); - } - if (r == Effect::Deny) - return -EACCES; + r = s->iam_policy->eval( + s->env, *s->auth.identity, + s->object->get_instance().empty() ? rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + obj_arn, princ_type + ); + } + if (r == Effect::Deny) return -EACCES; if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - s->object->get_instance().empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - obj_arn); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, + s->object->get_instance().empty() ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + obj_arn + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && r == Effect::Allow)) { return 0; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + r == Effect::Allow) { return 0; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return 0; } } return -EACCES; } - if (r == Effect::Allow || identity_policy_res == Effect::Allow) - return 0; + if (r == Effect::Allow || identity_policy_res == Effect::Allow) return 0; } if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) { @@ -5018,22 +5172,21 @@ int RGWDeleteObj::verify_permission(optional_yield y) } if (s->bucket->get_info().mfa_enabled() && - !s->object->get_instance().empty() && - !s->mfa_verified) { - ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl; + !s->object->get_instance().empty() && !s->mfa_verified) { + ldpp_dout(this, 5) << "NOTICE: object delete request with a 
versioned " + "object, mfa auth not provided" + << dendl; return -ERR_MFA_REQUIRED; } return 0; } -void RGWDeleteObj::pre_exec() -{ +void RGWDeleteObj::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWDeleteObj::execute(optional_yield y) -{ +void RGWDeleteObj::execute(optional_yield y) { if (!s->bucket_exists) { op_ret = -ERR_NO_SUCH_BUCKET; return; @@ -5044,7 +5197,8 @@ void RGWDeleteObj::execute(optional_yield y) std::string etag; { RGWObjState* astate = nullptr; - bool check_obj_lock = s->object->have_instance() && s->bucket->get_info().obj_lock_enabled(); + bool check_obj_lock = s->object->have_instance() && + s->bucket->get_info().obj_lock_enabled(); op_ret = s->object->get_obj_state(this, &astate, s->yield, true); if (op_ret < 0) { @@ -5071,7 +5225,9 @@ void RGWDeleteObj::execute(optional_yield y) if (check_obj_lock) { ceph_assert(astate); - int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode); + int object_lock_response = verify_object_lock( + this, astate->attrset, bypass_perm, bypass_governance_mode + ); if (object_lock_response != 0) { op_ret = object_lock_response; if (op_ret == -EACCES) { @@ -5092,7 +5248,9 @@ void RGWDeleteObj::execute(optional_yield y) if (slo_attr != astate->attrset.end()) { op_ret = handle_slo_manifest(slo_attr->second, y); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl; + ldpp_dout(this, 0) + << "ERROR: failed to handle slo manifest ret=" << op_ret + << dendl; } } else { op_ret = -ERR_NOT_SLO_MANIFEST; @@ -5104,20 +5262,20 @@ void RGWDeleteObj::execute(optional_yield y) // make reservation for notification if needed const auto versioned_object = s->bucket->versioning_enabled(); - const auto event_type = versioned_object && - s->object->get_instance().empty() ? 
- rgw::notify::ObjectRemovedDeleteMarkerCreated : - rgw::notify::ObjectRemovedDelete; - std::unique_ptr res - = driver->get_notification(s->object.get(), s->src_object.get(), s, - event_type, y); + const auto event_type = + versioned_object && s->object->get_instance().empty() + ? rgw::notify::ObjectRemovedDeleteMarkerCreated + : rgw::notify::ObjectRemovedDelete; + std::unique_ptr res = driver->get_notification( + s->object.get(), s->src_object.get(), s, event_type, y + ); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } s->object->set_atomic(); - + bool ver_restored = false; op_ret = s->object->swift_versioning_restore(ver_restored, this); if (op_ret < 0) { @@ -5132,13 +5290,15 @@ void RGWDeleteObj::execute(optional_yield y) * with the regular delete path. */ op_ret = get_system_versioning_params(s, &epoch, &version_id); if (op_ret < 0) { - return; + return; } - std::unique_ptr del_op = s->object->get_delete_op(); + std::unique_ptr del_op = + s->object->get_delete_op(); del_op->params.obj_owner = s->owner; del_op->params.bucket_owner = s->bucket_owner; - del_op->params.versioning_status = s->bucket->get_info().versioning_status(); + del_op->params.versioning_status = + s->bucket->get_info().versioning_status(); del_op->params.unmod_since = unmod_since; del_op->params.high_precision_time = s->system_request; del_op->params.olh_epoch = epoch; @@ -5146,8 +5306,8 @@ void RGWDeleteObj::execute(optional_yield y) op_ret = del_op->delete_obj(this, y); if (op_ret >= 0) { - delete_marker = del_op->result.delete_marker; - version_id = del_op->result.version_id; + delete_marker = del_op->result.delete_marker; + version_id = del_op->result.version_id; } /* Check whether the object has expired. 
Swift API documentation @@ -5166,9 +5326,14 @@ void RGWDeleteObj::execute(optional_yield y) } // send request to notification manager - int ret = res->publish_commit(this, obj_size, ceph::real_clock::now(), etag, version_id); + int ret = res->publish_commit( + this, obj_size, ceph::real_clock::now(), etag, version_id + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout( + this, 1 + ) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } } else { @@ -5176,11 +5341,10 @@ void RGWDeleteObj::execute(optional_yield y) } } -bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, - string& bucket_name, - rgw_obj_key& key, - req_state* s) -{ +bool RGWCopyObj::parse_copy_location( + const std::string_view& url_src, string& bucket_name, rgw_obj_key& key, + req_state* s +) { std::string_view name_str; std::string_view params_str; @@ -5193,14 +5357,13 @@ bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, params_str = url_src.substr(pos + 1); } - if (name_str[0] == '/') // trim leading slash + if (name_str[0] == '/') // trim leading slash name_str.remove_prefix(1); std::string dec_src = url_decode(name_str); pos = dec_src.find('/'); - if (pos == string::npos) - return false; + if (pos == string::npos) return false; bucket_name = dec_src.substr(0, pos); key.name = dec_src.substr(pos + 1); @@ -5209,7 +5372,7 @@ bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, return false; } - if (! 
params_str.empty()) { + if (!params_str.empty()) { RGWHTTPArgs args; args.set(std::string(params_str)); args.parse(s); @@ -5220,24 +5383,22 @@ bool RGWCopyObj::parse_copy_location(const std::string_view& url_src, return true; } -int RGWCopyObj::verify_permission(optional_yield y) -{ +int RGWCopyObj::verify_permission(optional_yield y) { RGWAccessControlPolicy src_acl(s->cct); boost::optional src_policy; op_ret = get_params(y); - if (op_ret < 0) - return op_ret; + if (op_ret < 0) return op_ret; op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); if (op_ret < 0) { return op_ret; } - op_ret = driver->get_bucket(this, s->user.get(), - rgw_bucket(src_tenant_name, - src_bucket_name, - s->bucket_instance_id), - &src_bucket, y); + op_ret = driver->get_bucket( + this, s->user.get(), + rgw_bucket(src_tenant_name, src_bucket_name, s->bucket_instance_id), + &src_bucket, y + ); if (op_ret < 0) { if (op_ret == -ENOENT) { op_ret = -ERR_NO_SUCH_BUCKET; @@ -5248,15 +5409,18 @@ int RGWCopyObj::verify_permission(optional_yield y) /* This is the only place the bucket is set on src_object */ s->src_object->set_bucket(src_bucket.get()); /* get buckets info (source and dest) */ - if (s->local_source && source_zone.empty()) { + if (s->local_source && source_zone.empty()) { s->src_object->set_atomic(); s->src_object->set_prefetch_data(); rgw_placement_rule src_placement; /* check source object permissions */ - op_ret = read_obj_policy(this, driver, s, src_bucket->get_info(), src_bucket->get_attrs(), &src_acl, &src_placement.storage_class, - src_policy, src_bucket.get(), s->src_object.get(), y); + op_ret = read_obj_policy( + this, driver, s, src_bucket->get_info(), src_bucket->get_attrs(), + &src_acl, &src_placement.storage_class, src_policy, src_bucket.get(), + s->src_object.get(), y + ); if (op_ret < 0) { return op_ret; } @@ -5265,7 +5429,7 @@ int RGWCopyObj::verify_permission(optional_yield y) if (need_to_check_storage_class) { 
src_placement.inherit_from(src_bucket->get_placement_rule()); - op_ret = check_storage_class(src_placement); + op_ret = check_storage_class(src_placement); if (op_ret < 0) { return op_ret; } @@ -5273,87 +5437,111 @@ int RGWCopyObj::verify_permission(optional_yield y) /* admin request overrides permission checks */ if (!s->auth.identity->is_admin_of(src_acl.get_owner().get_id())) { - if (src_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, src_policy, s->iam_user_policies, s->session_policies); + if (src_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition( + this, src_policy, s->iam_user_policies, s->session_policies + ); if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag); + rgw_iam_add_objtags( + this, s, s->src_object.get(), has_s3_existing_tag, + has_s3_resource_tag + ); ARN obj_arn(s->src_object->get_obj()); - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - s->src_object->get_instance().empty() ? - rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion, - obj_arn); + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + s->src_object->get_instance().empty() + ? rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion, + obj_arn + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } auto e = Effect::Pass; rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; if (src_policy) { - e = src_policy->eval(s->env, *s->auth.identity, - s->src_object->get_instance().empty() ? 
- rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion, - obj_arn, - princ_type); + e = src_policy->eval( + s->env, *s->auth.identity, + s->src_object->get_instance().empty() + ? rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion, + obj_arn, princ_type + ); } - if (e == Effect::Deny) { - return -EACCES; - } - if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - s->src_object->get_instance().empty() ? - rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion, - obj_arn); - if (session_policy_res == Effect::Deny) { - return -EACCES; + if (e == Effect::Deny) { + return -EACCES; } - if (princ_type == rgw::IAM::PolicyPrincipal::Role) { - //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && - (session_policy_res != Effect::Allow || e != Effect::Allow)) { - return -EACCES; - } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { - //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) { + if (!s->session_policies.empty()) { + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, + s->src_object->get_instance().empty() + ? 
rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion, + obj_arn + ); + if (session_policy_res == Effect::Deny) { return -EACCES; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) { - return -EACCES; + if (princ_type == rgw::IAM::PolicyPrincipal::Role) { + //Intersection of session policy and identity policy plus intersection of session policy and bucket policy + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && + (session_policy_res != Effect::Allow || e != Effect::Allow)) { + return -EACCES; + } + } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { + //Intersection of session policy and identity policy plus bucket policy + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && + e != Effect::Allow) { + return -EACCES; + } + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) { + return -EACCES; + } } } - } - if (identity_policy_res == Effect::Pass && e == Effect::Pass && - !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask, - RGW_PERM_READ)) { - return -EACCES; - } - //remove src object tags as it may interfere with policy evaluation of destination obj - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_remove_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag); - - } else if (!src_acl.verify_permission(this, *s->auth.identity, - s->perm_mask, - RGW_PERM_READ)) { - return -EACCES; + if (identity_policy_res == Effect::Pass && e == Effect::Pass && + !src_acl.verify_permission( + this, *s->auth.identity, s->perm_mask, RGW_PERM_READ + )) { + return -EACCES; + } + //remove src object tags as it may interfere with policy evaluation of destination obj + if 
(has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_remove_objtags( + this, s, s->src_object.get(), has_s3_existing_tag, + has_s3_resource_tag + ); + + } else if (!src_acl.verify_permission( + this, *s->auth.identity, s->perm_mask, RGW_PERM_READ + )) { + return -EACCES; } } } RGWAccessControlPolicy dest_bucket_policy(s->cct); - if (src_bucket_name.compare(dest_bucket_name) == 0) { /* will only happen if s->local_source + if (src_bucket_name.compare(dest_bucket_name) == + 0) { /* will only happen if s->local_source or intra region sync */ dest_bucket = src_bucket->clone(); } else { - op_ret = driver->get_bucket(this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y); + op_ret = driver->get_bucket( + this, s->user.get(), dest_tenant_name, dest_bucket_name, &dest_bucket, y + ); if (op_ret < 0) { if (op_ret == -ENOENT) { - ldpp_dout(this, 0) << "ERROR: Destination Bucket not found for user: " << s->user->get_id().to_str() << dendl; - op_ret = -ERR_NO_SUCH_BUCKET; + ldpp_dout(this, 0) << "ERROR: Destination Bucket not found for user: " + << s->user->get_id().to_str() << dendl; + op_ret = -ERR_NO_SUCH_BUCKET; } return op_ret; } @@ -5363,80 +5551,91 @@ int RGWCopyObj::verify_permission(optional_yield y) dest_object->set_atomic(); /* check dest bucket permissions */ - op_ret = read_bucket_policy(this, driver, s, dest_bucket->get_info(), - dest_bucket->get_attrs(), - &dest_bucket_policy, dest_bucket->get_key(), y); + op_ret = read_bucket_policy( + this, driver, s, dest_bucket->get_info(), dest_bucket->get_attrs(), + &dest_bucket_policy, dest_bucket->get_key(), y + ); if (op_ret < 0) { return op_ret; } - auto dest_iam_policy = get_iam_policy_from_attr(s->cct, dest_bucket->get_attrs(), dest_bucket->get_tenant()); + auto dest_iam_policy = get_iam_policy_from_attr( + s->cct, dest_bucket->get_attrs(), dest_bucket->get_tenant() + ); /* admin request overrides permission checks */ - if (! 
s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())){ - if (dest_iam_policy != boost::none || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { + if (!s->auth.identity->is_admin_of(dest_policy.get_owner().get_id())) { + if (dest_iam_policy != boost::none || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { //Add destination bucket tags for authorization - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, dest_iam_policy, s->iam_user_policies, s->session_policies); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition( + this, dest_iam_policy, s->iam_user_policies, s->session_policies + ); if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s, dest_bucket.get()); rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source); if (md_directive) - rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive", - *md_directive); + rgw_add_to_iam_environment( + s->env, "s3:x-amz-metadata-directive", *md_directive + ); ARN obj_arn(dest_object->get_obj()); - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, - s->env, - rgw::IAM::s3PutObject, - obj_arn); + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, obj_arn + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } auto e = Effect::Pass; rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; if (dest_iam_policy) { - e = dest_iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, - obj_arn, - princ_type); + e = dest_iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, + princ_type + ); } if (e == Effect::Deny) { return -EACCES; } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, obj_arn); + auto session_policy_res = 
eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, obj_arn + ); if (session_policy_res == Effect::Deny) { - return false; + return false; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && (session_policy_res != Effect::Allow || e == Effect::Allow)) { return -EACCES; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) { + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && + e != Effect::Allow) { return -EACCES; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) { return -EACCES; } } } if (identity_policy_res == Effect::Pass && e == Effect::Pass && - ! dest_bucket_policy.verify_permission(this, - *s->auth.identity, - s->perm_mask, - RGW_PERM_WRITE)){ + !dest_bucket_policy.verify_permission( + this, *s->auth.identity, s->perm_mask, RGW_PERM_WRITE + )) { return -EACCES; } - } else if (! 
dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask, - RGW_PERM_WRITE)) { + } else if (!dest_bucket_policy.verify_permission( + this, *s->auth.identity, s->perm_mask, RGW_PERM_WRITE + )) { return -EACCES; } - } op_ret = init_dest_policy(); @@ -5447,9 +5646,7 @@ int RGWCopyObj::verify_permission(optional_yield y) return 0; } - -int RGWCopyObj::init_common() -{ +int RGWCopyObj::init_common() { if (if_mod) { if (parse_time(if_mod, &mod_time) < 0) { op_ret = -EINVAL; @@ -5479,16 +5676,13 @@ int RGWCopyObj::init_common() return 0; } -static void copy_obj_progress_cb(off_t ofs, void *param) -{ - RGWCopyObj *op = static_cast(param); +static void copy_obj_progress_cb(off_t ofs, void* param) { + RGWCopyObj* op = static_cast(param); op->progress_cb(ofs); } -void RGWCopyObj::progress_cb(off_t ofs) -{ - if (!s->cct->_conf->rgw_copy_obj_progress) - return; +void RGWCopyObj::progress_cb(off_t ofs) { + if (!s->cct->_conf->rgw_copy_obj_progress) return; if (ofs - last_ofs < static_cast(s->cct->_conf->rgw_copy_obj_progress_every_bytes)) { @@ -5500,27 +5694,23 @@ void RGWCopyObj::progress_cb(off_t ofs) last_ofs = ofs; } -void RGWCopyObj::pre_exec() -{ +void RGWCopyObj::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWCopyObj::execute(optional_yield y) -{ - if (init_common() < 0) - return; +void RGWCopyObj::execute(optional_yield y) { + if (init_common() < 0) return; // make reservation for notification if needed - std::unique_ptr res - = driver->get_notification( - s->object.get(), s->src_object.get(), - s, rgw::notify::ObjectCreatedCopy, y); + std::unique_ptr res = driver->get_notification( + s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedCopy, y + ); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; } - if ( ! 
version_id.empty()) { + if (!version_id.empty()) { dest_object->set_instance(version_id); } else if (dest_bucket->versioning_enabled()) { dest_object->gen_rand_obj_instance_name(); @@ -5555,27 +5745,28 @@ void RGWCopyObj::execute(optional_yield y) bufferlist bl; if (astate->get_attr(RGW_ATTR_MANIFEST, bl)) { RGWObjManifest m; - try{ + try { decode(m, bl); if (m.get_tier_type() == "cloud-s3") { op_ret = -ERR_INVALID_OBJECT_STATE; s->err.message = "This object was transitioned to cloud-s3"; ldpp_dout(this, 4) << "Cannot copy cloud tiered object. Failing with " - << op_ret << dendl; + << op_ret << dendl; return; } } catch (const buffer::end_of_buffer&) { // ignore empty manifest; it's not cloud-tiered } catch (const std::exception& e) { ldpp_dout(this, 1) << "WARNING: failed to decode object manifest for " - << *s->object << ": " << e.what() << dendl; + << *s->object << ": " << e.what() << dendl; } } obj_size = astate->size; - - if (!s->system_request) { // no quota enforcement for system requests - if (astate->accounted_size > static_cast(s->cct->_conf->rgw_max_put_size)) { + + if (!s->system_request) { // no quota enforcement for system requests + if (astate->accounted_size > + static_cast(s->cct->_conf->rgw_max_put_size)) { op_ret = -ERR_TOO_LARGE; return; } @@ -5596,49 +5787,35 @@ void RGWCopyObj::execute(optional_yield y) return; } - op_ret = s->src_object->copy_object(s->user.get(), - &s->info, - source_zone, - dest_object.get(), - dest_bucket.get(), - src_bucket.get(), - s->dest_placement, - &src_mtime, - &mtime, - mod_ptr, - unmod_ptr, - high_precision_time, - if_match, - if_nomatch, - attrs_mod, - copy_if_newer, - attrs, - RGWObjCategory::Main, - olh_epoch, - delete_at, - (version_id.empty() ? 
NULL : &version_id), - &s->req_id, /* use req_id as tag */ - &etag, - copy_obj_progress_cb, (void *)this, - this, - s->yield); + op_ret = s->src_object->copy_object( + s->user.get(), &s->info, source_zone, dest_object.get(), + dest_bucket.get(), src_bucket.get(), s->dest_placement, &src_mtime, + &mtime, mod_ptr, unmod_ptr, high_precision_time, if_match, if_nomatch, + attrs_mod, copy_if_newer, attrs, RGWObjCategory::Main, olh_epoch, + delete_at, (version_id.empty() ? NULL : &version_id), + &s->req_id, /* use req_id as tag */ + &etag, copy_obj_progress_cb, (void*)this, this, s->yield + ); // send request to notification manager - int ret = res->publish_commit(this, obj_size, mtime, etag, dest_object->get_instance()); + int ret = res->publish_commit( + this, obj_size, mtime, etag, dest_object->get_instance() + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } } -int RGWGetACLs::verify_permission(optional_yield y) -{ +int RGWGetACLs::verify_permission(optional_yield y) { bool perm; - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); if (!rgw::sal::Object::empty(s->object.get())) { - auto iam_action = s->object->get_instance().empty() ? - rgw::IAM::s3GetObjectAcl : - rgw::IAM::s3GetObjectVersionAcl; + auto iam_action = s->object->get_instance().empty() + ? 
rgw::IAM::s3GetObjectAcl + : rgw::IAM::s3GetObjectVersionAcl; if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); perm = verify_object_permission(this, s, iam_action); @@ -5646,122 +5823,109 @@ int RGWGetACLs::verify_permission(optional_yield y) if (!s->bucket_exists) { return -ERR_NO_SUCH_BUCKET; } - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); perm = verify_bucket_permission(this, s, rgw::IAM::s3GetBucketAcl); } - if (!perm) - return -EACCES; + if (!perm) return -EACCES; return 0; } -void RGWGetACLs::pre_exec() -{ +void RGWGetACLs::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetACLs::execute(optional_yield y) -{ +void RGWGetACLs::execute(optional_yield y) { stringstream ss; - RGWAccessControlPolicy* const acl = \ - (!rgw::sal::Object::empty(s->object.get()) ? s->object_acl.get() : s->bucket_acl.get()); - RGWAccessControlPolicy_S3* const s3policy = \ - static_cast(acl); + RGWAccessControlPolicy* const acl = + (!rgw::sal::Object::empty(s->object.get()) ? s->object_acl.get() + : s->bucket_acl.get()); + RGWAccessControlPolicy_S3* const s3policy = + static_cast(acl); s3policy->to_xml(ss); acls = ss.str(); } - - -int RGWPutACLs::verify_permission(optional_yield y) -{ +int RGWPutACLs::verify_permission(optional_yield y) { bool perm; rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl); rgw_add_grant_to_iam_environment(s->env, s); if (!rgw::sal::Object::empty(s->object.get())) { - auto iam_action = s->object->get_instance().empty() ? rgw::IAM::s3PutObjectAcl : rgw::IAM::s3PutObjectVersionAcl; + auto iam_action = s->object->get_instance().empty() + ? 
rgw::IAM::s3PutObjectAcl + : rgw::IAM::s3PutObjectVersionAcl; op_ret = rgw_iam_add_objtags(this, s, true, true); perm = verify_object_permission(this, s, iam_action); } else { op_ret = rgw_iam_add_buckettags(this, s); perm = verify_bucket_permission(this, s, rgw::IAM::s3PutBucketAcl); } - if (!perm) - return -EACCES; + if (!perm) return -EACCES; return 0; } -int RGWGetLC::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetLC::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); bool perm; - perm = verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration); - if (!perm) - return -EACCES; + perm = + verify_bucket_permission(this, s, rgw::IAM::s3GetLifecycleConfiguration); + if (!perm) return -EACCES; return 0; } -int RGWPutLC::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWPutLC::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); bool perm; - perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration); - if (!perm) - return -EACCES; + perm = + verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration); + if (!perm) return -EACCES; return 0; } -int RGWDeleteLC::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteLC::verify_permission(optional_yield y) { + auto 
[has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); bool perm; - perm = verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration); - if (!perm) - return -EACCES; + perm = + verify_bucket_permission(this, s, rgw::IAM::s3PutLifecycleConfiguration); + if (!perm) return -EACCES; return 0; } -void RGWPutACLs::pre_exec() -{ +void RGWPutACLs::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetLC::pre_exec() -{ +void RGWGetLC::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPutLC::pre_exec() -{ +void RGWPutLC::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWDeleteLC::pre_exec() -{ +void RGWDeleteLC::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPutACLs::execute(optional_yield y) -{ +void RGWPutACLs::execute(optional_yield y) { bufferlist bl; - RGWAccessControlPolicy_S3 *policy = NULL; + RGWAccessControlPolicy_S3* policy = NULL; RGWACLXMLParser_S3 parser(s->cct); RGWAccessControlPolicy_S3 new_policy(s->cct); stringstream ss; @@ -5773,17 +5937,18 @@ void RGWPutACLs::execute(optional_yield y) return; } - - RGWAccessControlPolicy* const existing_policy = \ - (rgw::sal::Object::empty(s->object.get()) ? s->bucket_acl.get() : s->object_acl.get()); + RGWAccessControlPolicy* const existing_policy = + (rgw::sal::Object::empty(s->object.get()) ? 
s->bucket_acl.get() + : s->object_acl.get()); owner = existing_policy->get_owner(); op_ret = get_params(y); if (op_ret < 0) { if (op_ret == -ERANGE) { - ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = " - << s->length << dendl; + ldpp_dout(this, 4) << "The size of request xml data is larger than the " + "max limitation, data size = " + << s->length << dendl; op_ret = -ERR_MALFORMED_XML; s->err.message = "The XML you provided was larger than the maximum " + std::to_string(s->cct->_conf->rgw_max_put_param_size) + @@ -5793,7 +5958,8 @@ void RGWPutACLs::execute(optional_yield y) } char* buf = data.c_str(); - ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl; + ldpp_dout(this, 15) << "read len=" << data.length() + << " data=" << (buf ? buf : "") << dendl; if (!s->canned_acl.empty() && data.length() > 0) { op_ret = -EINVAL; @@ -5802,8 +5968,7 @@ void RGWPutACLs::execute(optional_yield y) if (!s->canned_acl.empty() || s->has_acl_header) { op_ret = get_policy_from_state(driver, s, ss); - if (op_ret < 0) - return; + if (op_ret < 0) return; data.clear(); data.append(ss.str()); @@ -5813,7 +5978,9 @@ void RGWPutACLs::execute(optional_yield y) op_ret = -EINVAL; return; } - policy = static_cast(parser.find_first("AccessControlPolicy")); + policy = static_cast( + parser.find_first("AccessControlPolicy") + ); if (!policy) { op_ret = -EINVAL; return; @@ -5821,7 +5988,7 @@ void RGWPutACLs::execute(optional_yield y) const RGWAccessControlList& req_acl = policy->get_acl(); const multimap& req_grant_map = req_acl.get_grant_map(); -#define ACL_GRANTS_MAX_NUM 100 +#define ACL_GRANTS_MAX_NUM 100 int max_num = s->cct->_conf->rgw_acl_grants_max_num; if (max_num < 0) { max_num = ACL_GRANTS_MAX_NUM; @@ -5830,11 +5997,13 @@ void RGWPutACLs::execute(optional_yield y) int grants_num = req_grant_map.size(); if (grants_num > max_num) { ldpp_dout(this, 4) << "An acl can have up to " << max_num - << " 
grants, request acl grants num: " << grants_num << dendl; + << " grants, request acl grants num: " << grants_num + << dendl; op_ret = -ERR_LIMIT_EXCEEDED; - s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum " - + std::to_string(max_num) - + " grants allowed in an acl."; + s->err.message = + "The request is rejected, because the acl grants number you requested " + "is larger than the maximum " + + std::to_string(max_num) + " grants allowed in an acl."; return; } @@ -5845,9 +6014,12 @@ void RGWPutACLs::execute(optional_yield y) if (s->canned_acl.empty()) { in_data.append(data); } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } } @@ -5859,8 +6031,7 @@ void RGWPutACLs::execute(optional_yield y) } op_ret = policy->rebuild(this, driver, &owner, new_policy, s->err.message); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (s->cct->_conf->subsys.should_gather()) { ldpp_dout(this, 15) << "New AccessControlPolicy:"; @@ -5868,8 +6039,7 @@ void RGWPutACLs::execute(optional_yield y) *_dout << dendl; } - if (s->bucket_access_conf && - s->bucket_access_conf->block_public_acls() && + if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls() && new_policy.is_public(this)) { op_ret = -EACCES; return; @@ -5882,7 +6052,7 @@ void RGWPutACLs::execute(optional_yield y) //if instance is empty, we should modify the latest object op_ret = s->object->modify_obj_attrs(RGW_ATTR_ACL, bl, s->yield, this); } else { - map attrs = s->bucket_attrs; + map attrs = s->bucket_attrs; attrs[RGW_ATTR_ACL] = bl; op_ret = 
s->bucket->merge_and_store_attrs(this, attrs, y); } @@ -5891,10 +6061,9 @@ void RGWPutACLs::execute(optional_yield y) } } -void RGWPutLC::execute(optional_yield y) -{ +void RGWPutLC::execute(optional_yield y) { bufferlist bl; - + RGWLifecycleConfiguration_S3 config(s->cct); RGWXMLParser parser; RGWLifecycleConfiguration_S3 new_config(s->cct); @@ -5910,8 +6079,9 @@ void RGWPutLC::execute(optional_yield y) try { content_md5_bin = rgw::from_base64(std::string_view(content_md5)); } catch (...) { - s->err.message = "Request header Content-MD5 contains character " - "that is not base64 encoded."; + s->err.message = + "Request header Content-MD5 contains character " + "that is not base64 encoded."; ldpp_dout(this, 5) << s->err.message << dendl; op_ret = -ERR_BAD_DIGEST; return; @@ -5924,27 +6094,32 @@ void RGWPutLC::execute(optional_yield y) } op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; char* buf = data.c_str(); - ldpp_dout(this, 15) << "read len=" << data.length() << " data=" << (buf ? buf : "") << dendl; + ldpp_dout(this, 15) << "read len=" << data.length() + << " data=" << (buf ? 
buf : "") << dendl; if (content_md5_bin) { MD5 data_hash; // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes data_hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE]; - data_hash.Update(reinterpret_cast(buf), data.length()); + data_hash.Update( + reinterpret_cast(buf), data.length() + ); data_hash.Final(data_hash_res); - if (memcmp(data_hash_res, content_md5_bin->c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) { + if (memcmp( + data_hash_res, content_md5_bin->c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE + ) != 0) { op_ret = -ERR_BAD_DIGEST; - s->err.message = "The Content-MD5 you specified did not match what we received."; + s->err.message = + "The Content-MD5 you specified did not match what we received."; ldpp_dout(this, 5) << s->err.message - << " Specified content md5: " << content_md5 - << ", calculated content md5: " << data_hash_res - << dendl; + << " Specified content md5: " << content_md5 + << ", calculated content md5: " << data_hash_res + << dendl; return; } } @@ -5963,8 +6138,7 @@ void RGWPutLC::execute(optional_yield y) } op_ret = config.rebuild(new_config); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (s->cct->_conf->subsys.should_gather()) { XMLFormatter xf; @@ -5974,136 +6148,149 @@ void RGWPutLC::execute(optional_yield y) ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } - op_ret = driver->get_rgwlc()->set_bucket_config(s->bucket.get(), s->bucket_attrs, &new_config); + op_ret = driver->get_rgwlc()->set_bucket_config( + s->bucket.get(), 
s->bucket_attrs, &new_config + ); if (op_ret < 0) { return; } return; } -void RGWDeleteLC::execute(optional_yield y) -{ +void RGWDeleteLC::execute(optional_yield y) { bufferlist data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } - op_ret = driver->get_rgwlc()->remove_bucket_config(s->bucket.get(), s->bucket_attrs); + op_ret = driver->get_rgwlc()->remove_bucket_config( + s->bucket.get(), s->bucket_attrs + ); if (op_ret < 0) { return; } return; } -int RGWGetCORS::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetCORS::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS); } -void RGWGetCORS::execute(optional_yield y) -{ +void RGWGetCORS::execute(optional_yield y) { op_ret = read_bucket_cors(); - if (op_ret < 0) - return ; + if (op_ret < 0) return; if (!cors_exist) { - ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" + << dendl; op_ret = -ERR_NO_CORS_FOUND; return; } } -int RGWPutCORS::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWPutCORS::verify_permission(optional_yield y) { + auto 
[has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS); } -void RGWPutCORS::execute(optional_yield y) -{ +void RGWPutCORS::execute(optional_yield y) { rgw_raw_obj obj; op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - rgw::sal::Attrs attrs(s->bucket_attrs); - attrs[RGW_ATTR_CORS] = cors_bl; - return s->bucket->merge_and_store_attrs(this, attrs, s->yield); - }); + rgw::sal::Attrs attrs(s->bucket_attrs); + attrs[RGW_ATTR_CORS] = cors_bl; + return s->bucket->merge_and_store_attrs(this, attrs, s->yield); + }); } -int RGWDeleteCORS::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteCORS::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); // No separate delete permission return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS); } -void RGWDeleteCORS::execute(optional_yield y) -{ +void RGWDeleteCORS::execute(optional_yield y) { bufferlist data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + 
this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } - op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - op_ret = read_bucket_cors(); - if (op_ret < 0) - return op_ret; - - if (!cors_exist) { - ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; - op_ret = -ENOENT; - return op_ret; - } + op_ret = + retry_raced_bucket_write(this, s->bucket.get(), [this] { + op_ret = read_bucket_cors(); + if (op_ret < 0) return op_ret; - rgw::sal::Attrs attrs(s->bucket_attrs); - attrs.erase(RGW_ATTR_CORS); - op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); - if (op_ret < 0) { - ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; - } - return op_ret; - }); + if (!cors_exist) { + ldpp_dout(this, 2) + << "No CORS configuration set yet for this bucket" << dendl; + op_ret = -ENOENT; + return op_ret; + } + + rgw::sal::Attrs attrs(s->bucket_attrs); + attrs.erase(RGW_ATTR_CORS); + op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + if (op_ret < 0) { + ldpp_dout(this, 0) + << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret << dendl; + } + return op_ret; + }); } -void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) { +void RGWOptionsCORS::get_response_params( + string& hdrs, string& exp_hdrs, unsigned* max_age +) { get_cors_response_headers(this, rule, req_hdrs, hdrs, exp_hdrs, max_age); } -int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) { +int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration* cc) { rule = cc->host_name_rule(origin); if (!rule) { - ldpp_dout(this, 10) << "There is no 
cors rule present for " << origin << dendl; + ldpp_dout(this, 10) << "There is no cors rule present for " << origin + << dendl; return -ENOENT; } @@ -6118,11 +6305,9 @@ int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) { return 0; } -void RGWOptionsCORS::execute(optional_yield y) -{ +void RGWOptionsCORS::execute(optional_yield y) { op_ret = read_bucket_cors(); - if (op_ret < 0) - return; + if (op_ret < 0) return; origin = s->info.env->get("HTTP_ORIGIN"); if (!origin) { @@ -6132,12 +6317,16 @@ void RGWOptionsCORS::execute(optional_yield y) } req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD"); if (!req_meth) { - ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl; + ldpp_dout( + this, 0 + ) << "Missing mandatory Access-control-request-method header" + << dendl; op_ret = -EINVAL; return; } if (!cors_exist) { - ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" + << dendl; op_ret = -ENOENT; return; } @@ -6150,72 +6339,70 @@ void RGWOptionsCORS::execute(optional_yield y) return; } -int RGWGetRequestPayment::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetRequestPayment::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment); } -void RGWGetRequestPayment::pre_exec() -{ +void RGWGetRequestPayment::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetRequestPayment::execute(optional_yield y) -{ +void RGWGetRequestPayment::execute(optional_yield y) { requester_pays = s->bucket->get_info().requester_pays; } -int 
RGWSetRequestPayment::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWSetRequestPayment::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment); } -void RGWSetRequestPayment::pre_exec() -{ +void RGWSetRequestPayment::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWSetRequestPayment::execute(optional_yield y) -{ - +void RGWSetRequestPayment::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; - - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, in_data, nullptr, s->info, y); + if (op_ret < 0) return; + + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, in_data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } s->bucket->get_info().requester_pays = requester_pays; op_ret = s->bucket->put_info(this, false, real_time()); if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret + << dendl; return; } s->bucket_attrs = s->bucket->get_attrs(); } -int RGWInitMultipart::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); +int RGWInitMultipart::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); 
if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } @@ -6224,35 +6411,39 @@ int RGWInitMultipart::verify_permission(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; ARN obj_arn(s->object->get_obj()); if (s->iam_policy) { - e = s->iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, - obj_arn, - princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, princ_type + ); } if (e == Effect::Deny) { return -EACCES; } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) { return 0; } } else if (princ_type == 
rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) { return 0; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return 0; } } @@ -6270,22 +6461,20 @@ int RGWInitMultipart::verify_permission(optional_yield y) return 0; } -void RGWInitMultipart::pre_exec() -{ +void RGWInitMultipart::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWInitMultipart::execute(optional_yield y) -{ - multipart_trace = tracing::rgw::tracer.start_trace(tracing::rgw::MULTIPART, s->trace_enabled); +void RGWInitMultipart::execute(optional_yield y) { + multipart_trace = tracing::rgw::tracer.start_trace( + tracing::rgw::MULTIPART, s->trace_enabled + ); bufferlist aclbl, tracebl; rgw::sal::Attrs attrs; - if (get_params(y) < 0) - return; + if (get_params(y) < 0) return; - if (rgw::sal::Object::empty(s->object.get())) - return; + if (rgw::sal::Object::empty(s->object.get())) return; if (multipart_trace) { tracing::encode(multipart_trace->GetContext(), tracebl); @@ -6299,8 +6488,7 @@ void RGWInitMultipart::execute(optional_yield y) /* select encryption mode */ op_ret = prepare_encryption(attrs); - if (op_ret != 0) - return; + if (op_ret != 0) return; op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs); if (op_ret < 0) { @@ -6308,8 +6496,7 @@ void RGWInitMultipart::execute(optional_yield y) } std::unique_ptr upload; - upload = s->bucket->get_multipart_upload(s->object->get_name(), - 
upload_id); + upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id); op_ret = upload->init(this, s->yield, s->owner, s->dest_placement, attrs); if (op_ret == 0) { @@ -6317,19 +6504,20 @@ void RGWInitMultipart::execute(optional_yield y) } s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, upload_id); multipart_trace->UpdateName(tracing::rgw::MULTIPART + upload_id); - } -int RGWCompleteMultipart::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); +int RGWCompleteMultipart::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } @@ -6338,35 +6526,39 @@ int RGWCompleteMultipart::verify_permission(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; rgw::ARN obj_arn(s->object->get_obj()); if (s->iam_policy) { - e = s->iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, - obj_arn, - princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, princ_type + ); } if (e == Effect::Deny) { return -EACCES; } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, - 
s->object->get_obj()); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) { return 0; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) { return 0; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return 0; } } @@ -6384,14 +6576,12 @@ int RGWCompleteMultipart::verify_permission(optional_yield y) return 0; } -void RGWCompleteMultipart::pre_exec() -{ +void RGWCompleteMultipart::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWCompleteMultipart::execute(optional_yield y) -{ - RGWMultiCompleteUpload *parts; +void RGWCompleteMultipart::execute(optional_yield y) { + RGWMultiCompleteUpload* parts; RGWMultiXMLParser parser; std::unique_ptr upload; off_t ofs = 0; @@ -6400,8 +6590,7 @@ void RGWCompleteMultipart::execute(optional_yield y) uint64_t olh_epoch = 0; op_ret = 
get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); if (op_ret < 0) { return; @@ -6422,10 +6611,14 @@ void RGWCompleteMultipart::execute(optional_yield y) return; } - parts = static_cast(parser.find_first("CompleteMultipartUpload")); + parts = static_cast( + parser.find_first("CompleteMultipartUpload") + ); if (!parts || parts->parts.empty()) { // CompletedMultipartUpload is incorrect but some versions of some libraries use it, see PR #41700 - parts = static_cast(parser.find_first("CompletedMultipartUpload")); + parts = static_cast( + parser.find_first("CompletedMultipartUpload") + ); } if (!parts || parts->parts.empty()) { @@ -6433,7 +6626,6 @@ void RGWCompleteMultipart::execute(optional_yield y) return; } - if ((int)parts->parts.size() > s->cct->_conf->rgw_multipart_part_upload_limit) { op_ret = -ERANGE; @@ -6446,7 +6638,8 @@ void RGWCompleteMultipart::execute(optional_yield y) bool compressed = false; uint64_t accounted_size = 0; - list remove_objs; /* objects to be removed from index listing */ + list + remove_objs; /* objects to be removed from index listing */ meta_obj = upload->get_meta_obj(); meta_obj->set_in_extra_data(true); @@ -6454,8 +6647,7 @@ void RGWCompleteMultipart::execute(optional_yield y) /*take a cls lock on meta_obj to prevent racing completions (or retries) from deleting the parts*/ - int max_lock_secs_mp = - s->cct->_conf.get_val("rgw_mp_lock_max_time"); + int max_lock_secs_mp = s->cct->_conf.get_val("rgw_mp_lock_max_time"); utime_t dur(max_lock_secs_mp, 0); serializer = meta_obj->get_serializer(this, "RGWCompleteMultipart"); @@ -6463,7 +6655,10 @@ void RGWCompleteMultipart::execute(optional_yield y) if (op_ret < 0) { ldpp_dout(this, 0) << "failed to acquire lock" << dendl; if (op_ret == -ENOENT && check_previously_completed(parts)) { - ldpp_dout(this, 1) << "NOTICE: This multipart completion is already completed" << dendl; + ldpp_dout( + this, 1 + ) << 
"NOTICE: This multipart completion is already completed" + << dendl; op_ret = 0; return; } @@ -6475,18 +6670,20 @@ void RGWCompleteMultipart::execute(optional_yield y) op_ret = meta_obj->get_obj_attrs(s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj - << " ret=" << op_ret << dendl; + << " ret=" << op_ret << dendl; return; } s->trace->SetAttribute(tracing::rgw::UPLOAD_ID, upload_id); jspan_context trace_ctx(false, false); extract_span_context(meta_obj->get_attrs(), trace_ctx); multipart_trace = tracing::rgw::tracer.add_span(name(), trace_ctx); - // make reservation for notification if needed - std::unique_ptr res - = driver->get_notification(meta_obj.get(), nullptr, s, rgw::notify::ObjectCreatedCompleteMultipartUpload, y, &s->object->get_name()); + std::unique_ptr res = driver->get_notification( + meta_obj.get(), nullptr, s, + rgw::notify::ObjectCreatedCompleteMultipartUpload, y, + &s->object->get_name() + ); op_ret = res->publish_reserve(this); if (op_ret < 0) { return; @@ -6503,36 +6700,47 @@ void RGWCompleteMultipart::execute(optional_yield y) } target_obj->set_attrs(meta_obj->get_attrs()); - op_ret = upload->complete(this, y, s->cct, parts->parts, remove_objs, accounted_size, compressed, cs_info, ofs, s->req_id, s->owner, olh_epoch, target_obj.get()); + op_ret = upload->complete( + this, y, s->cct, parts->parts, remove_objs, accounted_size, compressed, + cs_info, ofs, s->req_id, s->owner, olh_epoch, target_obj.get() + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: upload complete failed ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: upload complete failed ret=" << op_ret + << dendl; return; } // remove the upload meta object ; the meta object is not versioned // when the bucket is, as that would add an unneeded delete marker int r = meta_obj->delete_object(this, y, true /* prevent versioning */); - if (r >= 0) { + if (r >= 0) { /* serializer's exclusive lock is released */ 
serializer->clear_locked(); } else { - ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl; + ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj + << dendl; } // send request to notification manager - int ret = res->publish_commit(this, ofs, upload->get_mtime(), etag, target_obj->get_instance()); + int ret = res->publish_commit( + this, ofs, upload->get_mtime(), etag, target_obj->get_instance() + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } -} // RGWCompleteMultipart::execute +} // RGWCompleteMultipart::execute -bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUpload* parts) -{ +bool RGWCompleteMultipart::check_previously_completed( + const RGWMultiCompleteUpload* parts +) { // re-calculate the etag from the parts and compare to the existing object int ret = s->object->get_obj_attrs(s->yield, this); if (ret < 0) { - ldpp_dout(this, 0) << __func__ << "() ERROR: get_obj_attrs() returned ret=" << ret << dendl; + ldpp_dout(this, 0) << __func__ + << "() ERROR: get_obj_attrs() returned ret=" << ret + << dendl; return false; } rgw::sal::Attrs sattrs = s->object->get_attrs(); @@ -6545,49 +6753,58 @@ bool RGWCompleteMultipart::check_previously_completed(const RGWMultiCompleteUplo std::string partetag = rgw_string_unquote(part); char petag[CEPH_CRYPTO_MD5_DIGESTSIZE]; hex_to_buf(partetag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE); - hash.Update((const unsigned char *)petag, sizeof(petag)); - ldpp_dout(this, 20) << __func__ << "() re-calculating multipart etag: part: " - << index << ", etag: " << partetag << dendl; + hash.Update((const unsigned char*)petag, sizeof(petag)); + ldpp_dout(this, 20) << __func__ + << "() re-calculating multipart etag: part: " << index + << ", etag: 
" << partetag << dendl; } unsigned char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE]; char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16]; hash.Final(final_etag); buf_to_hex(final_etag, CEPH_CRYPTO_MD5_DIGESTSIZE, final_etag_str); - snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, - "-%lld", (long long)parts->parts.size()); + snprintf( + &final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], + sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%lld", + (long long)parts->parts.size() + ); if (oetag.compare(final_etag_str) != 0) { - ldpp_dout(this, 1) << __func__ << "() NOTICE: etag mismatch: object etag:" - << oetag << ", re-calculated etag:" << final_etag_str << dendl; + ldpp_dout(this, 1) << __func__ + << "() NOTICE: etag mismatch: object etag:" << oetag + << ", re-calculated etag:" << final_etag_str << dendl; return false; } - ldpp_dout(this, 5) << __func__ << "() object etag and re-calculated etag match, etag: " << oetag << dendl; + ldpp_dout(this, 5) << __func__ + << "() object etag and re-calculated etag match, etag: " + << oetag << dendl; return true; } -void RGWCompleteMultipart::complete() -{ +void RGWCompleteMultipart::complete() { /* release exclusive lock iff not already */ if (unlikely(serializer.get() && serializer->is_locked())) { int r = serializer->unlock(); if (r < 0) { - ldpp_dout(this, 0) << "WARNING: failed to unlock " << *serializer.get() << dendl; + ldpp_dout(this, 0) << "WARNING: failed to unlock " << *serializer.get() + << dendl; } } send_response(); } -int RGWAbortMultipart::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); +int RGWAbortMultipart::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if 
(s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3AbortMultipartUpload, - s->object->get_obj()); + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3AbortMultipartUpload, + s->object->get_obj() + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } @@ -6596,9 +6813,10 @@ int RGWAbortMultipart::verify_permission(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; ARN obj_arn(s->object->get_obj()); if (s->iam_policy) { - e = s->iam_policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3AbortMultipartUpload, - obj_arn, princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3AbortMultipartUpload, obj_arn, + princ_type + ); } if (e == Effect::Deny) { @@ -6606,25 +6824,30 @@ int RGWAbortMultipart::verify_permission(optional_yield y) } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, - s->object->get_obj()); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, + s->object->get_obj() + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) { return 0; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { 
//Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) { return 0; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return 0; } } @@ -6642,21 +6865,18 @@ int RGWAbortMultipart::verify_permission(optional_yield y) return 0; } -void RGWAbortMultipart::pre_exec() -{ +void RGWAbortMultipart::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWAbortMultipart::execute(optional_yield y) -{ +void RGWAbortMultipart::execute(optional_yield y) { op_ret = -EINVAL; string upload_id; upload_id = s->info.args.get("uploadId"); std::unique_ptr meta_obj; std::unique_ptr upload; - if (upload_id.empty() || rgw::sal::Object::empty(s->object.get())) - return; + if (upload_id.empty() || rgw::sal::Object::empty(s->object.get())) return; upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id); jspan_context trace_ctx(false, false); @@ -6672,9 +6892,9 @@ void RGWAbortMultipart::execute(optional_yield y) op_ret = upload->abort(this, s->cct); } -int RGWListMultipart::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); +int RGWListMultipart::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); if (has_s3_existing_tag || has_s3_resource_tag) rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); @@ -6684,16 +6904,13 @@ int 
RGWListMultipart::verify_permission(optional_yield y) return 0; } -void RGWListMultipart::pre_exec() -{ +void RGWListMultipart::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWListMultipart::execute(optional_yield y) -{ +void RGWListMultipart::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; upload = s->bucket->get_multipart_upload(s->object->get_name(), upload_id); @@ -6706,40 +6923,39 @@ void RGWListMultipart::execute(optional_yield y) try { policy.decode(bliter); } catch (buffer::error& err) { - ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout( + this, 0 + ) << "ERROR: could not decode policy, caught buffer::error" + << dendl; op_ret = -EIO; } } - if (op_ret < 0) - return; + if (op_ret < 0) return; - op_ret = upload->list_parts(this, s->cct, max_parts, marker, NULL, &truncated); + op_ret = + upload->list_parts(this, s->cct, max_parts, marker, NULL, &truncated); } -int RGWListBucketMultiparts::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWListBucketMultiparts::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - if (!verify_bucket_permission(this, - s, - rgw::IAM::s3ListBucketMultipartUploads)) + if (!verify_bucket_permission( + this, s, rgw::IAM::s3ListBucketMultipartUploads + )) return -EACCES; return 0; } -void RGWListBucketMultiparts::pre_exec() -{ +void RGWListBucketMultiparts::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWListBucketMultiparts::execute(optional_yield y) -{ +void RGWListBucketMultiparts::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (s->prot_flags & 
RGW_REST_SWIFT) { string path_args; @@ -6750,13 +6966,14 @@ void RGWListBucketMultiparts::execute(optional_yield y) return; } prefix = path_args; - delimiter="/"; + delimiter = "/"; } } - op_ret = s->bucket->list_multiparts(this, prefix, marker_meta, - delimiter, max_uploads, uploads, - &common_prefixes, &is_truncated); + op_ret = s->bucket->list_multiparts( + this, prefix, marker_meta, delimiter, max_uploads, uploads, + &common_prefixes, &is_truncated + ); if (op_ret < 0) { return; } @@ -6767,57 +6984,65 @@ void RGWListBucketMultiparts::execute(optional_yield y) } } -void RGWGetHealthCheck::execute(optional_yield y) -{ +void RGWGetHealthCheck::execute(optional_yield y) { if (!g_conf()->rgw_healthcheck_disabling_path.empty() && (::access(g_conf()->rgw_healthcheck_disabling_path.c_str(), F_OK) == 0)) { /* Disabling path specified & existent in the filesystem. */ op_ret = -ERR_SERVICE_UNAVAILABLE; /* 503 */ } else { - op_ret = 0; /* 200 OK */ + op_ret = 0; /* 200 OK */ } } -int RGWDeleteMultiObj::verify_permission(optional_yield y) -{ +int RGWDeleteMultiObj::verify_permission(optional_yield y) { int op_ret = get_params(y); if (op_ret) { return op_ret; } - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - if (s->iam_policy || ! s->iam_user_policies.empty() || ! 
s->session_policies.empty()) { + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) { ARN bucket_arn(s->bucket->get_key()); - auto r = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key())); + auto r = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()) + ); if (r == Effect::Deny) { bypass_perm = false; } else if (r == Effect::Pass && s->iam_policy) { - r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, - bucket_arn); + r = s->iam_policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, + bucket_arn + ); if (r == Effect::Deny) { bypass_perm = false; } } else if (r == Effect::Pass && !s->session_policies.empty()) { - r = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key())); + r = eval_identity_or_session_policies( + this, s->session_policies, s->env, + rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()) + ); if (r == Effect::Deny) { bypass_perm = false; } } } - bool not_versioned = rgw::sal::Object::empty(s->object.get()) || s->object->get_instance().empty(); + bool not_versioned = rgw::sal::Object::empty(s->object.get()) || + s->object->get_instance().empty(); - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - not_versioned ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - ARN(s->bucket->get_key())); + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + not_versioned ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + ARN(s->bucket->get_key()) + ); if (identity_policy_res == Effect::Deny) { return -EACCES; } @@ -6826,64 +7051,64 @@ int RGWDeleteMultiObj::verify_permission(optional_yield y) rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; rgw::ARN bucket_arn(s->bucket->get_key()); if (s->iam_policy) { - r = s->iam_policy->eval(s->env, *s->auth.identity, - not_versioned ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - bucket_arn, - princ_type); - } - if (r == Effect::Deny) - return -EACCES; + r = s->iam_policy->eval( + s->env, *s->auth.identity, + not_versioned ? rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + bucket_arn, princ_type + ); + } + if (r == Effect::Deny) return -EACCES; if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - not_versioned ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - ARN(s->bucket->get_key())); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, + not_versioned ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + ARN(s->bucket->get_key()) + ); if (session_policy_res == Effect::Deny) { - return -EACCES; + return -EACCES; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && r == Effect::Allow)) { return 0; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + r == Effect::Allow) { return 0; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return 0; } } return -EACCES; } - if (r == Effect::Allow || identity_policy_res == Effect::Allow) - return 0; + if (r == Effect::Allow || identity_policy_res == Effect::Allow) return 0; } acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE); - if (!acl_allowed) - return -EACCES; + if (!acl_allowed) return -EACCES; return 0; } -void RGWDeleteMultiObj::pre_exec() -{ +void RGWDeleteMultiObj::pre_exec() { rgw_bucket_object_pre_exec(s); } void RGWDeleteMultiObj::write_ops_log_entry(rgw_log_entry& entry) const { int num_err = 0; int num_ok = 0; - for (auto iter = ops_log_entries.begin(); - iter != ops_log_entries.end(); + for (auto iter = 
ops_log_entries.begin(); iter != ops_log_entries.end(); ++iter) { if (iter->error) { num_err++; @@ -6896,10 +7121,10 @@ void RGWDeleteMultiObj::write_ops_log_entry(rgw_log_entry& entry) const { entry.delete_multi_obj_meta.objects = std::move(ops_log_entries); } -void RGWDeleteMultiObj::wait_flush(optional_yield y, - boost::asio::deadline_timer *formatter_flush_cond, - std::function predicate) -{ +void RGWDeleteMultiObj::wait_flush( + optional_yield y, boost::asio::deadline_timer* formatter_flush_cond, + std::function predicate +) { if (y && formatter_flush_cond) { auto yc = y.get_yield_context(); while (!predicate()) { @@ -6910,17 +7135,20 @@ void RGWDeleteMultiObj::wait_flush(optional_yield y, } } -void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_yield y, - boost::asio::deadline_timer *formatter_flush_cond) -{ +void RGWDeleteMultiObj::handle_individual_object( + const rgw_obj_key& o, optional_yield y, + boost::asio::deadline_timer* formatter_flush_cond +) { std::string version_id; std::unique_ptr obj = bucket->get_object(o); - if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - o.instance.empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - ARN(obj->get_obj())); + if (s->iam_policy || !s->iam_user_policies.empty() || + !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, + o.instance.empty() ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + ARN(obj->get_obj()) + ); if (identity_policy_res == Effect::Deny) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; @@ -6930,13 +7158,12 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; if (s->iam_policy) { ARN obj_arn(obj->get_obj()); - e = s->iam_policy->eval(s->env, - *s->auth.identity, - o.instance.empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - obj_arn, - princ_type); + e = s->iam_policy->eval( + s->env, *s->auth.identity, + o.instance.empty() ? rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + obj_arn, princ_type + ); } if (e == Effect::Deny) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); @@ -6944,30 +7171,35 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ } if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - o.instance.empty() ? - rgw::IAM::s3DeleteObject : - rgw::IAM::s3DeleteObjectVersion, - ARN(obj->get_obj())); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, + o.instance.empty() ? 
rgw::IAM::s3DeleteObject + : rgw::IAM::s3DeleteObjectVersion, + ARN(obj->get_obj()) + ); if (session_policy_res == Effect::Deny) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && (session_policy_res != Effect::Allow || e != Effect::Allow)) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) { + if ((session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) && + e != Effect::Allow) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; } - } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res != Effect::Allow || + identity_policy_res != Effect::Allow) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; } @@ -6976,7 +7208,8 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ return; } - if ((identity_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) { + if ((identity_policy_res == Effect::Pass && e == Effect::Pass && + !acl_allowed)) { send_partial_response(o, false, "", -EACCES, formatter_flush_cond); return; } @@ -6987,7 +7220,8 @@ void 
RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ if (!rgw::sal::Object::empty(obj.get())) { RGWObjState* astate = nullptr; - bool check_obj_lock = obj->have_instance() && bucket->get_info().obj_lock_enabled(); + bool check_obj_lock = + obj->have_instance() && bucket->get_info().obj_lock_enabled(); const auto ret = obj->get_obj_state(this, &astate, y, true); if (ret < 0) { @@ -7006,9 +7240,13 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ if (check_obj_lock) { ceph_assert(astate); - int object_lock_response = verify_object_lock(this, astate->attrset, bypass_perm, bypass_governance_mode); + int object_lock_response = verify_object_lock( + this, astate->attrset, bypass_perm, bypass_governance_mode + ); if (object_lock_response != 0) { - send_partial_response(o, false, "", object_lock_response, formatter_flush_cond); + send_partial_response( + o, false, "", object_lock_response, formatter_flush_cond + ); return; } } @@ -7016,11 +7254,12 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ // make reservation for notification if needed const auto versioned_object = s->bucket->versioning_enabled(); - const auto event_type = versioned_object && obj->get_instance().empty() ? - rgw::notify::ObjectRemovedDeleteMarkerCreated : - rgw::notify::ObjectRemovedDelete; - std::unique_ptr res - = driver->get_notification(obj.get(), s->src_object.get(), s, event_type, y); + const auto event_type = versioned_object && obj->get_instance().empty() + ? 
rgw::notify::ObjectRemovedDeleteMarkerCreated + : rgw::notify::ObjectRemovedDelete; + std::unique_ptr res = driver->get_notification( + obj.get(), s->src_object.get(), s, event_type, y + ); op_ret = res->publish_reserve(this); if (op_ret < 0) { send_partial_response(o, false, "", op_ret, formatter_flush_cond); @@ -7030,7 +7269,8 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ obj->set_atomic(); std::unique_ptr del_op = obj->get_delete_op(); - del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status(); + del_op->params.versioning_status = + obj->get_bucket()->get_info().versioning_status(); del_op->params.obj_owner = s->owner; del_op->params.bucket_owner = s->bucket_owner; del_op->params.marker_version_id = version_id; @@ -7040,27 +7280,34 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_ op_ret = 0; } - send_partial_response(o, obj->get_delete_marker(), del_op->result.version_id, op_ret, formatter_flush_cond); + send_partial_response( + o, obj->get_delete_marker(), del_op->result.version_id, op_ret, + formatter_flush_cond + ); // send request to notification manager - int ret = res->publish_commit(this, obj_size, ceph::real_clock::now(), etag, version_id); + int ret = res->publish_commit( + this, obj_size, ceph::real_clock::now(), etag, version_id + ); if (ret < 0) { - ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl; + ldpp_dout(this, 1) << "ERROR: publishing notification failed, with error: " + << ret << dendl; // too late to rollback operation, hence op_ret is not set here } } -void RGWDeleteMultiObj::execute(optional_yield y) -{ - RGWMultiDelDelete *multi_delete; +void RGWDeleteMultiObj::execute(optional_yield y) { + RGWMultiDelDelete* multi_delete; vector::iterator iter; RGWMultiDelXMLParser parser; uint32_t aio_count = 0; - const uint32_t max_aio = std::max(1, s->cct->_conf->rgw_multi_obj_del_max_aio); + const uint32_t 
max_aio = + std::max(1, s->cct->_conf->rgw_multi_obj_del_max_aio); char* buf; std::optional formatter_flush_cond; if (y) { - formatter_flush_cond = std::make_optional(y.get_io_context()); + formatter_flush_cond = + std::make_optional(y.get_io_context()); } buf = data.c_str(); @@ -7079,12 +7326,12 @@ void RGWDeleteMultiObj::execute(optional_yield y) goto error; } - multi_delete = static_cast(parser.find_first("Delete")); + multi_delete = static_cast(parser.find_first("Delete")); if (!multi_delete) { op_ret = -EINVAL; goto error; } else { -#define DELETE_MULTI_OBJ_MAX_NUM 1000 +#define DELETE_MULTI_OBJ_MAX_NUM 1000 int max_num = s->cct->_conf->rgw_delete_multi_obj_max_num; if (max_num < 0) { max_num = DELETE_MULTI_OBJ_MAX_NUM; @@ -7096,8 +7343,7 @@ void RGWDeleteMultiObj::execute(optional_yield y) } } - if (multi_delete->is_quiet()) - quiet = true; + if (multi_delete->is_quiet()) quiet = true; if (s->bucket->get_info().mfa_enabled()) { bool has_versioned = false; @@ -7108,7 +7354,9 @@ void RGWDeleteMultiObj::execute(optional_yield y) } } if (has_versioned && !s->mfa_verified) { - ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl; + ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a " + "versioned object, mfa auth not provided" + << dendl; op_ret = -ERR_MFA_REQUIRED; goto error; } @@ -7120,26 +7368,35 @@ void RGWDeleteMultiObj::execute(optional_yield y) } for (iter = multi_delete->objects.begin(); - iter != multi_delete->objects.end(); - ++iter) { + iter != multi_delete->objects.end(); ++iter) { rgw_obj_key obj_key = *iter; if (y) { wait_flush(y, &*formatter_flush_cond, [&aio_count, max_aio] { return aio_count < max_aio; }); aio_count++; - spawn::spawn(y.get_yield_context(), [this, &y, &aio_count, obj_key, &formatter_flush_cond] (yield_context yield) { - handle_individual_object(obj_key, optional_yield { y.get_io_context(), yield }, &*formatter_flush_cond); - aio_count--; - }); + 
spawn::spawn( + y.get_yield_context(), + [this, &y, &aio_count, obj_key, + &formatter_flush_cond](yield_context yield) { + handle_individual_object( + obj_key, optional_yield{y.get_io_context(), yield}, + &*formatter_flush_cond + ); + aio_count--; + } + ); } else { handle_individual_object(obj_key, y, nullptr); } } if (formatter_flush_cond) { - wait_flush(y, &*formatter_flush_cond, [this, n=multi_delete->objects.size()] { - return n == ops_log_entries.size(); - }); + wait_flush( + y, &*formatter_flush_cond, + [this, n = multi_delete->objects.size()] { + return n == ops_log_entries.size(); + } + ); } /* set the return code to zero, errors at this point will be @@ -7154,16 +7411,15 @@ void RGWDeleteMultiObj::execute(optional_yield y) error: send_status(); return; - } -bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo, - map& battrs, - ACLOwner& bucket_owner /* out */, - optional_yield y) -{ +bool RGWBulkDelete::Deleter::verify_permission( + RGWBucketInfo& binfo, map& battrs, + ACLOwner& bucket_owner /* out */, optional_yield y +) { RGWAccessControlPolicy bacl(driver->ctx()); - int ret = read_bucket_policy(dpp, driver, s, binfo, battrs, &bacl, binfo.bucket, y); + int ret = + read_bucket_policy(dpp, driver, s, binfo, battrs, &bacl, binfo.bucket, y); if (ret < 0) { return false; } @@ -7174,17 +7430,22 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo, /* We can use global user_acl because each BulkDelete request is allowed * to work on entities from a single account only. 
*/ - return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl.get(), - &bacl, policy, s->iam_user_policies, s->session_policies, rgw::IAM::s3DeleteBucket); + return verify_bucket_permission( + dpp, s, binfo.bucket, s->user_acl.get(), &bacl, policy, + s->iam_user_policies, s->session_policies, rgw::IAM::s3DeleteBucket + ); } -bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yield y) -{ +bool RGWBulkDelete::Deleter::delete_single( + const acct_path_t& path, optional_yield y +) { std::unique_ptr bucket; ACLOwner bowner; RGWObjVersionTracker ot; - int ret = driver->get_bucket(dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y); + int ret = driver->get_bucket( + dpp, s->user.get(), s->user->get_tenant(), path.bucket_name, &bucket, y + ); if (ret < 0) { goto binfo_fail; } @@ -7207,7 +7468,8 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie obj->set_atomic(); std::unique_ptr del_op = obj->get_delete_op(); - del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status(); + del_op->params.versioning_status = + obj->get_bucket()->get_info().versioning_status(); del_op->params.obj_owner = bowner; del_op->params.bucket_owner = bucket_owner; @@ -7226,47 +7488,39 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie return true; binfo_fail: - if (-ENOENT == ret) { - ldpp_dout(dpp, 20) << "cannot find bucket = " << path.bucket_name << dendl; - num_unfound++; - } else { - ldpp_dout(dpp, 20) << "cannot get bucket info, ret = " << ret << dendl; + if (-ENOENT == ret) { + ldpp_dout(dpp, 20) << "cannot find bucket = " << path.bucket_name << dendl; + num_unfound++; + } else { + ldpp_dout(dpp, 20) << "cannot get bucket info, ret = " << ret << dendl; - fail_desc_t failed_item = { - .err = ret, - .path = path - }; - failures.push_back(failed_item); - } - return false; + fail_desc_t failed_item = {.err = ret, .path = path}; + 
failures.push_back(failed_item); + } + return false; auth_fail: - ldpp_dout(dpp, 20) << "wrong auth for " << path << dendl; - { - fail_desc_t failed_item = { - .err = ret, - .path = path - }; - failures.push_back(failed_item); - } - return false; + ldpp_dout(dpp, 20) << "wrong auth for " << path << dendl; + { + fail_desc_t failed_item = {.err = ret, .path = path}; + failures.push_back(failed_item); + } + return false; delop_fail: - if (-ENOENT == ret) { - ldpp_dout(dpp, 20) << "cannot find entry " << path << dendl; - num_unfound++; - } else { - fail_desc_t failed_item = { - .err = ret, - .path = path - }; - failures.push_back(failed_item); - } - return false; + if (-ENOENT == ret) { + ldpp_dout(dpp, 20) << "cannot find entry " << path << dendl; + num_unfound++; + } else { + fail_desc_t failed_item = {.err = ret, .path = path}; + failures.push_back(failed_item); + } + return false; } -bool RGWBulkDelete::Deleter::delete_chunk(const std::list& paths, optional_yield y) -{ +bool RGWBulkDelete::Deleter::delete_chunk( + const std::list& paths, optional_yield y +) { ldpp_dout(dpp, 20) << "in delete_chunk" << dendl; for (auto path : paths) { ldpp_dout(dpp, 20) << "bulk deleting path: " << path << dendl; @@ -7276,18 +7530,15 @@ bool RGWBulkDelete::Deleter::delete_chunk(const std::list& paths, o return true; } -int RGWBulkDelete::verify_permission(optional_yield y) -{ +int RGWBulkDelete::verify_permission(optional_yield y) { return 0; } -void RGWBulkDelete::pre_exec() -{ +void RGWBulkDelete::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWBulkDelete::execute(optional_yield y) -{ +void RGWBulkDelete::execute(optional_yield y) { deleter = std::unique_ptr(new Deleter(this, driver, s)); bool is_truncated = false; @@ -7305,23 +7556,21 @@ void RGWBulkDelete::execute(optional_yield y) return; } - constexpr std::array RGWBulkUploadOp::terminal_errors; -int RGWBulkUploadOp::verify_permission(optional_yield y) -{ +int RGWBulkUploadOp::verify_permission(optional_yield y) { if 
(s->auth.identity->is_anonymous()) { return -EACCES; } - if (! verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) { + if (!verify_user_permission_no_policy(this, s, RGW_PERM_WRITE)) { return -EACCES; } if (s->user->get_tenant() != s->bucket_tenant) { ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant" - << " (user_id.tenant=" << s->user->get_tenant() - << " requested=" << s->bucket_tenant << ")" << dendl; + << " (user_id.tenant=" << s->user->get_tenant() + << " requested=" << s->bucket_tenant << ")" << dendl; return -EACCES; } @@ -7332,14 +7581,12 @@ int RGWBulkUploadOp::verify_permission(optional_yield y) return 0; } -void RGWBulkUploadOp::pre_exec() -{ +void RGWBulkUploadOp::pre_exec() { rgw_bucket_object_pre_exec(s); } boost::optional> -RGWBulkUploadOp::parse_path(const std::string_view& path) -{ +RGWBulkUploadOp::parse_path(const std::string_view& path) { /* We need to skip all slashes at the beginning in order to preserve * compliance with Swift. */ const size_t start_pos = path.find_first_not_of('/'); @@ -7352,24 +7599,24 @@ RGWBulkUploadOp::parse_path(const std::string_view& path) const auto bucket_name = path.substr(start_pos, sep_pos - start_pos); const auto obj_name = path.substr(sep_pos + 1); - return std::make_pair(std::string(bucket_name), - rgw_obj_key(std::string(obj_name))); + return std::make_pair( + std::string(bucket_name), rgw_obj_key(std::string(obj_name)) + ); } else { /* It's guaranteed here that bucket name is at least one character * long and is different than slash. */ - return std::make_pair(std::string(path.substr(start_pos)), - rgw_obj_key()); + return std::make_pair(std::string(path.substr(start_pos)), rgw_obj_key()); } } return none; } -std::pair -RGWBulkUploadOp::handle_upload_path(req_state *s) -{ +std::pair RGWBulkUploadOp::handle_upload_path( + req_state* s +) { std::string bucket_path, file_prefix; - if (! 
s->init_state.url_bucket.empty()) { + if (!s->init_state.url_bucket.empty()) { file_prefix = bucket_path = s->init_state.url_bucket + "/"; if (!rgw::sal::Object::empty(s->object.get())) { const std::string& object_name = s->object->get_name(); @@ -7386,13 +7633,14 @@ RGWBulkUploadOp::handle_upload_path(req_state *s) return std::make_pair(bucket_path, file_prefix); } -int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y) -{ +int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y) { if (s->user->get_max_buckets() > 0) { rgw::sal::BucketList buckets; std::string marker; - op_ret = s->user->list_buckets(this, marker, std::string(), s->user->get_max_buckets(), - false, buckets, y); + op_ret = s->user->list_buckets( + this, marker, std::string(), s->user->get_max_buckets(), false, buckets, + y + ); if (op_ret < 0) { return op_ret; } @@ -7405,29 +7653,30 @@ int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y) return 0; } -static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name) -{ +static void forward_req_info( + const DoutPrefixProvider* dpp, CephContext* cct, req_info& info, + const std::string& bucket_name +) { /* the request of container or object level will contain bucket name. 
* only at account level need to append the bucket name */ if (info.script_uri.find(bucket_name) != std::string::npos) { return; } - ldpp_dout(dpp, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl; + ldpp_dout(dpp, 20) << "append the bucket: " << bucket_name << " to req_info" + << dendl; info.script_uri.append("/").append(bucket_name); info.request_uri_aws4 = info.request_uri = info.script_uri; info.effective_uri = "/" + bucket_name; } -void RGWBulkUploadOp::init(rgw::sal::Driver* const driver, - req_state* const s, - RGWHandler* const h) -{ +void RGWBulkUploadOp::init( + rgw::sal::Driver* const driver, req_state* const s, RGWHandler* const h +) { RGWOp::init(driver, s, h); } -int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) -{ +int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) { ldpp_dout(this, 20) << "got directory=" << path << dendl; op_ret = handle_dir_verify_permission(y); @@ -7437,7 +7686,7 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) std::string bucket_name; rgw_obj_key object_junk; - std::tie(bucket_name, object_junk) = *parse_path(path); + std::tie(bucket_name, object_junk) = *parse_path(path); /* we need to make sure we read bucket info, it's not read before for this * specific request */ @@ -7465,30 +7714,28 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y) placement_rule.storage_class = s->info.storage_class; forward_req_info(this, s->cct, info, bucket_name); - op_ret = s->user->create_bucket(this, new_bucket, - driver->get_zone()->get_zonegroup().get_id(), - placement_rule, swift_ver_location, - pquota_info, policy, attrs, - out_info, ep_objv, - true, false, &bucket_exists, - info, &bucket, y); + op_ret = s->user->create_bucket( + this, new_bucket, driver->get_zone()->get_zonegroup().get_id(), + placement_rule, swift_ver_location, pquota_info, policy, attrs, out_info, + ep_objv, true, false, &bucket_exists, 
info, &bucket, y + ); /* continue if EEXIST and create_bucket will fail below. this way we can * recover from a partial create by retrying it. */ ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret - << ", bucket=" << bucket << dendl; + << ", bucket=" << bucket << dendl; return op_ret; } - -bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, - const rgw_obj& obj, - std::map& battrs, - ACLOwner& bucket_owner /* out */, - optional_yield y) -{ +bool RGWBulkUploadOp::handle_file_verify_permission( + RGWBucketInfo& binfo, const rgw_obj& obj, + std::map& battrs, + ACLOwner& bucket_owner /* out */, optional_yield y +) { RGWAccessControlPolicy bacl(driver->ctx()); - op_ret = read_bucket_policy(this, driver, s, binfo, battrs, &bacl, binfo.bucket, y); + op_ret = read_bucket_policy( + this, driver, s, binfo, battrs, &bacl, binfo.bucket, y + ); if (op_ret < 0) { ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl; return false; @@ -7497,40 +7744,47 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant); bucket_owner = bacl.get_owner(); - if (policy || ! 
s->iam_user_policies.empty() || !s->session_policies.empty()) { - auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env, - rgw::IAM::s3PutObject, obj); + if (policy || !s->iam_user_policies.empty() || !s->session_policies.empty()) { + auto identity_policy_res = eval_identity_or_session_policies( + this, s->iam_user_policies, s->env, rgw::IAM::s3PutObject, obj + ); if (identity_policy_res == Effect::Deny) { return false; } rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other; ARN obj_arn(obj); - auto e = policy->eval(s->env, *s->auth.identity, - rgw::IAM::s3PutObject, obj_arn, princ_type); + auto e = policy->eval( + s->env, *s->auth.identity, rgw::IAM::s3PutObject, obj_arn, princ_type + ); if (e == Effect::Deny) { return false; } - + if (!s->session_policies.empty()) { - auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env, - rgw::IAM::s3PutObject, obj); + auto session_policy_res = eval_identity_or_session_policies( + this, s->session_policies, s->env, rgw::IAM::s3PutObject, obj + ); if (session_policy_res == Effect::Deny) { - return false; + return false; } if (princ_type == rgw::IAM::PolicyPrincipal::Role) { //Intersection of session policy and identity policy plus intersection of session policy and bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || (session_policy_res == Effect::Allow && e == Effect::Allow)) { return true; } } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) { //Intersection of session policy and identity policy plus bucket policy - if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) { + if ((session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) || + e == Effect::Allow) { return true; } - } else if (princ_type == 
rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy - if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) { + } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) { // there was no match in the bucket policy + if (session_policy_res == Effect::Allow && + identity_policy_res == Effect::Allow) { return true; } } @@ -7540,16 +7794,16 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, return true; } } - - return verify_bucket_permission_no_policy(this, s, s->user_acl.get(), - &bacl, RGW_PERM_WRITE); -} -int RGWBulkUploadOp::handle_file(const std::string_view path, - const size_t size, - AlignedStreamGetter& body, optional_yield y) -{ + return verify_bucket_permission_no_policy( + this, s, s->user_acl.get(), &bacl, RGW_PERM_WRITE + ); +} +int RGWBulkUploadOp::handle_file( + const std::string_view path, const size_t size, AlignedStreamGetter& body, + optional_yield y +) { ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl; if (size > static_cast(s->cct->_conf->rgw_max_put_size)) { @@ -7564,7 +7818,10 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, std::unique_ptr bucket; ACLOwner bowner; - op_ret = driver->get_bucket(this, s->user.get(), rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y); + op_ret = driver->get_bucket( + this, s->user.get(), + rgw_bucket(rgw_bucket_key(s->user->get_tenant(), bucket_name)), &bucket, y + ); if (op_ret < 0) { if (op_ret == -ENOENT) { ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl; @@ -7574,9 +7831,9 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, std::unique_ptr obj = bucket->get_object(object); - if (! 
handle_file_verify_permission(bucket->get_info(), - obj->get_obj(), - bucket->get_attrs(), bowner, y)) { + if (!handle_file_verify_permission( + bucket->get_info(), obj->get_obj(), bucket->get_attrs(), bowner, y + )) { ldpp_dout(this, 20) << "object creation unauthorized" << dendl; op_ret = -EACCES; return op_ret; @@ -7595,26 +7852,28 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, dest_placement.inherit_from(bucket->get_placement_rule()); std::unique_ptr processor; - processor = driver->get_atomic_writer(this, s->yield, obj.get(), - bowner.get_id(), - &s->dest_placement, 0, s->req_id); + processor = driver->get_atomic_writer( + this, s->yield, obj.get(), bowner.get_id(), &s->dest_placement, 0, + s->req_id + ); op_ret = processor->prepare(s->yield); if (op_ret < 0) { - ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret + << dendl; return op_ret; } /* No filters by default. */ - rgw::sal::DataProcessor *filter = processor.get(); + rgw::sal::DataProcessor* filter = processor.get(); const auto& compression_type = driver->get_compression_type(dest_placement); CompressorRef plugin; boost::optional compressor; if (compression_type != "none") { plugin = Compressor::create(s->cct, compression_type); - if (! 
plugin) { + if (!plugin) { ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type " - << compression_type << dendl; + << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); filter = &*compressor; @@ -7636,10 +7895,11 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, op_ret = len; return op_ret; } else if (len > 0) { - hash.Update((const unsigned char *)data.c_str(), data.length()); + hash.Update((const unsigned char*)data.c_str(), data.length()); op_ret = filter->process(std::move(data), ofs); if (op_ret < 0) { - ldpp_dout(this, 20) << "filter->process() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) + << "filter->process() returned ret=" << op_ret << dendl; return op_ret; } @@ -7698,19 +7958,20 @@ int RGWBulkUploadOp::handle_file(const std::string_view path, } /* Complete the transaction. */ - op_ret = processor->complete(size, etag, nullptr, ceph::real_time(), - attrs, ceph::real_time() /* delete_at */, - nullptr, nullptr, nullptr, nullptr, nullptr, - s->yield); + op_ret = processor->complete( + size, etag, nullptr, ceph::real_time(), attrs, + ceph::real_time() /* delete_at */, nullptr, nullptr, nullptr, nullptr, + nullptr, s->yield + ); if (op_ret < 0) { - ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret + << dendl; } return op_ret; } -void RGWBulkUploadOp::execute(optional_yield y) -{ +void RGWBulkUploadOp::execute(optional_yield y) { ceph::bufferlist buffer(64 * 1024); ldpp_dout(this, 20) << "start" << dendl; @@ -7718,7 +7979,7 @@ void RGWBulkUploadOp::execute(optional_yield y) /* Create an instance of stream-abstracting class. Having this indirection * allows for easy introduction of decompressors like gzip and bzip2. */ auto stream = create_stream(); - if (! 
stream) { + if (!stream) { return; } @@ -7742,7 +8003,7 @@ void RGWBulkUploadOp::execute(optional_yield y) boost::optional header; std::tie(status, header) = rgw::tar::interpret_block(status, buffer); - if (! status.empty() && header) { + if (!status.empty() && header) { /* This specific block isn't empty (entirely zeroed), so we can parse * it as a TAR header and dispatch. At the moment we do support only * regular files and directories. Everything else (symlinks, devices) @@ -7752,16 +8013,15 @@ void RGWBulkUploadOp::execute(optional_yield y) ldpp_dout(this, 2) << "handling regular file" << dendl; std::string filename; - if (bucket_path.empty()) - filename = header->get_filename(); - else - filename = file_prefix + std::string(header->get_filename()); - auto body = AlignedStreamGetter(0, header->get_filesize(), - rgw::tar::BLOCK_SIZE, *stream); - op_ret = handle_file(filename, - header->get_filesize(), - body, y); - if (! op_ret) { + if (bucket_path.empty()) + filename = header->get_filename(); + else + filename = file_prefix + std::string(header->get_filename()); + auto body = AlignedStreamGetter( + 0, header->get_filesize(), rgw::tar::BLOCK_SIZE, *stream + ); + op_ret = handle_file(filename, header->get_filesize(), body, y); + if (!op_ret) { /* Only regular files counts. */ num_created++; } else { @@ -7772,7 +8032,8 @@ void RGWBulkUploadOp::execute(optional_yield y) case rgw::tar::FileType::DIRECTORY: { ldpp_dout(this, 2) << "handling regular directory" << dendl; - std::string_view dirname = bucket_path.empty() ? header->get_filename() : bucket_path; + std::string_view dirname = + bucket_path.empty() ? header->get_filename() : bucket_path; op_ret = handle_dir(dirname, y); if (op_ret < 0 && op_ret != -ERR_BUCKET_EXISTS) { failures.emplace_back(op_ret, std::string(dirname)); @@ -7788,8 +8049,9 @@ void RGWBulkUploadOp::execute(optional_yield y) /* In case of any problems with sub-request authorization Swift simply * terminates whole upload immediately. 
*/ - if (boost::algorithm::contains(std::initializer_list{ op_ret }, - terminal_errors)) { + if (boost::algorithm::contains( + std::initializer_list{op_ret}, terminal_errors + )) { ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl; break; } @@ -7799,22 +8061,21 @@ void RGWBulkUploadOp::execute(optional_yield y) } buffer.clear(); - } while (! status.eof()); + } while (!status.eof()); return; } -RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter() -{ +RGWBulkUploadOp::AlignedStreamGetter::~AlignedStreamGetter() { const size_t aligned_legnth = length + (-length % alignment); ceph::bufferlist junk; DecoratedStreamGetter::get_exactly(aligned_legnth - position, junk); } -ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want, - ceph::bufferlist& dst) -{ +ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most( + const size_t want, ceph::bufferlist& dst +) { const size_t max_to_read = std::min(want, length - position); const auto len = DecoratedStreamGetter::get_at_most(max_to_read, dst); if (len > 0) { @@ -7823,9 +8084,9 @@ ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_at_most(const size_t want, return len; } -ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want, - ceph::bufferlist& dst) -{ +ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly( + const size_t want, ceph::bufferlist& dst +) { const auto len = DecoratedStreamGetter::get_exactly(want, dst); if (len > 0) { position += len; @@ -7833,17 +8094,17 @@ ssize_t RGWBulkUploadOp::AlignedStreamGetter::get_exactly(const size_t want, return len; } -int RGWGetAttrs::verify_permission(optional_yield y) -{ +int RGWGetAttrs::verify_permission(optional_yield y) { s->object->set_atomic(); - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); + auto [has_s3_existing_tag, has_s3_resource_tag] 
= + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); - auto iam_action = s->object->get_instance().empty() ? - rgw::IAM::s3GetObject : - rgw::IAM::s3GetObjectVersion; + auto iam_action = s->object->get_instance().empty() + ? rgw::IAM::s3GetObject + : rgw::IAM::s3GetObjectVersion; if (!verify_object_permission(this, s, iam_action)) { return -EACCES; @@ -7852,23 +8113,20 @@ int RGWGetAttrs::verify_permission(optional_yield y) return 0; } -void RGWGetAttrs::pre_exec() -{ +void RGWGetAttrs::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetAttrs::execute(optional_yield y) -{ +void RGWGetAttrs::execute(optional_yield y) { op_ret = get_params(); - if (op_ret < 0) - return; + if (op_ret < 0) return; s->object->set_atomic(); op_ret = s->object->get_obj_attrs(s->yield, this); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << s->object - << " ret=" << op_ret << dendl; + << " ret=" << op_ret << dendl; return; } @@ -7879,21 +8137,21 @@ void RGWGetAttrs::execute(optional_yield y) for (auto& att : attrs) { auto iter = obj_attrs.find(att.first); if (iter != obj_attrs.end()) { - att.second = iter->second; + att.second = iter->second; } } } else { /* return all attrs */ - for (auto& att : obj_attrs) { - attrs.insert(get_attrs_t::value_type(att.first, att.second));; + for (auto& att : obj_attrs) { + attrs.insert(get_attrs_t::value_type(att.first, att.second)); + ; } } return; - } +} -int RGWRMAttrs::verify_permission(optional_yield y) -{ +int RGWRMAttrs::verify_permission(optional_yield y) { // This looks to be part of the RGW-NFS machinery and has no S3 or // Swift equivalent. 
bool perm; @@ -7902,35 +8160,30 @@ int RGWRMAttrs::verify_permission(optional_yield y) } else { perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE); } - if (!perm) - return -EACCES; + if (!perm) return -EACCES; return 0; } -void RGWRMAttrs::pre_exec() -{ +void RGWRMAttrs::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWRMAttrs::execute(optional_yield y) -{ +void RGWRMAttrs::execute(optional_yield y) { op_ret = get_params(); - if (op_ret < 0) - return; + if (op_ret < 0) return; s->object->set_atomic(); op_ret = s->object->set_obj_attrs(this, nullptr, &attrs, y); if (op_ret < 0) { ldpp_dout(this, 0) << "ERROR: failed to delete obj attrs, obj=" << s->object - << " ret=" << op_ret << dendl; + << " ret=" << op_ret << dendl; } return; } -int RGWSetAttrs::verify_permission(optional_yield y) -{ +int RGWSetAttrs::verify_permission(optional_yield y) { // This looks to be part of the RGW-NFS machinery and has no S3 or // Swift equivalent. bool perm; @@ -7939,22 +8192,18 @@ int RGWSetAttrs::verify_permission(optional_yield y) } else { perm = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE); } - if (!perm) - return -EACCES; + if (!perm) return -EACCES; return 0; } -void RGWSetAttrs::pre_exec() -{ +void RGWSetAttrs::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWSetAttrs::execute(optional_yield y) -{ +void RGWSetAttrs::execute(optional_yield y) { op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (!rgw::sal::Object::empty(s->object.get())) { rgw::sal::Attrs a(attrs); @@ -7965,18 +8214,13 @@ void RGWSetAttrs::execute(optional_yield y) } /* RGWSetAttrs::execute() */ -void RGWGetObjLayout::pre_exec() -{ +void RGWGetObjLayout::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetObjLayout::execute(optional_yield y) -{ -} - +void RGWGetObjLayout::execute(optional_yield y) {} -int RGWConfigBucketMetaSearch::verify_permission(optional_yield y) -{ +int RGWConfigBucketMetaSearch::verify_permission(optional_yield 
y) { if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { return -EACCES; } @@ -7984,16 +8228,15 @@ int RGWConfigBucketMetaSearch::verify_permission(optional_yield y) return 0; } -void RGWConfigBucketMetaSearch::pre_exec() -{ +void RGWConfigBucketMetaSearch::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWConfigBucketMetaSearch::execute(optional_yield y) -{ +void RGWConfigBucketMetaSearch::execute(optional_yield y) { op_ret = get_params(y); if (op_ret < 0) { - ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret + << dendl; return; } @@ -8001,15 +8244,15 @@ void RGWConfigBucketMetaSearch::execute(optional_yield y) op_ret = s->bucket->put_info(this, false, real_time()); if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret + << dendl; return; } s->bucket_attrs = s->bucket->get_attrs(); } -int RGWGetBucketMetaSearch::verify_permission(optional_yield y) -{ +int RGWGetBucketMetaSearch::verify_permission(optional_yield y) { if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { return -EACCES; } @@ -8017,13 +8260,11 @@ int RGWGetBucketMetaSearch::verify_permission(optional_yield y) return 0; } -void RGWGetBucketMetaSearch::pre_exec() -{ +void RGWGetBucketMetaSearch::pre_exec() { rgw_bucket_object_pre_exec(s); } -int RGWDelBucketMetaSearch::verify_permission(optional_yield y) -{ +int RGWDelBucketMetaSearch::verify_permission(optional_yield y) { if (!s->auth.identity->is_owner_of(s->bucket_owner.get_id())) { return -EACCES; } @@ -8031,54 +8272,51 @@ int RGWDelBucketMetaSearch::verify_permission(optional_yield y) return 0; } -void RGWDelBucketMetaSearch::pre_exec() -{ +void RGWDelBucketMetaSearch::pre_exec() { rgw_bucket_object_pre_exec(s); } 
-void RGWDelBucketMetaSearch::execute(optional_yield y) -{ +void RGWDelBucketMetaSearch::execute(optional_yield y) { s->bucket->get_info().mdsearch_config.clear(); op_ret = s->bucket->put_info(this, false, real_time()); if (op_ret < 0) { - ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket->get_name() - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" + << s->bucket->get_name() << " returned err=" << op_ret + << dendl; return; } s->bucket_attrs = s->bucket->get_attrs(); } +RGWHandler::~RGWHandler() {} -RGWHandler::~RGWHandler() -{ -} - -int RGWHandler::init(rgw::sal::Driver* _driver, - req_state *_s, - rgw::io::BasicClient *cio) -{ +int RGWHandler::init( + rgw::sal::Driver* _driver, req_state* _s, rgw::io::BasicClient* cio +) { driver = _driver; s = _s; return 0; } -int RGWHandler::do_init_permissions(const DoutPrefixProvider *dpp, optional_yield y) -{ +int RGWHandler::do_init_permissions( + const DoutPrefixProvider* dpp, optional_yield y +) { int ret = rgw_build_bucket_policies(dpp, driver, s, y); if (ret < 0) { ldpp_dout(dpp, 10) << "init_permissions on " << s->bucket - << " failed, ret=" << ret << dendl; - return ret==-ENODATA ? -EACCES : ret; + << " failed, ret=" << ret << dendl; + return ret == -ENODATA ? 
-EACCES : ret; } rgw_build_iam_environment(driver, s); return ret; } -int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket, optional_yield y) -{ +int RGWHandler::do_read_permissions( + RGWOp* op, bool only_bucket, optional_yield y +) { if (only_bucket) { /* already read bucket info */ return 0; @@ -8086,29 +8324,28 @@ int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket, optional_yield int ret = rgw_build_object_policies(op, driver, s, op->prefetch_data(), y); if (ret < 0) { - ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":" - << s->object << " only_bucket=" << only_bucket - << " ret=" << ret << dendl; - if (ret == -ENODATA) - ret = -EACCES; - if (s->auth.identity->is_anonymous() && ret == -EACCES) - ret = -EPERM; + ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":" << s->object + << " only_bucket=" << only_bucket << " ret=" << ret + << dendl; + if (ret == -ENODATA) ret = -EACCES; + if (s->auth.identity->is_anonymous() && ret == -EACCES) ret = -EPERM; } return ret; } -int RGWOp::error_handler(int err_no, string *error_content, optional_yield y) { +int RGWOp::error_handler(int err_no, string* error_content, optional_yield y) { return dialect_handler->error_handler(err_no, error_content, y); } -int RGWHandler::error_handler(int err_no, string *error_content, optional_yield) { +int RGWHandler::error_handler( + int err_no, string* error_content, optional_yield +) { // This is the do-nothing error handler return err_no; } -std::ostream& RGWOp::gen_prefix(std::ostream& out) const -{ +std::ostream& RGWOp::gen_prefix(std::ostream& out) const { // append : to the prefix return s->gen_prefix(out) << s->dialect << ':' << name() << ' '; } @@ -8121,8 +8358,7 @@ void RGWDefaultResponseOp::send_response() { end_header(s); } -void RGWPutBucketPolicy::send_response() -{ +void RGWPutBucketPolicy::send_response() { if (!op_ret) { /* A successful Put Bucket Policy should return a 204 on success */ op_ret = STATUS_NO_CONTENT; @@ 
-8134,11 +8370,10 @@ void RGWPutBucketPolicy::send_response() end_header(s); } -int RGWPutBucketPolicy::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWPutBucketPolicy::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) { return -EACCES; @@ -8147,8 +8382,7 @@ int RGWPutBucketPolicy::verify_permission(optional_yield y) return 0; } -int RGWPutBucketPolicy::get_params(optional_yield y) -{ +int RGWPutBucketPolicy::get_params(optional_yield y) { const auto max_size = s->cct->_conf->rgw_max_put_param_size; // At some point when I have more time I want to make a version of // rgw_rest_read_all_input that doesn't use malloc. @@ -8158,37 +8392,40 @@ int RGWPutBucketPolicy::get_params(optional_yield y) return op_ret; } -void RGWPutBucketPolicy::execute(optional_yield y) -{ +void RGWPutBucketPolicy::execute(optional_yield y) { op_ret = get_params(y); if (op_ret < 0) { return; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } try { const Policy p( - s->cct, s->bucket_tenant, data, - s->cct->_conf.get_val("rgw_policy_reject_invalid_principals")); + s->cct, s->bucket_tenant, data, + s->cct->_conf.get_val("rgw_policy_reject_invalid_principals") + ); rgw::sal::Attrs attrs(s->bucket_attrs); - if (s->bucket_access_conf && - 
s->bucket_access_conf->block_public_policy() && + if (s->bucket_access_conf && s->bucket_access_conf->block_public_policy() && rgw::IAM::is_public(p)) { op_ret = -EACCES; return; } - op_ret = retry_raced_bucket_write(this, s->bucket.get(), [&p, this, &attrs] { - attrs[RGW_ATTR_IAM_POLICY].clear(); - attrs[RGW_ATTR_IAM_POLICY].append(p.text); - op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); - return op_ret; - }); + op_ret = + retry_raced_bucket_write(this, s->bucket.get(), [&p, this, &attrs] { + attrs[RGW_ATTR_IAM_POLICY].clear(); + attrs[RGW_ATTR_IAM_POLICY].append(p.text); + op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + return op_ret; + }); } catch (rgw::IAM::PolicyParseException& e) { ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl; op_ret = -EINVAL; @@ -8196,8 +8433,7 @@ void RGWPutBucketPolicy::execute(optional_yield y) } } -void RGWGetBucketPolicy::send_response() -{ +void RGWGetBucketPolicy::send_response() { if (op_ret) { set_req_state_err(s, op_ret); } @@ -8206,11 +8442,10 @@ void RGWGetBucketPolicy::send_response() dump_body(s, policy); } -int RGWGetBucketPolicy::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketPolicy::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) { return -EACCES; @@ -8219,13 +8454,12 @@ int RGWGetBucketPolicy::verify_permission(optional_yield y) return 0; } -void RGWGetBucketPolicy::execute(optional_yield y) -{ +void RGWGetBucketPolicy::execute(optional_yield y) { rgw::sal::Attrs attrs(s->bucket_attrs); auto aiter = attrs.find(RGW_ATTR_IAM_POLICY); if (aiter == attrs.end()) { ldpp_dout(this, 0) << 
"can't find bucket IAM POLICY attr bucket_name = " - << s->bucket_name << dendl; + << s->bucket_name << dendl; op_ret = -ERR_NO_SUCH_BUCKET_POLICY; s->err.message = "The bucket policy does not exist"; return; @@ -8234,16 +8468,15 @@ void RGWGetBucketPolicy::execute(optional_yield y) if (policy.length() == 0) { ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: " - << s->bucket_name << dendl; + << s->bucket_name << dendl; op_ret = -ERR_NO_SUCH_BUCKET_POLICY; s->err.message = "The bucket policy does not exist"; return; } - } + } } -void RGWDeleteBucketPolicy::send_response() -{ +void RGWDeleteBucketPolicy::send_response() { if (op_ret) { set_req_state_err(s, op_ret); } @@ -8251,11 +8484,10 @@ void RGWDeleteBucketPolicy::send_response() end_header(s); } -int RGWDeleteBucketPolicy::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucketPolicy::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketPolicy)) { return -EACCES; @@ -8264,41 +8496,44 @@ int RGWDeleteBucketPolicy::verify_permission(optional_yield y) return 0; } -void RGWDeleteBucketPolicy::execute(optional_yield y) -{ +void RGWDeleteBucketPolicy::execute(optional_yield y) { bufferlist data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, 
s->bucket.get(), [this] { - rgw::sal::Attrs attrs(s->bucket_attrs); - attrs.erase(RGW_ATTR_IAM_POLICY); - op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); - return op_ret; - }); + rgw::sal::Attrs attrs(s->bucket_attrs); + attrs.erase(RGW_ATTR_IAM_POLICY); + op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + return op_ret; + }); } -void RGWPutBucketObjectLock::pre_exec() -{ +void RGWPutBucketObjectLock::pre_exec() { rgw_bucket_object_pre_exec(s); } -int RGWPutBucketObjectLock::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWPutBucketObjectLock::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketObjectLockConfiguration); + return verify_bucket_owner_or_policy( + s, rgw::IAM::s3PutBucketObjectLockConfiguration + ); } -void RGWPutBucketObjectLock::execute(optional_yield y) -{ +void RGWPutBucketObjectLock::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { - s->err.message = "object lock configuration can't be set if bucket object lock not enabled"; + s->err.message = + "object lock configuration can't be set if bucket object lock not " + "enabled"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; op_ret = -ERR_INVALID_BUCKET_STATE; return; @@ -8320,7 +8555,9 @@ void RGWPutBucketObjectLock::execute(optional_yield y) } try { - RGWXMLDecoder::decode_xml("ObjectLockConfiguration", obj_lock, &parser, true); + RGWXMLDecoder::decode_xml( + "ObjectLockConfiguration", obj_lock, &parser, true + ); } catch (RGWXMLDecoder::err& err) { ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; @@ -8333,9 +8570,13 @@ void 
RGWPutBucketObjectLock::execute(optional_yield y) return; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << __func__ + << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } @@ -8347,33 +8588,32 @@ void RGWPutBucketObjectLock::execute(optional_yield y) return; } -void RGWGetBucketObjectLock::pre_exec() -{ +void RGWGetBucketObjectLock::pre_exec() { rgw_bucket_object_pre_exec(s); } -int RGWGetBucketObjectLock::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketObjectLock::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketObjectLockConfiguration); + return verify_bucket_owner_or_policy( + s, rgw::IAM::s3GetBucketObjectLockConfiguration + ); } -void RGWGetBucketObjectLock::execute(optional_yield y) -{ +void RGWGetBucketObjectLock::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { op_ret = -ERR_NO_SUCH_OBJECT_LOCK_CONFIGURATION; return; } } -int RGWPutObjRetention::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); +int RGWPutObjRetention::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + 
rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectRetention)) { return -EACCES; @@ -8383,20 +8623,21 @@ int RGWPutObjRetention::verify_permission(optional_yield y) return op_ret; } if (bypass_governance_mode) { - bypass_perm = verify_object_permission(this, s, rgw::IAM::s3BypassGovernanceRetention); + bypass_perm = verify_object_permission( + this, s, rgw::IAM::s3BypassGovernanceRetention + ); } return 0; } -void RGWPutObjRetention::pre_exec() -{ +void RGWPutObjRetention::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWPutObjRetention::execute(optional_yield y) -{ +void RGWPutObjRetention::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { - s->err.message = "object retention can't be set if bucket object lock not configured"; + s->err.message = + "object retention can't be set if bucket object lock not configured"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; op_ret = -ERR_INVALID_REQUEST; return; @@ -8422,7 +8663,8 @@ void RGWPutObjRetention::execute(optional_yield y) return; } - if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph_clock_now()) { + if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < + ceph_clock_now()) { s->err.message = "the retain-until date must be in the future"; ldpp_dout(this, 0) << "ERROR: " << s->err.message << dendl; op_ret = -EINVAL; @@ -8434,7 +8676,7 @@ void RGWPutObjRetention::execute(optional_yield y) //check old retention op_ret = s->object->get_obj_attrs(s->yield, this); if (op_ret < 0) { - ldpp_dout(this, 0) << "ERROR: get obj attr error"<< dendl; + ldpp_dout(this, 0) << "ERROR: get obj attr error" << dendl; return; } rgw::sal::Attrs attrs = s->object->get_attrs(); @@ -8444,39 +8686,50 @@ void RGWPutObjRetention::execute(optional_yield y) try { decode(old_obj_retention, 
aiter->second); } catch (buffer::error& err) { - ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode RGWObjectRetention" + << dendl; op_ret = -EIO; return; } - if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < ceph::real_clock::to_time_t(old_obj_retention.get_retain_until_date())) { - if (old_obj_retention.get_mode().compare("GOVERNANCE") != 0 || !bypass_perm || !bypass_governance_mode) { - s->err.message = "proposed retain-until date shortens an existing retention period and governance bypass check failed"; + if (ceph::real_clock::to_time_t(obj_retention.get_retain_until_date()) < + ceph::real_clock::to_time_t(old_obj_retention.get_retain_until_date() + )) { + if (old_obj_retention.get_mode().compare("GOVERNANCE") != 0 || + !bypass_perm || !bypass_governance_mode) { + s->err.message = + "proposed retain-until date shortens an existing retention period " + "and governance bypass check failed"; op_ret = -EACCES; return; } } else if (old_obj_retention.get_mode() == obj_retention.get_mode()) { // ok if retention mode doesn't change } else if (obj_retention.get_mode() == "GOVERNANCE") { - s->err.message = "can't change retention mode from COMPLIANCE to GOVERNANCE"; + s->err.message = + "can't change retention mode from COMPLIANCE to GOVERNANCE"; op_ret = -EACCES; return; } else if (!bypass_perm || !bypass_governance_mode) { - s->err.message = "can't change retention mode from GOVERNANCE without governance bypass"; + s->err.message = + "can't change retention mode from GOVERNANCE without governance " + "bypass"; op_ret = -EACCES; return; } } - op_ret = s->object->modify_obj_attrs(RGW_ATTR_OBJECT_RETENTION, bl, s->yield, this); + op_ret = s->object->modify_obj_attrs( + RGW_ATTR_OBJECT_RETENTION, bl, s->yield, this + ); return; } -int RGWGetObjRetention::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, 
s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); +int RGWGetObjRetention::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectRetention)) { return -EACCES; @@ -8484,13 +8737,11 @@ int RGWGetObjRetention::verify_permission(optional_yield y) return 0; } -void RGWGetObjRetention::pre_exec() -{ +void RGWGetObjRetention::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetObjRetention::execute(optional_yield y) -{ +void RGWGetObjRetention::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { s->err.message = "bucket object lock not configured"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; @@ -8514,18 +8765,19 @@ void RGWGetObjRetention::execute(optional_yield y) try { obj_retention.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(this, 0) << __func__ << "decode object retention config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object retention config failed" + << dendl; op_ret = -EIO; return; } return; } -int RGWPutObjLegalHold::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); +int RGWPutObjLegalHold::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); if (!verify_object_permission(this, s, rgw::IAM::s3PutObjectLegalHold)) { return -EACCES; @@ -8533,14 +8785,14 @@ 
int RGWPutObjLegalHold::verify_permission(optional_yield y) return 0; } -void RGWPutObjLegalHold::pre_exec() -{ +void RGWPutObjLegalHold::pre_exec() { rgw_bucket_object_pre_exec(s); } void RGWPutObjLegalHold::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { - s->err.message = "object legal hold can't be set if bucket object lock not enabled"; + s->err.message = + "object legal hold can't be set if bucket object lock not enabled"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; op_ret = -ERR_INVALID_REQUEST; return; @@ -8554,8 +8806,7 @@ void RGWPutObjLegalHold::execute(optional_yield y) { } op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (!parser.parse(data.c_str(), data.length(), 1)) { op_ret = -ERR_MALFORMED_XML; @@ -8564,7 +8815,7 @@ void RGWPutObjLegalHold::execute(optional_yield y) { try { RGWXMLDecoder::decode_xml("LegalHold", obj_legal_hold, &parser, true); - } catch (RGWXMLDecoder::err &err) { + } catch (RGWXMLDecoder::err& err) { ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; @@ -8572,15 +8823,17 @@ void RGWPutObjLegalHold::execute(optional_yield y) { bufferlist bl; obj_legal_hold.encode(bl); //if instance is empty, we should modify the latest object - op_ret = s->object->modify_obj_attrs(RGW_ATTR_OBJECT_LEGAL_HOLD, bl, s->yield, this); + op_ret = s->object->modify_obj_attrs( + RGW_ATTR_OBJECT_LEGAL_HOLD, bl, s->yield, this + ); return; } -int RGWGetObjLegalHold::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s); - if (has_s3_existing_tag || has_s3_resource_tag) - rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag); +int RGWGetObjLegalHold::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s); + if (has_s3_existing_tag || has_s3_resource_tag) + rgw_iam_add_objtags(this, 
s, has_s3_existing_tag, has_s3_resource_tag); if (!verify_object_permission(this, s, rgw::IAM::s3GetObjectLegalHold)) { return -EACCES; @@ -8588,13 +8841,11 @@ int RGWGetObjLegalHold::verify_permission(optional_yield y) return 0; } -void RGWGetObjLegalHold::pre_exec() -{ +void RGWGetObjLegalHold::pre_exec() { rgw_bucket_object_pre_exec(s); } -void RGWGetObjLegalHold::execute(optional_yield y) -{ +void RGWGetObjLegalHold::execute(optional_yield y) { if (!s->bucket->get_info().obj_lock_enabled()) { s->err.message = "bucket object lock not configured"; ldpp_dout(this, 4) << "ERROR: " << s->err.message << dendl; @@ -8618,23 +8869,22 @@ void RGWGetObjLegalHold::execute(optional_yield y) try { obj_legal_hold.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(this, 0) << __func__ << "decode object legal hold config failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode object legal hold config failed" + << dendl; op_ret = -EIO; return; } return; } -void RGWGetClusterStat::execute(optional_yield y) -{ +void RGWGetClusterStat::execute(optional_yield y) { op_ret = driver->cluster_stat(stats_op); } -int RGWGetBucketPolicyStatus::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketPolicyStatus::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicyStatus)) { return -EACCES; @@ -8643,33 +8893,32 @@ int RGWGetBucketPolicyStatus::verify_permission(optional_yield y) return 0; } -void RGWGetBucketPolicyStatus::execute(optional_yield y) -{ - isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || s->bucket_acl->is_public(this); +void RGWGetBucketPolicyStatus::execute(optional_yield y) { + 
isPublic = (s->iam_policy && rgw::IAM::is_public(*s->iam_policy)) || + s->bucket_acl->is_public(this); } -int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWPutBucketPublicAccessBlock::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) { + if (!verify_bucket_permission( + this, s, rgw::IAM::s3PutBucketPublicAccessBlock + )) { return -EACCES; } return 0; } -int RGWPutBucketPublicAccessBlock::get_params(optional_yield y) -{ +int RGWPutBucketPublicAccessBlock::get_params(optional_yield y) { const auto max_size = s->cct->_conf->rgw_max_put_param_size; std::tie(op_ret, data) = read_all_input(s, max_size, false); return op_ret; } -void RGWPutBucketPublicAccessBlock::execute(optional_yield y) -{ +void RGWPutBucketPublicAccessBlock::execute(optional_yield y) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl; @@ -8678,8 +8927,7 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) } op_ret = get_params(y); - if (op_ret < 0) - return; + if (op_ret < 0) return; if (!parser.parse(data.c_str(), data.length(), 1)) { ldpp_dout(this, 0) << "ERROR: malformed XML" << dendl; @@ -8688,34 +8936,37 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y) } try { - RGWXMLDecoder::decode_xml("PublicAccessBlockConfiguration", access_conf, &parser, true); - } catch (RGWXMLDecoder::err &err) { + RGWXMLDecoder::decode_xml( + "PublicAccessBlockConfiguration", access_conf, &parser, true + ); + } catch (RGWXMLDecoder::err& err) { ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = 
-ERR_MALFORMED_XML; return; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } bufferlist bl; access_conf.encode(bl); op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this, &bl] { - rgw::sal::Attrs attrs(s->bucket_attrs); - attrs[RGW_ATTR_PUBLIC_ACCESS] = bl; - return s->bucket->merge_and_store_attrs(this, attrs, s->yield); - }); - + rgw::sal::Attrs attrs(s->bucket_attrs); + attrs[RGW_ATTR_PUBLIC_ACCESS] = bl; + return s->bucket->merge_and_store_attrs(this, attrs, s->yield); + }); } -int RGWGetBucketPublicAccessBlock::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWGetBucketPublicAccessBlock::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketPolicy)) { return -EACCES; @@ -8724,13 +8975,11 @@ int RGWGetBucketPublicAccessBlock::verify_permission(optional_yield y) return 0; } -void RGWGetBucketPublicAccessBlock::execute(optional_yield y) -{ +void RGWGetBucketPublicAccessBlock::execute(optional_yield y) { auto attrs = s->bucket_attrs; - if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS); - aiter == attrs.end()) { + if (auto aiter = attrs.find(RGW_ATTR_PUBLIC_ACCESS); aiter == attrs.end()) { ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = " - << s->bucket_name << dendl; + << s->bucket_name << dendl; // return the default; 
return; } else { @@ -8738,16 +8987,14 @@ void RGWGetBucketPublicAccessBlock::execute(optional_yield y) try { access_conf.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(this, 0) << __func__ << "decode access_conf failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode access_conf failed" << dendl; op_ret = -EIO; return; } } } - -void RGWDeleteBucketPublicAccessBlock::send_response() -{ +void RGWDeleteBucketPublicAccessBlock::send_response() { if (op_ret) { set_req_state_err(s, op_ret); } @@ -8755,53 +9002,53 @@ void RGWDeleteBucketPublicAccessBlock::send_response() end_header(s); } -int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y) -{ - auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false); - if (has_s3_resource_tag) - rgw_iam_add_buckettags(this, s); +int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y) { + auto [has_s3_existing_tag, has_s3_resource_tag] = + rgw_check_policy_condition(this, s, false); + if (has_s3_resource_tag) rgw_iam_add_buckettags(this, s); - if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPublicAccessBlock)) { + if (!verify_bucket_permission( + this, s, rgw::IAM::s3PutBucketPublicAccessBlock + )) { return -EACCES; } return 0; } -void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y) -{ +void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y) { bufferlist data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } op_ret = retry_raced_bucket_write(this, s->bucket.get(), [this] { - rgw::sal::Attrs attrs(s->bucket_attrs); - attrs.erase(RGW_ATTR_PUBLIC_ACCESS); - 
op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); - return op_ret; - }); + rgw::sal::Attrs attrs(s->bucket_attrs); + attrs.erase(RGW_ATTR_PUBLIC_ACCESS); + op_ret = s->bucket->merge_and_store_attrs(this, attrs, s->yield); + return op_ret; + }); } -int RGWPutBucketEncryption::get_params(optional_yield y) -{ +int RGWPutBucketEncryption::get_params(optional_yield y) { const auto max_size = s->cct->_conf->rgw_max_put_param_size; std::tie(op_ret, data) = read_all_input(s, max_size, false); return op_ret; } -int RGWPutBucketEncryption::verify_permission(optional_yield y) -{ +int RGWPutBucketEncryption::verify_permission(optional_yield y) { if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketEncryption)) { return -EACCES; } return 0; } -void RGWPutBucketEncryption::execute(optional_yield y) -{ +void RGWPutBucketEncryption::execute(optional_yield y) { RGWXMLDecoder::XMLParser parser; if (!parser.init()) { ldpp_dout(this, 0) << "ERROR: failed to initialize parser" << dendl; @@ -8819,16 +9066,22 @@ void RGWPutBucketEncryption::execute(optional_yield y) } try { - RGWXMLDecoder::decode_xml("ServerSideEncryptionConfiguration", bucket_encryption_conf, &parser, true); + RGWXMLDecoder::decode_xml( + "ServerSideEncryptionConfiguration", bucket_encryption_conf, &parser, + true + ); } catch (RGWXMLDecoder::err& err) { ldpp_dout(this, 5) << "unexpected xml:" << err << dendl; op_ret = -ERR_MALFORMED_XML; return; } - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } @@ -8841,20 +9094,19 @@ void RGWPutBucketEncryption::execute(optional_yield y) }); } -int 
RGWGetBucketEncryption::verify_permission(optional_yield y) -{ +int RGWGetBucketEncryption::verify_permission(optional_yield y) { if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketEncryption)) { return -EACCES; } return 0; } -void RGWGetBucketEncryption::execute(optional_yield y) -{ +void RGWGetBucketEncryption::execute(optional_yield y) { const auto& attrs = s->bucket_attrs; if (auto aiter = attrs.find(RGW_ATTR_BUCKET_ENCRYPTION_POLICY); aiter == attrs.end()) { - ldpp_dout(this, 0) << "can't find BUCKET ENCRYPTION attr for bucket_name = " << s->bucket_name << dendl; + ldpp_dout(this, 0) << "can't find BUCKET ENCRYPTION attr for bucket_name = " + << s->bucket_name << dendl; op_ret = -ENOENT; s->err.message = "The server side encryption configuration was not found"; return; @@ -8863,27 +9115,29 @@ void RGWGetBucketEncryption::execute(optional_yield y) try { bucket_encryption_conf.decode(iter); } catch (const buffer::error& e) { - ldpp_dout(this, 0) << __func__ << "decode bucket_encryption_conf failed" << dendl; + ldpp_dout(this, 0) << __func__ << "decode bucket_encryption_conf failed" + << dendl; op_ret = -EIO; return; } } } -int RGWDeleteBucketEncryption::verify_permission(optional_yield y) -{ +int RGWDeleteBucketEncryption::verify_permission(optional_yield y) { if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketEncryption)) { return -EACCES; } return 0; } -void RGWDeleteBucketEncryption::execute(optional_yield y) -{ +void RGWDeleteBucketEncryption::execute(optional_yield y) { bufferlist data; - op_ret = driver->forward_request_to_master(this, s->user.get(), nullptr, data, nullptr, s->info, y); + op_ret = driver->forward_request_to_master( + this, s->user.get(), nullptr, data, nullptr, s->info, y + ); if (op_ret < 0) { - ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret + << dendl; return; } @@ -8896,10 +9150,8 @@ void 
RGWDeleteBucketEncryption::execute(optional_yield y) }); } -void rgw_slo_entry::decode_json(JSONObj *obj) -{ +void rgw_slo_entry::decode_json(JSONObj* obj) { JSONDecoder::decode_json("path", path, obj); JSONDecoder::decode_json("etag", etag, obj); JSONDecoder::decode_json("size_bytes", size_bytes, obj); }; - diff --git a/src/rgw/rgw_sal_sfs.h b/src/rgw/rgw_sal_sfs.h index a78348047719ec..944edf6e653698 100644 --- a/src/rgw/rgw_sal_sfs.h +++ b/src/rgw/rgw_sal_sfs.h @@ -132,6 +132,7 @@ class SFStore : public StoreDriver { std::atomic_uint64_t filesystem_stats_avail_bytes; std::atomic_uint64_t filesystem_stats_avail_percent; const uint64_t min_space_left_for_data_write_ops_bytes; + ceph::mutex create_version_lock = ceph::make_mutex("create_version_lock"); SFStore(CephContext* c, const std::filesystem::path& data_path); SFStore(const SFStore&) = delete; diff --git a/src/test/rgw/sfs/test_rgw_sfs_gc.cc b/src/test/rgw/sfs/test_rgw_sfs_gc.cc index 2f06e6ac272552..c3278195257e89 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_gc.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_gc.cc @@ -1,24 +1,23 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab -#include "common/ceph_context.h" -#include "rgw/driver/sfs/sqlite/dbconn.h" -#include "rgw/driver/sfs/sqlite/sqlite_buckets.h" -#include "rgw/driver/sfs/sqlite/buckets/bucket_conversions.h" -#include "rgw/driver/sfs/sqlite/sqlite_users.h" - -#include "rgw/driver/sfs/uuid_path.h" -#include "rgw/driver/sfs/sfs_gc.h" - -#include "rgw/rgw_sal_sfs.h" +#include #include #include -#include #include #include #include +#include "common/ceph_context.h" +#include "rgw/driver/sfs/sfs_gc.h" +#include "rgw/driver/sfs/sqlite/buckets/bucket_conversions.h" +#include "rgw/driver/sfs/sqlite/dbconn.h" +#include "rgw/driver/sfs/sqlite/sqlite_buckets.h" +#include "rgw/driver/sfs/sqlite/sqlite_users.h" +#include "rgw/driver/sfs/uuid_path.h" +#include "rgw/rgw_sal_sfs.h" + using namespace rgw::sal::sfs::sqlite; using 
namespace std::this_thread; using namespace std::chrono_literals; @@ -29,7 +28,7 @@ const static std::string TEST_DIR = "rgw_sfs_tests"; const static std::string TEST_USERNAME = "test_user"; class TestSFSGC : public ::testing::Test { -protected: + protected: void SetUp() override { fs::current_path(fs::temp_directory_path()); fs::create_directory(TEST_DIR); @@ -45,24 +44,26 @@ class TestSFSGC : public ::testing::Test { return test_dir.string(); } - fs::path getDBFullPath(const std::string & base_dir) const { + fs::path getDBFullPath(const std::string& base_dir) const { auto db_full_name = "s3gw.db"; - auto db_full_path = fs::path(base_dir) / db_full_name; + auto db_full_path = fs::path(base_dir) / db_full_name; return db_full_path; } - fs::path getDBFullPath() const { - return getDBFullPath(getTestDir()); - } + fs::path getDBFullPath() const { return getDBFullPath(getTestDir()); } std::size_t getStoreDataFileCount() { using std::filesystem::recursive_directory_iterator; return std::count_if( recursive_directory_iterator(getTestDir()), - recursive_directory_iterator{}, [](const std::filesystem::path& path) { - return (std::filesystem::is_regular_file(path) && - !path.filename().string().starts_with("s3gw.db")); - }); + recursive_directory_iterator{}, + [](const std::filesystem::path& path) { + return ( + std::filesystem::is_regular_file(path) && + !path.filename().string().starts_with("s3gw.db") + ); + } + ); } std::size_t databaseFileExists() { @@ -76,30 +77,28 @@ class TestSFSGC : public ::testing::Test { users.store_user(user); } - void storeRandomObjectVersion(const std::shared_ptr & object) { + void storeRandomObjectVersion( + const std::shared_ptr& object + ) { std::filesystem::path object_path = getTestDir() / object->get_storage_path(); std::filesystem::create_directories(object_path.parent_path()); - auto mode = \ - std::ofstream::binary | \ - std::ofstream::out | \ - std::ofstream::app; + auto mode = std::ofstream::binary | std::ofstream::out | 
std::ofstream::app; std::ofstream ofs(object_path, mode); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution dist(1, 4096); auto file_size = dist(gen); - while(file_size) { - ofs << dist(gen); - --file_size; + while (file_size) { + ofs << dist(gen); + --file_size; } ofs.flush(); ofs.close(); } - void createTestBucket(const std::string & bucket_id, - DBConnRef conn) { + void createTestBucket(const std::string& bucket_id, DBConnRef conn) { SQLiteBuckets db_buckets(conn); DBOPBucketInfo bucket; bucket.binfo.bucket.name = bucket_id + "_name"; @@ -109,19 +108,18 @@ class TestSFSGC : public ::testing::Test { db_buckets.store_bucket(bucket); } - bool bucketExists(const std::string & bucket_id, - DBConnRef conn) { + bool bucketExists(const std::string& bucket_id, DBConnRef conn) { SQLiteBuckets db_buckets(conn); auto bucket = db_buckets.get_bucket(bucket_id); return bucket.has_value(); } std::shared_ptr createTestObject( - const std::string & bucket_id, - const std::string & name, - DBConnRef conn) { + const std::string& bucket_id, const std::string& name, DBConnRef conn + ) { auto object = std::shared_ptr( - rgw::sal::sfs::Object::create_for_testing(name)); + rgw::sal::sfs::Object::create_for_testing(name) + ); SQLiteObjects db_objects(conn); DBOPObjectInfo db_object; db_object.uuid = object->path.get_uuid(); @@ -131,9 +129,10 @@ class TestSFSGC : public ::testing::Test { return object; } - void createTestObjectVersion(std::shared_ptr & object, - uint version, - DBConnRef conn) { + void createTestObjectVersion( + std::shared_ptr& object, uint version, + DBConnRef conn + ) { object->version_id = version; storeRandomObjectVersion(object); SQLiteVersionedObjects db_versioned_objects(conn); @@ -145,12 +144,13 @@ class TestSFSGC : public ::testing::Test { db_versioned_objects.insert_versioned_object(db_version); } - void deleteTestObject(std::shared_ptr & object, - DBConnRef conn) { + void deleteTestObject( + std::shared_ptr& object, DBConnRef conn + 
) { // delete mark the object SQLiteVersionedObjects db_versioned_objects(conn); - auto last_version = db_versioned_objects.get_last_versioned_object( - object->path.get_uuid()); + auto last_version = + db_versioned_objects.get_last_versioned_object(object->path.get_uuid()); ASSERT_TRUE(last_version.has_value()); last_version->object_state = rgw::sal::sfs::ObjectState::DELETED; last_version->version_id.append("_next_"); @@ -158,26 +158,29 @@ class TestSFSGC : public ::testing::Test { db_versioned_objects.insert_versioned_object(*last_version); } - void deleteTestBucket(const std::string & bucket_id, DBConnRef conn) { + void deleteTestBucket(const std::string& bucket_id, DBConnRef conn) { SQLiteBuckets db_buckets(conn); auto bucket = db_buckets.get_bucket(bucket_id); ASSERT_TRUE(bucket.has_value()); SQLiteObjects db_objects(conn); auto objects = db_objects.get_objects(bucket_id); - for (auto & object: objects) { - auto objptr = std::shared_ptr( - rgw::sal::sfs::Object::create_for_immediate_deletion(object)); - deleteTestObject(objptr, conn); + for (auto& object : objects) { + auto objptr = std::shared_ptr( + rgw::sal::sfs::Object::create_for_immediate_deletion(object) + ); + deleteTestObject(objptr, conn); } bucket->deleted = true; db_buckets.store_bucket(*bucket); } - size_t getNumberObjectsForBucket(const std::string & bucket_id, DBConnRef conn) { - SQLiteObjects db_objs(conn); - auto objects = db_objs.get_objects(bucket_id); - return objects.size(); + size_t getNumberObjectsForBucket( + const std::string& bucket_id, DBConnRef conn + ) { + SQLiteObjects db_objs(conn); + auto objects = db_objs.get_objects(bucket_id); + return objects.size(); } }; @@ -227,7 +230,7 @@ TEST_F(TestSFSGC, TestDeletedBuckets) { // nothing should be removed permanently yet EXPECT_EQ(getStoreDataFileCount(), 5); EXPECT_TRUE(databaseFileExists()); - versions = db_versioned_objs.get_versioned_object_ids(); + versions = db_versioned_objs.get_versioned_object_ids(false); // we should have 1 
more version (delete marker for 1 object) EXPECT_EQ(versions.size(), 6); diff --git a/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc b/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc index f8e6fdbda2c993..90067e1317b5db 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc @@ -72,7 +72,8 @@ class TestSFSBucket : public ::testing::Test { } void createTestBucket( - const std::string& bucket_id, const std::string& user_id, DBConnRef conn + const std::string& bucket_id, const std::string& user_id, DBConnRef conn, + bool versioned = false ) { SQLiteBuckets db_buckets(conn); DBOPBucketInfo bucket; @@ -80,6 +81,9 @@ class TestSFSBucket : public ::testing::Test { bucket.binfo.bucket.bucket_id = bucket_id; bucket.binfo.owner.id = user_id; bucket.deleted = false; + if (versioned) { + bucket.binfo.flags |= BUCKET_VERSIONED; + } db_buckets.store_bucket(bucket); } @@ -625,7 +629,7 @@ TEST_F(TestSFSBucket, TestListObjectsAndVersions) { createUser("test_user", store->db_conn); // create test bucket - createTestBucket("test_bucket", "test_user", store->db_conn); + createTestBucket("test_bucket", "test_user", store->db_conn, true); // create a few objects in test_bucket with a few versions uint version_id = 1; @@ -1051,8 +1055,8 @@ TEST_F(TestSFSBucket, TestListObjectVersionsDelimiter) { // create the test user createUser("test_user", store->db_conn); - // create test bucket - createTestBucket("test_bucket", "test_user", store->db_conn); + // create test bucket versioned + createTestBucket("test_bucket", "test_user", store->db_conn, true); // create the following objects: // directory/ diff --git a/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc b/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc index 05f5476ce7731a..9c3d62b1b39648 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc @@ -80,11 +80,41 @@ DBOPObjectInfo createTestObject( return object; } -bool uuidInVector(const 
uuid_d& uuid, const std::vector& uuids) { - for (auto const& list_uuid : uuids) { - if (list_uuid == uuid) return true; +DBOPVersionedObjectInfo createTestVersionedObject( + uint id, const std::string& object_id, const std::string& suffix +) { + DBOPVersionedObjectInfo test_versioned_object; + test_versioned_object.id = id; + uuid_d uuid; + uuid.parse(object_id.c_str()); + test_versioned_object.object_id = uuid; + test_versioned_object.checksum = "test_checksum_" + suffix; + // test_versioned_object.size = rand(); + test_versioned_object.size = 1999; + test_versioned_object.create_time = ceph::real_clock::now(); + test_versioned_object.delete_time = ceph::real_clock::now(); + test_versioned_object.commit_time = ceph::real_clock::now(); + test_versioned_object.mtime = ceph::real_clock::now(); + test_versioned_object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + test_versioned_object.version_id = "test_version_id_" + suffix; + test_versioned_object.etag = "test_etag_" + suffix; + test_versioned_object.version_type = rgw::sal::sfs::VersionType::REGULAR; + + //set attrs with default ACL + { + RGWAccessControlPolicy aclp; + rgw_user aclu("usertest"); + aclp.get_acl().create_default(aclu, "usertest"); + aclp.get_owner().set_name("usertest"); + aclp.get_owner().set_id(aclu); + bufferlist acl_bl; + aclp.encode(acl_bl); + rgw::sal::Attrs attrs; + attrs[RGW_ATTR_ACL] = acl_bl; + test_versioned_object.attrs = attrs; } - return false; + + return test_versioned_object; } TEST_F(TestSFSSQLiteObjects, CreateAndGet) { @@ -108,73 +138,6 @@ TEST_F(TestSFSSQLiteObjects, CreateAndGet) { compareObjects(object, *ret_object); } -TEST_F(TestSFSSQLiteObjects, ListObjectsIDs) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - EXPECT_FALSE(fs::exists(getDBFullPath())); - DBConnRef conn = std::make_shared(ceph_context.get()); - - // Create the bucket, we need it because BucketName is a foreign key 
of Bucket::BucketID - createBucket("usertest", "test_bucket", conn); - - auto db_objects = std::make_shared(conn); - - auto obj1 = createTestObject("1", ceph_context.get()); - db_objects->store_object(obj1); - auto obj2 = createTestObject("2", ceph_context.get()); - db_objects->store_object(obj2); - auto obj3 = createTestObject("3", ceph_context.get()); - db_objects->store_object(obj3); - - EXPECT_TRUE(fs::exists(getDBFullPath())); - - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 3); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); -} - -TEST_F(TestSFSSQLiteObjects, ListBucketsIDsPerBucket) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - EXPECT_FALSE(fs::exists(getDBFullPath())); - DBConnRef conn = std::make_shared(ceph_context.get()); - - createBucket("usertest", "test_bucket_1", conn); - createBucket("usertest", "test_bucket_2", conn); - createBucket("usertest", "test_bucket_3", conn); - - auto db_objects = std::make_shared(conn); - - auto test_object_1 = createTestObject("1", ceph_context.get()); - test_object_1.bucket_id = "test_bucket_1"; - db_objects->store_object(test_object_1); - - auto test_object_2 = createTestObject("2", ceph_context.get()); - test_object_2.bucket_id = "test_bucket_2"; - db_objects->store_object(test_object_2); - - auto test_object_3 = createTestObject("3", ceph_context.get()); - test_object_3.bucket_id = "test_bucket_3"; - db_objects->store_object(test_object_3); - - auto objects_ids = db_objects->get_object_ids("test_bucket_1"); - ASSERT_EQ(objects_ids.size(), 1); - EXPECT_EQ(objects_ids[0], test_object_1.uuid); - - objects_ids = db_objects->get_object_ids("test_bucket_2"); - ASSERT_EQ(objects_ids.size(), 1); - EXPECT_EQ(objects_ids[0], test_object_2.uuid); - - objects_ids = 
db_objects->get_object_ids("test_bucket_3"); - ASSERT_EQ(objects_ids.size(), 1); - EXPECT_EQ(objects_ids[0], test_object_3.uuid); -} - TEST_F(TestSFSSQLiteObjects, remove_object) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -195,13 +158,6 @@ TEST_F(TestSFSSQLiteObjects, remove_object) { db_objects->store_object(obj3); db_objects->remove_object(obj2.uuid); - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 2); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_FALSE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); - auto ret_object = db_objects->get_object(obj2.uuid); ASSERT_FALSE(ret_object.has_value()); } @@ -227,12 +183,8 @@ TEST_F(TestSFSSQLiteObjects, remove_objectThatDoesNotExist) { uuid_d non_existing_uuid; db_objects->remove_object(non_existing_uuid); - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 3); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); + auto objects = db_objects->get_objects("test_bucket"); + EXPECT_EQ(objects.size(), 3); } TEST_F(TestSFSSQLiteObjects, CreateAndUpdate) { diff --git a/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc b/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc index b376d4fa78ab3e..0ad96c4dd4b87c 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc @@ -12,7 +12,6 @@ #include "rgw/driver/sfs/sqlite/sqlite_objects.h" #include "rgw/driver/sfs/sqlite/sqlite_users.h" #include "rgw/driver/sfs/sqlite/sqlite_versioned_objects.h" -#include "rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h" #include "rgw/rgw_sal_sfs.h" using namespace rgw::sal::sfs::sqlite; @@ -22,6 +21,7 @@ const static std::string 
TEST_DIR = "rgw_sfs_tests"; const static std::string TEST_USERNAME = "test_username"; const static std::string TEST_BUCKET = "test_bucket"; +const static std::string TEST_BUCKET_2 = "test_bucket_2"; const static std::string TEST_OBJECT_ID = "80943a6d-9f72-4001-bac0-a9a036be8c49"; const static std::string TEST_OBJECT_ID_1 = @@ -30,6 +30,8 @@ const static std::string TEST_OBJECT_ID_2 = "af06d9d3-307f-4c98-865b-cd3b087acc4f"; const static std::string TEST_OBJECT_ID_3 = "bf06d9d3-307f-4c98-865b-cd3b087acc4f"; +const static std::string TEST_OBJECT_ID_4 = + "cf06d9d3-307f-4c98-865b-cd3b087acc4f"; class TestSFSSQLiteVersionedObjects : public ::testing::Test { protected: @@ -99,7 +101,8 @@ DBOPVersionedObjectInfo createTestVersionedObject( uuid.parse(object_id.c_str()); test_versioned_object.object_id = uuid; test_versioned_object.checksum = "test_checksum_" + suffix; - test_versioned_object.size = rand(); + // test_versioned_object.size = rand(); + test_versioned_object.size = 1999; test_versioned_object.create_time = ceph::real_clock::now(); test_versioned_object.delete_time = ceph::real_clock::now(); test_versioned_object.commit_time = ceph::real_clock::now(); @@ -228,9 +231,9 @@ TEST_F(TestSFSSQLiteVersionedObjects, ListObjectsIDs) { uuid.parse(TEST_OBJECT_ID.c_str()); auto objects = db_versioned_objects->get_versioned_objects(uuid); ASSERT_EQ(objects.size(), 3); - compareVersionedObjects(objects[0], obj1); + compareVersionedObjects(objects[0], obj3); compareVersionedObjects(objects[1], obj2); - compareVersionedObjects(objects[2], obj3); + compareVersionedObjects(objects[2], obj1); } TEST_F(TestSFSSQLiteVersionedObjects, ListBucketsIDsPerObject) { @@ -443,7 +446,7 @@ TEST_F(TestSFSSQLiteVersionedObjects, CreateObjectForNonExistingBucket) { SQLiteVersionedObjects db_objects(conn); auto storage = conn->get_storage(); - DBVersionedObject db_object; + DBOPVersionedObjectInfo db_object; db_object.id = 1; uuid_d uuid_val; @@ -468,46 +471,6 @@ 
TEST_F(TestSFSSQLiteVersionedObjects, CreateObjectForNonExistingBucket) { ); } -TEST_F(TestSFSSQLiteVersionedObjects, Testobject_stateConversion) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - DBConnRef conn = std::make_shared(ceph_context.get()); - - // Create the object, we need it because of foreign key constrains - createObject( - TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn - ); - - SQLiteVersionedObjects db_objects(conn); - auto storage = conn->get_storage(); - - auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); - auto db_object = get_db_versioned_object(object); - ASSERT_EQ(rgw::sal::sfs::ObjectState::OPEN, db_object.object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; - storage.replace(db_object); - - auto ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::COMMITTED, ret_object->object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::LOCKED; - storage.replace(db_object); - - ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::LOCKED, ret_object->object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::DELETED; - storage.replace(db_object); - - ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, ret_object->object_state); -} - TEST_F(TestSFSSQLiteVersionedObjects, StoreCreatesNewVersions) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -594,6 +557,7 @@ TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersion) { // just update the size, and add a new version object.size = 1999; object.version_id = "test_version_id_2"; + 
object.commit_time = ceph::real_clock::now(); db_versioned_objects->insert_versioned_object(object); // now it should return the last one @@ -610,6 +574,68 @@ TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersion) { ASSERT_FALSE(ret_object.has_value()); } +TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersionRepeatedCommitTime) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + // keep it for later + auto commit_time = object.commit_time; + db_versioned_objects->insert_versioned_object(object); + EXPECT_TRUE(fs::exists(getDBFullPath())); + + uuid_d uuid; + uuid.parse(TEST_OBJECT_ID.c_str()); + auto ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + compareVersionedObjects(object, *ret_object); + + // just update the size, and add a new version + object.size = 1999; + object.version_id = "test_version_id_2"; + // set the same commit time + object.commit_time = commit_time; + db_versioned_objects->insert_versioned_object(object); + + // now we have 2 entries with the same commit time. 
+ // it should return the one with the highest id + ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + object.id = 2; + compareVersionedObjects(object, *ret_object); + + // just update the size, and add a new version + object.size = 3121; + object.version_id = "test_version_id_3"; + // set the same commit time + object.commit_time = commit_time; + db_versioned_objects->insert_versioned_object(object); + + // now we have 3 entries with the same commit time. + // it should return the one with the highest id (3) + ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + object.id = 3; + compareVersionedObjects(object, *ret_object); + + uuid_d uuid_that_does_not_exist; + uuid_that_does_not_exist.parse(TEST_OBJECT_ID_2.c_str()); + + ret_object = + db_versioned_objects->get_last_versioned_object(uuid_that_does_not_exist); + ASSERT_FALSE(ret_object.has_value()); +} + TEST_F(TestSFSSQLiteVersionedObjects, TestInsertIncreaseID) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -706,7 +732,7 @@ TEST_F(TestSFSSQLiteVersionedObjects, StoreUnsupportedTimestamp) { SQLiteVersionedObjects db_versions(conn); auto storage = conn->get_storage(); - DBVersionedObject db_version; + DBOPVersionedObjectInfo db_version; db_version.id = 1; uuid_d uuid_val; @@ -728,8 +754,10 @@ TEST_F(TestSFSSQLiteVersionedObjects, StoreUnsupportedTimestamp) { ; } catch (const std::system_error& e) { EXPECT_STREQ( - "Error converting ceph::real_time to int64. Nanoseconds value: " - "9223372036854775808 is out of range: Numerical result out of " + "Error converting ceph::real_time to int64. 
Nanoseconds " + "value: " + "9223372036854775808 is out of range: Numerical result out " + "of " "range", e.what() ); @@ -739,3 +767,475 @@ TEST_F(TestSFSSQLiteVersionedObjects, StoreUnsupportedTimestamp) { std::system_error ); } + +TEST_F(TestSFSSQLiteVersionedObjects, TestFilterDeleted) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // create 5 versions + // versions 2 and 3 are deleted + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.object_state = rgw::sal::sfs::ObjectState::DELETED; + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + object.version_id = "test_version_id_4"; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + + // get_versioned_object(uint id) + // try to get version 1 (not deleted) + auto not_deleted = db_versioned_objects->get_versioned_object(1); + ASSERT_TRUE(not_deleted.has_value()); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, not_deleted->object_state); + + // now version 2 (deleted) + auto deleted = db_versioned_objects->get_versioned_object(2); + ASSERT_FALSE(deleted.has_value()); + // now version 2, not filtering deleted + deleted = 
db_versioned_objects->get_versioned_object(2, false); + ASSERT_TRUE(deleted.has_value()); + ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, deleted->object_state); + + // get_versioned_object(const std::string & version_id) + // try to get version 1 (not deleted) + not_deleted = db_versioned_objects->get_versioned_object("test_version_id_1"); + ASSERT_TRUE(not_deleted.has_value()); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, not_deleted->object_state); + + // now version 2 (deleted) + deleted = db_versioned_objects->get_versioned_object("test_version_id_2"); + ASSERT_FALSE(deleted.has_value()); + // now version 2, not filtering deleted + deleted = + db_versioned_objects->get_versioned_object("test_version_id_2", false); + ASSERT_TRUE(deleted.has_value()); + ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, deleted->object_state); + + // get_versioned_object_ids + auto ids = db_versioned_objects->get_versioned_object_ids(); + ASSERT_EQ(3, ids.size()); // 2 and 3 will not be returned + for (const auto& id : ids) { + ASSERT_NE(2, id); + ASSERT_NE(3, id); + } + + ids = db_versioned_objects->get_versioned_object_ids(false); + ASSERT_EQ(5, ids.size()); // 2 and 3 will be returned + + // get_versioned_object_ids(const uuid_d & object_id) + uuid_d object_id; + object_id.parse(TEST_OBJECT_ID.c_str()); + ids = db_versioned_objects->get_versioned_object_ids(object_id); + ASSERT_EQ(3, ids.size()); // 2 and 3 will not be returned + for (const auto& id : ids) { + ASSERT_NE(2, id); + ASSERT_NE(3, id); + } + + ids = db_versioned_objects->get_versioned_object_ids(object_id, false); + ASSERT_EQ(5, ids.size()); // 2 and 3 will be returned + + // get_versioned_objects(const uuid_d & object_id) + auto versions = db_versioned_objects->get_versioned_objects(object_id); + ASSERT_EQ(3, versions.size()); // 2 and 3 will not be returned + for (const auto& version : versions) { + ASSERT_NE(2, version.id); + ASSERT_NE(3, version.id); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, 
version.object_state); + } + versions = db_versioned_objects->get_versioned_objects(object_id, false); + ASSERT_EQ(5, versions.size()); // 2 and 3 will be returned + + // get_last_versioned_object + // this time last version (5) is not deleted + // get it first, then flag as deleted and check + auto last_version = + db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(5, last_version->id); + + // now flag the last version as DELETED + last_version->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*last_version); + + // we update, so no new version should be created + versions = db_versioned_objects->get_versioned_objects(object_id, false); + ASSERT_EQ(5, versions.size()); // will still return 5 versions + + versions = db_versioned_objects->get_versioned_objects(object_id); + ASSERT_EQ(2, versions.size()); // now only 2 are not deleted + + // now last version should be 4 + last_version = db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(4, last_version->id); + + // if we don't filter deleted it's still 5 + last_version = + db_versioned_objects->get_last_versioned_object(object_id, false); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(5, last_version->id); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestDeleteLastAndGetPrevious) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // create 3 versions (last one is a delete marker) + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, 
"1"); + object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + object.version_type = rgw::sal::sfs::VersionType::DELETE_MARKER; + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + auto last_version_now = + db_versioned_objects->delete_version_and_get_previous_transact(3); + ASSERT_TRUE(last_version_now.has_value()); + ASSERT_EQ(2, last_version_now->id); + ASSERT_EQ("test_version_id_2", last_version_now->version_id); + + uuid_d object_id; + object_id.parse(TEST_OBJECT_ID.c_str()); + last_version_now = db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version_now.has_value()); + ASSERT_EQ(2, last_version_now->id); + ASSERT_EQ("test_version_id_2", last_version_now->version_id); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestGetByBucketAndObjectName) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // SCENARIO 1. 
ONLY 1 object with committed versions + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 committed versions + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + // try to get version (TEST_BUCKET, "test_name", "test_version_id_2") + // corresponding to the second version + auto version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_2" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_2", version->version_id); + EXPECT_EQ(2, version->id); + + // don't pass any version. Should return the last one + version = + db_versioned_objects->get_versioned_object(TEST_BUCKET, "test_name", ""); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // pass a non existing version_id + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "this_version_does_not_exist" + ); + ASSERT_FALSE(version.has_value()); + + // SCENARIO 2. 
There is one object with all versions deleted (waiting to be + // removed by the garbage collector) and the alive object, both with the same + // object name but different uuid + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + object = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + object.object_state = rgw::sal::sfs::ObjectState::DELETED; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + + // even though commit times for this versions are later in time than for the + // first object it should still return versions from the first object + + // don't pass any version. Should return the last one + version = + db_versioned_objects->get_versioned_object(TEST_BUCKET, "test_name", ""); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // try to get a deleted version (TEST_BUCKET, "test_name", "test_version_id_5") + // corresponding to the second version + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_5" + ); + // should not return that object + // (it is deleted waiting for the garbage collector) + ASSERT_FALSE(version.has_value()); + + // still return valid version + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_3" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // SCENARIO 3. 2 Objects with the same name in different buckets. 
+ // in this case the object in bucket TEST_BUCKET_2 is in open state + // (still writing to it), but that's still an alive object + createObject( + TEST_USERNAME, TEST_BUCKET_2, TEST_OBJECT_ID_3, ceph_context.get(), conn + ); + object = createTestVersionedObject(6, TEST_OBJECT_ID_3, "6"); + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(object)); + + // still return valid version for 1st object + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_3" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // and also valid for the object in the second bucket + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET_2, "test_name", "test_version_id_6" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_6", version->version_id); + EXPECT_EQ(6, version->id); + + // but version 6 is not on first bucket + version = db_versioned_objects->get_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_6" + ); + ASSERT_FALSE(version.has_value()); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestUpdateAndDeleteRest) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 open versions + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = 
"test_version_id_3"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + // create a different object + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + // with also 3 open versions + object = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_6"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(object)); + + // update object 2 to COMMITTED and DELETE the rest in a transaction + auto object_2 = db_versioned_objects->get_versioned_object(2, false); + ASSERT_TRUE(object_2.has_value()); + object_2->object_state = rgw::sal::sfs::ObjectState::COMMITTED; + db_versioned_objects->store_versioned_object_delete_rest_transact(*object_2); + + // all the rest should be updated (but only for that object) + auto object_ret = db_versioned_objects->get_versioned_object(1, false); + ASSERT_TRUE(object_ret.has_value()); + EXPECT_EQ(rgw::sal::sfs::ObjectState::DELETED, object_ret->object_state); + object_ret = db_versioned_objects->get_versioned_object(3, false); + ASSERT_TRUE(object_ret.has_value()); + EXPECT_EQ(rgw::sal::sfs::ObjectState::DELETED, object_ret->object_state); + + // the other object versions should be still open + uuid_d uuid_second_object; + uuid_second_object.parse(TEST_OBJECT_ID_2.c_str()); + // get the objects but not filtering deleted ones (we get all) + auto versions = + db_versioned_objects->get_versioned_objects(uuid_second_object, false); + EXPECT_EQ(3, versions.size()); + for (const auto& ver : versions) { + EXPECT_EQ(rgw::sal::sfs::ObjectState::OPEN, 
ver.object_state); + } +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestUpdateDeleteVersionDeletesObject) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 committed versions + auto version = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + version.object_state = rgw::sal::sfs::ObjectState::DELETED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_2"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(version)); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_id = "test_version_id_3"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + version = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_5"; + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_6"; + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions for another object + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_3, ceph_context.get(), conn + ); + version = createTestVersionedObject(7, 
TEST_OBJECT_ID_3, "7"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(7, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_8"; + EXPECT_EQ(8, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_9"; + EXPECT_EQ(9, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions for another object in another bucket + createObject( + TEST_USERNAME, TEST_BUCKET_2, TEST_OBJECT_ID_4, ceph_context.get(), conn + ); + version = createTestVersionedObject(10, TEST_OBJECT_ID_4, "10"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(10, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_11"; + EXPECT_EQ(11, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_12"; + EXPECT_EQ(12, db_versioned_objects->insert_versioned_object(version)); + + // we have 3 objects with 3 versions in TEST_BUCKET + // one of the objects has 2 version deleted. The rest have all versions alive. 
+ + // we also have object with 3 version in TEST_BUCKET_2 + auto object_list = + db_versioned_objects->list_last_versioned_objects(TEST_BUCKET); + ASSERT_EQ(3, object_list.size()); + // first item + uuid_d uuid_object; + uuid_object.parse(TEST_OBJECT_ID.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[0])); + // versions 1 and 2 for TEST_OBJECT_ID are deleted + EXPECT_EQ( + "test_version_id_3", rgw::sal::sfs::sqlite::get_version_id(object_list[0]) + ); + EXPECT_EQ(3, rgw::sal::sfs::sqlite::get_id(object_list[0])); + + // second item + uuid_object.parse(TEST_OBJECT_ID_2.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[1])); + EXPECT_EQ( + "test_version_id_6", rgw::sal::sfs::sqlite::get_version_id(object_list[1]) + ); + EXPECT_EQ(6, rgw::sal::sfs::sqlite::get_id(object_list[1])); + + // third item + uuid_object.parse(TEST_OBJECT_ID_3.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[2])); + EXPECT_EQ( + "test_version_id_9", rgw::sal::sfs::sqlite::get_version_id(object_list[2]) + ); + EXPECT_EQ(9, rgw::sal::sfs::sqlite::get_id(object_list[2])); + + // now delete the 3rd version of TEST_OBJECT_ID + auto version_to_delete = db_versioned_objects->get_versioned_object(3); + version_to_delete->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*version_to_delete); + + // list again + object_list = db_versioned_objects->list_last_versioned_objects(TEST_BUCKET); + // the object with all version deleted should not be listed + ASSERT_EQ(2, object_list.size()); + + // second item + uuid_object.parse(TEST_OBJECT_ID_2.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[0])); + EXPECT_EQ( + "test_version_id_6", rgw::sal::sfs::sqlite::get_version_id(object_list[0]) + ); + EXPECT_EQ(6, rgw::sal::sfs::sqlite::get_id(object_list[0])); + + // third item + uuid_object.parse(TEST_OBJECT_ID_3.c_str()); + EXPECT_EQ(uuid_object, 
rgw::sal::sfs::sqlite::get_uuid(object_list[1])); + EXPECT_EQ( + "test_version_id_9", rgw::sal::sfs::sqlite::get_version_id(object_list[1]) + ); + EXPECT_EQ(9, rgw::sal::sfs::sqlite::get_id(object_list[1])); +}