diff --git a/qa/rgw/store/sfs/tests/fixtures/s3-tests.txt b/qa/rgw/store/sfs/tests/fixtures/s3-tests.txt index 12a883e63f706..a52813228da1f 100644 --- a/qa/rgw/store/sfs/tests/fixtures/s3-tests.txt +++ b/qa/rgw/store/sfs/tests/fixtures/s3-tests.txt @@ -78,7 +78,7 @@ test_bucket_listv2_startafter_unreadable # test_bucket_list_marker_after_list # test_bucket_listv2_startafter_after_list # test_bucket_list_return_data -# test_bucket_list_return_data_versioning +test_bucket_list_return_data_versioning test_bucket_list_objects_anonymous test_bucket_listv2_objects_anonymous test_bucket_list_objects_anonymous_fail @@ -92,8 +92,8 @@ test_object_write_to_nonexist_bucket test_bucket_create_delete test_object_read_not_exist # test_object_requestid_matches_header_on_error -# test_multi_object_delete -# test_multi_objectv2_delete +test_multi_object_delete +test_multi_objectv2_delete # test_multi_object_delete_key_limit # test_multi_objectv2_delete_key_limit test_object_head_zero_bytes @@ -195,9 +195,9 @@ test_bucket_create_naming_dns_dot_dash test_bucket_create_naming_dns_dash_dot # test_bucket_create_exists # test_bucket_get_location -test_bucket_create_exists_nonowner -test_bucket_recreate_overwrite_acl -test_bucket_recreate_new_acl +# test_bucket_create_exists_nonowner +# test_bucket_recreate_overwrite_acl +# test_bucket_recreate_new_acl test_bucket_acl_default test_bucket_acl_canned_during_create test_bucket_acl_canned @@ -252,7 +252,7 @@ test_bucket_create_naming_good_starts_alpha test_bucket_create_naming_good_starts_digit test_bucket_create_naming_good_contains_period test_bucket_create_naming_good_contains_hyphen -# test_bucket_recreate_not_overriding +test_bucket_recreate_not_overriding # test_bucket_create_special_key_names test_bucket_list_special_prefix test_object_copy_zero_size @@ -269,9 +269,9 @@ test_object_copy_retaining_metadata # test_object_copy_replacing_metadata test_object_copy_bucket_not_found test_object_copy_key_not_found -# test_object_copy_versioned_bucket +test_object_copy_versioned_bucket test_object_copy_versioned_url_encoding -# test_object_copy_versioning_multipart_upload +test_object_copy_versioning_multipart_upload test_multipart_upload_empty # test_multipart_upload_small # test_multipart_copy_small @@ -280,7 +280,7 @@ test_multipart_copy_improper_range # test_multipart_copy_without_range # test_multipart_copy_special_names # test_multipart_upload -# test_multipart_copy_versioned +test_multipart_copy_versioned # test_multipart_upload_resend_part test_multipart_upload_multiple_sizes # test_multipart_copy_multiple_sizes @@ -320,24 +320,24 @@ test_ranged_request_return_trailing_bytes_response_code test_ranged_request_invalid_range test_ranged_request_empty_object test_versioning_bucket_create_suspend -# test_versioning_obj_create_read_remove -# test_versioning_obj_create_read_remove_head -test_versioning_obj_plain_null_version_removal +test_versioning_obj_create_read_remove +test_versioning_obj_create_read_remove_head +# test_versioning_obj_plain_null_version_removal # test_versioning_obj_plain_null_version_overwrite -test_versioning_obj_plain_null_version_overwrite_suspended +# test_versioning_obj_plain_null_version_overwrite_suspended # test_versioning_obj_suspend_versions -# test_versioning_obj_create_versions_remove_all -# test_versioning_obj_create_versions_remove_special_names -# test_versioning_obj_create_overwrite_multipart -# test_versioning_obj_list_marker -# test_versioning_copy_obj_version -# test_versioning_multi_object_delete -# 
test_versioning_multi_object_delete_with_marker -# test_versioning_multi_object_delete_with_marker_create -# test_versioned_object_acl -# test_versioned_object_acl_no_version_specified -# test_versioned_concurrent_object_create_concurrent_remove -# test_versioned_concurrent_object_create_and_remove +test_versioning_obj_create_versions_remove_all +test_versioning_obj_create_versions_remove_special_names +test_versioning_obj_create_overwrite_multipart +test_versioning_obj_list_marker +test_versioning_copy_obj_version +test_versioning_multi_object_delete +test_versioning_multi_object_delete_with_marker +test_versioning_multi_object_delete_with_marker_create +test_versioned_object_acl +test_versioned_object_acl_no_version_specified +test_versioned_concurrent_object_create_concurrent_remove +test_versioned_concurrent_object_create_and_remove test_lifecycle_set test_lifecycle_get test_lifecycle_get_no_id @@ -455,7 +455,7 @@ test_object_lock_put_obj_lock_invalid_bucket # test_object_lock_put_obj_lock_invalid_years # test_object_lock_put_obj_lock_invalid_mode # test_object_lock_put_obj_lock_invalid_status -# test_object_lock_suspend_versioning +test_object_lock_suspend_versioning # test_object_lock_get_obj_lock test_object_lock_get_obj_lock_invalid_bucket # test_object_lock_put_obj_retention diff --git a/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py b/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py index 3d411a0e932c2..169682d9db9b7 100644 --- a/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py +++ b/qa/rgw/store/sfs/tests/test-sfs-lifecycle-smoke.py @@ -130,17 +130,13 @@ def test_expiration(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", }, { "ID": "rule2", "Expiration": {"Days": 5}, - "Filter": { - "Prefix": "expire3/" - }, + "Filter": {"Prefix": "expire3/"}, "Status": "Enabled", }, ] @@ -200,9 +196,7 @@ def test_lifecycle_versioning_enabled(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", } ] @@ -212,7 +206,7 @@ def test_lifecycle_versioning_enabled(self): ) # give enough time to expire. 
- # 4 cycles because: + # 3 cycles because: # 1st cycle won't be expired yet (not still 1 day) # 2nd cycle rgw considers the bucket at processed # today and skips it @@ -238,9 +232,7 @@ def test_expiration_multiple_buckets(self): { "ID": "rule1", "Expiration": {"Days": 1}, - "Filter": { - "Prefix": "expire1/" - }, + "Filter": {"Prefix": "expire1/"}, "Status": "Enabled", } ] diff --git a/qa/rgw/store/sfs/tests/test-sfs-object-locking.py b/qa/rgw/store/sfs/tests/test-sfs-object-locking.py index 7ef3496fee5fa..7b2b6b0049845 100644 --- a/qa/rgw/store/sfs/tests/test-sfs-object-locking.py +++ b/qa/rgw/store/sfs/tests/test-sfs-object-locking.py @@ -462,6 +462,7 @@ def test_object_locking_legal_hold(self): self.assertTrue(response["ResponseMetadata"]["HTTPStatusCode"] == 204) + if __name__ == "__main__": if len(sys.argv) == 2: address_port = sys.argv.pop() diff --git a/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py b/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py index d9dfbaf281070..c01c814f95829 100644 --- a/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py +++ b/qa/rgw/store/sfs/tests/test-sfs-versioning-smoke.py @@ -19,24 +19,40 @@ import tempfile import os import filecmp +import threading + + +def _do_create_object(client, bucket_name, key, i): + body = "data {i}".format(i=i) + client.put_object(Bucket=bucket_name, Key=key, Body=body) + + +def _do_wait_completion(t): + for thr in t: + thr.join() + class VersioningSmokeTests(unittest.TestCase): - ACCESS_KEY='test' - SECRET_KEY='test' - URL='http://127.0.0.1:7480' - BUCKET_NAME_LENGTH=8 - OBJECT_NAME_LENGTH=10 + ACCESS_KEY = "test" + SECRET_KEY = "test" + URL = "http://127.0.0.1:7480" + BUCKET_NAME_LENGTH = 8 + OBJECT_NAME_LENGTH = 10 def setUp(self): - self.s3_client = boto3.client('s3', - endpoint_url=VersioningSmokeTests.URL, - aws_access_key_id="test", - aws_secret_access_key="test") - - self.s3 = boto3.resource('s3', - endpoint_url=VersioningSmokeTests.URL, - aws_access_key_id="test", - aws_secret_access_key="test") + self.s3_client = boto3.client( + "s3", + endpoint_url=VersioningSmokeTests.URL, + aws_access_key_id="test", + aws_secret_access_key="test", + ) + + self.s3 = boto3.resource( + "s3", + endpoint_url=VersioningSmokeTests.URL, + aws_access_key_id="test", + aws_secret_access_key="test", + ) self.test_dir = tempfile.TemporaryDirectory() @@ -46,7 +62,7 @@ def tearDown(self): def get_random_name(self, length) -> str: letters = string.ascii_lowercase - result_str = ''.join(random.choice(letters) for i in range(length)) + result_str = "".join(random.choice(letters) for i in range(length)) return result_str def get_random_bucket_name(self) -> str: @@ -58,221 +74,277 @@ def get_random_object_name(self) -> str: def generate_random_file(self, path, size=4): # size passed is in mb size = size * 1024 * 1024 - with open(path, 'wb') as fout: + with open(path, "wb") as fout: fout.write(os.urandom(size)) def assert_bucket_exists(self, bucket_name): response = self.s3_client.list_buckets() found = False - for bucket in response['Buckets']: - if (bucket["Name"] == bucket_name): + for bucket in response["Buckets"]: + if bucket["Name"] == bucket_name: found = True self.assertTrue(found) + def _do_create_versioned_obj_concurrent(self, bucket_name, key, num): + t = [] + for i in range(num): + thr = threading.Thread( + target=_do_create_object, args=(self.s3_client, bucket_name, key, i) + ) + thr.start() + t.append(thr) + return t + def test_create_bucket_enable_versioning(self): bucket_name = self.get_random_bucket_name() 
self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) # ensure versioning is disabled (default) response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) - self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200) - self.assertFalse('Status' in response) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertFalse("Status" in response) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) - self.assertTrue('Status' in response) - self.assertEqual('Enabled', response['Status']) + self.assertTrue("Status" in response) + self.assertEqual("Enabled", response["Status"]) def test_put_objects_versioning_enabled(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) object_name = self.get_random_object_name() - test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin') + test_file_path_1 = os.path.join(self.test_dir.name, "test_file_1.bin") self.generate_random_file(test_file_path_1) # upload the file self.s3_client.upload_file(test_file_path_1, bucket_name, object_name) # get the file and compare with the original - test_file_path_1_check = os.path.join(self.test_dir.name, 'test_file_1_check.bin') + test_file_path_1_check = os.path.join( + self.test_dir.name, "test_file_1_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_1_check) - self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False) + ) # now upload again with different content - test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin') + test_file_path_2 = os.path.join(self.test_dir.name, "test_file_2.bin") self.generate_random_file(test_file_path_2) self.s3_client.upload_file(test_file_path_2, bucket_name, object_name) - test_file_path_2_check = os.path.join(self.test_dir.name, 'test_file_2_check.bin') + test_file_path_2_check = os.path.join( + self.test_dir.name, "test_file_2_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_2_check) - self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False) + ) # get etag of object response = self.s3_client.head_object(Bucket=bucket_name, Key=object_name) - self.assertTrue('ETag' in response) - etag = response['ETag'] + self.assertTrue("ETag" in response) + etag = response["ETag"] # check that we have 2 versions # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(2, len(response['Versions'])) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, 
Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(2, len(response["Versions"])) num_latest = 0 - last_version_id = '' - previous_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertNotEqual('null', version['VersionId']) - if (version['IsLatest']): + last_version_id = "" + previous_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. Tester", "ID": "testid"}, version["Owner"] + ) + self.assertNotEqual("null", version["VersionId"]) + if version["IsLatest"]: num_latest += 1 - last_version_id = version['VersionId'] - self.assertEqual(etag, version['ETag']) + last_version_id = version["VersionId"] + self.assertEqual(etag, version["ETag"]) else: - previous_version_id = version['VersionId'] + previous_version_id = version["VersionId"] # check that all etags differ - for version in response['Versions']: - etag = version['ETag'] - version_id = version['VersionId'] - for version2 in response['Versions']: - version_id2 = version2['VersionId'] - if (version_id2 != version_id): - etag2 = version2['ETag'] + for version in response["Versions"]: + etag = version["ETag"] + version_id = version["VersionId"] + for version2 in response["Versions"]: + version_id2 = version2["VersionId"] + if version_id2 != version_id: + etag2 = version2["ETag"] self.assertNotEqual(etag, etag2) - self.assertEqual(1, num_latest) - self.assertNotEqual('', last_version_id) - self.assertNotEqual('', previous_version_id) + self.assertNotEqual("", last_version_id) + self.assertNotEqual("", previous_version_id) # download by version_id # download the last version - check_version_file = os.path.join(self.test_dir.name, 'check_version.bin') + check_version_file = os.path.join(self.test_dir.name, "check_version.bin") bucket = self.s3.Bucket(bucket_name) bucket.download_file( - object_name, - check_version_file, - ExtraArgs={"VersionId": last_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file, shallow=False)) + object_name, check_version_file, ExtraArgs={"VersionId": last_version_id} + ) + self.assertTrue( + filecmp.cmp(test_file_path_2, check_version_file, shallow=False) + ) # download the previous version - check_version_file_2 = os.path.join(self.test_dir.name, 'check_version2.bin') + check_version_file_2 = os.path.join(self.test_dir.name, "check_version2.bin") bucket.download_file( object_name, check_version_file_2, - ExtraArgs={"VersionId": previous_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_1, check_version_file_2, shallow=False)) + ExtraArgs={"VersionId": previous_version_id}, + ) + self.assertTrue( + filecmp.cmp(test_file_path_1, check_version_file_2, shallow=False) + ) # delete the object self.s3_client.delete_object(Bucket=bucket_name, Key=object_name) # check that we have 2 versions plus 1 DeleteMarker # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(2, len(response['Versions'])) + response = 
self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(2, len(response["Versions"])) num_latest = 0 - deleted_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertNotEqual('null', version['VersionId']) - self.assertFalse(version['IsLatest']) - - self.assertEqual(1, len(response['DeleteMarkers'])) + deleted_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. Tester", "ID": "testid"}, version["Owner"] + ) + self.assertNotEqual("null", version["VersionId"]) + self.assertFalse(version["IsLatest"]) + + self.assertEqual(1, len(response["DeleteMarkers"])) # try to download the file, a 404 error should be returned - check_deleted_file = os.path.join(self.test_dir.name, 'check_deleted.bin') + check_deleted_file = os.path.join(self.test_dir.name, "check_deleted.bin") with self.assertRaises(botocore.exceptions.ClientError) as context: - response = self.s3_client.download_file(bucket_name, object_name, check_deleted_file) - self.assertTrue('404' in str(context.exception)) + response = self.s3_client.download_file( + bucket_name, object_name, check_deleted_file + ) + self.assertTrue("404" in str(context.exception)) # download the previous version, it should still be reacheable - check_version_file_2 = os.path.join(self.test_dir.name, 'check_version2.bin') + check_version_file_2 = os.path.join(self.test_dir.name, "check_version2.bin") bucket.download_file( - object_name, - check_version_file_2, - ExtraArgs={"VersionId": last_version_id}) - self.assertTrue(filecmp.cmp(test_file_path_2, check_version_file_2, shallow=False)) + object_name, check_version_file_2, ExtraArgs={"VersionId": last_version_id} + ) + self.assertTrue( + filecmp.cmp(test_file_path_2, check_version_file_2, shallow=False) + ) + + # delete the first version. 
(in this case version should be deleted + # permanently) + version_id_to_delete = response["Versions"][0]["VersionId"] + # delete the specific version + self.s3_client.delete_object( + Bucket=bucket_name, Key=object_name, VersionId=version_id_to_delete + ) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(1, len(response["Versions"])) + self.assertNotEqual(version_id_to_delete, response["Versions"][0]["VersionId"]) + self.assertTrue("DeleteMarkers" in response) + self.assertEqual(1, len(response["DeleteMarkers"])) def test_put_objects_no_versioning(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) object_name = self.get_random_object_name() - test_file_path_1 = os.path.join(self.test_dir.name, 'test_file_1.bin') + test_file_path_1 = os.path.join(self.test_dir.name, "test_file_1.bin") self.generate_random_file(test_file_path_1) # upload the file self.s3_client.upload_file(test_file_path_1, bucket_name, object_name) # get the file and compare with the original - test_file_path_1_check = os.path.join(self.test_dir.name, 'test_file_1_check.bin') + test_file_path_1_check = os.path.join( + self.test_dir.name, "test_file_1_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_1_check) - self.assertTrue(filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_1, test_file_path_1_check, shallow=False) + ) # now upload again with different content - test_file_path_2 = os.path.join(self.test_dir.name, 'test_file_2.bin') + test_file_path_2 = os.path.join(self.test_dir.name, "test_file_2.bin") self.generate_random_file(test_file_path_2) self.s3_client.upload_file(test_file_path_2, bucket_name, object_name) - test_file_path_2_check = os.path.join(self.test_dir.name, 'test_file_2_check.bin') + test_file_path_2_check = os.path.join( + self.test_dir.name, "test_file_2_check.bin" + ) self.s3_client.download_file(bucket_name, object_name, test_file_path_2_check) - self.assertTrue(filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False)) + self.assertTrue( + filecmp.cmp(test_file_path_2, test_file_path_2_check, shallow=False) + ) # get etag of object response = self.s3_client.head_object(Bucket=bucket_name, Key=object_name) - self.assertTrue('ETag' in response) - etag = response['ETag'] + self.assertTrue("ETag" in response) + etag = response["ETag"] # check that we have 1 version only # only 1 version should be flagged as the latest - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('Versions' in response) - self.assertEqual(1, len(response['Versions'])) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertTrue("Versions" in response) + self.assertEqual(1, len(response["Versions"])) num_latest = 0 - last_version_id = '' - previous_version_id = '' - for version in response['Versions']: - self.assertEqual(os.path.getsize(test_file_path_1), version['Size']) - self.assertEqual(object_name, version['Key']) - self.assertEqual('STANDARD', version['StorageClass']) - self.assertEqual({'DisplayName': 'M. 
Tester', 'ID': 'testid'}, version['Owner']) - self.assertEqual(etag, version['ETag']) - self.assertEqual('null', version['VersionId']) - self.assertTrue(version['IsLatest']) + last_version_id = "" + previous_version_id = "" + for version in response["Versions"]: + self.assertEqual(os.path.getsize(test_file_path_1), version["Size"]) + self.assertEqual(object_name, version["Key"]) + self.assertEqual("STANDARD", version["StorageClass"]) + self.assertEqual( + {"DisplayName": "M. Tester", "ID": "testid"}, version["Owner"] + ) + self.assertEqual(etag, version["ETag"]) + self.assertEqual("null", version["VersionId"]) + self.assertTrue(version["IsLatest"]) # delete the object self.s3_client.delete_object(Bucket=bucket_name, Key=object_name) - # we should still have 0 versions and 1 delete marker - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix=object_name) - self.assertTrue('DeleteMarkers' in response) - self.assertFalse('Versions' in response) - self.assertEqual(1, len(response['DeleteMarkers'])) - - num_latest = 0 - deleted_version_id = '' - for version in response['DeleteMarkers']: - self.assertEqual(object_name, version['Key']) - self.assertEqual({'DisplayName': 'M. Tester', 'ID': 'testid'}, version['Owner']) - self.assertEqual('null', version['VersionId']) - self.assertTrue(version['IsLatest']) + # we should still have 0 versions and no delete markers + # non-versioned bucket don't create delete-markers + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix=object_name + ) + self.assertFalse("DeleteMarkers" in response) + self.assertFalse("Versions" in response) # try to download the file, a 404 error should be returned - check_deleted_file = os.path.join(self.test_dir.name, 'check_deleted.bin') + check_deleted_file = os.path.join(self.test_dir.name, "check_deleted.bin") with self.assertRaises(botocore.exceptions.ClientError) as context: - response = self.s3_client.download_file(bucket_name, object_name, check_deleted_file) - self.assertTrue('404' in str(context.exception)) + response = self.s3_client.download_file( + bucket_name, object_name, check_deleted_file + ) + self.assertTrue("404" in str(context.exception)) def upload_object_with_versions(self, bucket_name, object_name, number_of_versions): for i in range(number_of_versions): @@ -285,52 +357,89 @@ def test_list_objects_versioning_enabled_with_prefix(self): bucket_name = self.get_random_bucket_name() self.s3_client.create_bucket(Bucket=bucket_name) self.assert_bucket_exists(bucket_name) - response = self.s3_client.put_bucket_versioning(Bucket=bucket_name, - VersioningConfiguration={ - 'MFADelete': 'Disabled', - 'Status': 'Enabled'}) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) - self.upload_object_with_versions(bucket_name, 'prefix_file_1.bin', 2) - self.upload_object_with_versions(bucket_name, 'prefix_file_2.bin', 2) - self.upload_object_with_versions(bucket_name, 'test_file.bin', 3) + self.upload_object_with_versions(bucket_name, "prefix_file_1.bin", 2) + self.upload_object_with_versions(bucket_name, "prefix_file_2.bin", 2) + self.upload_object_with_versions(bucket_name, "test_file.bin", 3) # get the list of version with prefix = 'prefix' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='prefix') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="prefix" + ) + 
self.assertTrue("Versions" in response) # we should have 4 versions (2 per each file) - self.assertEqual(4, len(response['Versions'])) + self.assertEqual(4, len(response["Versions"])) # check that the results are the expected ones - for version in response['Versions']: - self.assertTrue(version['Key'].startswith('prefix')) + for version in response["Versions"]: + self.assertTrue(version["Key"].startswith("prefix")) # get the list of version with prefix = 'test' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='test') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="test" + ) + self.assertTrue("Versions" in response) # we should have 3 versions - self.assertEqual(3, len(response['Versions'])) + self.assertEqual(3, len(response["Versions"])) # check that the results are the expected ones - for version in response['Versions']: - self.assertTrue(version['Key'].startswith('test')) + for version in response["Versions"]: + self.assertTrue(version["Key"].startswith("test")) # delete the prefix_file_1.bin object - self.s3_client.delete_object(Bucket=bucket_name, Key='prefix_file_1.bin') + self.s3_client.delete_object(Bucket=bucket_name, Key="prefix_file_1.bin") # get the list of version with prefix = 'prefix' - response = self.s3_client.list_object_versions(Bucket=bucket_name, Prefix='prefix') - self.assertTrue('Versions' in response) + response = self.s3_client.list_object_versions( + Bucket=bucket_name, Prefix="prefix" + ) + self.assertTrue("Versions" in response) # we should have still have 4 versions (2 per each file) - self.assertEqual(4, len(response['Versions'])) + self.assertEqual(4, len(response["Versions"])) # and we should have 1 delete marker - self.assertTrue('DeleteMarkers' in response) - self.assertEqual(1, len(response['DeleteMarkers'])) + self.assertTrue("DeleteMarkers" in response) + self.assertEqual(1, len(response["DeleteMarkers"])) # ensure that it's object we deleted - self.assertEqual('prefix_file_1.bin', response['DeleteMarkers'][0]['Key']) + self.assertEqual("prefix_file_1.bin", response["DeleteMarkers"][0]["Key"]) + + def test_create_concurrent(self): + bucket_name = self.get_random_bucket_name() + self.s3_client.create_bucket(Bucket=bucket_name) + self.assert_bucket_exists(bucket_name) + # ensure versioning is disabled (default) + response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) + self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 200) + self.assertFalse("Status" in response) + response = self.s3_client.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={"MFADelete": "Disabled", "Status": "Enabled"}, + ) + response = self.s3_client.get_bucket_versioning(Bucket=bucket_name) + self.assertTrue("Status" in response) + self.assertEqual("Enabled", response["Status"]) + + key = "myobj" + num_versions = 5 + repeat = 25 + + for i in range(repeat): + key_obj = "%s-%s" % (key, i) + t = self._do_create_versioned_obj_concurrent( + bucket_name, key_obj, num_versions + ) + _do_wait_completion(t) + response = self.s3_client.list_object_versions(Bucket=bucket_name) + versions = response["Versions"] + self.assertEqual(num_versions * repeat, len(versions)) + print("Num versions: %s" % len(versions)) if __name__ == "__main__": if len(sys.argv) == 2: address_port = sys.argv.pop() - VersioningSmokeTests.URL = 'http://{0}'.format(address_port) + VersioningSmokeTests.URL = "http://{0}".format(address_port) unittest.main() else: - print ("usage: 
{0} ADDRESS:PORT".format(sys.argv[0])) + print("usage: {0} ADDRESS:PORT".format(sys.argv[0])) diff --git a/src/rgw/driver/dbstore/common/dbstore.h b/src/rgw/driver/dbstore/common/dbstore.h index b26cc116e82dc..c99fc4ef83f91 100644 --- a/src/rgw/driver/dbstore/common/dbstore.h +++ b/src/rgw/driver/dbstore/common/dbstore.h @@ -44,7 +44,7 @@ struct DBOpBucketInfo { std::list list_entries; }; -struct DBOpObjectInfo { +struct DBObject { RGWAccessControlPolicy acls; RGWObjState state = {}; @@ -124,7 +124,7 @@ struct DBOpInfo { DBOpUserInfo user; std::string query_str; DBOpBucketInfo bucket; - DBOpObjectInfo obj; + DBObject obj; DBOpObjectDataInfo obj_data; DBOpLCHeadInfo lc_head; DBOpLCEntryInfo lc_entry; diff --git a/src/rgw/driver/sfs/CMakeLists.txt b/src/rgw/driver/sfs/CMakeLists.txt index fe01936692a97..19bd476925be9 100644 --- a/src/rgw/driver/sfs/CMakeLists.txt +++ b/src/rgw/driver/sfs/CMakeLists.txt @@ -24,7 +24,6 @@ set(sfs_srcs sqlite/sqlite_lifecycle.cc sqlite/users/users_conversions.cc sqlite/buckets/bucket_conversions.cc - sqlite/versioned_object/versioned_object_conversions.cc sqlite/dbconn.cc bucket.cc multipart.cc diff --git a/src/rgw/driver/sfs/bucket.cc b/src/rgw/driver/sfs/bucket.cc index 28e1e43ef2a63..d52b4aa2d443d 100644 --- a/src/rgw/driver/sfs/bucket.cc +++ b/src/rgw/driver/sfs/bucket.cc @@ -43,18 +43,22 @@ void SFSBucket::write_meta(const DoutPrefixProvider* dpp) { } std::unique_ptr SFSBucket::_get_object(sfs::ObjectRef obj) { - rgw_obj_key key(obj->name); + rgw_obj_key key(obj->name, obj->instance); return make_unique(this->store, key, this, bucket); } std::unique_ptr SFSBucket::get_object(const rgw_obj_key& key) { - // note: the current code is completely ignoring the versionID in the key. - // please see to 'rgw_rest_s3.cc' RGWHandler_REST_S3::init_from_header(). - ldout(store->ceph_context(), 10) - << "bucket::" << __func__ << ": key" << key << dendl; + << "bucket::" << __func__ << ": key : " << key << dendl; try { - auto objref = bucket->get(key.name); + auto objref = bucket->get(key); + // bucket->get retrieves all the information from the db + // (incling the version_id for the last version) + // But in cases like delete operations we don't want to update the + // instance. That could convert a "delete marker" operation into a "delete + // specific version" operation. + // Return the object with the same key as it was requested. 
+ objref->instance = key.instance; return _get_object(objref); } catch (const sfs::UnknownObjectException& _) { ldout(store->ceph_context(), 10) @@ -77,15 +81,12 @@ int SFSBucket::list( if (params.list_versions) { return list_versions(dpp, params, max, results, y); } - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); auto use_prefix = !params.prefix.empty(); + // get_all returns the last version of all objects that are not deleted for (const auto& objref : bucket->get_all()) { if (use_prefix && objref->name.rfind(params.prefix, 0) != 0) continue; lsfs_dout(dpp, 10) << "object: " << objref->name << dendl; - - auto last_version = - db_versioned_objects.get_last_versioned_object(objref->path.get_uuid()); - if (last_version->object_state == rgw::sal::sfs::ObjectState::COMMITTED) { + if (!objref->deleted) { // check for delimiter if (check_add_common_prefix(dpp, objref->name, params, 0, results, y)) { continue; @@ -111,6 +112,8 @@ int SFSBucket::list_versions( ListResults& results, optional_yield y ) { auto use_prefix = !params.prefix.empty(); + sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); + // get_all returns the last version of all objects that are COMMITTED for (const auto& objref : bucket->get_all()) { if (use_prefix && objref->name.rfind(params.prefix, 0) != 0) continue; lsfs_dout(dpp, 10) << "object: " << objref->name << dendl; @@ -118,25 +121,40 @@ int SFSBucket::list_versions( if (check_add_common_prefix(dpp, objref->name, params, 0, results, y)) { continue; } - // get all available versions from db - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); - auto last_version = - db_versioned_objects.get_last_versioned_object(objref->path.get_uuid()); - auto object_versions = - db_versioned_objects.get_versioned_objects(objref->path.get_uuid()); - for (const auto& object_version : object_versions) { + if (get_info().versioning_enabled()) { + auto object_versions = + db_versioned_objects.get_versioned_objects(objref->path.get_uuid()); + for (const auto& object_version : object_versions) { + if (object_version.object_state != + rgw::sal::sfs::ObjectState::COMMITTED) { + continue; + } + rgw_bucket_dir_entry dirent; + dirent.key = cls_rgw_obj_key(objref->name, object_version.version_id); + dirent.meta.accounted_size = object_version.size; + dirent.meta.mtime = object_version.create_time; + dirent.meta.etag = object_version.etag; + dirent.flags = rgw_bucket_dir_entry::FLAG_VER; + if (objref->version_id == object_version.id) { + dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; + } + if (object_version.version_type == + rgw::sal::sfs::VersionType::DELETE_MARKER) { + dirent.flags |= rgw_bucket_dir_entry::FLAG_DELETE_MARKER; + } + dirent.meta.owner_display_name = bucket->get_owner().display_name; + dirent.meta.owner = bucket->get_owner().user_id.id; + results.objs.push_back(dirent); + } + } else { // non-versioned bucket rgw_bucket_dir_entry dirent; - dirent.key = cls_rgw_obj_key(objref->name, object_version.version_id); - dirent.meta.accounted_size = object_version.size; - dirent.meta.mtime = object_version.create_time; - dirent.meta.etag = object_version.etag; + // for non-versioned buckets we don't return the versionId + dirent.key = cls_rgw_obj_key(objref->name, ""); + dirent.meta.accounted_size = objref->get_meta().size; + dirent.meta.mtime = objref->get_meta().mtime; + dirent.meta.etag = objref->get_meta().etag; dirent.flags = rgw_bucket_dir_entry::FLAG_VER; - if (last_version.has_value() && last_version->id == 
object_version.id) { - dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; - } - if (object_version.object_state == rgw::sal::sfs::ObjectState::DELETED) { - dirent.flags |= rgw_bucket_dir_entry::FLAG_DELETE_MARKER; - } + dirent.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; dirent.meta.owner_display_name = bucket->get_owner().display_name; dirent.meta.owner = bucket->get_owner().user_id.id; results.objs.push_back(dirent); @@ -238,12 +256,10 @@ bool SFSBucket::is_owner(User* user) { int SFSBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y) { /** Check in the backing store if this bucket is empty */ // check if there are still objects owned by the bucket - sfs::sqlite::SQLiteObjects db_objects(store->db_conn); - auto objects = db_objects.get_object_ids(get_name()); - sfs::sqlite::SQLiteVersionedObjects db_versions(store->db_conn); + sfs::sqlite::SQLiteVersionedObjects db_ver_objects(store->db_conn); + auto objects = db_ver_objects.list_last_versioned_objects(get_name()); for (const auto& obj : objects) { - auto last_version = db_versions.get_last_versioned_object(obj); - if (last_version->object_state != rgw::sal::sfs::ObjectState::DELETED) { + if (sfs::sqlite::get_version_type(obj) != sfs::VersionType::DELETE_MARKER) { ldpp_dout(dpp, -1) << __func__ << ": Bucket Not Empty.." << dendl; return -ENOTEMPTY; } diff --git a/src/rgw/driver/sfs/multipart.cc b/src/rgw/driver/sfs/multipart.cc index b1f0d5c4530a5..4288ef74d6a22 100644 --- a/src/rgw/driver/sfs/multipart.cc +++ b/src/rgw/driver/sfs/multipart.cc @@ -134,7 +134,7 @@ int SFSMultipartUpload::complete( ceph_assert(target_obj); ceph_assert(target_obj->get_name() == mp->objref->name); - sfs::ObjectRef outobj = bucketref->get_or_create(target_obj->get_key()); + sfs::ObjectRef outobj = bucketref->create_version(target_obj->get_key()); std::filesystem::path outpath = store->get_data_path() / outobj->get_storage_path(); // ensure directory structure exists diff --git a/src/rgw/driver/sfs/object.cc b/src/rgw/driver/sfs/object.cc index 84012601fd9db..6c1b76c67f4eb 100644 --- a/src/rgw/driver/sfs/object.cc +++ b/src/rgw/driver/sfs/object.cc @@ -33,7 +33,8 @@ SFSObject::SFSReadOp::SFSReadOp(SFSObject* _source) : source(_source) { In those cases the SFSReadOp is not properly initialized and those calls are going to fail. 
*/ - source->refresh_meta(); + // read op needs to retrieve also the version_id from the db + source->refresh_meta(true); objref = source->get_object_ref(); } @@ -54,7 +55,8 @@ int SFSObject::SFSReadOp::prepare( lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() << ", obj: " << source->get_name() - << ", size: " << source->get_obj_size() << dendl; + << ", size: " << source->get_obj_size() + << ", versionId: " << source->get_instance() << dendl; if (params.lastmod) { *params.lastmod = source->get_mtime(); } @@ -150,7 +152,7 @@ int SFSObject::SFSDeleteOp::delete_obj( const DoutPrefixProvider* dpp, optional_yield y ) { lsfs_dout(dpp, 10) << "bucket: " << source->bucket->get_name() - << "bucket versioning: " + << " bucket versioning: " << source->bucket->versioning_enabled() << ", object: " << source->get_name() << ", instance: " << source->get_instance() << dendl; @@ -162,12 +164,16 @@ int SFSObject::SFSDeleteOp::delete_obj( } auto version_id = source->get_instance(); + std::string delete_marker_version_id; if (source->objref) { - bucketref->delete_object(source->objref, source->get_key()); - } else if (source->bucket->versioning_enabled()) { + bucketref->delete_object( + source->objref, source->get_key(), source->bucket->versioning_enabled(), + delete_marker_version_id + ); + } else if (source->bucket->versioning_enabled() && source->get_instance().empty()) { // create delete marker // even the object does not exist AWS creates a delete marker for it - // if versioning is enabled + // if versioning is enabled and a specific version was not specified version_id = bucketref->create_non_existing_object_delete_marker(source->get_key()); } @@ -176,6 +182,12 @@ int SFSObject::SFSDeleteOp::delete_obj( // and return the version id if (source->bucket->versioning_enabled()) { result.version_id = version_id; + if (!delete_marker_version_id.empty()) { + // a new delete marker was created. + // Return the version id generated for it. + result.version_id = delete_marker_version_id; + } + source->delete_marker = true; // needed for multiobject delete result.delete_marker = true; } return 0; @@ -220,7 +232,7 @@ int SFSObject::copy_object( std::filesystem::path srcpath = store->get_data_path() / objref->get_storage_path(); - sfs::ObjectRef dstref = dst_bucket_ref->get_or_create(dst_object->get_key()); + sfs::ObjectRef dstref = dst_bucket_ref->create_version(dst_object->get_key()); std::filesystem::path dstpath = store->get_data_path() / dstref->get_storage_path(); @@ -247,7 +259,9 @@ int SFSObject::copy_object( dest_meta.mtime = ceph::real_clock::now(); dstref->update_attrs(objref->get_attrs()); dstref->update_meta(dest_meta); - dstref->metadata_finish(store); + dstref->metadata_finish( + store, dst_bucket_ref->get_info().versioning_enabled() + ); return 0; } @@ -438,42 +452,30 @@ std::unique_ptr SFSObject::get_delete_op() { return std::make_unique(this, ref); } -void SFSObject::refresh_meta() { +void SFSObject::refresh_meta(bool update_version_id_from_metadata) { if (!bucketref) { bucketref = store->get_bucket_ref(bucket->get_name()); } try { - objref = bucketref->get(get_name()); + objref = bucketref->get(rgw_obj_key(get_name(), get_instance())); } catch (sfs::UnknownObjectException& e) { // object probably not created yet? 
return; } - _refresh_meta_from_object(); + _refresh_meta_from_object(objref, update_version_id_from_metadata); } -void SFSObject::_refresh_meta_from_object() { +void SFSObject::_refresh_meta_from_object( + sfs::ObjectRef objref, bool update_version_id_from_metadata +) { ceph_assert(objref); - if (!get_instance().empty() && get_instance() != objref->instance) { - // object specific version requested and it's not the last one - sfs::sqlite::SQLiteVersionedObjects db_versioned_objects(store->db_conn); - auto db_version = db_versioned_objects.get_versioned_object(get_instance()); - if (db_version.has_value()) { - auto uuid = objref->path.get_uuid(); - auto deleted = db_version->object_state == sfs::ObjectState::DELETED; - objref.reset(sfs::Object::create_for_query( - get_name(), uuid, deleted, db_version->id - )); - set_obj_size(db_version->size); - objref->update_attrs(db_version->attrs); - auto meta = objref->get_meta(); - meta.etag = db_version->etag; - objref->update_meta(meta); - } - } else { - set_obj_size(objref->get_meta().size); - } + // fill values from objref + set_obj_size(objref->get_meta().size); set_attrs(objref->get_attrs()); state.mtime = objref->get_meta().mtime; + if (update_version_id_from_metadata) { + set_instance(objref->instance); + } } } // namespace rgw::sal diff --git a/src/rgw/driver/sfs/object.h b/src/rgw/driver/sfs/object.h index db201f1bb5fa7..0f072b75c2c70 100644 --- a/src/rgw/driver/sfs/object.h +++ b/src/rgw/driver/sfs/object.h @@ -36,7 +36,9 @@ class SFSObject : public StoreObject { protected: SFSObject(SFSObject&) = default; - void _refresh_meta_from_object(); + void _refresh_meta_from_object( + sfs::ObjectRef objref, bool update_version_id_from_metadata = false + ); public: /** @@ -109,7 +111,7 @@ class SFSObject : public StoreObject { store(_st), bucketref(_bucketref), objref(_objref) { - _refresh_meta_from_object(); + _refresh_meta_from_object(objref); } virtual std::unique_ptr clone() override { @@ -224,7 +226,13 @@ class SFSObject : public StoreObject { void set_object_ref(sfs::ObjectRef objref) { this->objref = objref; } - void refresh_meta(); + // Refresh metadata from db. + // Also retrieves version_id when specified. + // There are situations (like delete operations) in which we don't want to + // update the version_id passed in the S3 call. + // Doing so could convert an "add delete marker" call to a "delete a specific + // version" call. + void refresh_meta(bool update_version_id_from_metadata = false); const std::string get_cls_name() { return "object"; } }; diff --git a/src/rgw/driver/sfs/object_state.h b/src/rgw/driver/sfs/object_state.h index 91e4de53887db..7361d0d6dea4a 100644 --- a/src/rgw/driver/sfs/object_state.h +++ b/src/rgw/driver/sfs/object_state.h @@ -16,13 +16,7 @@ namespace rgw::sal::sfs { -enum class ObjectState { - OPEN = 0, - COMMITTED, - LOCKED, - DELETED, - LAST_VALUE = DELETED -}; +enum class ObjectState { OPEN = 0, COMMITTED, DELETED, LAST_VALUE = DELETED }; } // namespace rgw::sal::sfs diff --git a/src/rgw/driver/sfs/sfs_gc.cc b/src/rgw/driver/sfs/sfs_gc.cc index 71ad1c6965fc4..b846851316a25 100644 --- a/src/rgw/driver/sfs/sfs_gc.cc +++ b/src/rgw/driver/sfs/sfs_gc.cc @@ -102,7 +102,9 @@ void SFSGC::delete_objects(const std::string& bucket_id) { void SFSGC::delete_versioned_objects(const Object& object) { sqlite::SQLiteVersionedObjects db_ver_objs(store->db_conn); - auto versions = db_ver_objs.get_versioned_objects(object.path.get_uuid()); + // get all versions. 
Including deleted ones + auto versions = + db_ver_objs.get_versioned_objects(object.path.get_uuid(), false); for (auto const& version : versions) { if (max_objects <= 0) { break; diff --git a/src/rgw/driver/sfs/sqlite/bindings/blob.h b/src/rgw/driver/sfs/sqlite/bindings/blob.h new file mode 100644 index 0000000000000..d2545cef946c2 --- /dev/null +++ b/src/rgw/driver/sfs/sqlite/bindings/blob.h @@ -0,0 +1,69 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t +// vim: ts=8 sw=2 smarttab ft=cpp +/* + * Ceph - scalable distributed file system + * SFS SAL implementation + * + * Copyright (C) 2023 SUSE LLC + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ +#pragma once + +#include + +#include "rgw/driver/sfs/sqlite/conversion_utils.h" +#include "rgw/driver/sfs/sqlite/sqlite_orm.h" +#include "rgw_common.h" + +namespace sqlite_orm { + +template +struct type_printer< + T, typename std::enable_if< + std::is_same::value, void>::type> + : public blob_printer {}; + +template +struct statement_binder< + T, typename std::enable_if< + std::is_same::value, void>::type> { + int bind(sqlite3_stmt* stmt, int index, const T& value) { + std::vector blobValue; + rgw::sal::sfs::sqlite::encode_blob(value, blobValue); + return statement_binder>().bind(stmt, index, blobValue); + } +}; + +template +struct field_printer< + T, typename std::enable_if< + std::is_same::value, void>::type> { + std::string operator()(const T& value) const { return "ENCODED BLOB"; } +}; + +template +struct row_extractor< + T, typename std::enable_if< + std::is_same::value, void>::type> { + T extract(sqlite3_stmt* stmt, int columnIndex) { + auto blob_data = sqlite3_column_blob(stmt, columnIndex); + auto blob_size = sqlite3_column_bytes(stmt, columnIndex); + if (blob_data == nullptr || blob_size < 0) { + throw(std::system_error( + ERANGE, std::system_category(), + "Invalid blob at column : (" + std::to_string(columnIndex) + ")" + )); + } + T ret; + rgw::sal::sfs::sqlite::decode_blob( + reinterpret_cast(blob_data), + static_cast(blob_size), ret + ); + return ret; + } +}; +} // namespace sqlite_orm diff --git a/src/rgw/driver/sfs/sqlite/conversion_utils.h b/src/rgw/driver/sfs/sqlite/conversion_utils.h index 8c55bdd70424e..33d01e1ca7127 100644 --- a/src/rgw/driver/sfs/sqlite/conversion_utils.h +++ b/src/rgw/driver/sfs/sqlite/conversion_utils.h @@ -25,6 +25,13 @@ void decode_blob(const BLOB_HOLDER& blob_holder, DEST& dest) { ceph::decode(dest, buffer); } +template +void decode_blob(const char* data, size_t data_size, DEST& dest) { + bufferlist buffer; + buffer.append(data, data_size); + ceph::decode(dest, buffer); +} + template void encode_blob(const ORIGIN& origin, BLOB_HOLDER& dest) { bufferlist buffer; diff --git a/src/rgw/driver/sfs/sqlite/dbconn.h b/src/rgw/driver/sfs/sqlite/dbconn.h index 3af9ade23d345..3baee0256a8f8 100644 --- a/src/rgw/driver/sfs/sqlite/dbconn.h +++ b/src/rgw/driver/sfs/sqlite/dbconn.h @@ -57,9 +57,7 @@ inline auto _make_storage(const std::string& path) { ), sqlite_orm::make_index("bucket_ownerid_idx", &DBBucket::owner_id), sqlite_orm::make_index("bucket_name_idx", &DBBucket::bucket_name), - sqlite_orm::make_index( - "objects_bucketid_idx", &DBOPObjectInfo::bucket_id - ), + sqlite_orm::make_index("objects_bucketid_idx", &DBObject::bucket_id), sqlite_orm::make_index( "vobjs_versionid_idx", &DBVersionedObject::version_id ), @@ -127,11 
+125,11 @@ inline auto _make_storage(const std::string& path) { sqlite_orm::make_table( std::string(OBJECTS_TABLE), sqlite_orm::make_column( - "uuid", &DBOPObjectInfo::uuid, sqlite_orm::primary_key() + "uuid", &DBObject::uuid, sqlite_orm::primary_key() ), - sqlite_orm::make_column("bucket_id", &DBOPObjectInfo::bucket_id), - sqlite_orm::make_column("name", &DBOPObjectInfo::name), - sqlite_orm::foreign_key(&DBOPObjectInfo::bucket_id) + sqlite_orm::make_column("bucket_id", &DBObject::bucket_id), + sqlite_orm::make_column("name", &DBObject::name), + sqlite_orm::foreign_key(&DBObject::bucket_id) .references(&DBBucket::bucket_id) ), sqlite_orm::make_table( @@ -163,7 +161,7 @@ inline auto _make_storage(const std::string& path) { "version_type", &DBVersionedObject::version_type ), sqlite_orm::foreign_key(&DBVersionedObject::object_id) - .references(&DBOPObjectInfo::uuid) + .references(&DBObject::uuid) ), sqlite_orm::make_table( std::string(ACCESS_KEYS), diff --git a/src/rgw/driver/sfs/sqlite/objects/object_definitions.h b/src/rgw/driver/sfs/sqlite/objects/object_definitions.h index fbe022f1e1aa1..159b505a035d7 100644 --- a/src/rgw/driver/sfs/sqlite/objects/object_definitions.h +++ b/src/rgw/driver/sfs/sqlite/objects/object_definitions.h @@ -20,7 +20,7 @@ namespace rgw::sal::sfs::sqlite { -struct DBOPObjectInfo { +struct DBObject { uuid_d uuid; std::string bucket_id; std::string name; diff --git a/src/rgw/driver/sfs/sqlite/sqlite_objects.cc b/src/rgw/driver/sfs/sqlite/sqlite_objects.cc index 8df3e32e5fef1..46e45524a5d32 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_objects.cc +++ b/src/rgw/driver/sfs/sqlite/sqlite_objects.cc @@ -13,41 +13,42 @@ */ #include "sqlite_objects.h" +#include "sqlite_versioned_objects.h" + using namespace sqlite_orm; namespace rgw::sal::sfs::sqlite { SQLiteObjects::SQLiteObjects(DBConnRef _conn) : conn(_conn) {} -std::vector SQLiteObjects::get_objects( - const std::string& bucket_id +std::vector SQLiteObjects::get_objects(const std::string& bucket_id ) const { auto storage = conn->get_storage(); - return storage.get_all( - where(is_equal(&DBOPObjectInfo::bucket_id, bucket_id)) + return storage.get_all( + where(is_equal(&DBObject::bucket_id, bucket_id)) ); } -std::optional SQLiteObjects::get_object(const uuid_d& uuid -) const { +std::optional SQLiteObjects::get_object(const uuid_d& uuid) const { auto storage = conn->get_storage(); - auto object = storage.get_pointer(uuid.to_string()); - std::optional ret_value; + auto object = storage.get_pointer(uuid.to_string()); + std::optional ret_value; if (object) { ret_value = *object; } return ret_value; } -std::optional SQLiteObjects::get_object( +std::optional SQLiteObjects::get_object( const std::string& bucket_id, const std::string& object_name ) const { auto storage = conn->get_storage(); - auto objects = storage.get_all(where( - is_equal(&DBOPObjectInfo::bucket_id, bucket_id) and - is_equal(&DBOPObjectInfo::name, object_name) + auto objects = storage.get_all(where( + is_equal(&DBObject::bucket_id, bucket_id) and + is_equal(&DBObject::name, object_name) )); - std::optional ret_value; + + std::optional ret_value; // value must be unique if (objects.size() == 1) { ret_value = objects[0]; @@ -55,27 +56,14 @@ std::optional SQLiteObjects::get_object( return ret_value; } -void SQLiteObjects::store_object(const DBOPObjectInfo& object) const { +void SQLiteObjects::store_object(const DBObject& object) const { auto storage = conn->get_storage(); storage.replace(object); } void SQLiteObjects::remove_object(const uuid_d& uuid) const { auto 
storage = conn->get_storage(); - storage.remove(uuid); -} - -std::vector SQLiteObjects::get_object_ids() const { - auto storage = conn->get_storage(); - return storage.select(&DBOPObjectInfo::uuid); -} - -std::vector SQLiteObjects::get_object_ids(const std::string& bucket_id -) const { - auto storage = conn->get_storage(); - return storage.select( - &DBOPObjectInfo::uuid, where(c(&DBOPObjectInfo::bucket_id) = bucket_id) - ); + storage.remove(uuid); } } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/sqlite_objects.h b/src/rgw/driver/sfs/sqlite/sqlite_objects.h index 5ce7bfe331064..2bc18060f0649 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_objects.h +++ b/src/rgw/driver/sfs/sqlite/sqlite_objects.h @@ -27,17 +27,16 @@ class SQLiteObjects { SQLiteObjects(const SQLiteObjects&) = delete; SQLiteObjects& operator=(const SQLiteObjects&) = delete; - std::vector get_objects(const std::string& bucket_id) const; - std::optional get_object(const uuid_d& uuid) const; - std::optional get_object( + std::vector get_objects(const std::string& bucket_id) const; + + std::optional get_object(const uuid_d& uuid) const; + + std::optional get_object( const std::string& bucket_id, const std::string& object_name ) const; - void store_object(const DBOPObjectInfo& object) const; + void store_object(const DBObject& object) const; void remove_object(const uuid_d& uuid) const; - - std::vector get_object_ids() const; - std::vector get_object_ids(const std::string& bucket_id) const; }; } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc index 14c172304a276..e58d472abe7aa 100644 --- a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc +++ b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.cc @@ -13,63 +13,121 @@ */ #include "sqlite_versioned_objects.h" +#include "rgw/driver/sfs/uuid_path.h" + using namespace sqlite_orm; namespace rgw::sal::sfs::sqlite { -std::vector get_rgw_versioned_objects( - const std::vector& db_versioned_objects -) { - std::vector ret_objs; - for (const auto& db_obj : db_versioned_objects) { - auto rgw_obj = get_rgw_versioned_object(db_obj); - ret_objs.push_back(rgw_obj); - } - return ret_objs; -} - SQLiteVersionedObjects::SQLiteVersionedObjects(DBConnRef _conn) : conn(_conn) {} -std::optional -SQLiteVersionedObjects::get_versioned_object(uint id) const { +std::optional SQLiteVersionedObjects::get_versioned_object( + uint id, bool filter_deleted +) const { auto storage = conn->get_storage(); auto object = storage.get_pointer(id); - std::optional ret_value; + std::optional ret_value; if (object) { - ret_value = get_rgw_versioned_object(*object); + if (!filter_deleted || object->object_state != ObjectState::DELETED) { + ret_value = *object; + } } return ret_value; } -std::optional -SQLiteVersionedObjects::get_versioned_object(const std::string& version_id +std::optional SQLiteVersionedObjects::get_versioned_object( + const std::string& version_id, bool filter_deleted ) const { auto storage = conn->get_storage(); auto versioned_objects = storage.get_all( where(c(&DBVersionedObject::version_id) = version_id) ); ceph_assert(versioned_objects.size() <= 1); - std::optional ret_value; + std::optional ret_value; if (versioned_objects.size()) { - ret_value = get_rgw_versioned_object(versioned_objects[0]); + if (!filter_deleted || + versioned_objects[0].object_state != ObjectState::DELETED) { + ret_value = versioned_objects[0]; + } } return ret_value; } +std::optional 
+SQLiteVersionedObjects::get_non_deleted_versioned_object( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id +) const { + if (version_id.empty()) { + return get_non_deleted_versioned_object_last_version( + bucket_id, object_name + ); + } + return get_non_deleted_versioned_object_specific_version( + bucket_id, object_name, version_id + ); +} + +DBObjectsListItems SQLiteVersionedObjects::list_last_versioned_objects( + const std::string& bucket_id +) const { + auto storage = conn->get_storage(); + auto results = storage.select( + columns( + &DBObject::uuid, &DBObject::name, &DBVersionedObject::version_id, + max(&DBVersionedObject::commit_time), max(&DBVersionedObject::id), + &DBVersionedObject::size, &DBVersionedObject::etag, + &DBVersionedObject::mtime, &DBVersionedObject::delete_time, + &DBVersionedObject::attrs, &DBVersionedObject::version_type, + &DBVersionedObject::object_state + ), + inner_join( + on(is_equal(&DBObject::uuid, &DBVersionedObject::object_id)) + ), + where( + is_equal(&DBObject::bucket_id, bucket_id) and + is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED) + ), + group_by(&DBObject::uuid), order_by(&DBVersionedObject::create_time).asc() + ); + return results; +} + uint SQLiteVersionedObjects::insert_versioned_object( - const DBOPVersionedObjectInfo& object + const DBVersionedObject& object ) const { auto storage = conn->get_storage(); - auto db_object = get_db_versioned_object(object); - return storage.insert(db_object); + return storage.insert(object); } void SQLiteVersionedObjects::store_versioned_object( - const DBOPVersionedObjectInfo& object + const DBVersionedObject& object ) const { auto storage = conn->get_storage(); - auto db_object = get_db_versioned_object(object); - storage.update(db_object); + storage.update(object); +} + +void SQLiteVersionedObjects::store_versioned_object_delete_rest_transact( + const DBVersionedObject& object +) const { + try { + auto storage = conn->get_storage(); + auto transaction = storage.transaction_guard(); + storage.update(object); + // soft delete the rest of this object + storage.update_all( + set(c(&DBVersionedObject::object_state) = ObjectState::DELETED), + where( + is_equal(&DBVersionedObject::object_id, object.object_id) and + is_not_equal(&DBVersionedObject::id, object.id) + ) + ); + transaction.commit(); + } catch (const std::system_error& e) { + // throw exception (will be caught later in the sfs logic) + // TODO revisit this when error handling is defined + throw(e); + } } void SQLiteVersionedObjects::remove_versioned_object(uint id) const { @@ -77,46 +135,314 @@ void SQLiteVersionedObjects::remove_versioned_object(uint id) const { storage.remove(id); } -std::vector SQLiteVersionedObjects::get_versioned_object_ids() const { +std::vector SQLiteVersionedObjects::get_versioned_object_ids( + bool filter_deleted +) const { auto storage = conn->get_storage(); + if (filter_deleted) { + return storage.select( + &DBVersionedObject::id, + where( + is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED) + ) + ); + } return storage.select(&DBVersionedObject::id); } std::vector SQLiteVersionedObjects::get_versioned_object_ids( - const uuid_d& object_id + const uuid_d& object_id, bool filter_deleted ) const { auto storage = conn->get_storage(); auto uuid = object_id.to_string(); + if (filter_deleted) { + return storage.select( + &DBVersionedObject::id, + where( + is_equal(&DBVersionedObject::object_id, uuid) and + is_not_equal(&DBVersionedObject::object_state, 
ObjectState::DELETED)
+        )
+    );
+  }
   return storage.select(
       &DBVersionedObject::id, where(c(&DBVersionedObject::object_id) = uuid)
   );
 }
-std::vector<DBOPVersionedObjectInfo>
-SQLiteVersionedObjects::get_versioned_objects(const uuid_d& object_id) const {
+std::vector<DBVersionedObject> SQLiteVersionedObjects::get_versioned_objects(
+    const uuid_d& object_id, bool filter_deleted
+) const {
   auto storage = conn->get_storage();
   auto uuid = object_id.to_string();
-  auto versioned_objects = storage.get_all<DBVersionedObject>(
+  if (filter_deleted) {
+    return storage.get_all<DBVersionedObject>(
+        where(
+            is_equal(&DBVersionedObject::object_id, uuid) and
+            is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED)
+        ),
+        order_by(&DBVersionedObject::commit_time).desc()
+    );
+  }
+  return storage.get_all<DBVersionedObject>(
       where(c(&DBVersionedObject::object_id) = uuid)
   );
-  return get_rgw_versioned_objects(versioned_objects);
 }
-std::optional<DBOPVersionedObjectInfo>
-SQLiteVersionedObjects::get_last_versioned_object(const uuid_d& object_id
+std::optional<DBVersionedObject>
+SQLiteVersionedObjects::get_last_versioned_object(
+    const uuid_d& object_id, bool filter_deleted
+) const {
+  auto storage = conn->get_storage();
+  std::vector<std::tuple<uint, std::unique_ptr<ceph::real_time>>>
+      max_commit_time_ids;
+  // we are looking for the ids that match the object_id with the highest
+  // commit_time and we want to get the highest id.
+  if (filter_deleted) {
+    max_commit_time_ids = storage.select(
+        columns(&DBVersionedObject::id, max(&DBVersionedObject::commit_time)),
+        where(
+            is_equal(&DBVersionedObject::object_id, object_id) and
+            is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED)
+        ),
+        group_by(&DBVersionedObject::id),
+        order_by(&DBVersionedObject::id).desc()
+    );
+  } else {
+    max_commit_time_ids = storage.select(
+        columns(&DBVersionedObject::id, max(&DBVersionedObject::commit_time)),
+        where(is_equal(&DBVersionedObject::object_id, object_id)),
+        group_by(&DBVersionedObject::id),
+        order_by(&DBVersionedObject::id).desc()
+    );
+  }
+
+  // if found, the value we are looking for is in the first position of the
+  // results because we ordered descending in the query
+  auto found_value = max_commit_time_ids.size() &&
+                     std::get<1>(max_commit_time_ids[0]) != nullptr;
+
+  std::optional<DBVersionedObject> ret_value;
+  if (found_value) {
+    auto last_version_id = std::get<0>(max_commit_time_ids[0]);
+    auto last_version = storage.get_pointer<DBVersionedObject>(last_version_id);
+    if (last_version) {
+      ret_value = *last_version;
+    }
+  }
+  return ret_value;
+}
+
+std::optional<DBVersionedObject>
+SQLiteVersionedObjects::delete_version_and_get_previous_transact(uint id) {
+  try {
+    auto storage = conn->get_storage();
+    auto transaction = storage.transaction_guard();
+    auto version = storage.get_pointer<DBVersionedObject>(id);
+    std::optional<DBVersionedObject> ret_value;
+    if (version != nullptr) {
+      auto object_id = version->object_id;
+      storage.remove<DBVersionedObject>(id);
+      // get the last version of the object now
+      auto max_commit_time_ids = storage.select(
+          columns(&DBVersionedObject::id, max(&DBVersionedObject::commit_time)),
+          where(
+              is_equal(&DBVersionedObject::object_id, object_id) and
+              is_not_equal(
+                  &DBVersionedObject::object_state, ObjectState::DELETED
+              )
+          ),
+          group_by(&DBVersionedObject::id),
+          order_by(&DBVersionedObject::id).desc()
+      );
+      auto found_value = max_commit_time_ids.size() &&
+                         std::get<1>(max_commit_time_ids[0]) != nullptr;
+      if (found_value) {
+        // if no value is found it could be because, for example, lifecycle
+        // deleted all non-current versions before.
+        auto last_version_id = std::get<0>(max_commit_time_ids[0]);
+        auto last_version =
+            storage.get_pointer<DBVersionedObject>(last_version_id);
+        if (last_version) {
+          ret_value = *last_version;
+        }
+      }
+      transaction.commit();
+    }
+    return ret_value;
+  } catch (const std::system_error& e) {
+    // throw exception (will be caught later in the sfs logic)
+    // TODO revisit this when error handling is defined
+    throw(e);
+  }
+}
+
+uint SQLiteVersionedObjects::add_delete_marker_transact(
+    const uuid_d& object_id, const std::string& delete_marker_id, bool& added
+) const {
+  uint ret_id{0};
+  added = false;
+  try {
+    auto storage = conn->get_storage();
+    auto transaction = storage.transaction_guard();
+    auto max_commit_time_ids = storage.select(
+        columns(&DBVersionedObject::id, max(&DBVersionedObject::commit_time)),
+        where(
+            is_equal(&DBVersionedObject::object_id, object_id) and
+            is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED)
+        ),
+        group_by(&DBVersionedObject::id),
+        order_by(&DBVersionedObject::id).desc()
+    );
+    // if found, the value we are looking for is in the first position of the
+    // results because we ordered descending in the query
+    auto found_value = max_commit_time_ids.size() &&
+                       std::get<1>(max_commit_time_ids[0]) != nullptr;
+    if (found_value) {
+      auto last_version_id = std::get<0>(max_commit_time_ids[0]);
+      auto last_version =
+          storage.get_pointer<DBVersionedObject>(last_version_id);
+      if (last_version &&
+          last_version->object_state == ObjectState::COMMITTED &&
+          last_version->version_type == VersionType::REGULAR) {
+        auto now = ceph::real_clock::now();
+        last_version->version_type = VersionType::DELETE_MARKER;
+        last_version->delete_time = now;
+        last_version->mtime = now;
+        last_version->version_id = delete_marker_id;
+        ret_id = storage.insert(*last_version);
+        added = true;
+        // only commit if the delete marker was indeed inserted.
+        // the rest of the calls in this transaction are read operations
+        transaction.commit();
+      }
+    }
+  } catch (const std::system_error& e) {
+    // throw exception (will be caught later in the sfs logic)
+    // TODO revisit this when error handling is defined
+    throw(e);
+  }
+  return ret_id;
+}
+
+std::optional<DBVersionedObject>
+SQLiteVersionedObjects::get_non_deleted_versioned_object_specific_version(
+    const std::string& bucket_id, const std::string& object_name,
+    const std::string& version_id
 ) const {
   auto storage = conn->get_storage();
-  auto last_version_id = storage.max(
+  auto ids = storage.select(
       &DBVersionedObject::id,
-      where(c(&DBVersionedObject::object_id) = object_id.to_string())
+      inner_join(
+          on(is_equal(&DBObject::uuid, &DBVersionedObject::object_id))
+      ),
+      where(
+          is_not_equal(
+              &DBVersionedObject::object_state, ObjectState::DELETED
+          ) and
+          is_equal(&DBObject::bucket_id, bucket_id) and
+          is_equal(&DBObject::name, object_name) and
+          is_equal(&DBVersionedObject::version_id, version_id)
+      )
   );
-  std::optional<DBOPVersionedObjectInfo> ret_value;
-  if (last_version_id) {
-    auto last_version =
-        storage.get_pointer<DBVersionedObject>(*last_version_id);
+  // TODO return an error if this returns more than 1 version?
+  // Only 1 object with no deleted versions should be present
+  // revisit this ceph_assert after error handling is defined
+  ceph_assert(ids.size() <= 1);
+  std::optional<DBVersionedObject> ret_value;
+  if (ids.size() > 0) {
+    auto version = storage.get_pointer<DBVersionedObject>(ids[0]);
+    if (version != nullptr) {
+      ret_value = *version;
+    }
+  }
+  return ret_value;
+}
+
+std::optional<DBVersionedObject>
+SQLiteVersionedObjects::get_non_deleted_versioned_object_last_version(
+    const std::string& bucket_id, const std::string& object_name
+) const {
+  // we don't have a version_id, so return the last available one that is
+  // committed
+  auto storage = conn->get_storage();
+  auto max_commit_time_ids = storage.select(
+      columns(&DBVersionedObject::id, max(&DBVersionedObject::commit_time)),
+      inner_join(
+          on(is_equal(&DBObject::uuid, &DBVersionedObject::object_id))
+      ),
+      where(
+          is_equal(&DBObject::bucket_id, bucket_id) and
+          is_equal(&DBObject::name, object_name) and
+          is_not_equal(&DBVersionedObject::object_state, ObjectState::DELETED)
+      ),
+      group_by(&DBVersionedObject::id), order_by(&DBVersionedObject::id).desc()
+  );
+  auto found_value = max_commit_time_ids.size() &&
+                     std::get<1>(max_commit_time_ids[0]) != nullptr;
+  std::optional<DBVersionedObject> ret_value;
+  if (found_value) {
+    // if no value is found it could be because, for example, lifecycle deleted
+    // all non-current versions before.
+    auto last_version_id = std::get<0>(max_commit_time_ids[0]);
+    auto last_version = storage.get_pointer<DBVersionedObject>(last_version_id);
     if (last_version) {
-      ret_value = get_rgw_versioned_object(*last_version);
+      ret_value = *last_version;
+    }
+  }
+  return ret_value;
+}
+
+std::optional<DBVersionedObject>
+SQLiteVersionedObjects::create_new_versioned_object_transact(
+    const std::string& bucket_id, const std::string& object_name,
+    const std::string& version_id
+) const {
+  std::optional<DBVersionedObject> ret_value;
+  try {
+    auto storage = conn->get_storage();
+    auto transaction = storage.transaction_guard();
+    auto objs = storage.select(
+        columns(&DBObject::uuid),
+        inner_join(
+            on(is_equal(&DBObject::uuid, &DBVersionedObject::object_id))
+        ),
+        where(
+            is_not_equal(
+                &DBVersionedObject::object_state, ObjectState::DELETED
+            ) and
+            is_equal(&DBObject::bucket_id, bucket_id) and
+            is_equal(&DBObject::name, object_name)
+        ),
+        group_by(&DBObject::uuid)
+    );
+    // should return either none or one object
+    // TODO revisit this ceph_assert after error handling is defined
+    ceph_assert(objs.size() <= 1);
+    DBObject obj;
+    obj.name = object_name;
+    obj.bucket_id = bucket_id;
+    if (objs.size() == 0) {
+      // object does not exist
+      // create it
+      obj.uuid.generate_random();
+      storage.replace(obj);
+    } else {
+      obj.uuid = std::get<0>(objs[0]);
     }
+    // create the version now
+    DBVersionedObject version;
+    version.object_id = obj.uuid;
+    version.object_state = ObjectState::OPEN;
+    version.version_type = VersionType::REGULAR;
+    version.version_id = version_id;
+    version.create_time = ceph::real_clock::now();
+    version.id = storage.insert(version);
+    transaction.commit();
+    ret_value = version;
+  } catch (const std::system_error& e) {
+    // throw exception (will be caught later in the sfs logic)
+    // TODO revisit this when error handling is defined
+    throw(e);
   }
   return ret_value;
 }
diff --git a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h
index caacff524fc5c..707ecef22f3bf 100644
--- a/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h
+++ b/src/rgw/driver/sfs/sqlite/sqlite_versioned_objects.h
@@ -14,7 +14,7 @@
 #pragma once
 
 #include "dbconn.h"
-#include 
"versioned_object/versioned_object_conversions.h" +#include "versioned_object/versioned_object_definitions.h" namespace rgw::sal::sfs::sqlite { @@ -28,23 +28,61 @@ class SQLiteVersionedObjects { SQLiteVersionedObjects(const SQLiteVersionedObjects&) = delete; SQLiteVersionedObjects& operator=(const SQLiteVersionedObjects&) = delete; - std::optional get_versioned_object(uint id) const; - std::optional get_versioned_object( + std::optional get_versioned_object( + uint id, bool filter_deleted = true + ) const; + std::optional get_versioned_object( + const std::string& version_id, bool filter_deleted = true + ) const; + std::optional get_non_deleted_versioned_object( + const std::string& bucket_id, const std::string& object_name, const std::string& version_id ) const; + DBObjectsListItems list_last_versioned_objects(const std::string& bucket_id + ) const; - uint insert_versioned_object(const DBOPVersionedObjectInfo& object) const; - void store_versioned_object(const DBOPVersionedObjectInfo& object) const; + uint insert_versioned_object(const DBVersionedObject& object) const; + void store_versioned_object(const DBVersionedObject& object) const; void remove_versioned_object(uint id) const; + void store_versioned_object_delete_rest_transact( + const DBVersionedObject& object + ) const; + + std::vector get_versioned_object_ids(bool filter_deleted = true) const; + std::vector get_versioned_object_ids( + const uuid_d& object_id, bool filter_deleted = true + ) const; + std::vector get_versioned_objects( + const uuid_d& object_id, bool filter_deleted = true + ) const; + + std::optional get_last_versioned_object( + const uuid_d& object_id, bool filter_deleted = true + ) const; + + std::optional delete_version_and_get_previous_transact( + uint id + ); + + std::optional create_new_versioned_object_transact( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id + ) const; + + uint add_delete_marker_transact( + const uuid_d& object_id, const std::string& delete_marker_id, bool& added + ) const; - std::vector get_versioned_object_ids() const; - std::vector get_versioned_object_ids(const uuid_d& object_id) const; - std::vector get_versioned_objects( - const uuid_d& object_id + private: + std::optional + get_non_deleted_versioned_object_specific_version( + const std::string& bucket_id, const std::string& object_name, + const std::string& version_id ) const; - std::optional get_last_versioned_object( - const uuid_d& object_id + std::optional + get_non_deleted_versioned_object_last_version( + const std::string& bucket_id, const std::string& object_name ) const; }; diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc deleted file mode 100644 index 9bcc4346c0492..0000000000000 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.cc +++ /dev/null @@ -1,70 +0,0 @@ -// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -// vim: ts=8 sw=2 smarttab ft=cpp -/* - * Ceph - scalable distributed file system - * SFS SAL implementation - * - * Copyright (C) 2022 SUSE LLC - * - * This is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software - * Foundation. See file COPYING. 
- */ -#include "versioned_object_conversions.h" - -#include "../conversion_utils.h" - -namespace rgw::sal::sfs::sqlite { - -ObjectState get_object_state(uint state) { - if (state > static_cast(ObjectState::LAST_VALUE)) { - throw(std::runtime_error( - "incorrect state found (" + std::to_string(state) + ")" - )); - } - return static_cast(state); -} - -uint get_uint_object_state(ObjectState state) { - return static_cast(state); -} - -DBOPVersionedObjectInfo get_rgw_versioned_object(const DBVersionedObject& object -) { - DBOPVersionedObjectInfo rgw_object; - rgw_object.id = object.id; - rgw_object.object_id = object.object_id; - rgw_object.checksum = object.checksum; - rgw_object.size = object.size; - rgw_object.create_time = object.create_time; - rgw_object.delete_time = object.delete_time; - rgw_object.commit_time = object.commit_time; - rgw_object.mtime = object.mtime; - rgw_object.object_state = object.object_state; - rgw_object.version_id = object.version_id; - rgw_object.etag = object.etag; - assign_optional_value(object.attrs, rgw_object.attrs); - rgw_object.version_type = object.version_type; - return rgw_object; -} - -DBVersionedObject get_db_versioned_object(const DBOPVersionedObjectInfo& object -) { - DBVersionedObject db_object; - db_object.id = object.id; - db_object.object_id = object.object_id; - db_object.checksum = object.checksum; - db_object.size = object.size; - db_object.create_time = object.create_time; - db_object.delete_time = object.delete_time; - db_object.commit_time = object.commit_time; - db_object.mtime = object.mtime; - db_object.object_state = object.object_state; - db_object.version_id = object.version_id; - db_object.etag = object.etag; - assign_db_value(object.attrs, db_object.attrs); - db_object.version_type = object.version_type; - return db_object; -} -} // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h deleted file mode 100644 index bfaa913a612b0..0000000000000 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h +++ /dev/null @@ -1,26 +0,0 @@ -// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -// vim: ts=8 sw=2 smarttab ft=cpp -/* - * Ceph - scalable distributed file system - * SFS SAL implementation - * - * Copyright (C) 2022 SUSE LLC - * - * This is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License version 2.1, as published by the Free Software - * Foundation. See file COPYING. 
- */ -#pragma once - -#include "versioned_object_definitions.h" - -namespace rgw::sal::sfs::sqlite { - -// Functions that convert DB type to RGW type (and vice-versa) -DBOPVersionedObjectInfo get_rgw_versioned_object(const DBVersionedObject& object -); -DBVersionedObject get_db_versioned_object(const DBOPVersionedObjectInfo& object -); - -} // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h index b41a60a9659f8..afcec64c6bde3 100644 --- a/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h +++ b/src/rgw/driver/sfs/sqlite/versioned_object/versioned_object_definitions.h @@ -16,6 +16,7 @@ #include #include "rgw/driver/sfs/object_state.h" +#include "rgw/driver/sfs/sqlite/bindings/blob.h" #include "rgw/driver/sfs/sqlite/bindings/enum.h" #include "rgw/driver/sfs/sqlite/bindings/real_time.h" #include "rgw/driver/sfs/version_type.h" @@ -24,25 +25,7 @@ namespace rgw::sal::sfs::sqlite { -using BLOB = std::vector; - struct DBVersionedObject { - uint id; - uuid_d object_id; - std::string checksum; - size_t size; - ceph::real_time create_time; - ceph::real_time delete_time; - ceph::real_time commit_time; - ceph::real_time mtime; - ObjectState object_state; - std::string version_id; - std::string etag; - std::optional attrs; - VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; -}; - -struct DBOPVersionedObjectInfo { uint id; uuid_d object_id; std::string checksum; @@ -58,4 +41,77 @@ struct DBOPVersionedObjectInfo { VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; }; +using DBObjectsListItem = std::tuple< + decltype(DBObject::uuid), decltype(DBObject::name), + decltype(DBVersionedObject::version_id), + std::unique_ptr, + std::unique_ptr, + decltype(DBVersionedObject::size), decltype(DBVersionedObject::etag), + decltype(DBVersionedObject::mtime), + decltype(DBVersionedObject::delete_time), + decltype(DBVersionedObject::attrs), + decltype(DBVersionedObject::version_type), + decltype(DBVersionedObject::object_state)>; + +using DBObjectsListItems = std::vector; + +/// DBObjectsListItem helpers +inline decltype(DBObject::uuid) get_uuid(const DBObjectsListItem& item) { + return std::get<0>(item); +} + +inline decltype(DBObject::name) get_name(const DBObjectsListItem& item) { + return std::get<1>(item); +} + +inline decltype(DBVersionedObject::version_id) get_version_id( + const DBObjectsListItem& item +) { + return std::get<2>(item); +} + +inline decltype(DBVersionedObject::id) get_id(const DBObjectsListItem& item) { + return *(std::get<4>(item)); +} + +inline decltype(DBVersionedObject::size) get_size(const DBObjectsListItem& item +) { + return std::get<5>(item); +} + +inline decltype(DBVersionedObject::etag) get_etag(const DBObjectsListItem& item +) { + return std::get<6>(item); +} + +inline decltype(DBVersionedObject::mtime) get_mtime( + const DBObjectsListItem& item +) { + return std::get<7>(item); +} + +inline decltype(DBVersionedObject::delete_time) get_delete_time( + const DBObjectsListItem& item +) { + return std::get<8>(item); +} + +inline decltype(DBVersionedObject::attrs) get_attrs( + const DBObjectsListItem& item +) { + return std::get<9>(item); +} + +inline decltype(DBVersionedObject::version_type) get_version_type( + const DBObjectsListItem& item +) { + return std::get<10>(item); +} + +inline decltype(DBVersionedObject::object_state) get_object_state( + const DBObjectsListItem& item +) { + return 
std::get<11>(item); +} + } // namespace rgw::sal::sfs::sqlite diff --git a/src/rgw/driver/sfs/types.cc b/src/rgw/driver/sfs/types.cc index 525a6030032b1..03724c2a5fe5f 100644 --- a/src/rgw/driver/sfs/types.cc +++ b/src/rgw/driver/sfs/types.cc @@ -25,14 +25,20 @@ #include "rgw/rgw_sal_sfs.h" #include "rgw_sal_sfs.h" +#define dout_subsys ceph_subsys_rgw namespace rgw::sal::sfs { -Object::Object(const std::string& _name, const uuid_d& _uuid) - : name(_name), path(_uuid), deleted(false) {} +std::string generate_new_version_id(CephContext* ceph_context) { +#define OBJ_INSTANCE_LEN 32 + char buf[OBJ_INSTANCE_LEN + 1]; + gen_rand_alphanumeric_no_underscore(ceph_context, buf, OBJ_INSTANCE_LEN); + return std::string(buf); +} -Object* Object::create_for_immediate_deletion( - const sqlite::DBOPObjectInfo& object -) { +Object::Object(const rgw_obj_key& _key, const uuid_d& _uuid) + : name(_key.name), instance(_key.instance), path(_uuid), deleted(false) {} + +Object* Object::create_for_immediate_deletion(const sqlite::DBObject& object) { Object* result = new Object(object.name, object.uuid); result->deleted = true; return result; @@ -57,103 +63,79 @@ Object* Object::create_from_obj_key(const rgw_obj_key& key) { return result; } -Object* Object::create_for_multipart(const std::string& name) { - Object* result = new Object(name, UUIDPath::create().get_uuid()); +Object* Object::create_from_db_version( + const std::string& object_name, const sqlite::DBVersionedObject& version +) { + Object* result = new Object( + rgw_obj_key(object_name, version.version_id), version.object_id + ); + result->deleted = (version.version_type == VersionType::DELETE_MARKER); + result->version_id = version.id; + result->meta = { + .size = version.size, + .etag = version.etag, + .mtime = version.mtime, + .delete_at = version.delete_time}; + result->attrs = version.attrs; return result; } -Object* Object::create_commit_delete_marker( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id +Object* Object::create_from_db_version( + const std::string& object_name, const sqlite::DBObjectsListItem& version ) { - Object* result = new Object(key); - result->deleted = true; - - sqlite::DBOPObjectInfo oinfo; - oinfo.uuid = result->path.get_uuid(); - oinfo.bucket_id = bucket_id; - oinfo.name = result->name; + Object* result = new Object( + rgw_obj_key(object_name, sqlite::get_version_id(version)), + sqlite::get_uuid(version) + ); + result->deleted = + (sqlite::get_version_type(version) == VersionType::DELETE_MARKER); + result->version_id = sqlite::get_id(version); + result->meta = { + .size = sqlite::get_size(version), + .etag = sqlite::get_etag(version), + .mtime = sqlite::get_mtime(version), + .delete_at = sqlite::get_delete_time(version)}; + result->attrs = sqlite::get_attrs(version); + return result; +} - sqlite::SQLiteObjects dbobjs(store->db_conn); - dbobjs.store_object(oinfo); +Object* Object::create_for_multipart(const std::string& name) { + Object* result = new Object(name, UUIDPath::create().get_uuid()); return result; } -Object* Object::create_commit_new_object( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id, - const std::string* version_id +Object* Object::create_commit_delete_marker( + const rgw_obj_key& key, SFStore* store, const std::string& bucket_id ) { Object* result = new Object(key); + result->deleted = true; - if (version_id != nullptr) { - result->instance = *version_id; - } - - sqlite::DBOPObjectInfo oinfo; + sqlite::DBObject oinfo; oinfo.uuid = result->path.get_uuid(); 
oinfo.bucket_id = bucket_id; oinfo.name = result->name; - // TODO(https://github.com/aquarist-labs/s3gw/issues/378) make - // object and version insert a transaction sqlite::SQLiteObjects dbobjs(store->db_conn); dbobjs.store_object(oinfo); - - sqlite::DBOPVersionedObjectInfo version_info; - version_info.object_id = result->path.get_uuid(); - version_info.object_state = ObjectState::OPEN; - version_info.version_id = result->instance; - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - result->version_id = db_versioned_objs.insert_versioned_object(version_info); return result; } -Object* Object::try_create_with_last_version_fetch_from_database( - SFStore* store, const std::string& name, const std::string& bucket_id -) { - sqlite::SQLiteObjects objs(store->db_conn); - auto obj = objs.get_object(bucket_id, name); - if (!obj) { - return nullptr; - } - - sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); - auto last_version = objs_versions.get_last_versioned_object(obj->uuid); - if (!last_version.has_value()) { - return nullptr; - } - - Object* result = new Object(name, obj->uuid); - result->deleted = (last_version->object_state == ObjectState::DELETED); - result->version_id = last_version->id; - result->meta = { - .size = last_version->size, - .etag = last_version->etag, - .mtime = last_version->mtime, - .delete_at = last_version->delete_time}; - result->attrs = last_version->attrs; - result->instance = last_version->version_id; - - return result; -} - -Object* Object::try_create_fetch_from_database( +Object* Object::try_fetch_from_database( SFStore* store, const std::string& name, const std::string& bucket_id, const std::string& version_id ) { - sqlite::SQLiteObjects objs(store->db_conn); - auto obj = objs.get_object(bucket_id, name); - if (!obj) { - return nullptr; - } - sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); - auto version = objs_versions.get_versioned_object(version_id); + // if version_id is empty it will get the last version for that object + auto version = objs_versions.get_non_deleted_versioned_object( + bucket_id, name, version_id + ); if (!version.has_value()) { return nullptr; } - Object* result = new Object(name, obj->uuid); - result->deleted = (version->object_state == ObjectState::DELETED); + auto result = + new Object(rgw_obj_key(name, version->version_id), version->object_id); + result->deleted = (version->version_type == VersionType::DELETE_MARKER); result->version_id = version->id; result->meta = { .size = version->size, @@ -161,7 +143,7 @@ Object* Object::try_create_fetch_from_database( .mtime = version->mtime, .delete_at = version->delete_time}; result->attrs = version->attrs; - result->instance = version->version_id; + return result; } @@ -206,30 +188,6 @@ void Object::update_attrs(const Attrs& update) { attrs = update; } -void Object::update_commit_new_version( - SFStore* store, const std::string& new_version -) { - sqlite::DBOPVersionedObjectInfo version_info; - version_info.object_id = path.get_uuid(); - version_info.object_state = ObjectState::OPEN; - version_info.version_id = new_version; - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - version_id = db_versioned_objs.insert_versioned_object(version_info); - instance = new_version; -} - -void Object::metadata_change_version_state(SFStore* store, ObjectState state) { - sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - auto versioned_object = db_versioned_objs.get_versioned_object(version_id); - 
ceph_assert(versioned_object.has_value()); - versioned_object->object_state = state; - if (state == ObjectState::DELETED) { - deleted = true; - versioned_object->delete_time = ceph::real_clock::now(); - } - db_versioned_objs.store_versioned_object(*versioned_object); -} - void Object::metadata_flush_attrs(SFStore* store) { sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); auto versioned_object = db_versioned_objs.get_versioned_object(version_id); @@ -238,7 +196,7 @@ void Object::metadata_flush_attrs(SFStore* store) { db_versioned_objs.store_versioned_object(*versioned_object); } -void Object::metadata_finish(SFStore* store) { +void Object::metadata_finish(SFStore* store, bool versioning_enabled) { sqlite::SQLiteObjects dbobjs(store->db_conn); auto db_object = dbobjs.get_object(path.get_uuid()); ceph_assert(db_object.has_value()); @@ -246,7 +204,11 @@ void Object::metadata_finish(SFStore* store) { dbobjs.store_object(*db_object); sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - auto db_versioned_object = db_versioned_objs.get_versioned_object(version_id); + // get the object, even if it was deleted. + // 2 threads could be creating and deleting the object in parallel. + // last one finishing wins + auto db_versioned_object = + db_versioned_objs.get_versioned_object(version_id, false); ceph_assert(db_versioned_object.has_value()); // TODO calculate checksum. Is it already calculated while writing? db_versioned_object->size = meta.size; @@ -254,9 +216,16 @@ void Object::metadata_finish(SFStore* store) { db_versioned_object->delete_time = meta.delete_at; db_versioned_object->mtime = meta.mtime; db_versioned_object->object_state = ObjectState::COMMITTED; + db_versioned_object->commit_time = ceph::real_clock::now(); db_versioned_object->etag = meta.etag; db_versioned_object->attrs = get_attrs(); - db_versioned_objs.store_versioned_object(*db_versioned_object); + if (versioning_enabled) { + db_versioned_objs.store_versioned_object(*db_versioned_object); + } else { + db_versioned_objs.store_versioned_object_delete_rest_transact( + *db_versioned_object + ); + } } int Object::delete_object_version(SFStore* store) const { @@ -327,97 +296,83 @@ void MultipartUpload::abort(const DoutPrefixProvider* dpp) { objref.reset(); } -ObjectRef Bucket::get_or_create(const rgw_obj_key& key) { - const bool wants_specific_version = !key.instance.empty(); +ObjectRef Bucket::create_version(const rgw_obj_key& key) { + // even if a specific version was not asked we generate one + // non-versioned bucket objects will also have a version_id + auto version_id = key.instance; + if (version_id.empty()) { + version_id = generate_new_version_id(store->ceph_context()); + } ObjectRef result; - - auto maybe_result = Object::try_create_with_last_version_fetch_from_database( - store, key.name, info.bucket.bucket_id + sqlite::SQLiteVersionedObjects objs_versions(store->db_conn); + // create objects in a transaction. + // That way threads trying to create the same object in parallel will be + // synchronised by the database without using extra mutexes. 
+ auto new_version = objs_versions.create_new_versioned_object_transact( + info.bucket.bucket_id, key.name, version_id ); - - if (maybe_result == nullptr) { // new object - result.reset(Object::create_commit_new_object( - key, store, info.bucket.bucket_id, &key.instance - )); - return result; + if (new_version.has_value()) { + result.reset(Object::create_from_db_version(key.name, *new_version)); } - - // an object exists with at least 1 version - if (wants_specific_version && maybe_result->instance == key.instance) { - // requested version happens to be the last version - result.reset(maybe_result); - } else if (wants_specific_version && maybe_result->instance != key.instance) { - // requested version is not last - - auto specific_version_object = Object::try_create_fetch_from_database( - store, key.name, info.bucket.bucket_id, key.instance - ); - - if (specific_version_object == nullptr) { - // requested version does not exist -> create it from last - // version object - result.reset(maybe_result); - result->update_commit_new_version(store, key.instance); - } else { - // requested version does exist -> return it - result.reset(specific_version_object); - } - } else { - // no specific version requested - return last - result.reset(maybe_result); - } - - ceph_assert(result); return result; } -ObjectRef Bucket::get(const std::string& name) { - auto maybe_result = Object::try_create_with_last_version_fetch_from_database( - store, name, info.bucket.bucket_id +ObjectRef Bucket::get(const rgw_obj_key& key) { + auto maybe_result = Object::try_fetch_from_database( + store, key.name, info.bucket.bucket_id, key.instance ); if (maybe_result == nullptr) { throw UnknownObjectException(); } + return std::shared_ptr(maybe_result); } std::vector Bucket::get_all() { std::vector result; - sqlite::SQLiteObjects dbobjs(store->db_conn); - for (const auto& db_obj : dbobjs.get_objects(info.bucket.bucket_id)) { - result.push_back(get(db_obj.name)); + sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); + // get the list of objects and its last version (filters deleted versions) + // if an object has all versions deleted it is also filtered + auto objects = + db_versioned_objs.list_last_versioned_objects(info.bucket.bucket_id); + for (const auto& db_obj : objects) { + if (sqlite::get_object_state(db_obj) == ObjectState::COMMITTED) { + result.push_back(std::shared_ptr( + Object::create_from_db_version(sqlite::get_name(db_obj), db_obj) + )); + } } return result; } -void Bucket::delete_object(ObjectRef objref, const rgw_obj_key& key) { +void Bucket::delete_object( + ObjectRef objref, const rgw_obj_key& key, bool versioned_bucket, + std::string& delete_marker_version_id +) { + delete_marker_version_id = ""; sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); - // get the last available version to make a copy changing the object state to DELETED - auto last_version = - db_versioned_objs.get_last_versioned_object(objref->path.get_uuid()); - ceph_assert(last_version.has_value()); - if (last_version->object_state == ObjectState::DELETED) { - _undelete_object(objref, key, db_versioned_objs, *last_version); - } else { - last_version->object_state = ObjectState::DELETED; - last_version->delete_time = ceph::real_clock::now(); - if (last_version->version_id != "") { -// generate a new version id -#define OBJ_INSTANCE_LEN 32 - char buf[OBJ_INSTANCE_LEN + 1]; - gen_rand_alphanumeric_no_underscore( - store->ceph_context(), buf, OBJ_INSTANCE_LEN - ); - last_version->version_id = std::string(buf); - 
objref->instance = last_version->version_id; - // insert a new deleted version - db_versioned_objs.insert_versioned_object(*last_version); + if (!versioned_bucket) { + _delete_object_non_versioned(objref, key, db_versioned_objs); + } else { + if (key.instance.empty()) { + delete_marker_version_id = + _add_delete_marker(objref, key, db_versioned_objs); } else { - db_versioned_objs.store_versioned_object(*last_version); + // we have a version id (instance) + auto version_to_delete = + db_versioned_objs.get_versioned_object(key.instance); + if (version_to_delete.has_value()) { + if (version_to_delete->version_type == VersionType::DELETE_MARKER) { + _undelete_object(objref, key, db_versioned_objs, *version_to_delete); + } else { + _delete_object_version( + objref, key, db_versioned_objs, *version_to_delete + ); + } + } } - objref->deleted = true; } } @@ -427,17 +382,13 @@ std::string Bucket::create_non_existing_object_delete_marker( auto obj = std::shared_ptr( Object::create_commit_delete_marker(key, store, info.bucket.bucket_id) ); -// create the delete marker -// generate a new version id -#define OBJ_INSTANCE_LEN 32 - char buf[OBJ_INSTANCE_LEN + 1]; - gen_rand_alphanumeric_no_underscore( - store->ceph_context(), buf, OBJ_INSTANCE_LEN - ); - auto new_version_id = std::string(buf); - sqlite::DBOPVersionedObjectInfo version_info; + // create the delete marker + // generate a new version id + auto new_version_id = generate_new_version_id(store->ceph_context()); + sqlite::DBVersionedObject version_info; version_info.object_id = obj->path.get_uuid(); - version_info.object_state = ObjectState::DELETED; + version_info.object_state = ObjectState::COMMITTED; + version_info.version_type = VersionType::DELETE_MARKER; version_info.version_id = new_version_id; version_info.delete_time = ceph::real_clock::now(); sqlite::SQLiteVersionedObjects db_versioned_objs(store->db_conn); @@ -449,18 +400,16 @@ std::string Bucket::create_non_existing_object_delete_marker( void Bucket::_undelete_object( ObjectRef objref, const rgw_obj_key& key, sqlite::SQLiteVersionedObjects& sqlite_versioned_objects, - sqlite::DBOPVersionedObjectInfo& last_version + sqlite::DBVersionedObject& last_version ) { if (!last_version.version_id.empty()) { // versioned object // only remove the delete marker if the requested version id is the last one if (!key.instance.empty() && (key.instance == last_version.version_id)) { - // remove the delete marker - sqlite_versioned_objects.remove_versioned_object(last_version.id); - // get the previous id + // remove the delete marker and get the previous version in a transaction auto previous_version = - sqlite_versioned_objects.get_last_versioned_object( - objref->path.get_uuid() + sqlite_versioned_objects.delete_version_and_get_previous_transact( + last_version.id ); if (previous_version.has_value()) { objref->instance = previous_version->version_id; @@ -469,14 +418,46 @@ void Bucket::_undelete_object( // all versions were removed for this object } } - } else { - // non-versioned object - // just remove the delete marker in the version and store - last_version.object_state = ObjectState::COMMITTED; - last_version.delete_time = ceph::real_clock::now(); - sqlite_versioned_objects.store_versioned_object(last_version); - objref->deleted = false; } } +void Bucket::_delete_object_non_versioned( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs +) { + auto version_to_delete = + db_versioned_objs.get_last_versioned_object(objref->path.get_uuid()); + 
_delete_object_version(objref, key, db_versioned_objs, *version_to_delete); +} + +void Bucket::_delete_object_version( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs, + sqlite::DBVersionedObject& version +) { + auto now = ceph::real_clock::now(); + version.delete_time = now; + version.mtime = now; + version.object_state = ObjectState::DELETED; + db_versioned_objs.store_versioned_object(version); + objref->deleted = true; +} + +std::string Bucket::_add_delete_marker( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& db_versioned_objs +) { + std::string delete_marker_id = generate_new_version_id(store->ceph_context()); + bool added; + auto version_id = db_versioned_objs.add_delete_marker_transact( + objref->path.get_uuid(), delete_marker_id, added + ); + if (added) { + objref->deleted = true; + objref->instance = delete_marker_id; + objref->version_id = version_id; + return delete_marker_id; + } + return ""; +} } // namespace rgw::sal::sfs diff --git a/src/rgw/driver/sfs/types.h b/src/rgw/driver/sfs/types.h index b3cdfcfdc6b99..f450ba802ea63 100644 --- a/src/rgw/driver/sfs/types.h +++ b/src/rgw/driver/sfs/types.h @@ -58,7 +58,7 @@ class Object { std::map attrs; protected: - Object(const std::string& _name, const uuid_d& _uuid); + Object(const rgw_obj_key& _key, const uuid_d& _uuid); Object(const rgw_obj_key& key) : name(key.name), @@ -66,29 +66,30 @@ class Object { path(UUIDPath::create()), deleted(false) {} - public: - static Object* create_for_immediate_deletion( - const sqlite::DBOPObjectInfo& object + static Object* _get_object( + SFStore* store, const std::string& bucket_id, const rgw_obj_key& key ); + + public: + static Object* create_for_immediate_deletion(const sqlite::DBObject& object); static Object* create_for_query( const std::string& name, const uuid_d& uuid, bool deleted, uint version_id ); static Object* create_for_testing(const std::string& name); static Object* create_from_obj_key(const rgw_obj_key& key); + static Object* create_from_db_version( + const std::string& object_name, const sqlite::DBVersionedObject& version + ); + static Object* create_from_db_version( + const std::string& object_name, const sqlite::DBObjectsListItem& version + ); static Object* create_for_multipart(const std::string& name); static Object* create_commit_delete_marker( const rgw_obj_key& key, SFStore* store, const std::string& bucket_id ); - static Object* create_commit_new_object( - const rgw_obj_key& key, SFStore* store, const std::string& bucket_id, - const std::string* version_id - ); - static Object* try_create_with_last_version_fetch_from_database( - SFStore* store, const std::string& name, const std::string& bucket_id - ); - static Object* try_create_fetch_from_database( + static Object* try_fetch_from_database( SFStore* store, const std::string& name, const std::string& bucket_id, const std::string& version_id ); @@ -105,20 +106,11 @@ class Object { std::filesystem::path get_storage_path() const; - /// Update version and commit to database - void update_commit_new_version(SFStore* store, const std::string& version_id); - - /// Change obj version state. - // Use this for example to update objs to in flight states like - // WRITING. 
- // Special case: DELETED sets this to deleted - // and commits a deletion time - void metadata_change_version_state(SFStore* store, ObjectState state); - /// Commit all object state to database // Including meta and attrs // Sets obj version state to COMMITTED - void metadata_finish(SFStore* store); + // For unversioned buckets it set the other versions state to DELETED + void metadata_finish(SFStore* store, bool versioning_enabled); /// Commit attrs to database void metadata_flush_attrs(SFStore* store); @@ -313,7 +305,23 @@ class Bucket { void _undelete_object( ObjectRef objref, const rgw_obj_key& key, sqlite::SQLiteVersionedObjects& sqlite_versioned_objects, - sqlite::DBOPVersionedObjectInfo& last_version + sqlite::DBVersionedObject& last_version + ); + + void _delete_object_non_versioned( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects + ); + + void _delete_object_version( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects, + sqlite::DBVersionedObject& version + ); + + std::string _add_delete_marker( + ObjectRef objref, const rgw_obj_key& key, + sqlite::SQLiteVersionedObjects& sqlite_versioned_objects ); public: @@ -354,17 +362,21 @@ class Bucket { uint32_t get_flags() const { return info.flags; } public: - /// Return object for key. Do everything necessary to retrieve or - // create this object including object version. - ObjectRef get_or_create(const rgw_obj_key& key); + /// Create object version for key + ObjectRef create_version(const rgw_obj_key& key); - /// Get existing object by name. Throws if it doesn't exist. - ObjectRef get(const std::string& name); - /// Get copy of all objects + /// Get existing object by key. Throws if it doesn't exist. + ObjectRef get(const rgw_obj_key& key); + /// Get copy of all objects that are committed and not deleted std::vector get_all(); /// S3 delete object operation: delete version or create tombstone. - void delete_object(ObjectRef objref, const rgw_obj_key& key); + /// If a delete marker was added, it returns the new version id generated for + /// it + void delete_object( + ObjectRef objref, const rgw_obj_key& key, bool versioned_bucket, + std::string& delete_marker_version_id + ); /// Delete a non-existing object. Creates object with toumbstone // version in database. 
@@ -400,7 +412,7 @@ class Bucket { mp->finish(); multiparts.erase(it); - objref->metadata_finish(store); + objref->metadata_finish(store, get_info().versioning_enabled()); } std::string gen_multipart_upload_id() { diff --git a/src/rgw/driver/sfs/user.cc b/src/rgw/driver/sfs/user.cc index 04c67307769e8..2a328803ed884 100644 --- a/src/rgw/driver/sfs/user.cc +++ b/src/rgw/driver/sfs/user.cc @@ -159,7 +159,7 @@ int SFSUser::create_bucket( if (store->bucket_exists(b)) { *existed = true; - return -EEXIST; + return 0; } *existed = false; diff --git a/src/rgw/driver/sfs/writer.cc b/src/rgw/driver/sfs/writer.cc index 3801f41ecdac7..abe199479343e 100644 --- a/src/rgw/driver/sfs/writer.cc +++ b/src/rgw/driver/sfs/writer.cc @@ -190,7 +190,7 @@ int SFSAtomicWriter::prepare(optional_yield y) { } try { - objref = bucketref->get_or_create(obj.get_key()); + objref = bucketref->create_version(obj.get_key()); } catch (const std::system_error& e) { lsfs_dout(dpp, -1) << fmt::format( @@ -325,7 +325,7 @@ int SFSAtomicWriter::complete( *mtime = now; } try { - objref->metadata_finish(store); + objref->metadata_finish(store, bucketref->get_info().versioning_enabled()); } catch (const std::system_error& e) { lsfs_dout(dpp, -1) << fmt::format( "failed to update db object {}: {}. " diff --git a/src/test/rgw/sfs/compatibility_test_cases/columns_added.h b/src/test/rgw/sfs/compatibility_test_cases/columns_added.h index cef66877f9bb4..c9d989e1d4bfb 100644 --- a/src/test/rgw/sfs/compatibility_test_cases/columns_added.h +++ b/src/test/rgw/sfs/compatibility_test_cases/columns_added.h @@ -76,7 +76,7 @@ struct DBTestBucket { // bool deleted; }; -struct DBOPTestObjectInfo { +struct DBTestObject { uuid_d uuid; std::string bucket_id; std::string name; @@ -94,7 +94,7 @@ struct DBTestVersionedObject { rgw::sal::sfs::ObjectState object_state; std::string version_id; std::string etag; - std::optional attrs; + rgw::sal::Attrs attrs; rgw::sal::sfs::VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; }; @@ -114,6 +114,19 @@ struct DBOPTestLCEntry { inline auto _make_test_storage(const std::string& path) { return sqlite_orm::make_storage( path, + sqlite_orm::make_unique_index( + "versioned_object_objid_vid_unique", + &DBTestVersionedObject::object_id, &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index("bucket_ownerid_idx", &DBTestBucket::owner_id), + sqlite_orm::make_index("bucket_name_idx", &DBTestBucket::bucket_name), + sqlite_orm::make_index("objects_bucketid_idx", &DBTestObject::bucket_id), + sqlite_orm::make_index( + "vobjs_versionid_idx", &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index( + "vobjs_object_id_idx", &DBTestVersionedObject::object_id + ), sqlite_orm::make_table( std::string(USERS_TABLE), sqlite_orm::make_column( @@ -186,11 +199,11 @@ inline auto _make_test_storage(const std::string& path) { sqlite_orm::make_table( std::string(OBJECTS_TABLE), sqlite_orm::make_column( - "uuid", &DBOPTestObjectInfo::uuid, sqlite_orm::primary_key() + "uuid", &DBTestObject::uuid, sqlite_orm::primary_key() ), - sqlite_orm::make_column("bucket_id", &DBOPTestObjectInfo::bucket_id), - sqlite_orm::make_column("name", &DBOPTestObjectInfo::name), - sqlite_orm::foreign_key(&DBOPTestObjectInfo::bucket_id) + sqlite_orm::make_column("bucket_id", &DBTestObject::bucket_id), + sqlite_orm::make_column("name", &DBTestObject::name), + sqlite_orm::foreign_key(&DBTestObject::bucket_id) .references(&DBTestBucket::bucket_id) ), sqlite_orm::make_table( @@ -226,7 +239,7 @@ inline auto _make_test_storage(const std::string& 
path) { "version_type", &DBTestVersionedObject::version_type ), sqlite_orm::foreign_key(&DBTestVersionedObject::object_id) - .references(&DBOPTestObjectInfo::uuid) + .references(&DBTestObject::uuid) ), sqlite_orm::make_table( std::string(ACCESS_KEYS), @@ -266,7 +279,7 @@ struct TestDB { TestStorage storage; DBTestUser test_user; DBTestBucket test_bucket; - DBOPTestObjectInfo test_object; + DBTestObject test_object; DBTestVersionedObject test_version; explicit TestDB(const std::string& db_full_path) @@ -300,8 +313,8 @@ struct TestDB { return bucket; } - DBOPTestObjectInfo get_test_object() { - DBOPTestObjectInfo object; + DBTestObject get_test_object() { + DBTestObject object; uuid_d uuid_val; uuid_val.parse("9f06d9d3-307f-4c98-865b-cd3b087acc4f"); object.uuid = uuid_val; @@ -335,9 +348,7 @@ struct TestDB { return true; } - bool compareObject( - const DBOPTestObjectInfo& obj1, const DBOPTestObjectInfo& obj2 - ) { + bool compareObject(const DBTestObject& obj1, const DBTestObject& obj2) { if (obj1.uuid != obj2.uuid) return false; if (obj1.bucket_id != obj2.bucket_id) return false; if (obj1.name != obj2.name) return false; @@ -379,7 +390,7 @@ struct TestDB { auto users = storage.get_all(); if (users.size() != 1) return false; - auto objs = storage.get_all(); + auto objs = storage.get_all(); if (objs.size() != 1) return false; auto versions = storage.get_all(); @@ -393,7 +404,7 @@ struct TestDB { if (!user) return false; if (!compareUser(*user, test_user)) return false; - auto object = storage.get_pointer(test_object.uuid); + auto object = storage.get_pointer(test_object.uuid); if (!object) return false; if (!compareObject(*object, test_object)) return false; diff --git a/src/test/rgw/sfs/compatibility_test_cases/columns_deleted.h b/src/test/rgw/sfs/compatibility_test_cases/columns_deleted.h index 579bd85f9b2c0..60605771ef4f4 100644 --- a/src/test/rgw/sfs/compatibility_test_cases/columns_deleted.h +++ b/src/test/rgw/sfs/compatibility_test_cases/columns_deleted.h @@ -72,7 +72,7 @@ struct DBTestBucket { bool deleted; }; -struct DBOPTestObjectInfo { +struct DBTestObject { uuid_d uuid; std::string bucket_id; std::string name; @@ -91,7 +91,7 @@ struct DBTestVersionedObject { rgw::sal::sfs::ObjectState object_state; std::string version_id; std::string etag; - std::optional attrs; + rgw::sal::Attrs attrs; rgw::sal::sfs::VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; std::string extra; // extra column that is considered as deleted }; @@ -112,6 +112,19 @@ struct DBOPTestLCEntry { inline auto _make_test_storage(const std::string& path) { return sqlite_orm::make_storage( path, + sqlite_orm::make_unique_index( + "versioned_object_objid_vid_unique", + &DBTestVersionedObject::object_id, &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index("bucket_ownerid_idx", &DBTestBucket::owner_id), + sqlite_orm::make_index("bucket_name_idx", &DBTestBucket::bucket_name), + sqlite_orm::make_index("objects_bucketid_idx", &DBTestObject::bucket_id), + sqlite_orm::make_index( + "vobjs_versionid_idx", &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index( + "vobjs_object_id_idx", &DBTestVersionedObject::object_id + ), sqlite_orm::make_table( std::string(USERS_TABLE), sqlite_orm::make_column( @@ -183,12 +196,12 @@ inline auto _make_test_storage(const std::string& path) { sqlite_orm::make_table( std::string(OBJECTS_TABLE), sqlite_orm::make_column( - "uuid", &DBOPTestObjectInfo::uuid, sqlite_orm::primary_key() + "uuid", &DBTestObject::uuid, sqlite_orm::primary_key() ), - 
sqlite_orm::make_column("bucket_id", &DBOPTestObjectInfo::bucket_id), - sqlite_orm::make_column("name", &DBOPTestObjectInfo::name), - sqlite_orm::make_column("extra", &DBOPTestObjectInfo::extra), - sqlite_orm::foreign_key(&DBOPTestObjectInfo::bucket_id) + sqlite_orm::make_column("bucket_id", &DBTestObject::bucket_id), + sqlite_orm::make_column("name", &DBTestObject::name), + sqlite_orm::make_column("extra", &DBTestObject::extra), + sqlite_orm::foreign_key(&DBTestObject::bucket_id) .references(&DBTestBucket::bucket_id) ), sqlite_orm::make_table( @@ -225,7 +238,7 @@ inline auto _make_test_storage(const std::string& path) { ), sqlite_orm::make_column("extra", &DBTestVersionedObject::extra), sqlite_orm::foreign_key(&DBTestVersionedObject::object_id) - .references(&DBOPTestObjectInfo::uuid) + .references(&DBTestObject::uuid) ), sqlite_orm::make_table( std::string(ACCESS_KEYS), @@ -265,7 +278,7 @@ struct TestDB { TestStorage storage; DBTestUser test_user; DBTestBucket test_bucket; - DBOPTestObjectInfo test_object; + DBTestObject test_object; DBTestVersionedObject test_version; explicit TestDB(const std::string& db_full_path) @@ -300,8 +313,8 @@ struct TestDB { return bucket; } - DBOPTestObjectInfo get_test_object() { - DBOPTestObjectInfo object; + DBTestObject get_test_object() { + DBTestObject object; uuid_d uuid_val; uuid_val.parse("9f06d9d3-307f-4c98-865b-cd3b087acc4f"); object.uuid = uuid_val; @@ -337,9 +350,7 @@ struct TestDB { return true; } - bool compareObject( - const DBOPTestObjectInfo& obj1, const DBOPTestObjectInfo& obj2 - ) { + bool compareObject(const DBTestObject& obj1, const DBTestObject& obj2) { if (obj1.uuid != obj2.uuid) return false; if (obj1.bucket_id != obj2.bucket_id) return false; if (obj1.name != obj2.name) return false; @@ -383,7 +394,7 @@ struct TestDB { auto users = storage.get_all(); if (users.size() != 1) return false; - auto objs = storage.get_all(); + auto objs = storage.get_all(); if (objs.size() != 1) return false; auto versions = storage.get_all(); @@ -397,7 +408,7 @@ struct TestDB { if (!user) return false; if (!compareUser(*user, test_user)) return false; - auto object = storage.get_pointer(test_object.uuid); + auto object = storage.get_pointer(test_object.uuid); if (!object) return false; if (!compareObject(*object, test_object)) return false; diff --git a/src/test/rgw/sfs/compatibility_test_cases/optional_columns_added.h b/src/test/rgw/sfs/compatibility_test_cases/optional_columns_added.h index 1842a4638cb04..0ee24f961944f 100644 --- a/src/test/rgw/sfs/compatibility_test_cases/optional_columns_added.h +++ b/src/test/rgw/sfs/compatibility_test_cases/optional_columns_added.h @@ -73,7 +73,7 @@ struct DBTestBucket { bool deleted; }; -struct DBOPTestObjectInfo { +struct DBTestObject { uuid_d uuid; std::string bucket_id; std::string name; @@ -91,7 +91,7 @@ struct DBTestVersionedObject { rgw::sal::sfs::ObjectState object_state; std::string version_id; std::string etag; - std::optional attrs; + rgw::sal::Attrs attrs; rgw::sal::sfs::VersionType version_type = rgw::sal::sfs::VersionType::REGULAR; }; @@ -111,6 +111,19 @@ struct DBOPTestLCEntry { inline auto _make_test_storage(const std::string& path) { return sqlite_orm::make_storage( path, + sqlite_orm::make_unique_index( + "versioned_object_objid_vid_unique", + &DBTestVersionedObject::object_id, &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index("bucket_ownerid_idx", &DBTestBucket::owner_id), + sqlite_orm::make_index("bucket_name_idx", &DBTestBucket::bucket_name), + 
sqlite_orm::make_index("objects_bucketid_idx", &DBTestObject::bucket_id), + sqlite_orm::make_index( + "vobjs_versionid_idx", &DBTestVersionedObject::version_id + ), + sqlite_orm::make_index( + "vobjs_object_id_idx", &DBTestVersionedObject::object_id + ), sqlite_orm::make_table( std::string(USERS_TABLE), sqlite_orm::make_column( @@ -158,7 +171,6 @@ inline auto _make_test_storage(const std::string& path) { "bucket_id", &DBTestBucket::bucket_id, sqlite_orm::primary_key() ), sqlite_orm::make_column("bucket_name", &DBTestBucket::bucket_name), - // these 2 columns will be added // sqlite_orm::make_column("tenant", &DBTestBucket::tenant), // sqlite_orm::make_column("marker", &DBTestBucket::marker), sqlite_orm::make_column("owner_id", &DBTestBucket::owner_id), @@ -183,11 +195,11 @@ inline auto _make_test_storage(const std::string& path) { sqlite_orm::make_table( std::string(OBJECTS_TABLE), sqlite_orm::make_column( - "uuid", &DBOPTestObjectInfo::uuid, sqlite_orm::primary_key() + "uuid", &DBTestObject::uuid, sqlite_orm::primary_key() ), - sqlite_orm::make_column("bucket_id", &DBOPTestObjectInfo::bucket_id), - sqlite_orm::make_column("name", &DBOPTestObjectInfo::name), - sqlite_orm::foreign_key(&DBOPTestObjectInfo::bucket_id) + sqlite_orm::make_column("bucket_id", &DBTestObject::bucket_id), + sqlite_orm::make_column("name", &DBTestObject::name), + sqlite_orm::foreign_key(&DBTestObject::bucket_id) .references(&DBTestBucket::bucket_id) ), sqlite_orm::make_table( @@ -223,7 +235,7 @@ inline auto _make_test_storage(const std::string& path) { "version_type", &DBTestVersionedObject::version_type ), sqlite_orm::foreign_key(&DBTestVersionedObject::object_id) - .references(&DBOPTestObjectInfo::uuid) + .references(&DBTestObject::uuid) ), sqlite_orm::make_table( std::string(ACCESS_KEYS), @@ -263,7 +275,7 @@ struct TestDB { TestStorage storage; DBTestUser test_user; DBTestBucket test_bucket; - DBOPTestObjectInfo test_object; + DBTestObject test_object; DBTestVersionedObject test_version; explicit TestDB(const std::string& db_full_path) @@ -297,8 +309,8 @@ struct TestDB { return bucket; } - DBOPTestObjectInfo get_test_object() { - DBOPTestObjectInfo object; + DBTestObject get_test_object() { + DBTestObject object; uuid_d uuid_val; uuid_val.parse("9f06d9d3-307f-4c98-865b-cd3b087acc4f"); object.uuid = uuid_val; @@ -332,9 +344,7 @@ struct TestDB { return true; } - bool compareObject( - const DBOPTestObjectInfo& obj1, const DBOPTestObjectInfo& obj2 - ) { + bool compareObject(const DBTestObject& obj1, const DBTestObject& obj2) { if (obj1.uuid != obj2.uuid) return false; if (obj1.bucket_id != obj2.bucket_id) return false; if (obj1.name != obj2.name) return false; @@ -376,7 +386,7 @@ struct TestDB { auto users = storage.get_all(); if (users.size() != 1) return false; - auto objs = storage.get_all(); + auto objs = storage.get_all(); if (objs.size() != 1) return false; auto versions = storage.get_all(); @@ -390,7 +400,7 @@ struct TestDB { if (!user) return false; if (!compareUser(*user, test_user)) return false; - auto object = storage.get_pointer(test_object.uuid); + auto object = storage.get_pointer(test_object.uuid); if (!object) return false; if (!compareObject(*object, test_object)) return false; diff --git a/src/test/rgw/sfs/test_rgw_sfs_gc.cc b/src/test/rgw/sfs/test_rgw_sfs_gc.cc index 2f06e6ac27255..fb639714ad388 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_gc.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_gc.cc @@ -1,24 +1,23 @@ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 
smarttab -#include "common/ceph_context.h" -#include "rgw/driver/sfs/sqlite/dbconn.h" -#include "rgw/driver/sfs/sqlite/sqlite_buckets.h" -#include "rgw/driver/sfs/sqlite/buckets/bucket_conversions.h" -#include "rgw/driver/sfs/sqlite/sqlite_users.h" - -#include "rgw/driver/sfs/uuid_path.h" -#include "rgw/driver/sfs/sfs_gc.h" - -#include "rgw/rgw_sal_sfs.h" +#include #include #include -#include #include #include #include +#include "common/ceph_context.h" +#include "rgw/driver/sfs/sfs_gc.h" +#include "rgw/driver/sfs/sqlite/buckets/bucket_conversions.h" +#include "rgw/driver/sfs/sqlite/dbconn.h" +#include "rgw/driver/sfs/sqlite/sqlite_buckets.h" +#include "rgw/driver/sfs/sqlite/sqlite_users.h" +#include "rgw/driver/sfs/uuid_path.h" +#include "rgw/rgw_sal_sfs.h" + using namespace rgw::sal::sfs::sqlite; using namespace std::this_thread; using namespace std::chrono_literals; @@ -29,7 +28,7 @@ const static std::string TEST_DIR = "rgw_sfs_tests"; const static std::string TEST_USERNAME = "test_user"; class TestSFSGC : public ::testing::Test { -protected: + protected: void SetUp() override { fs::current_path(fs::temp_directory_path()); fs::create_directory(TEST_DIR); @@ -45,24 +44,26 @@ class TestSFSGC : public ::testing::Test { return test_dir.string(); } - fs::path getDBFullPath(const std::string & base_dir) const { + fs::path getDBFullPath(const std::string& base_dir) const { auto db_full_name = "s3gw.db"; - auto db_full_path = fs::path(base_dir) / db_full_name; + auto db_full_path = fs::path(base_dir) / db_full_name; return db_full_path; } - fs::path getDBFullPath() const { - return getDBFullPath(getTestDir()); - } + fs::path getDBFullPath() const { return getDBFullPath(getTestDir()); } std::size_t getStoreDataFileCount() { using std::filesystem::recursive_directory_iterator; return std::count_if( recursive_directory_iterator(getTestDir()), - recursive_directory_iterator{}, [](const std::filesystem::path& path) { - return (std::filesystem::is_regular_file(path) && - !path.filename().string().starts_with("s3gw.db")); - }); + recursive_directory_iterator{}, + [](const std::filesystem::path& path) { + return ( + std::filesystem::is_regular_file(path) && + !path.filename().string().starts_with("s3gw.db") + ); + } + ); } std::size_t databaseFileExists() { @@ -76,30 +77,28 @@ class TestSFSGC : public ::testing::Test { users.store_user(user); } - void storeRandomObjectVersion(const std::shared_ptr & object) { + void storeRandomObjectVersion( + const std::shared_ptr& object + ) { std::filesystem::path object_path = getTestDir() / object->get_storage_path(); std::filesystem::create_directories(object_path.parent_path()); - auto mode = \ - std::ofstream::binary | \ - std::ofstream::out | \ - std::ofstream::app; + auto mode = std::ofstream::binary | std::ofstream::out | std::ofstream::app; std::ofstream ofs(object_path, mode); std::random_device rd; std::mt19937 gen(rd()); std::uniform_int_distribution dist(1, 4096); auto file_size = dist(gen); - while(file_size) { - ofs << dist(gen); - --file_size; + while (file_size) { + ofs << dist(gen); + --file_size; } ofs.flush(); ofs.close(); } - void createTestBucket(const std::string & bucket_id, - DBConnRef conn) { + void createTestBucket(const std::string& bucket_id, DBConnRef conn) { SQLiteBuckets db_buckets(conn); DBOPBucketInfo bucket; bucket.binfo.bucket.name = bucket_id + "_name"; @@ -109,21 +108,20 @@ class TestSFSGC : public ::testing::Test { db_buckets.store_bucket(bucket); } - bool bucketExists(const std::string & bucket_id, - DBConnRef conn) { + bool 
     SQLiteBuckets db_buckets(conn);
     auto bucket = db_buckets.get_bucket(bucket_id);
     return bucket.has_value();
   }

   std::shared_ptr<rgw::sal::sfs::Object> createTestObject(
-      const std::string & bucket_id,
-      const std::string & name,
-      DBConnRef conn) {
+      const std::string& bucket_id, const std::string& name, DBConnRef conn
+  ) {
     auto object = std::shared_ptr<rgw::sal::sfs::Object>(
-        rgw::sal::sfs::Object::create_for_testing(name));
+        rgw::sal::sfs::Object::create_for_testing(name)
+    );
     SQLiteObjects db_objects(conn);
-    DBOPObjectInfo db_object;
+    DBObject db_object;
     db_object.uuid = object->path.get_uuid();
     db_object.name = name;
     db_object.bucket_id = bucket_id;
@@ -131,13 +129,14 @@ class TestSFSGC : public ::testing::Test {
     return object;
   }

-  void createTestObjectVersion(std::shared_ptr<rgw::sal::sfs::Object> & object,
-                               uint version,
-                               DBConnRef conn) {
+  void createTestObjectVersion(
+      std::shared_ptr<rgw::sal::sfs::Object>& object, uint version,
+      DBConnRef conn
+  ) {
     object->version_id = version;
     storeRandomObjectVersion(object);
     SQLiteVersionedObjects db_versioned_objects(conn);
-    DBOPVersionedObjectInfo db_version;
+    DBVersionedObject db_version;
     db_version.id = version;
     db_version.object_id = object->path.get_uuid();
     db_version.object_state = rgw::sal::sfs::ObjectState::COMMITTED;
@@ -145,12 +144,13 @@ class TestSFSGC : public ::testing::Test {
     db_versioned_objects.insert_versioned_object(db_version);
   }

-  void deleteTestObject(std::shared_ptr<rgw::sal::sfs::Object> & object,
-                        DBConnRef conn) {
+  void deleteTestObject(
+      std::shared_ptr<rgw::sal::sfs::Object>& object, DBConnRef conn
+  ) {
     // delete mark the object
     SQLiteVersionedObjects db_versioned_objects(conn);
-    auto last_version = db_versioned_objects.get_last_versioned_object(
-        object->path.get_uuid());
+    auto last_version =
+        db_versioned_objects.get_last_versioned_object(object->path.get_uuid());
     ASSERT_TRUE(last_version.has_value());
     last_version->object_state = rgw::sal::sfs::ObjectState::DELETED;
     last_version->version_id.append("_next_");
@@ -158,26 +158,29 @@ class TestSFSGC : public ::testing::Test {
     db_versioned_objects.insert_versioned_object(*last_version);
   }

-  void deleteTestBucket(const std::string & bucket_id, DBConnRef conn) {
+  void deleteTestBucket(const std::string& bucket_id, DBConnRef conn) {
     SQLiteBuckets db_buckets(conn);
     auto bucket = db_buckets.get_bucket(bucket_id);
     ASSERT_TRUE(bucket.has_value());
     SQLiteObjects db_objects(conn);
     auto objects = db_objects.get_objects(bucket_id);
-    for (auto & object: objects) {
-      auto objptr = std::shared_ptr<rgw::sal::sfs::Object>(
-          rgw::sal::sfs::Object::create_for_immediate_deletion(object));
-      deleteTestObject(objptr, conn);
+    for (auto& object : objects) {
+      auto objptr = std::shared_ptr<rgw::sal::sfs::Object>(
+          rgw::sal::sfs::Object::create_for_immediate_deletion(object)
+      );
+      deleteTestObject(objptr, conn);
     }
     bucket->deleted = true;
     db_buckets.store_bucket(*bucket);
   }

-  size_t getNumberObjectsForBucket(const std::string & bucket_id, DBConnRef conn) {
-    SQLiteObjects db_objs(conn);
-    auto objects = db_objs.get_objects(bucket_id);
-    return objects.size();
+  size_t getNumberObjectsForBucket(
+      const std::string& bucket_id, DBConnRef conn
+  ) {
+    SQLiteObjects db_objs(conn);
+    auto objects = db_objs.get_objects(bucket_id);
+    return objects.size();
   }
 };

@@ -227,7 +230,7 @@ TEST_F(TestSFSGC, TestDeletedBuckets) {
   // nothing should be removed permanently yet
   EXPECT_EQ(getStoreDataFileCount(), 5);
   EXPECT_TRUE(databaseFileExists());
-  versions = db_versioned_objs.get_versioned_object_ids();
+  versions = db_versioned_objs.get_versioned_object_ids(false);
   // we should have 1 more version
(delete marker for 1 object) EXPECT_EQ(versions.size(), 6); diff --git a/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc b/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc index f8e6fdbda2c99..b29ad281ee3c0 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sfs_bucket.cc @@ -63,7 +63,7 @@ class TestSFSBucket : public ::testing::Test { rgw::sal::sfs::Object::create_for_testing(name) ); SQLiteObjects db_objects(conn); - DBOPObjectInfo db_object; + DBObject db_object; db_object.uuid = object->path.get_uuid(); db_object.name = name; db_object.bucket_id = bucket_id; @@ -72,7 +72,8 @@ class TestSFSBucket : public ::testing::Test { } void createTestBucket( - const std::string& bucket_id, const std::string& user_id, DBConnRef conn + const std::string& bucket_id, const std::string& user_id, DBConnRef conn, + bool versioned = false ) { SQLiteBuckets db_buckets(conn); DBOPBucketInfo bucket; @@ -80,6 +81,9 @@ class TestSFSBucket : public ::testing::Test { bucket.binfo.bucket.bucket_id = bucket_id; bucket.binfo.owner.id = user_id; bucket.deleted = false; + if (versioned) { + bucket.binfo.flags |= BUCKET_VERSIONED; + } db_buckets.store_bucket(bucket); } @@ -89,7 +93,7 @@ class TestSFSBucket : public ::testing::Test { ) { object->version_id = version; SQLiteVersionedObjects db_versioned_objects(conn); - DBOPVersionedObjectInfo db_version; + DBVersionedObject db_version; db_version.id = version; db_version.object_id = object->path.get_uuid(); db_version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; @@ -625,7 +629,7 @@ TEST_F(TestSFSBucket, TestListObjectsAndVersions) { createUser("test_user", store->db_conn); // create test bucket - createTestBucket("test_bucket", "test_user", store->db_conn); + createTestBucket("test_bucket", "test_user", store->db_conn, true); // create a few objects in test_bucket with a few versions uint version_id = 1; @@ -1051,8 +1055,8 @@ TEST_F(TestSFSBucket, TestListObjectVersionsDelimiter) { // create the test user createUser("test_user", store->db_conn); - // create test bucket - createTestBucket("test_bucket", "test_user", store->db_conn); + // create test bucket versioned + createTestBucket("test_bucket", "test_user", store->db_conn, true); // create the following objects: // directory/ diff --git a/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc b/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc index 05f5476ce7731..02a9f7ea96835 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sqlite_objects.cc @@ -63,28 +63,58 @@ class TestSFSSQLiteObjects : public ::testing::Test { } }; -void compareObjects(const DBOPObjectInfo& origin, const DBOPObjectInfo& dest) { +void compareObjects(const DBObject& origin, const DBObject& dest) { ASSERT_EQ(origin.uuid, dest.uuid); ASSERT_EQ(origin.bucket_id, dest.bucket_id); ASSERT_EQ(origin.name, dest.name); } -DBOPObjectInfo createTestObject( +DBObject createTestObject( const std::string& suffix, CephContext* context, const std::string& username = "usertest" ) { - DBOPObjectInfo object; + DBObject object; object.uuid.generate_random(); object.bucket_id = "test_bucket"; object.name = "test" + suffix; return object; } -bool uuidInVector(const uuid_d& uuid, const std::vector& uuids) { - for (auto const& list_uuid : uuids) { - if (list_uuid == uuid) return true; +DBVersionedObject createTestVersionedObject( + uint id, const std::string& object_id, const std::string& suffix +) { + DBVersionedObject test_versioned_object; + test_versioned_object.id = id; + uuid_d uuid; 
+ uuid.parse(object_id.c_str()); + test_versioned_object.object_id = uuid; + test_versioned_object.checksum = "test_checksum_" + suffix; + // test_versioned_object.size = rand(); + test_versioned_object.size = 1999; + test_versioned_object.create_time = ceph::real_clock::now(); + test_versioned_object.delete_time = ceph::real_clock::now(); + test_versioned_object.commit_time = ceph::real_clock::now(); + test_versioned_object.mtime = ceph::real_clock::now(); + test_versioned_object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + test_versioned_object.version_id = "test_version_id_" + suffix; + test_versioned_object.etag = "test_etag_" + suffix; + test_versioned_object.version_type = rgw::sal::sfs::VersionType::REGULAR; + + //set attrs with default ACL + { + RGWAccessControlPolicy aclp; + rgw_user aclu("usertest"); + aclp.get_acl().create_default(aclu, "usertest"); + aclp.get_owner().set_name("usertest"); + aclp.get_owner().set_id(aclu); + bufferlist acl_bl; + aclp.encode(acl_bl); + rgw::sal::Attrs attrs; + attrs[RGW_ATTR_ACL] = acl_bl; + test_versioned_object.attrs = attrs; } - return false; + + return test_versioned_object; } TEST_F(TestSFSSQLiteObjects, CreateAndGet) { @@ -108,73 +138,6 @@ TEST_F(TestSFSSQLiteObjects, CreateAndGet) { compareObjects(object, *ret_object); } -TEST_F(TestSFSSQLiteObjects, ListObjectsIDs) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - EXPECT_FALSE(fs::exists(getDBFullPath())); - DBConnRef conn = std::make_shared(ceph_context.get()); - - // Create the bucket, we need it because BucketName is a foreign key of Bucket::BucketID - createBucket("usertest", "test_bucket", conn); - - auto db_objects = std::make_shared(conn); - - auto obj1 = createTestObject("1", ceph_context.get()); - db_objects->store_object(obj1); - auto obj2 = createTestObject("2", ceph_context.get()); - db_objects->store_object(obj2); - auto obj3 = createTestObject("3", ceph_context.get()); - db_objects->store_object(obj3); - - EXPECT_TRUE(fs::exists(getDBFullPath())); - - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 3); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); -} - -TEST_F(TestSFSSQLiteObjects, ListBucketsIDsPerBucket) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - EXPECT_FALSE(fs::exists(getDBFullPath())); - DBConnRef conn = std::make_shared(ceph_context.get()); - - createBucket("usertest", "test_bucket_1", conn); - createBucket("usertest", "test_bucket_2", conn); - createBucket("usertest", "test_bucket_3", conn); - - auto db_objects = std::make_shared(conn); - - auto test_object_1 = createTestObject("1", ceph_context.get()); - test_object_1.bucket_id = "test_bucket_1"; - db_objects->store_object(test_object_1); - - auto test_object_2 = createTestObject("2", ceph_context.get()); - test_object_2.bucket_id = "test_bucket_2"; - db_objects->store_object(test_object_2); - - auto test_object_3 = createTestObject("3", ceph_context.get()); - test_object_3.bucket_id = "test_bucket_3"; - db_objects->store_object(test_object_3); - - auto objects_ids = db_objects->get_object_ids("test_bucket_1"); - ASSERT_EQ(objects_ids.size(), 1); - EXPECT_EQ(objects_ids[0], test_object_1.uuid); - - objects_ids = db_objects->get_object_ids("test_bucket_2"); - ASSERT_EQ(objects_ids.size(), 
1); - EXPECT_EQ(objects_ids[0], test_object_2.uuid); - - objects_ids = db_objects->get_object_ids("test_bucket_3"); - ASSERT_EQ(objects_ids.size(), 1); - EXPECT_EQ(objects_ids[0], test_object_3.uuid); -} - TEST_F(TestSFSSQLiteObjects, remove_object) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -195,13 +158,6 @@ TEST_F(TestSFSSQLiteObjects, remove_object) { db_objects->store_object(obj3); db_objects->remove_object(obj2.uuid); - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 2); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_FALSE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); - auto ret_object = db_objects->get_object(obj2.uuid); ASSERT_FALSE(ret_object.has_value()); } @@ -227,12 +183,8 @@ TEST_F(TestSFSSQLiteObjects, remove_objectThatDoesNotExist) { uuid_d non_existing_uuid; db_objects->remove_object(non_existing_uuid); - auto object_ids = db_objects->get_object_ids(); - EXPECT_EQ(object_ids.size(), 3); - - EXPECT_TRUE(uuidInVector(obj1.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj2.uuid, object_ids)); - EXPECT_TRUE(uuidInVector(obj3.uuid, object_ids)); + auto objects = db_objects->get_objects("test_bucket"); + EXPECT_EQ(objects.size(), 3); } TEST_F(TestSFSSQLiteObjects, CreateAndUpdate) { @@ -300,7 +252,7 @@ TEST_F(TestSFSSQLiteObjects, CreateObjectForNonExistingBucket) { SQLiteObjects db_objects(conn); auto storage = conn->get_storage(); - DBOPObjectInfo db_object; + DBObject db_object; uuid_d uuid_obj; uuid_obj.parse("254ddc1a-06a6-11ed-b939-0242ac120002"); diff --git a/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc b/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc index b376d4fa78ab3..a47d4fcac6878 100644 --- a/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc +++ b/src/test/rgw/sfs/test_rgw_sfs_sqlite_versioned_objects.cc @@ -12,7 +12,6 @@ #include "rgw/driver/sfs/sqlite/sqlite_objects.h" #include "rgw/driver/sfs/sqlite/sqlite_users.h" #include "rgw/driver/sfs/sqlite/sqlite_versioned_objects.h" -#include "rgw/driver/sfs/sqlite/versioned_object/versioned_object_conversions.h" #include "rgw/rgw_sal_sfs.h" using namespace rgw::sal::sfs::sqlite; @@ -22,6 +21,7 @@ const static std::string TEST_DIR = "rgw_sfs_tests"; const static std::string TEST_USERNAME = "test_username"; const static std::string TEST_BUCKET = "test_bucket"; +const static std::string TEST_BUCKET_2 = "test_bucket_2"; const static std::string TEST_OBJECT_ID = "80943a6d-9f72-4001-bac0-a9a036be8c49"; const static std::string TEST_OBJECT_ID_1 = @@ -30,6 +30,8 @@ const static std::string TEST_OBJECT_ID_2 = "af06d9d3-307f-4c98-865b-cd3b087acc4f"; const static std::string TEST_OBJECT_ID_3 = "bf06d9d3-307f-4c98-865b-cd3b087acc4f"; +const static std::string TEST_OBJECT_ID_4 = + "cf06d9d3-307f-4c98-865b-cd3b087acc4f"; class TestSFSSQLiteVersionedObjects : public ::testing::Test { protected: @@ -82,7 +84,7 @@ class TestSFSSQLiteVersionedObjects : public ::testing::Test { createBucket(username, bucketname, conn); SQLiteObjects objects(conn); - DBOPObjectInfo object; + DBObject object; object.uuid.parse(object_id.c_str()); object.bucket_id = bucketname; object.name = "test_name"; @@ -90,16 +92,17 @@ class TestSFSSQLiteVersionedObjects : public ::testing::Test { } }; -DBOPVersionedObjectInfo createTestVersionedObject( +DBVersionedObject createTestVersionedObject( uint id, const std::string& object_id, const std::string& suffix ) 
{ - DBOPVersionedObjectInfo test_versioned_object; + DBVersionedObject test_versioned_object; test_versioned_object.id = id; uuid_d uuid; uuid.parse(object_id.c_str()); test_versioned_object.object_id = uuid; test_versioned_object.checksum = "test_checksum_" + suffix; - test_versioned_object.size = rand(); + // test_versioned_object.size = rand(); + test_versioned_object.size = 1999; test_versioned_object.create_time = ceph::real_clock::now(); test_versioned_object.delete_time = ceph::real_clock::now(); test_versioned_object.commit_time = ceph::real_clock::now(); @@ -148,7 +151,7 @@ void compareVersionedObjectsAttrs( } void compareVersionedObjects( - const DBOPVersionedObjectInfo& origin, const DBOPVersionedObjectInfo& dest + const DBVersionedObject& origin, const DBVersionedObject& dest ) { ASSERT_EQ(origin.id, dest.id); ASSERT_EQ(origin.object_id, dest.object_id); @@ -228,9 +231,9 @@ TEST_F(TestSFSSQLiteVersionedObjects, ListObjectsIDs) { uuid.parse(TEST_OBJECT_ID.c_str()); auto objects = db_versioned_objects->get_versioned_objects(uuid); ASSERT_EQ(objects.size(), 3); - compareVersionedObjects(objects[0], obj1); + compareVersionedObjects(objects[0], obj3); compareVersionedObjects(objects[1], obj2); - compareVersionedObjects(objects[2], obj3); + compareVersionedObjects(objects[2], obj1); } TEST_F(TestSFSSQLiteVersionedObjects, ListBucketsIDsPerObject) { @@ -468,46 +471,6 @@ TEST_F(TestSFSSQLiteVersionedObjects, CreateObjectForNonExistingBucket) { ); } -TEST_F(TestSFSSQLiteVersionedObjects, Testobject_stateConversion) { - auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); - ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); - - DBConnRef conn = std::make_shared(ceph_context.get()); - - // Create the object, we need it because of foreign key constrains - createObject( - TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn - ); - - SQLiteVersionedObjects db_objects(conn); - auto storage = conn->get_storage(); - - auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); - auto db_object = get_db_versioned_object(object); - ASSERT_EQ(rgw::sal::sfs::ObjectState::OPEN, db_object.object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; - storage.replace(db_object); - - auto ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::COMMITTED, ret_object->object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::LOCKED; - storage.replace(db_object); - - ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::LOCKED, ret_object->object_state); - - db_object.object_state = rgw::sal::sfs::ObjectState::DELETED; - storage.replace(db_object); - - ret_object = db_objects.get_versioned_object(db_object.id); - ASSERT_TRUE(ret_object.has_value()); - ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, ret_object->object_state); -} - TEST_F(TestSFSSQLiteVersionedObjects, StoreCreatesNewVersions) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -594,6 +557,7 @@ TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersion) { // just update the size, and add a new version object.size = 1999; object.version_id = "test_version_id_2"; + object.commit_time = ceph::real_clock::now(); db_versioned_objects->insert_versioned_object(object); // now it should return the last one @@ -610,6 +574,68 @@ 
TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersion) { ASSERT_FALSE(ret_object.has_value()); } +TEST_F(TestSFSSQLiteVersionedObjects, GetLastVersionRepeatedCommitTime) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + // keep it for later + auto commit_time = object.commit_time; + db_versioned_objects->insert_versioned_object(object); + EXPECT_TRUE(fs::exists(getDBFullPath())); + + uuid_d uuid; + uuid.parse(TEST_OBJECT_ID.c_str()); + auto ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + compareVersionedObjects(object, *ret_object); + + // just update the size, and add a new version + object.size = 1999; + object.version_id = "test_version_id_2"; + // set the same commit time + object.commit_time = commit_time; + db_versioned_objects->insert_versioned_object(object); + + // now we have 2 entries with the same commit time. + // it should return the one with the highest id + ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + object.id = 2; + compareVersionedObjects(object, *ret_object); + + // just update the size, and add a new version + object.size = 3121; + object.version_id = "test_version_id_3"; + // set the same commit time + object.commit_time = commit_time; + db_versioned_objects->insert_versioned_object(object); + + // now we have 3 entries with the same commit time. + // it should return the one with the highest id (3) + ret_object = db_versioned_objects->get_last_versioned_object(uuid); + ASSERT_TRUE(ret_object.has_value()); + object.id = 3; + compareVersionedObjects(object, *ret_object); + + uuid_d uuid_that_does_not_exist; + uuid_that_does_not_exist.parse(TEST_OBJECT_ID_2.c_str()); + + ret_object = + db_versioned_objects->get_last_versioned_object(uuid_that_does_not_exist); + ASSERT_FALSE(ret_object.has_value()); +} + TEST_F(TestSFSSQLiteVersionedObjects, TestInsertIncreaseID) { auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); @@ -728,8 +754,10 @@ TEST_F(TestSFSSQLiteVersionedObjects, StoreUnsupportedTimestamp) { ; } catch (const std::system_error& e) { EXPECT_STREQ( - "Error converting ceph::real_time to int64. Nanoseconds value: " - "9223372036854775808 is out of range: Numerical result out of " + "Error converting ceph::real_time to int64. 
Nanoseconds " + "value: " + "9223372036854775808 is out of range: Numerical result out " + "of " "range", e.what() ); @@ -739,3 +767,580 @@ TEST_F(TestSFSSQLiteVersionedObjects, StoreUnsupportedTimestamp) { std::system_error ); } + +TEST_F(TestSFSSQLiteVersionedObjects, TestFilterDeleted) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // create 5 versions + // versions 2 and 3 are deleted + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.object_state = rgw::sal::sfs::ObjectState::DELETED; + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + object.version_id = "test_version_id_4"; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + + // get_versioned_object(uint id) + // try to get version 1 (not deleted) + auto not_deleted = db_versioned_objects->get_versioned_object(1); + ASSERT_TRUE(not_deleted.has_value()); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, not_deleted->object_state); + + // now version 2 (deleted) + auto deleted = db_versioned_objects->get_versioned_object(2); + ASSERT_FALSE(deleted.has_value()); + // now version 2, not filtering deleted + deleted = db_versioned_objects->get_versioned_object(2, false); + ASSERT_TRUE(deleted.has_value()); + ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, deleted->object_state); + + // get_versioned_object(const std::string & version_id) + // try to get version 1 (not deleted) + not_deleted = db_versioned_objects->get_versioned_object("test_version_id_1"); + ASSERT_TRUE(not_deleted.has_value()); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, not_deleted->object_state); + + // now version 2 (deleted) + deleted = db_versioned_objects->get_versioned_object("test_version_id_2"); + ASSERT_FALSE(deleted.has_value()); + // now version 2, not filtering deleted + deleted = + db_versioned_objects->get_versioned_object("test_version_id_2", false); + ASSERT_TRUE(deleted.has_value()); + ASSERT_EQ(rgw::sal::sfs::ObjectState::DELETED, deleted->object_state); + + // get_versioned_object_ids + auto ids = db_versioned_objects->get_versioned_object_ids(); + ASSERT_EQ(3, ids.size()); // 2 and 3 will not be returned + for (const auto& id : ids) { + ASSERT_NE(2, id); + ASSERT_NE(3, id); + } + + ids = db_versioned_objects->get_versioned_object_ids(false); + ASSERT_EQ(5, ids.size()); // 2 and 3 will be returned + + // get_versioned_object_ids(const uuid_d & object_id) + uuid_d object_id; + object_id.parse(TEST_OBJECT_ID.c_str()); + ids = db_versioned_objects->get_versioned_object_ids(object_id); + ASSERT_EQ(3, ids.size()); // 2 and 3 will not be returned + for (const auto& id : ids) { + ASSERT_NE(2, id); + ASSERT_NE(3, id); + } + + ids = db_versioned_objects->get_versioned_object_ids(object_id, 
false); + ASSERT_EQ(5, ids.size()); // 2 and 3 will be returned + + // get_versioned_objects(const uuid_d & object_id) + auto versions = db_versioned_objects->get_versioned_objects(object_id); + ASSERT_EQ(3, versions.size()); // 2 and 3 will not be returned + for (const auto& version : versions) { + ASSERT_NE(2, version.id); + ASSERT_NE(3, version.id); + ASSERT_NE(rgw::sal::sfs::ObjectState::DELETED, version.object_state); + } + versions = db_versioned_objects->get_versioned_objects(object_id, false); + ASSERT_EQ(5, versions.size()); // 2 and 3 will be returned + + // get_last_versioned_object + // this time last version (5) is not deleted + // get it first, then flag as deleted and check + auto last_version = + db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(5, last_version->id); + + // now flag the last version as DELETED + last_version->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*last_version); + + // we update, so no new version should be created + versions = db_versioned_objects->get_versioned_objects(object_id, false); + ASSERT_EQ(5, versions.size()); // will still return 5 versions + + versions = db_versioned_objects->get_versioned_objects(object_id); + ASSERT_EQ(2, versions.size()); // now only 2 are not deleted + + // now last version should be 4 + last_version = db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(4, last_version->id); + + // if we don't filter deleted it's still 5 + last_version = + db_versioned_objects->get_last_versioned_object(object_id, false); + ASSERT_TRUE(last_version.has_value()); + ASSERT_EQ(5, last_version->id); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestDeleteLastAndGetPrevious) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // create 3 versions (last one is a delete marker) + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + object.version_type = rgw::sal::sfs::VersionType::DELETE_MARKER; + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + auto last_version_now = + db_versioned_objects->delete_version_and_get_previous_transact(3); + ASSERT_TRUE(last_version_now.has_value()); + ASSERT_EQ(2, last_version_now->id); + ASSERT_EQ("test_version_id_2", last_version_now->version_id); + + uuid_d object_id; + object_id.parse(TEST_OBJECT_ID.c_str()); + last_version_now = db_versioned_objects->get_last_versioned_object(object_id); + ASSERT_TRUE(last_version_now.has_value()); + ASSERT_EQ(2, last_version_now->id); + ASSERT_EQ("test_version_id_2", last_version_now->version_id); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestGetByBucketAndObjectName) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + 
ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + // SCENARIO 1. ONLY 1 object with committed versions + // Create the object, we need it because of foreign key constrains + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 committed versions + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + object.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + // try to get version (TEST_BUCKET, "test_name", "test_version_id_2") + // corresponding to the second version + auto version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_2" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_2", version->version_id); + EXPECT_EQ(2, version->id); + + // don't pass any version. Should return the last one + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // pass a non existing version_id + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "this_version_does_not_exist" + ); + ASSERT_FALSE(version.has_value()); + + // SCENARIO 2. There is one object with all versions deleted (waiting to be + // removed by the garbage collector) and the alive object, both with the same + // object name but different uuid + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + object = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + object.object_state = rgw::sal::sfs::ObjectState::DELETED; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + + // even though commit times for this versions are later in time than for the + // first object it should still return versions from the first object + + // don't pass any version. 
Should return the last one + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // try to get a deleted version (TEST_BUCKET, "test_name", "test_version_id_5") + // corresponding to the second version + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_5" + ); + // should not return that object + // (it is deleted waiting for the garbage collector) + ASSERT_FALSE(version.has_value()); + + // still return valid version + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_3" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // SCENARIO 3. 2 Objects with the same name in different buckets. + // in this case the object in bucket TEST_BUCKET_2 is in open state + // (still writing to it), but that's still an alive object + createObject( + TEST_USERNAME, TEST_BUCKET_2, TEST_OBJECT_ID_3, ceph_context.get(), conn + ); + object = createTestVersionedObject(6, TEST_OBJECT_ID_3, "6"); + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(object)); + + // still return valid version for 1st object + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_3" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_3", version->version_id); + EXPECT_EQ(3, version->id); + + // and also valid for the object in the second bucket + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET_2, "test_name", "test_version_id_6" + ); + ASSERT_TRUE(version.has_value()); + EXPECT_EQ("test_version_id_6", version->version_id); + EXPECT_EQ(6, version->id); + + // but version 6 is not on first bucket + version = db_versioned_objects->get_non_deleted_versioned_object( + TEST_BUCKET, "test_name", "test_version_id_6" + ); + ASSERT_FALSE(version.has_value()); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestUpdateAndDeleteRest) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 open versions + auto object = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_2"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_3"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(object)); + + // create a different object + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + // with also 3 open versions + object = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + object.object_state = rgw::sal::sfs::ObjectState::OPEN; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_5"; + object.commit_time = 
ceph::real_clock::now(); + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(object)); + object.version_id = "test_version_id_6"; + object.commit_time = ceph::real_clock::now(); + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(object)); + + // update object 2 to COMMITTED and DELETE the rest in a transaction + auto object_2 = db_versioned_objects->get_versioned_object(2, false); + ASSERT_TRUE(object_2.has_value()); + object_2->object_state = rgw::sal::sfs::ObjectState::COMMITTED; + db_versioned_objects->store_versioned_object_delete_rest_transact(*object_2); + + // all the rest should be updated (but only for that object) + auto object_ret = db_versioned_objects->get_versioned_object(1, false); + ASSERT_TRUE(object_ret.has_value()); + EXPECT_EQ(rgw::sal::sfs::ObjectState::DELETED, object_ret->object_state); + object_ret = db_versioned_objects->get_versioned_object(3, false); + ASSERT_TRUE(object_ret.has_value()); + EXPECT_EQ(rgw::sal::sfs::ObjectState::DELETED, object_ret->object_state); + + // the other object versions should be still open + uuid_d uuid_second_object; + uuid_second_object.parse(TEST_OBJECT_ID_2.c_str()); + // get the objects but not filtering deleted ones (we get all) + auto versions = + db_versioned_objects->get_versioned_objects(uuid_second_object, false); + EXPECT_EQ(3, versions.size()); + for (const auto& ver : versions) { + EXPECT_EQ(rgw::sal::sfs::ObjectState::OPEN, ver.object_state); + } +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestUpdateDeleteVersionDeletesObject) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); + + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 committed versions + auto version = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + version.object_state = rgw::sal::sfs::ObjectState::DELETED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_2"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(version)); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_id = "test_version_id_3"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_2, ceph_context.get(), conn + ); + version = createTestVersionedObject(4, TEST_OBJECT_ID_2, "4"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(4, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_5"; + EXPECT_EQ(5, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_6"; + EXPECT_EQ(6, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions for another object + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID_3, ceph_context.get(), conn + ); + version = createTestVersionedObject(7, TEST_OBJECT_ID_3, "7"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = 
rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(7, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_8"; + EXPECT_EQ(8, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_9"; + EXPECT_EQ(9, db_versioned_objects->insert_versioned_object(version)); + + // insert 3 committed versions for another object in another bucket + createObject( + TEST_USERNAME, TEST_BUCKET_2, TEST_OBJECT_ID_4, ceph_context.get(), conn + ); + version = createTestVersionedObject(10, TEST_OBJECT_ID_4, "10"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(10, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_11"; + EXPECT_EQ(11, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_12"; + EXPECT_EQ(12, db_versioned_objects->insert_versioned_object(version)); + + // we have 3 objects with 3 versions in TEST_BUCKET + // one of the objects has 2 version deleted. The rest have all versions alive. + + // we also have object with 3 version in TEST_BUCKET_2 + auto object_list = + db_versioned_objects->list_last_versioned_objects(TEST_BUCKET); + ASSERT_EQ(3, object_list.size()); + // first item + uuid_d uuid_object; + uuid_object.parse(TEST_OBJECT_ID.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[0])); + // versions 1 and 2 for TEST_OBJECT_ID are deleted + EXPECT_EQ( + "test_version_id_3", rgw::sal::sfs::sqlite::get_version_id(object_list[0]) + ); + EXPECT_EQ(3, rgw::sal::sfs::sqlite::get_id(object_list[0])); + + // second item + uuid_object.parse(TEST_OBJECT_ID_2.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[1])); + EXPECT_EQ( + "test_version_id_6", rgw::sal::sfs::sqlite::get_version_id(object_list[1]) + ); + EXPECT_EQ(6, rgw::sal::sfs::sqlite::get_id(object_list[1])); + + // third item + uuid_object.parse(TEST_OBJECT_ID_3.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[2])); + EXPECT_EQ( + "test_version_id_9", rgw::sal::sfs::sqlite::get_version_id(object_list[2]) + ); + EXPECT_EQ(9, rgw::sal::sfs::sqlite::get_id(object_list[2])); + + // now delete the 3rd version of TEST_OBJECT_ID + auto version_to_delete = db_versioned_objects->get_versioned_object(3); + version_to_delete->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*version_to_delete); + + // list again + object_list = db_versioned_objects->list_last_versioned_objects(TEST_BUCKET); + // the object with all version deleted should not be listed + ASSERT_EQ(2, object_list.size()); + + // second item + uuid_object.parse(TEST_OBJECT_ID_2.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[0])); + EXPECT_EQ( + "test_version_id_6", rgw::sal::sfs::sqlite::get_version_id(object_list[0]) + ); + EXPECT_EQ(6, rgw::sal::sfs::sqlite::get_id(object_list[0])); + + // third item + uuid_object.parse(TEST_OBJECT_ID_3.c_str()); + EXPECT_EQ(uuid_object, rgw::sal::sfs::sqlite::get_uuid(object_list[1])); + EXPECT_EQ( + "test_version_id_9", rgw::sal::sfs::sqlite::get_version_id(object_list[1]) + ); + EXPECT_EQ(9, rgw::sal::sfs::sqlite::get_id(object_list[1])); +} + +TEST_F(TestSFSSQLiteVersionedObjects, TestAddDeleteMarker) { + auto ceph_context = std::make_shared(CEPH_ENTITY_TYPE_CLIENT); + ceph_context->_conf.set_val("rgw_sfs_data_path", getTestDir()); 
+ + EXPECT_FALSE(fs::exists(getDBFullPath())); + DBConnRef conn = std::make_shared(ceph_context.get()); + + auto db_versioned_objects = std::make_shared(conn); + + createObject( + TEST_USERNAME, TEST_BUCKET, TEST_OBJECT_ID, ceph_context.get(), conn + ); + + // insert 3 committed versions + auto version = createTestVersionedObject(1, TEST_OBJECT_ID, "1"); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_type = rgw::sal::sfs::VersionType::REGULAR; + EXPECT_EQ(1, db_versioned_objects->insert_versioned_object(version)); + version.version_id = "test_version_id_2"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(2, db_versioned_objects->insert_versioned_object(version)); + version.object_state = rgw::sal::sfs::ObjectState::COMMITTED; + version.version_id = "test_version_id_3"; + version.commit_time = ceph::real_clock::now(); + EXPECT_EQ(3, db_versioned_objects->insert_versioned_object(version)); + + // add a delete marker + auto delete_marker_id = "delete_marker_id"; + uuid_d uuid; + uuid.parse(TEST_OBJECT_ID.c_str()); + bool added; + auto id = db_versioned_objects->add_delete_marker_transact( + uuid, delete_marker_id, added + ); + EXPECT_TRUE(added); + EXPECT_EQ(4, id); + auto delete_marker = db_versioned_objects->get_versioned_object(4); + ASSERT_TRUE(delete_marker.has_value()); + EXPECT_EQ( + rgw::sal::sfs::VersionType::DELETE_MARKER, delete_marker->version_type + ); + EXPECT_EQ(rgw::sal::sfs::ObjectState::COMMITTED, delete_marker->object_state); + EXPECT_EQ(version.etag, delete_marker->etag); + EXPECT_EQ("delete_marker_id", delete_marker->version_id); + + // add another delete marker (should not add it because the marker already + // exists) + id = db_versioned_objects->add_delete_marker_transact( + uuid, delete_marker_id, added + ); + EXPECT_FALSE(added); + EXPECT_EQ(0, id); + auto last_version = db_versioned_objects->get_versioned_object(5); + ASSERT_FALSE(last_version.has_value()); + + // delete the delete marker + db_versioned_objects->remove_versioned_object(4); + + // now lets say version 2 and 3 are expired and deleted by LC + // (for whatever reason) + auto read_version = db_versioned_objects->get_versioned_object(2); + ASSERT_TRUE(read_version.has_value()); + read_version->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*read_version); + + read_version = db_versioned_objects->get_versioned_object(3); + ASSERT_TRUE(read_version.has_value()); + read_version->object_state = rgw::sal::sfs::ObjectState::DELETED; + db_versioned_objects->store_versioned_object(*read_version); + + // try to create the delete marker (we still have 1 alive version) + id = db_versioned_objects->add_delete_marker_transact( + uuid, delete_marker_id, added + ); + EXPECT_TRUE(added); + EXPECT_EQ(5, id); + delete_marker = db_versioned_objects->get_versioned_object(5); + ASSERT_TRUE(delete_marker.has_value()); + EXPECT_EQ( + rgw::sal::sfs::VersionType::DELETE_MARKER, delete_marker->version_type + ); + EXPECT_EQ(rgw::sal::sfs::ObjectState::COMMITTED, delete_marker->object_state); + EXPECT_EQ(version.etag, delete_marker->etag); + EXPECT_EQ("delete_marker_id", delete_marker->version_id); + + // delete the marker + db_versioned_objects->remove_versioned_object(5); + + // mark the alive version as deleted + read_version = db_versioned_objects->get_versioned_object(1); + ASSERT_TRUE(read_version.has_value()); + read_version->object_state = rgw::sal::sfs::ObjectState::DELETED; + 
db_versioned_objects->store_versioned_object(*read_version);
+
+  // add another delete marker (should not add it because all the versions of
+  // the object are deleted)
+  id = db_versioned_objects->add_delete_marker_transact(
+      uuid, delete_marker_id, added
+  );
+  EXPECT_FALSE(added);
+  EXPECT_EQ(0, id);
+}