From ce129c578499e276ccd3c20519494e2d818d56e6 Mon Sep 17 00:00:00 2001
From: Vidushi Mishra
Date: Thu, 2 Jan 2025 10:16:50 +0530
Subject: [PATCH] object restore feature automation

Signed-off-by: Vidushi Mishra
---
 ...st_lc_cloud_transition_restore_object.yaml |  46 ++++++++
 .../s3_swift/reusables/s3_object_restore.py   | 106 ++++++++++++++++++
 ..._lifecycle_object_expiration_transition.py |  46 ++++++++
 3 files changed, 198 insertions(+)
 create mode 100644 rgw/v2/tests/s3_swift/multisite_configs/test_lc_cloud_transition_restore_object.yaml
 create mode 100644 rgw/v2/tests/s3_swift/reusables/s3_object_restore.py

diff --git a/rgw/v2/tests/s3_swift/multisite_configs/test_lc_cloud_transition_restore_object.yaml b/rgw/v2/tests/s3_swift/multisite_configs/test_lc_cloud_transition_restore_object.yaml
new file mode 100644
index 000000000..70c5b94fb
--- /dev/null
+++ b/rgw/v2/tests/s3_swift/multisite_configs/test_lc_cloud_transition_restore_object.yaml
@@ -0,0 +1,46 @@
+#test_bucket_lifecycle_object_expiration_transition.py
+#CEPH-83591622, CEPH-83591672, CEPH-83591621
+config:
+  user_count: 1
+  bucket_count: 1
+  objects_count: 10
+  parallel_lc: False
+  test_lc_transition: True
+  enable_resharding: True
+  sharding_type: manual
+  shards: 211
+  pool_name: data.cold
+  storage_class: cold
+  ec_pool_transition: False
+  multiple_transitions: True
+  two_pool_transition: False
+  second_pool_name: data.glacier
+  second_storage_class: glacier
+  remote_zone: secondary
+  objects_size_range:
+    min: 5
+    max: 15
+  test_ops:
+    create_bucket: true
+    create_object: true
+    enable_versioning: true
+    version_count: 3
+    delete_marker: false
+    test_cloud_transition: true
+    test_pool_transition: false
+    test_ibm_cloud_transition: true
+    test_aws_cloud_transition: false
+    test_retain_head: true
+    test_cloud_transition_at_remote: false
+    test_s3_restore_from_cloud: true
+  lifecycle_conf:
+    - ID: LC_Rule_1
+      Filter:
+        Prefix: key1
+      Status: Enabled
+      Transitions:
+        - Days: 2
+          StorageClass: CLOUDIBM
+      NoncurrentVersionTransitions:
+        - NoncurrentDays: 1
+          StorageClass: CLOUDIBM
diff --git a/rgw/v2/tests/s3_swift/reusables/s3_object_restore.py b/rgw/v2/tests/s3_swift/reusables/s3_object_restore.py
new file mode 100644
index 000000000..feb23628d
--- /dev/null
+++ b/rgw/v2/tests/s3_swift/reusables/s3_object_restore.py
@@ -0,0 +1,106 @@
+import json
+import logging
+import os
+import random
+import time
+import timeit
+from urllib import parse as urlparse
+
+import boto3
+import v2.lib.manage_data as manage_data
+import v2.utils.utils as utils
+from botocore.exceptions import ClientError
+from v2.lib.exceptions import RGWBaseException, TestExecError
+from v2.lib.resource_op import Config
+from v2.lib.rgw_config_opts import ConfigOpts
+
+log = logging.getLogger()
+
+
+def restore_s3_object(
+    s3_client, each_user, config, bucket_name, object_key, version_id=None, days=7
+):
+    """
+    Restore an S3 object, verify restore attributes, and download the restored object.
+
+    :param bucket_name: Name of the S3 bucket.
+    :param object_key: Key of the S3 object.
+    :param version_id: Version ID of the object (optional).
+    :param days: Number of days to keep the restored object.
+ """ + try: + # Initiate restore request + restore_request = { + "Days": days, + } + + if version_id: + response = s3_client.restore_object( + Bucket=bucket_name, + Key=object_key, + VersionId=version_id, + RestoreRequest=restore_request, + ) + else: + response = s3_client.restore_object( + Bucket=bucket_name, Key=object_key, RestoreRequest=restore_request + ) + + log.info("Restore initiated:", response) + + # Validate restore attributes + head_response = s3_client.head_object( + Bucket=bucket_name, Key=object_key, VersionId=version_id + ) + log.info(f" the head_object is {head_response}") + restore_status = head_response.get("Restore", "") + if 'ongoing-request="false"' in restore_status: + log.info("Object is successfully restored.") + else: + log.info("Restore status:", restore_status) + + # Download the restored object + download_path = f"restored-{object_key}" + s3_client.download_file( + Bucket=bucket_name, + Key=object_key, + Filename=download_path, + ExtraArgs={"VersionId": version_id} if version_id else None, + ) + log.info(f"Restored object downloaded to {download_path}.") + + except ClientError as e: + log.info("Error:", e) + + +def check_restore_expiry( + s3_client, each_user, config, bucket_name, object_key, version_id=None +): + """ + Check if the restored object is no longer accessible after the restore period. + + :param s3_client: The S3 client instance. + :param bucket_name: Name of the S3 bucket. + :param object_key: Key of the S3 object. + :param version_id: Version ID of the object (optional). + """ + try: + download_path = f"expired-{object_key}" + s3_client.download_file( + Bucket=bucket_name, + Key=object_key, + Filename=download_path, + ExtraArgs={"VersionId": version_id} if version_id else None, + ) + raise Exception( + "Unexpected: Object is still accessible after the restore period." + ) + except ClientError as e: + if e.response["Error"]["Code"] == "NoSuchKey": + log.info("Restore has expired and the object is no longer accessible.") + elif e.response["Error"]["Code"] == "InvalidObjectState": + log.info( + "Restore has expired, and the object is no longer in a restored state." 
+            )
+        else:
+            raise TestExecError(f"Error while checking restore expiration: {e}")
diff --git a/rgw/v2/tests/s3_swift/test_bucket_lifecycle_object_expiration_transition.py b/rgw/v2/tests/s3_swift/test_bucket_lifecycle_object_expiration_transition.py
index 903f8cbe9..64c5daf7a 100644
--- a/rgw/v2/tests/s3_swift/test_bucket_lifecycle_object_expiration_transition.py
+++ b/rgw/v2/tests/s3_swift/test_bucket_lifecycle_object_expiration_transition.py
@@ -18,6 +18,7 @@
     test_lc_process_without_applying_rule.yaml
     test_lc_transition_with_lc_process.yaml
     test_sse_kms_per_bucket_multipart_object_download_after_transition.yaml
+    test_lc_cloud_transition_restore_object.yaml
 
 Operation:
 
@@ -51,6 +52,7 @@
 from v2.lib.s3.auth import Auth
 from v2.lib.s3.write_io_info import BasicIOInfoStructure, BucketIoInfo, IOInfoInitialize
 from v2.tests.s3_swift import reusable
+from v2.tests.s3_swift.reusables import s3_object_restore as reusables_s3_restore
 from v2.tests.s3_swift.reusables.bucket_notification import NotificationService
 from v2.utils.log import configure_logging
 from v2.utils.test_desc import AddTestInfo
@@ -134,6 +136,7 @@ def test_exec(config, ssh_con):
     for each_user in user_info:
         auth = Auth(each_user, ssh_con, ssl=config.ssl, haproxy=config.haproxy)
         rgw_conn = auth.do_auth()
+        s3_client = auth.do_auth_using_client()
         rgw_conn2 = auth.do_auth_using_client()
         notification = None
 
@@ -499,6 +502,49 @@ def test_exec(config, ssh_con):
                     and config.test_ops.get("send_bucket_notifications", False) is True
                 ):
                     notification.verify(bucket_name)
+                if config.test_ops.get("test_s3_restore_from_cloud", False):
+                    log.info(
+                        f"Test s3 restore of objects transitioned to the cloud for {bucket_name}"
+                    )
+                    bucket_list_op = utils.exec_shell_cmd(
+                        f"radosgw-admin bucket list --bucket={bucket_name}"
+                    )
+                    json_doc_list = json.loads(bucket_list_op)
+                    log.info(f"The bucket list for {bucket_name} is {json_doc_list}")
+                    objs_total = sum(1 for item in json_doc_list if "instance" in item)
+                    log.info(
+                        f"Occurrences of version ids in the bucket list: {objs_total}"
+                    )
+                    for i in range(0, objs_total):
+                        if json_doc_list[i]["tag"] != "delete-marker":
+                            object_key = json_doc_list[i]["name"]
+                            version_id = json_doc_list[i]["instance"]
+                            reusables_s3_restore.restore_s3_object(
+                                s3_client,
+                                each_user,
+                                config,
+                                bucket_name,
+                                object_key,
+                                version_id,
+                                days=7,
+                            )
+                    # Test restored objects are not available after the restore interval
+                    log.info(
+                        "Test restored objects are not available after the restore interval"
+                    )
+                    time.sleep(210)
+                    for i in range(0, objs_total):
+                        if json_doc_list[i]["tag"] != "delete-marker":
+                            object_key = json_doc_list[i]["name"]
+                            version_id = json_doc_list[i]["instance"]
+                            reusables_s3_restore.check_restore_expiry(
+                                s3_client,
+                                each_user,
+                                config,
+                                bucket_name,
+                                object_key,
+                                version_id,
+                            )
         if config.parallel_lc:
             log.info("Inside parallel lc processing")
             life_cycle_rule = {"Rules": config.lifecycle_conf}
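
Reviewer note: the snippet below is a rough standalone sketch of how the new reusable is meant to be driven, for anyone who wants to exercise it outside the test framework. It is not part of the patch. The endpoint URL, credentials, and bucket name are placeholders, and it lists versions with the S3 ListObjectVersions call instead of the radosgw-admin bucket list command that the test itself uses.

# Standalone usage sketch (not part of the patch). Endpoint, credentials and
# bucket name are placeholders; adjust them for the RGW site under test.
import time

import boto3

from v2.tests.s3_swift.reusables import s3_object_restore as reusables_s3_restore

s3_client = boto3.client(
    "s3",
    endpoint_url="http://rgw-node:80",  # assumed RGW endpoint
    aws_access_key_id="ACCESS_KEY",  # assumed credentials
    aws_secret_access_key="SECRET_KEY",
)

bucket_name = "restore-test-bucket"  # assumed versioned bucket with cloud-transitioned objects

# Restore every object version that is not a delete marker.
versions = s3_client.list_object_versions(Bucket=bucket_name).get("Versions", [])
for version in versions:
    reusables_s3_restore.restore_s3_object(
        s3_client,
        each_user=None,  # the reusable does not reference each_user/config today
        config=None,
        bucket_name=bucket_name,
        object_key=version["Key"],
        version_id=version["VersionId"],
        days=7,
    )

# After the restore window lapses, each version should no longer be readable;
# 210 seconds mirrors the wait used in the test above.
time.sleep(210)
for version in versions:
    reusables_s3_restore.check_restore_expiry(
        s3_client,
        None,
        None,
        bucket_name,
        version["Key"],
        version["VersionId"],
    )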