From b74804e301ae0c28fc8de6bf9ac7516050a212eb Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 11:13:26 -0800 Subject: [PATCH 01/10] make create session high priority --- source/s3_client.c | 69 +++++-- source/s3express_credentials_provider.c | 2 + tests/CMakeLists.txt | 12 +- .../s3_mock_server_s3express_provider_test.c | 5 +- ...ient_test.c => s3_s3express_client_test.c} | 171 +++++++++++++++++- 5 files changed, 225 insertions(+), 34 deletions(-) rename tests/{s3_mock_server_s3express_client_test.c => s3_s3express_client_test.c} (70%) diff --git a/source/s3_client.c b/source/s3_client.c index 553402752..68f31a954 100644 --- a/source/s3_client.c +++ b/source/s3_client.c @@ -1606,6 +1606,53 @@ static void s_s3_client_prepare_callback_queue_request( int error_code, void *user_data); +static bool s_meta_request_create_more_requests( + struct aws_s3_meta_request *meta_request, + struct aws_s3_client *client, + uint32_t num_requests_in_flight, + const uint32_t max_requests_in_flight, + const uint32_t max_requests_prepare) { + + /* CreateSession has high priority to bypass the checks. */ + if (meta_request->type == AWS_S3_META_REQUEST_TYPE_DEFAULT) { + struct aws_s3_meta_request_default *meta_request_default = meta_request->impl; + if (aws_string_eq_c_str(meta_request_default->operation_name, "CreateSession")) { + return true; + } + } + + /** + * If: + * * Number of being-prepared + already-prepared-and-queued requests is more than the max that can + * be in the preparation stage. + * * Total number of requests tracked by the client is more than the max tracked ("in flight") + * requests. + * + * We cannot create more requests for this meta request. 
+ */ + if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) > + max_requests_prepare || + num_requests_in_flight > max_requests_in_flight) { + return false; + } + + /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in + * ramping up requests just yet. If there is already enough in the queue for one address (even if those + * aren't for this particular endpoint) we skip over this meta request for now. */ + struct aws_s3_endpoint *endpoint = meta_request->endpoint; + AWS_ASSERT(endpoint != NULL); + AWS_ASSERT(client->vtable->get_host_address_count); + size_t num_known_vips = client->vtable->get_host_address_count( + client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); + if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared + + client->threaded_data.request_queue_size) >= g_max_num_connections_per_vip) { + return false; + } + + /* Nothing blocks the meta request to create more requests */ + return true; +} + void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { AWS_PRECONDITION(client); @@ -1629,36 +1676,22 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) { /* While: - * * Number of being-prepared + already-prepared-and-queued requests is less than the max that can be in the - * preparation stage. - * * Total number of requests tracked by the client is less than the max tracked ("in flight") requests. * * There are meta requests to get requests from. * * Then update meta requests to get new requests that can then be prepared (reading from any streams, signing, * etc.) for sending. 
*/ - while ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) < - max_requests_prepare && - num_requests_in_flight < max_requests_in_flight && - !aws_linked_list_empty(&client->threaded_data.meta_requests)) { + while (!aws_linked_list_empty(&client->threaded_data.meta_requests)) { struct aws_linked_list_node *meta_request_node = aws_linked_list_begin(&client->threaded_data.meta_requests); struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data); - struct aws_s3_endpoint *endpoint = meta_request->endpoint; - AWS_ASSERT(endpoint != NULL); - - AWS_ASSERT(client->vtable->get_host_address_count); - size_t num_known_vips = client->vtable->get_host_address_count( - client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A); + if (!s_meta_request_create_more_requests( + meta_request, client, num_requests_in_flight, max_requests_in_flight, max_requests_prepare)) { - /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in - * ramping up requests just yet. If there is already enough in the queue for one address (even if those - * aren't for this particular endpoint) we skip over this meta request for now. */ - if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared + - client->threaded_data.request_queue_size) >= g_max_num_connections_per_vip) { + /* Move the meta request to be processed from next loop. 
*/ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); aws_linked_list_push_back( &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node); diff --git a/source/s3express_credentials_provider.c b/source/s3express_credentials_provider.c index 98f3b4a0d..2bb5c3160 100644 --- a/source/s3express_credentials_provider.c +++ b/source/s3express_credentials_provider.c @@ -497,6 +497,7 @@ static struct aws_s3express_session_creator *s_session_creator_new( }; aws_byte_buf_init(&session_creator->response_buf, provider->allocator, 512); + struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("CreateSession"); struct aws_s3_meta_request_options options = { .message = request, .type = AWS_S3_META_REQUEST_TYPE_DEFAULT, @@ -506,6 +507,7 @@ static struct aws_s3express_session_creator *s_session_creator_new( /* Override endpoint only for tests. */ .endpoint = impl->mock_test.endpoint_override ? impl->mock_test.endpoint_override : NULL, .user_data = session_creator, + .operation_name = operation_name, }; session_creator->synced_data.meta_request = aws_s3_client_make_meta_request(impl->client, &options); AWS_FATAL_ASSERT(session_creator->synced_data.meta_request); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b96fc70f9..e2fe12e01 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -294,11 +294,13 @@ if(ENABLE_MOCK_SERVER_TESTS) add_net_test_case(request_time_too_skewed_mock_server) endif() -add_net_test_case(s3express_provider_long_run_real_server) -add_net_test_case(s3express_client_put_test_small_real_server) -add_net_test_case(s3express_client_put_test_large_real_server) -add_net_test_case(s3express_client_put_long_running_test_real_server) -add_net_test_case(s3express_client_get_test_real_server) +add_net_test_case(s3express_provider_long_running) +add_net_test_case(s3express_client_put_test_small) +add_net_test_case(s3express_client_put_test_large) 
+add_net_test_case(s3express_client_put_test_multiple) +add_net_test_case(s3express_client_put_long_running_test) +add_net_test_case(s3express_client_get_test) +add_net_test_case(s3express_client_get_multiple_test) add_net_test_case(meta_request_auto_ranged_get_new_error_handling) add_net_test_case(meta_request_auto_ranged_put_new_error_handling) diff --git a/tests/s3_mock_server_s3express_provider_test.c b/tests/s3_mock_server_s3express_provider_test.c index ee20557e1..6e988bc7d 100644 --- a/tests/s3_mock_server_s3express_provider_test.c +++ b/tests/s3_mock_server_s3express_provider_test.c @@ -547,7 +547,6 @@ TEST_CASE(s3express_provider_stress_mock_server) { /* Stress about under load, keep hitting 10 hosts */ for (size_t i = 0; i < num_requests; i++) { - /* code */ char key_buffer[128] = ""; snprintf(key_buffer, sizeof(key_buffer), "test-%zu", (size_t)(i % 10)); struct aws_credentials_properties_s3express property = { @@ -562,7 +561,6 @@ TEST_CASE(s3express_provider_stress_mock_server) { /* Stress about over load, keep hitting different hosts */ s_s3express_tester.credentials_callbacks_received = 0; for (size_t i = 0; i < num_requests; i++) { - /* code */ char key_buffer[128] = ""; snprintf(key_buffer, sizeof(key_buffer), "test-%zu", i); struct aws_credentials_properties_s3express property = { @@ -583,7 +581,7 @@ TEST_CASE(s3express_provider_stress_mock_server) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_provider_long_run_real_server) { +TEST_CASE(s3express_provider_long_running) { (void)ctx; struct aws_s3_tester tester; @@ -637,7 +635,6 @@ TEST_CASE(s3express_provider_long_run_real_server) { } /** * We should have more than 2 different creds. - * Server can return a credentials that expires less than 5 mins. 
**/ ASSERT_TRUE(s_s3express_tester.number_of_credentials >= 2); diff --git a/tests/s3_mock_server_s3express_client_test.c b/tests/s3_s3express_client_test.c similarity index 70% rename from tests/s3_mock_server_s3express_client_test.c rename to tests/s3_s3express_client_test.c index 14019e045..a04e3045a 100644 --- a/tests/s3_mock_server_s3express_client_test.c +++ b/tests/s3_s3express_client_test.c @@ -221,7 +221,7 @@ static int s_s3express_put_object_request( return AWS_OP_SUCCESS; } -static int s_s3express_client_put_test_real_server_helper(struct aws_allocator *allocator, size_t content_length) { +static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, size_t content_length) { struct aws_s3_tester tester; ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); @@ -256,14 +256,101 @@ static int s_s3express_client_put_test_real_server_helper(struct aws_allocator * return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_put_test_small_real_server) { +TEST_CASE(s3express_client_put_test_small) { (void)ctx; - return s_s3express_client_put_test_real_server_helper(allocator, MB_TO_BYTES(1)); + return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(1)); } -TEST_CASE(s3express_client_put_test_large_real_server) { +TEST_CASE(s3express_client_put_test_large) { (void)ctx; - return s_s3express_client_put_test_real_server_helper(allocator, MB_TO_BYTES(100)); + return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(100)); +} + +TEST_CASE(s3express_client_put_test_multiple) { + (void)ctx; + +#define NUM_REQUESTS 100 + struct aws_s3_meta_request *meta_requests[NUM_REQUESTS]; + struct aws_s3_meta_request_test_results meta_request_test_results[NUM_REQUESTS]; + struct aws_input_stream *input_streams[NUM_REQUESTS]; + + struct aws_s3_tester tester; + ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); + + struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); + + char endpoint[] = 
"crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; + struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); + struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); + + struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2"); + char west2_endpoint[] = "crts-west2--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com"; + struct aws_byte_cursor west2_host_cursor = aws_byte_cursor_from_c_str(west2_endpoint); + + struct aws_s3_client_config client_config = { + .part_size = MB_TO_BYTES(5), + .enable_s3express = true, + .region = region_cursor, + }; + + ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); + + struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); + + for (size_t i = 0; i < NUM_REQUESTS; ++i) { + input_streams[i] = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10)); + + struct aws_byte_cursor request_region = region_cursor; + struct aws_byte_cursor request_host = host_cursor; + if (i % 2 == 0) { + request_region = west2_region_cursor; + request_host = west2_host_cursor; + } + + struct aws_http_message *message = aws_s3_test_put_object_request_new( + allocator, &request_host, key_cursor, g_test_body_content_type, input_streams[i], 0); + + struct aws_s3_meta_request_options options; + AWS_ZERO_STRUCT(options); + options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; + options.message = message; + struct aws_signing_config_aws s3express_signing_config = { + .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, + .service = g_s3express_service_name, + .region = request_region, + }; + options.signing_config = &s3express_signing_config; + aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); + + ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); + + meta_requests[i] = aws_s3_client_make_meta_request(client, &options); + 
ASSERT_TRUE(meta_requests[i] != NULL); + } + /* Wait for the request to finish. */ + aws_s3_tester_wait_for_meta_request_finish(&tester); + aws_s3_tester_lock_synced_data(&tester); + ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); + aws_s3_tester_unlock_synced_data(&tester); + + for (size_t i = 0; i < NUM_REQUESTS; ++i) { + meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); + } + + aws_s3_tester_wait_for_meta_request_shutdown(&tester); + + for (size_t i = 0; i < NUM_REQUESTS; ++i) { + aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); + aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); + } + + for (size_t i = 0; i < NUM_REQUESTS; ++i) { + aws_input_stream_release(input_streams[i]); + } + + aws_s3_client_release(client); + aws_s3_tester_clean_up(&tester); + return AWS_OP_SUCCESS; } void s_meta_request_finished_overhead( @@ -300,7 +387,7 @@ struct aws_s3express_credentials_provider *s_s3express_provider_mock_factory( } /* Long running test to make sure our refresh works properly */ -TEST_CASE(s3express_client_put_long_running_test_real_server) { +TEST_CASE(s3express_client_put_long_running_test) { (void)ctx; struct aws_s3_tester tester; @@ -375,7 +462,7 @@ TEST_CASE(s3express_client_put_long_running_test_real_server) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_get_test_real_server) { +TEST_CASE(s3express_client_get_test) { (void)ctx; struct aws_s3_tester tester; @@ -429,3 +516,73 @@ TEST_CASE(s3express_client_get_test_real_server) { aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; } + +TEST_CASE(s3express_client_get_multiple_test) { + (void)ctx; + + struct aws_s3_meta_request *meta_requests[100]; + struct aws_s3_meta_request_test_results meta_request_test_results[100]; + size_t num_meta_requests = AWS_ARRAY_SIZE(meta_requests); + + struct aws_s3_tester tester; + ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); + + struct aws_byte_cursor region_cursor = 
aws_byte_cursor_from_c_str("us-east-1"); + + char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; + struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); + struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-download-10MB"); + + struct aws_s3_client_config client_config = { + .part_size = MB_TO_BYTES(5), + .enable_s3express = true, + .region = region_cursor, + }; + + ASSERT_SUCCESS(aws_s3_tester_bind_client(&tester, &client_config, AWS_S3_TESTER_BIND_CLIENT_SIGNING)); + + struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); + + for (size_t i = 0; i < num_meta_requests; ++i) { + + struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, host_cursor, key_cursor); + + struct aws_s3_meta_request_options options; + AWS_ZERO_STRUCT(options); + options.type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT; + options.message = message; + struct aws_signing_config_aws s3express_signing_config = { + .algorithm = AWS_SIGNING_ALGORITHM_V4_S3EXPRESS, + .service = g_s3express_service_name, + }; + options.signing_config = &s3express_signing_config; + aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); + + ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); + + meta_requests[i] = aws_s3_client_make_meta_request(client, &options); + ASSERT_TRUE(meta_requests[i] != NULL); + + aws_http_message_release(message); + } + /* Wait for the request to finish. 
*/ + aws_s3_tester_wait_for_meta_request_finish(&tester); + aws_s3_tester_lock_synced_data(&tester); + ASSERT_TRUE(tester.synced_data.finish_error_code == AWS_ERROR_SUCCESS); + aws_s3_tester_unlock_synced_data(&tester); + + for (size_t i = 0; i < num_meta_requests; ++i) { + meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); + } + + aws_s3_tester_wait_for_meta_request_shutdown(&tester); + + for (size_t i = 0; i < num_meta_requests; ++i) { + aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); + aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); + } + + aws_s3_client_release(client); + aws_s3_tester_clean_up(&tester); + return AWS_OP_SUCCESS; +} From 60e791869176f84ff31e0affa6d696ce0c341a16 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 20:05:09 +0000 Subject: [PATCH 02/10] fix the test and wrong number --- source/s3_client.c | 4 ++-- tests/CMakeLists.txt | 2 +- tests/s3_data_plane_tests.c | 1 + tests/s3_s3express_client_test.c | 5 +++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/source/s3_client.c b/source/s3_client.c index 68f31a954..337f661fd 100644 --- a/source/s3_client.c +++ b/source/s3_client.c @@ -1630,9 +1630,9 @@ static bool s_meta_request_create_more_requests( * * We cannot create more requests for this meta request. 
*/ - if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) > + if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) >= max_requests_prepare || - num_requests_in_flight > max_requests_in_flight) { + num_requests_in_flight >= max_requests_in_flight) { return false; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e2fe12e01..408ec952b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -300,7 +300,7 @@ add_net_test_case(s3express_client_put_test_large) add_net_test_case(s3express_client_put_test_multiple) add_net_test_case(s3express_client_put_long_running_test) add_net_test_case(s3express_client_get_test) -add_net_test_case(s3express_client_get_multiple_test) +add_net_test_case(s3express_client_get_test_multiple) add_net_test_case(meta_request_auto_ranged_get_new_error_handling) add_net_test_case(meta_request_auto_ranged_put_new_error_handling) diff --git a/tests/s3_data_plane_tests.c b/tests/s3_data_plane_tests.c index 67db4a858..cc1de27db 100644 --- a/tests/s3_data_plane_tests.c +++ b/tests/s3_data_plane_tests.c @@ -1801,6 +1801,7 @@ static int s_test_s3_put_object_multiple_helper(struct aws_allocator *allocator, aws_string_destroy(filepath_str[i]); } } +#undef NUM_REQUESTS aws_string_destroy(host_name); host_name = NULL; diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c index a04e3045a..becba0f39 100644 --- a/tests/s3_s3express_client_test.c +++ b/tests/s3_s3express_client_test.c @@ -326,6 +326,7 @@ TEST_CASE(s3express_client_put_test_multiple) { meta_requests[i] = aws_s3_client_make_meta_request(client, &options); ASSERT_TRUE(meta_requests[i] != NULL); + aws_http_message_release(message); } /* Wait for the request to finish. 
*/ aws_s3_tester_wait_for_meta_request_finish(&tester); @@ -340,7 +341,7 @@ TEST_CASE(s3express_client_put_test_multiple) { aws_s3_tester_wait_for_meta_request_shutdown(&tester); for (size_t i = 0; i < NUM_REQUESTS; ++i) { - aws_s3_tester_validate_get_object_results(&meta_request_test_results[i], 0); + aws_s3_tester_validate_put_object_results(&meta_request_test_results[i], 0); aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); } @@ -517,7 +518,7 @@ TEST_CASE(s3express_client_get_test) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_get_multiple_test) { +TEST_CASE(s3express_client_get_test_multiple) { (void)ctx; struct aws_s3_meta_request *meta_requests[100]; From a9d7793585aea9e4114c7149b6911473c2193b27 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 20:09:20 +0000 Subject: [PATCH 03/10] undef --- tests/s3_s3express_client_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c index becba0f39..8bdba09f8 100644 --- a/tests/s3_s3express_client_test.c +++ b/tests/s3_s3express_client_test.c @@ -348,7 +348,7 @@ TEST_CASE(s3express_client_put_test_multiple) { for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_input_stream_release(input_streams[i]); } - +#undef NUM_REQUESTS aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; From 8360284b884db2ee2fcb02c1ad5b7f53ff5ee20e Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 20:54:18 +0000 Subject: [PATCH 04/10] update comments --- source/s3_client.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/s3_client.c b/source/s3_client.c index 337f661fd..4a5d22f41 100644 --- a/source/s3_client.c +++ b/source/s3_client.c @@ -1675,10 +1675,8 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) { - /* While: - * * There are meta 
requests to get requests from. - * - * Then update meta requests to get new requests that can then be prepared (reading from any streams, signing, + /** + * Iterate through the meta requests to update meta requests and get new requests that can then be prepared (reading from any streams, signing, * etc.) for sending. */ while (!aws_linked_list_empty(&client->threaded_data.meta_requests)) { From ef9ad58b314568c679e710b26fb91975ce69159a Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 20:59:06 +0000 Subject: [PATCH 05/10] format --- source/s3_client.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/s3_client.c b/source/s3_client.c index 4a5d22f41..360240322 100644 --- a/source/s3_client.c +++ b/source/s3_client.c @@ -1676,8 +1676,8 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) { /** - * Iterate through the meta requests to update meta requests and get new requests that can then be prepared (reading from any streams, signing, - * etc.) for sending. + * Iterate through the meta requests to update meta requests and get new requests that can then be prepared ++ * (reading from any streams, signing, etc.) for sending. 
*/ while (!aws_linked_list_empty(&client->threaded_data.meta_requests)) { From ad4086ab9c22d3922cc5f078b845cd2119101ea8 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Wed, 29 Nov 2023 14:25:18 -0800 Subject: [PATCH 06/10] comments address --- source/s3_client.c | 19 ++++++++++--------- source/s3express_credentials_provider.c | 3 +-- tests/CMakeLists.txt | 15 ++++++++------- tests/s3_data_plane_tests.c | 4 ++-- .../s3_mock_server_s3express_provider_test.c | 2 +- tests/s3_s3express_client_test.c | 18 ++++++++++-------- 6 files changed, 32 insertions(+), 29 deletions(-) diff --git a/source/s3_client.c b/source/s3_client.c index 360240322..549a1a2da 100644 --- a/source/s3_client.c +++ b/source/s3_client.c @@ -1606,9 +1606,9 @@ static void s_s3_client_prepare_callback_queue_request( int error_code, void *user_data); -static bool s_meta_request_create_more_requests( - struct aws_s3_meta_request *meta_request, +static bool s_s3_client_should_update_meta_request( struct aws_s3_client *client, + struct aws_s3_meta_request *meta_request, uint32_t num_requests_in_flight, const uint32_t max_requests_in_flight, const uint32_t max_requests_prepare) { @@ -1622,17 +1622,18 @@ static bool s_meta_request_create_more_requests( } /** - * If: - * * Number of being-prepared + already-prepared-and-queued requests is more than the max that can + * If number of being-prepared + already-prepared-and-queued requests is more than the max that can * be in the preparation stage. - * * Total number of requests tracked by the client is more than the max tracked ("in flight") + * Or total number of requests tracked by the client is more than the max tracked ("in flight") * requests. * * We cannot create more requests for this meta request. 
*/ if ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) >= - max_requests_prepare || - num_requests_in_flight >= max_requests_in_flight) { + max_requests_prepare) { + return false; + } + if (num_requests_in_flight >= max_requests_in_flight) { return false; } @@ -1686,8 +1687,8 @@ void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) { struct aws_s3_meta_request *meta_request = AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data); - if (!s_meta_request_create_more_requests( - meta_request, client, num_requests_in_flight, max_requests_in_flight, max_requests_prepare)) { + if (!s_s3_client_should_update_meta_request( + client, meta_request, num_requests_in_flight, max_requests_in_flight, max_requests_prepare)) { /* Move the meta request to be processed from next loop. */ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node); diff --git a/source/s3express_credentials_provider.c b/source/s3express_credentials_provider.c index 2bb5c3160..1dbf9b969 100644 --- a/source/s3express_credentials_provider.c +++ b/source/s3express_credentials_provider.c @@ -497,7 +497,6 @@ static struct aws_s3express_session_creator *s_session_creator_new( }; aws_byte_buf_init(&session_creator->response_buf, provider->allocator, 512); - struct aws_byte_cursor operation_name = aws_byte_cursor_from_c_str("CreateSession"); struct aws_s3_meta_request_options options = { .message = request, .type = AWS_S3_META_REQUEST_TYPE_DEFAULT, @@ -507,7 +506,7 @@ static struct aws_s3express_session_creator *s_session_creator_new( /* Override endpoint only for tests. */ .endpoint = impl->mock_test.endpoint_override ? 
impl->mock_test.endpoint_override : NULL, .user_data = session_creator, - .operation_name = operation_name, + .operation_name = aws_byte_cursor_from_c_str("CreateSession"), }; session_creator->synced_data.meta_request = aws_s3_client_make_meta_request(impl->client, &options); AWS_FATAL_ASSERT(session_creator->synced_data.meta_request); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f4331498a..c827a0ade 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -294,13 +294,14 @@ if(ENABLE_MOCK_SERVER_TESTS) add_net_test_case(request_time_too_skewed_mock_server) endif() -add_net_test_case(s3express_provider_long_running) -add_net_test_case(s3express_client_put_test_small) -add_net_test_case(s3express_client_put_test_large) -add_net_test_case(s3express_client_put_test_multiple) -add_net_test_case(s3express_client_put_long_running_test) -add_net_test_case(s3express_client_get_test) -add_net_test_case(s3express_client_get_test_multiple) +add_net_test_case(s3express_provider_long_running_session_refresh) + +add_net_test_case(s3express_client_put_object) +add_net_test_case(s3express_client_put_object_multipart) +add_net_test_case(s3express_client_put_object_multipart_multiple) +add_net_test_case(s3express_client_put_object_long_running_session_refresh) +add_net_test_case(s3express_client_get_object) +add_net_test_case(s3express_client_get_object_multiple) add_net_test_case(meta_request_auto_ranged_get_new_error_handling) add_net_test_case(meta_request_auto_ranged_put_new_error_handling) diff --git a/tests/s3_data_plane_tests.c b/tests/s3_data_plane_tests.c index cc1de27db..e879a9557 100644 --- a/tests/s3_data_plane_tests.c +++ b/tests/s3_data_plane_tests.c @@ -1711,7 +1711,8 @@ static int s_test_s3_multipart_put_object_with_acl(struct aws_allocator *allocat static int s_test_s3_put_object_multiple_helper(struct aws_allocator *allocator, bool file_on_disk) { -#define NUM_REQUESTS 5 + enum s_numbers { NUM_REQUESTS = 5 }; + struct aws_s3_meta_request 
*meta_requests[NUM_REQUESTS]; struct aws_s3_meta_request_test_results meta_request_test_results[NUM_REQUESTS]; struct aws_http_message *messages[NUM_REQUESTS]; @@ -1801,7 +1802,6 @@ static int s_test_s3_put_object_multiple_helper(struct aws_allocator *allocator, aws_string_destroy(filepath_str[i]); } } -#undef NUM_REQUESTS aws_string_destroy(host_name); host_name = NULL; diff --git a/tests/s3_mock_server_s3express_provider_test.c b/tests/s3_mock_server_s3express_provider_test.c index 6e988bc7d..693c9fdf8 100644 --- a/tests/s3_mock_server_s3express_provider_test.c +++ b/tests/s3_mock_server_s3express_provider_test.c @@ -581,7 +581,7 @@ TEST_CASE(s3express_provider_stress_mock_server) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_provider_long_running) { +TEST_CASE(s3express_provider_long_running_session_refresh) { (void)ctx; struct aws_s3_tester tester; diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c index 8bdba09f8..cad5b81d2 100644 --- a/tests/s3_s3express_client_test.c +++ b/tests/s3_s3express_client_test.c @@ -256,20 +256,21 @@ static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, s return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_put_test_small) { +TEST_CASE(s3express_client_put_object) { (void)ctx; return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(1)); } -TEST_CASE(s3express_client_put_test_large) { +TEST_CASE(s3express_client_put_object_multipart) { (void)ctx; return s_s3express_client_put_test_helper(allocator, MB_TO_BYTES(100)); } -TEST_CASE(s3express_client_put_test_multiple) { +TEST_CASE(s3express_client_put_object_multipart_multiple) { (void)ctx; -#define NUM_REQUESTS 100 + enum s_numbers { NUM_REQUESTS = 100 }; + struct aws_s3_meta_request *meta_requests[NUM_REQUESTS]; struct aws_s3_meta_request_test_results meta_request_test_results[NUM_REQUESTS]; struct aws_input_stream *input_streams[NUM_REQUESTS]; @@ -303,6 +304,7 @@ TEST_CASE(s3express_client_put_test_multiple) { struct 
aws_byte_cursor request_region = region_cursor; struct aws_byte_cursor request_host = host_cursor; if (i % 2 == 0) { + /* Make half of request to east1 and rest half to west2 */ request_region = west2_region_cursor; request_host = west2_host_cursor; } @@ -348,7 +350,7 @@ TEST_CASE(s3express_client_put_test_multiple) { for (size_t i = 0; i < NUM_REQUESTS; ++i) { aws_input_stream_release(input_streams[i]); } -#undef NUM_REQUESTS + aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); return AWS_OP_SUCCESS; @@ -388,7 +390,7 @@ struct aws_s3express_credentials_provider *s_s3express_provider_mock_factory( } /* Long running test to make sure our refresh works properly */ -TEST_CASE(s3express_client_put_long_running_test) { +TEST_CASE(s3express_client_put_object_long_running_session_refresh) { (void)ctx; struct aws_s3_tester tester; @@ -463,7 +465,7 @@ TEST_CASE(s3express_client_put_long_running_test) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_get_test) { +TEST_CASE(s3express_client_get_object) { (void)ctx; struct aws_s3_tester tester; @@ -518,7 +520,7 @@ TEST_CASE(s3express_client_get_test) { return AWS_OP_SUCCESS; } -TEST_CASE(s3express_client_get_test_multiple) { +TEST_CASE(s3express_client_get_object_multiple) { (void)ctx; struct aws_s3_meta_request *meta_requests[100]; From 5ae133c713bfcf91f7ca3a97211c355e15419c36 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 1 Dec 2023 13:46:29 -0800 Subject: [PATCH 07/10] WIP --- .../s3_mock_server_s3express_provider_test.c | 3 +- tests/s3_tester.c | 34 ++++ tests/s3_tester.h | 10 ++ tests/test_helper/mytest.py | 24 +++ tests/test_helper/test_helper.py | 155 +++++++++++------- 5 files changed, 163 insertions(+), 63 deletions(-) create mode 100644 tests/test_helper/mytest.py diff --git a/tests/s3_mock_server_s3express_provider_test.c b/tests/s3_mock_server_s3express_provider_test.c index 693c9fdf8..bd7d3d873 100644 --- a/tests/s3_mock_server_s3express_provider_test.c +++ 
b/tests/s3_mock_server_s3express_provider_test.c @@ -614,9 +614,8 @@ TEST_CASE(s3express_provider_long_running_session_refresh) { /* 300 secs to make sure we will refresh it at least once. */ size_t num_requests = 600; - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; struct aws_credentials_properties_s3express property = { - .host = aws_byte_cursor_from_c_str(endpoint), + .host = g_test_s3express_bucket_use1_az4_endpoint, }; for (size_t i = 0; i < num_requests; i++) { diff --git a/tests/s3_tester.c b/tests/s3_tester.c index d1e4e6321..e41192d75 100644 --- a/tests/s3_tester.c +++ b/tests/s3_tester.c @@ -64,6 +64,16 @@ struct aws_byte_cursor g_test_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERA /* If `$CRT_S3_TEST_BUCKET_NAME` envrionment variable is set, use `$CRT_S3_TEST_BUCKET_NAME-public`; otherwise, use * aws-c-s3-test-bucket-public */ struct aws_byte_cursor g_test_public_bucket_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-c-s3-test-bucket-public"); +/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use + * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use + * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */ +struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( + "aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com"); +/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use + * `$CRT_S3_TEST_BUCKET_NAME--us1-az1--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use + * aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */ +struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL( + "aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"); #ifdef BYO_CRYPTO /* Under BYO_CRYPTO, this function currently needs to be 
defined by the user. Defining a null implementation here so @@ -355,6 +365,28 @@ int aws_s3_tester_init(struct aws_allocator *allocator, struct aws_s3_tester *te AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); tester->public_bucket_name = aws_string_new_from_c_str(allocator, public_bucket_name_buffer); g_test_public_bucket_name = aws_byte_cursor_from_string(tester->public_bucket_name); + + char s3express_bucket_usw2_az1_endpoint_buffer[512] = ""; + snprintf( + s3express_bucket_usw2_az1_endpoint_buffer, + sizeof(s3express_bucket_usw2_az1_endpoint_buffer), + "" PRInSTR "--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com", + AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); + tester->s3express_bucket_usw2_az1_endpoint = + aws_string_new_from_c_str(allocator, s3express_bucket_usw2_az1_endpoint_buffer); + g_test_s3express_bucket_usw2_az1_endpoint = + aws_byte_cursor_from_string(tester->s3express_bucket_usw2_az1_endpoint); + + char s3express_bucket_use1_az4_name_buffer[128] = ""; + snprintf( + s3express_bucket_use1_az4_name_buffer, + sizeof(s3express_bucket_use1_az4_name_buffer), + "" PRInSTR "--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com", + AWS_BYTE_CURSOR_PRI(g_test_bucket_name)); + tester->s3express_bucket_use1_az4_endpoint = + aws_string_new_from_c_str(allocator, s3express_bucket_use1_az4_name_buffer); + g_test_s3express_bucket_use1_az4_endpoint = + aws_byte_cursor_from_string(tester->s3express_bucket_use1_az4_endpoint); } aws_s3_library_init(allocator); @@ -698,6 +730,8 @@ void aws_s3_tester_clean_up(struct aws_s3_tester *tester) { } aws_string_destroy(tester->bucket_name); aws_string_destroy(tester->public_bucket_name); + aws_string_destroy(tester->s3express_bucket_usw2_az1_endpoint); + aws_string_destroy(tester->s3express_bucket_use1_az4_endpoint); aws_credentials_release(tester->anonymous_creds); diff --git a/tests/s3_tester.h b/tests/s3_tester.h index a2df2817b..a0f5e5c40 100644 --- a/tests/s3_tester.h +++ b/tests/s3_tester.h @@ -100,6 +100,8 @@ struct 
aws_s3_tester { struct aws_string *bucket_name; struct aws_string *public_bucket_name; + struct aws_string *s3express_bucket_usw2_az1_endpoint; + struct aws_string *s3express_bucket_use1_az4_endpoint; struct { struct aws_mutex lock; @@ -489,5 +491,13 @@ extern struct aws_byte_cursor g_test_bucket_name; * aws-c-s3-test-bucket-public */ extern struct aws_byte_cursor g_test_public_bucket_name; +/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use + * `$CRT_S3_TEST_BUCKET_NAME--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com`; otherwise, use + * aws-c-s3-test-bucket--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com */ +extern struct aws_byte_cursor g_test_s3express_bucket_usw2_az1_endpoint; +/* If `$CRT_S3_TEST_BUCKET_NAME` environment variable is set, use + * `$CRT_S3_TEST_BUCKET_NAME--us1-az1--x-s3.s3express-use1-az4.us-east-1.amazonaws.com`; otherwise, use + * aws-c-s3-test-bucket--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com */ +extern struct aws_byte_cursor g_test_s3express_bucket_use1_az4_endpoint; #endif /* AWS_S3_TESTER_H */ diff --git a/tests/test_helper/mytest.py b/tests/test_helper/mytest.py new file mode 100644 index 000000000..443758bff --- /dev/null +++ b/tests/test_helper/mytest.py @@ -0,0 +1,24 @@ +import logging +import boto3 +from botocore.exceptions import ClientError + + +print("boto3 version is: " + boto3.__version__) + + +def create_bucket(s3_client, bucket_name, availability_zone): + s3_client.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={ + 'Location': {'Type': "AvailabilityZone", 'Name': availability_zone}, + 'Bucket': {'Type': "Directory", 'DataRedundancy': "SingleAvailabilityZone"} + }) + + +if __name__ == '__main__': + + bucket_name = 'test-bbarrn--use1-az4--x-s3' + region = 'us-east-1' + availability_zone = 'use1-az4' + s3_client = boto3.client('s3', region_name=region) + create_bucket(s3_client, bucket_name, availability_zone) diff --git a/tests/test_helper/test_helper.py 
b/tests/test_helper/test_helper.py index 29e65d909..003acbbda 100755 --- a/tests/test_helper/test_helper.py +++ b/tests/test_helper/test_helper.py @@ -9,17 +9,16 @@ import os import random +print(boto3.__version__) +REGION = 'us-east-1' s3 = boto3.resource('s3') -s3_client = boto3.client('s3') +s3_client = boto3.client('s3', region_name=REGION) s3_control_client = boto3.client('s3control') -REGION = 'us-west-2' - - MB = 1024*1024 GB = 1024*1024*1024 @@ -37,21 +36,21 @@ args = parser.parse_args() if args.bucket_name is not None: - BUCKET_NAME = args.bucket_name + BUCKET_NAME_BASE = args.bucket_name elif "CRT_S3_TEST_BUCKET_NAME" in os.environ: - BUCKET_NAME = os.environ['CRT_S3_TEST_BUCKET_NAME'] + BUCKET_NAME_BASE = os.environ['CRT_S3_TEST_BUCKET_NAME'] else: # Generate a random bucket name - BUCKET_NAME = 'aws-c-s3-test-bucket-' + str(random.random())[2:8] + BUCKET_NAME_BASE = 'aws-c-s3-test-bucket-' + str(random.random())[2:8] -PUBLIC_BUCKET_NAME = BUCKET_NAME + "-public" +PUBLIC_BUCKET_NAME = BUCKET_NAME_BASE + "-public" def create_bytes(size): return bytearray([1] * size) -def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME, sse=None, public_read=False): +def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, public_read=False): if size == 0: s3_client.put_object(Bucket=bucket, Key=keyname) print(f"Object {keyname} uploaded") @@ -81,55 +80,87 @@ def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME, sse=None, public print(f"Object {keyname} uploaded") -def create_bucket_with_lifecycle(): +def create_bucket_with_lifecycle(availability_zone=None): try: # Create the bucket. This returns an error if the bucket already exists. 
+ + if availability_zone is not None: + bucket_config = { + 'Location': { + 'Type': 'AvailabilityZone', + 'Name': availability_zone + }, + 'Bucket': { + 'Type': 'Directory', + 'DataRedundancy': 'SingleAvailabilityZone' + } + } + bucket_name = BUCKET_NAME_BASE+f"--{availability_zone}--x-s3" + else: + bucket_config = {'LocationConstraint': REGION} + bucket_name = BUCKET_NAME_BASE + + print(bucket_name) + print(bucket_config) + s3_client.create_bucket( - Bucket=BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': REGION}) - s3_client.put_bucket_lifecycle_configuration( - Bucket=BUCKET_NAME, - LifecycleConfiguration={ - 'Rules': [ - { - 'ID': 'clean up non-pre-existing objects', - 'Expiration': { - 'Days': 1, - }, - 'Filter': { - 'Prefix': 'upload/', + Bucket=bucket_name, CreateBucketConfiguration=bucket_config) + if availability_zone is None: + s3_client.put_bucket_lifecycle_configuration( + Bucket=bucket_name, + LifecycleConfiguration={ + 'Rules': [ + { + 'ID': 'clean up non-pre-existing objects', + 'Expiration': { + 'Days': 1, + }, + 'Filter': { + 'Prefix': 'upload/', + }, + 'Status': 'Enabled', + 'NoncurrentVersionExpiration': { + 'NoncurrentDays': 1, + }, + 'AbortIncompleteMultipartUpload': { + 'DaysAfterInitiation': 1, + }, }, - 'Status': 'Enabled', - 'NoncurrentVersionExpiration': { - 'NoncurrentDays': 1, - }, - 'AbortIncompleteMultipartUpload': { - 'DaysAfterInitiation': 1, - }, - }, - ], - }, - ) - print(f"Bucket {BUCKET_NAME} created", file=sys.stderr) - put_pre_existing_objects( - 10*MB, 'pre-existing-10MB-aes256-c', sse='aes256-c') - put_pre_existing_objects( - 10*MB, 'pre-existing-10MB-aes256', sse='aes256') + ], + }, + ) + print(f"Bucket {bucket_name} created", file=sys.stderr) + put_pre_existing_objects( - 10*MB, 'pre-existing-10MB-kms', sse='kms') - put_pre_existing_objects(256*MB, 'pre-existing-256MB') - put_pre_existing_objects(256*MB, 'pre-existing-256MB-@') - put_pre_existing_objects(2*GB, 'pre-existing-2GB') - 
put_pre_existing_objects(2*GB, 'pre-existing-2GB-@') - put_pre_existing_objects(10*MB, 'pre-existing-10MB') - put_pre_existing_objects(1*MB, 'pre-existing-1MB') - put_pre_existing_objects(1*MB, 'pre-existing-1MB-@') - put_pre_existing_objects(0, 'pre-existing-empty') + 10*MB, 'pre-existing-10MB', bucket=bucket_name) + + if availability_zone is None: + put_pre_existing_objects( + 10*MB, 'pre-existing-10MB-aes256-c', sse='aes256-c', bucket=bucket_name) + put_pre_existing_objects( + 10*MB, 'pre-existing-10MB-aes256', sse='aes256', bucket=bucket_name) + put_pre_existing_objects( + 10*MB, 'pre-existing-10MB-kms', sse='kms', bucket=bucket_name) + put_pre_existing_objects( + 256*MB, 'pre-existing-256MB', bucket=bucket_name) + put_pre_existing_objects( + 256*MB, 'pre-existing-256MB-@', bucket=bucket_name) + put_pre_existing_objects( + 2*GB, 'pre-existing-2GB', bucket=bucket_name) + put_pre_existing_objects( + 2*GB, 'pre-existing-2GB-@', bucket=bucket_name) + put_pre_existing_objects( + 1*MB, 'pre-existing-1MB', bucket=bucket_name) + put_pre_existing_objects( + 1*MB, 'pre-existing-1MB-@', bucket=bucket_name) + put_pre_existing_objects( + 0, 'pre-existing-empty', bucket=bucket_name) except botocore.exceptions.ClientError as e: # The bucket already exists. That's fine. 
if e.response['Error']['Code'] == 'BucketAlreadyOwnedByYou' or e.response['Error']['Code'] == 'BucketAlreadyExists': print( - f"Bucket {BUCKET_NAME} not created, skip initializing.", file=sys.stderr) + f"Bucket {bucket_name} not created, skip initializing.", file=sys.stderr) return raise e @@ -168,25 +199,27 @@ def cleanup(bucket_name): if args.action == 'init': try: - print(BUCKET_NAME + " " + PUBLIC_BUCKET_NAME + " initializing...") - create_bucket_with_lifecycle() - create_bucket_with_public_object() - if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME: + print(BUCKET_NAME_BASE + " " + PUBLIC_BUCKET_NAME + " initializing...") + # TODO we cannot use the client across region + create_bucket_with_lifecycle("use1-az4") + create_bucket_with_lifecycle("usw2-az1") + # create_bucket_with_lifecycle() + # create_bucket_with_public_object() + if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME_BASE: print( - f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME} before running the tests.") + f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME_BASE} before running the tests.") except Exception as e: print(e) - try: - # Try to clean up the bucket created, when initialization failed. - cleanup(BUCKET_NAME) - cleanup(PUBLIC_BUCKET_NAME) - except Exception as e: - exit(-1) - raise e + # try: + # # Try to clean up the bucket created, when initialization failed. 
+ # # cleanup(BUCKET_NAME_BASE) + # # cleanup(PUBLIC_BUCKET_NAME) + # except Exception as e2: + # exit(-1) exit(-1) elif args.action == 'clean': if "CRT_S3_TEST_BUCKET_NAME" not in os.environ and args.bucket_name is None: print("Set the environment variable CRT_S3_TEST_BUCKET_NAME before clean up, or pass in bucket_name as argument.") exit(-1) - cleanup(BUCKET_NAME) + cleanup(BUCKET_NAME_BASE) cleanup(PUBLIC_BUCKET_NAME) From 57df3451857de04bbcf4f55e55de4ec492ffc526 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 1 Dec 2023 17:02:34 -0800 Subject: [PATCH 08/10] make the helper work for s3express --- tests/s3_s3express_client_test.c | 51 +++++++++++++++-------------- tests/test_helper/test_helper.py | 56 ++++++++++++++++++-------------- 2 files changed, 57 insertions(+), 50 deletions(-) diff --git a/tests/s3_s3express_client_test.c b/tests/s3_s3express_client_test.c index cad5b81d2..b8f771a7f 100644 --- a/tests/s3_s3express_client_test.c +++ b/tests/s3_s3express_client_test.c @@ -228,8 +228,6 @@ static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, s struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; - struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_s3_client_config client_config = { @@ -243,13 +241,23 @@ static int s_s3express_client_put_test_helper(struct aws_allocator *allocator, s struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); ASSERT_SUCCESS(s_s3express_put_object_request( - allocator, client, content_length, &tester, host_cursor, key_cursor, region_cursor)); + allocator, + client, + content_length, + &tester, + g_test_s3express_bucket_use1_az4_endpoint, + key_cursor, + region_cursor)); struct aws_byte_cursor west2_region_cursor = 
aws_byte_cursor_from_c_str("us-west-2"); - char west2_endpoint[] = "crts-west2--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com"; - struct aws_byte_cursor west2_host_cursor = aws_byte_cursor_from_c_str(west2_endpoint); ASSERT_SUCCESS(s_s3express_put_object_request( - allocator, client, content_length, &tester, west2_host_cursor, key_cursor, west2_region_cursor)); + allocator, + client, + content_length, + &tester, + g_test_s3express_bucket_usw2_az1_endpoint, + key_cursor, + west2_region_cursor)); aws_s3_client_release(client); aws_s3_tester_clean_up(&tester); @@ -280,13 +288,9 @@ TEST_CASE(s3express_client_put_object_multipart_multiple) { struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; - struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_byte_cursor west2_region_cursor = aws_byte_cursor_from_c_str("us-west-2"); - char west2_endpoint[] = "crts-west2--usw2-az1--x-s3.s3express-usw2-az1.us-west-2.amazonaws.com"; - struct aws_byte_cursor west2_host_cursor = aws_byte_cursor_from_c_str(west2_endpoint); struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), @@ -302,11 +306,11 @@ TEST_CASE(s3express_client_put_object_multipart_multiple) { input_streams[i] = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10)); struct aws_byte_cursor request_region = region_cursor; - struct aws_byte_cursor request_host = host_cursor; + struct aws_byte_cursor request_host = g_test_s3express_bucket_use1_az4_endpoint; if (i % 2 == 0) { /* Make half of request to east1 and rest half to west2 */ request_region = west2_region_cursor; - request_host = west2_host_cursor; + request_host = g_test_s3express_bucket_usw2_az1_endpoint; } struct aws_http_message *message = aws_s3_test_put_object_request_new( @@ -401,8 +405,6 @@ 
TEST_CASE(s3express_client_put_object_long_running_session_refresh) { struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; - struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-test"); struct aws_s3_client_config client_config = { @@ -421,7 +423,12 @@ TEST_CASE(s3express_client_put_object_long_running_session_refresh) { struct aws_input_stream *upload_stream = aws_s3_test_input_stream_new(allocator, MB_TO_BYTES(10)); struct aws_http_message *message = aws_s3_test_put_object_request_new( - allocator, &host_cursor, key_cursor, g_test_body_content_type, upload_stream, 0); + allocator, + &g_test_s3express_bucket_use1_az4_endpoint, + key_cursor, + g_test_body_content_type, + upload_stream, + 0); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); options.type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT; @@ -473,10 +480,6 @@ TEST_CASE(s3express_client_get_object) { struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; - struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); - struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-download-10MB"); - struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, @@ -487,7 +490,8 @@ TEST_CASE(s3express_client_get_object) { struct aws_s3_client *client = aws_s3_client_new(allocator, &client_config); - struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, host_cursor, key_cursor); + struct aws_http_message *message = aws_s3_test_get_object_request_new( + allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB); struct aws_s3_meta_request_options options; 
AWS_ZERO_STRUCT(options); @@ -532,10 +536,6 @@ TEST_CASE(s3express_client_get_object_multiple) { struct aws_byte_cursor region_cursor = aws_byte_cursor_from_c_str("us-east-1"); - char endpoint[] = "crts-east1--use1-az4--x-s3.s3express-use1-az4.us-east-1.amazonaws.com"; - struct aws_byte_cursor host_cursor = aws_byte_cursor_from_c_str(endpoint); - struct aws_byte_cursor key_cursor = aws_byte_cursor_from_c_str("/crt-download-10MB"); - struct aws_s3_client_config client_config = { .part_size = MB_TO_BYTES(5), .enable_s3express = true, @@ -548,7 +548,8 @@ TEST_CASE(s3express_client_get_object_multiple) { for (size_t i = 0; i < num_meta_requests; ++i) { - struct aws_http_message *message = aws_s3_test_get_object_request_new(allocator, host_cursor, key_cursor); + struct aws_http_message *message = aws_s3_test_get_object_request_new( + allocator, g_test_s3express_bucket_use1_az4_endpoint, g_pre_existing_object_10MB); struct aws_s3_meta_request_options options; AWS_ZERO_STRUCT(options); diff --git a/tests/test_helper/test_helper.py b/tests/test_helper/test_helper.py index 003acbbda..9056dd97a 100755 --- a/tests/test_helper/test_helper.py +++ b/tests/test_helper/test_helper.py @@ -11,9 +11,11 @@ print(boto3.__version__) -REGION = 'us-east-1' +REGION = 'us-west-2' +REGION_EAST_1 = 'us-east-1' s3 = boto3.resource('s3') s3_client = boto3.client('s3', region_name=REGION) +s3_client_east1 = boto3.client('s3', region_name=REGION_EAST_1) s3_control_client = boto3.client('s3control') @@ -50,9 +52,9 @@ def create_bytes(size): return bytearray([1] * size) -def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, public_read=False): +def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, public_read=False, client=s3_client): if size == 0: - s3_client.put_object(Bucket=bucket, Key=keyname) + client.put_object(Bucket=bucket, Key=keyname) print(f"Object {keyname} uploaded") return @@ -71,7 +73,7 @@ def put_pre_existing_objects(size, keyname, 
bucket=BUCKET_NAME_BASE, sse=None, p if public_read: args['ACL'] = 'public-read' try: - s3_client.put_object(**args) + client.put_object(**args) except botocore.exceptions.ClientError as e: print(f"Object {keyname} failed to upload, with exception: {e}") if public_read and e.response['Error']['Code'] == 'AccessDenied': @@ -80,7 +82,7 @@ def put_pre_existing_objects(size, keyname, bucket=BUCKET_NAME_BASE, sse=None, p print(f"Object {keyname} uploaded") -def create_bucket_with_lifecycle(availability_zone=None): +def create_bucket_with_lifecycle(availability_zone=None, client=s3_client): try: # Create the bucket. This returns an error if the bucket already exists. @@ -100,13 +102,10 @@ def create_bucket_with_lifecycle(availability_zone=None): bucket_config = {'LocationConstraint': REGION} bucket_name = BUCKET_NAME_BASE - print(bucket_name) - print(bucket_config) - - s3_client.create_bucket( + client.create_bucket( Bucket=bucket_name, CreateBucketConfiguration=bucket_config) if availability_zone is None: - s3_client.put_bucket_lifecycle_configuration( + client.put_bucket_lifecycle_configuration( Bucket=bucket_name, LifecycleConfiguration={ 'Rules': [ @@ -132,7 +131,7 @@ def create_bucket_with_lifecycle(availability_zone=None): print(f"Bucket {bucket_name} created", file=sys.stderr) put_pre_existing_objects( - 10*MB, 'pre-existing-10MB', bucket=bucket_name) + 10*MB, 'pre-existing-10MB', bucket=bucket_name, client=client) if availability_zone is None: put_pre_existing_objects( @@ -190,36 +189,43 @@ def create_bucket_with_public_object(): raise e -def cleanup(bucket_name): - bucket = s3.Bucket(bucket_name) - bucket.objects.all().delete() - s3_client.delete_bucket(Bucket=bucket_name) +def cleanup(bucket_name, availability_zone=None, client=s3_client): + if availability_zone is not None: + bucket_name = bucket_name+f"--{availability_zone}--x-s3" + + objects = client.list_objects_v2(Bucket=bucket_name)["Contents"] + objects = list(map(lambda x: {"Key": x["Key"]}, objects)) + 
client.delete_objects(Bucket=bucket_name, Delete={"Objects": objects}) + client.delete_bucket(Bucket=bucket_name) print(f"Bucket {bucket_name} deleted", file=sys.stderr) if args.action == 'init': try: print(BUCKET_NAME_BASE + " " + PUBLIC_BUCKET_NAME + " initializing...") - # TODO we cannot use the client across region - create_bucket_with_lifecycle("use1-az4") + create_bucket_with_lifecycle("use1-az4", s3_client_east1) create_bucket_with_lifecycle("usw2-az1") - # create_bucket_with_lifecycle() - # create_bucket_with_public_object() + create_bucket_with_lifecycle() + create_bucket_with_public_object() if os.environ.get('CRT_S3_TEST_BUCKET_NAME') != BUCKET_NAME_BASE: print( f"* Please set the environment variable $CRT_S3_TEST_BUCKET_NAME to {BUCKET_NAME_BASE} before running the tests.") except Exception as e: print(e) - # try: - # # Try to clean up the bucket created, when initialization failed. - # # cleanup(BUCKET_NAME_BASE) - # # cleanup(PUBLIC_BUCKET_NAME) - # except Exception as e2: - # exit(-1) + try: + # Try to clean up the bucket created, when initialization failed. 
+ cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1) + cleanup(BUCKET_NAME_BASE, "usw2-az1") + cleanup(BUCKET_NAME_BASE) + cleanup(PUBLIC_BUCKET_NAME) + except Exception as e2: + exit(-1) exit(-1) elif args.action == 'clean': if "CRT_S3_TEST_BUCKET_NAME" not in os.environ and args.bucket_name is None: print("Set the environment variable CRT_S3_TEST_BUCKET_NAME before clean up, or pass in bucket_name as argument.") exit(-1) + cleanup(BUCKET_NAME_BASE, "use1-az4", s3_client_east1) + cleanup(BUCKET_NAME_BASE, "usw2-az1") cleanup(BUCKET_NAME_BASE) cleanup(PUBLIC_BUCKET_NAME) From 535f3816f67fd76ea24bfc9a777b0b41159d65e4 Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Fri, 1 Dec 2023 17:04:30 -0800 Subject: [PATCH 09/10] remove my example program --- tests/test_helper/mytest.py | 24 ------------------------ 1 file changed, 24 deletions(-) delete mode 100644 tests/test_helper/mytest.py diff --git a/tests/test_helper/mytest.py b/tests/test_helper/mytest.py deleted file mode 100644 index 443758bff..000000000 --- a/tests/test_helper/mytest.py +++ /dev/null @@ -1,24 +0,0 @@ -import logging -import boto3 -from botocore.exceptions import ClientError - - -print("boto3 version is: " + boto3.__version__) - - -def create_bucket(s3_client, bucket_name, availability_zone): - s3_client.create_bucket( - Bucket=bucket_name, - CreateBucketConfiguration={ - 'Location': {'Type': "AvailabilityZone", 'Name': availability_zone}, - 'Bucket': {'Type': "Directory", 'DataRedundancy': "SingleAvailabilityZone"} - }) - - -if __name__ == '__main__': - - bucket_name = 'test-bbarrn--use1-az4--x-s3' - region = 'us-east-1' - availability_zone = 'use1-az4' - s3_client = boto3.client('s3', region_name=region) - create_bucket(s3_client, bucket_name, availability_zone) From bb75cd3ca8aeefa46c496b9e12efc9cd8f30a0fc Mon Sep 17 00:00:00 2001 From: Dengke Tang Date: Mon, 4 Dec 2023 09:43:13 -0800 Subject: [PATCH 10/10] update the readme --- tests/test_helper/README.md | 11 ++++++++++- 1 file changed, 10 
insertions(+), 1 deletion(-) diff --git a/tests/test_helper/README.md b/tests/test_helper/README.md index 6fb9627e9..91432460a 100644 --- a/tests/test_helper/README.md +++ b/tests/test_helper/README.md @@ -32,13 +32,22 @@ python3 test_helper.py clean + `pre-existing-10MB` + `pre-existing-1MB` + `pre-existing-empty` + * Create `-public` in us-west-2 * Upload files: + `pre-existing-1MB` 1MB file with public read access. +* Create directory bucket `--usw2-az1--x-s3` in us-west-2 +* Upload files: + + `pre-existing-10MB` 10MB file. + +* Create directory bucket `--use1-az4--x-s3` in us-east-1 +* Upload files: + + `pre-existing-10MB` 10MB file. + ### `clean` action -* Delete the `` and `-public` and every object inside them +* Delete the buckets created by the init action and every object inside them. ## BUCKET_NAME