Commit fcda93b

Merge branch 'main' into bypass_create_session

TingDaoK authored Nov 29, 2023
2 parents ad4086a + ecd0143

Showing 7 changed files with 76 additions and 15 deletions.
3 changes: 3 additions & 0 deletions include/aws/s3/private/s3_buffer_pool.h
@@ -38,6 +38,9 @@ struct aws_s3_buffer_pool_usage_stats {
      * buffer reserved for overhead of the pool */
     size_t mem_limit;
 
+    /* Max size of buffer to be allocated from primary. */
+    size_t primary_cutoff;
+
     /* How much mem is used in primary storage. includes memory used by blocks
      * that are waiting on all allocs to release before being put back in circulation. */
     size_t primary_used;
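
For context, a minimal sketch of how a caller can read the new field through the existing stats API (buffer_pool is assumed to come from aws_s3_buffer_pool_new; the new test added below does exactly this):

    /* A primary_cutoff of 0 means primary storage, and with it buffer
     * reuse, is disabled for this pool. */
    struct aws_s3_buffer_pool_usage_stats stats = aws_s3_buffer_pool_get_usage(buffer_pool);
    if (stats.primary_cutoff == 0) {
        /* every buffer is allocated directly from the base allocator */
    }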
1 change: 1 addition & 0 deletions include/aws/s3/s3.h
@@ -42,6 +42,7 @@ enum aws_s3_errors {
     AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED,
     AWS_ERROR_S3_FILE_MODIFIED,
     AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT,
+    AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG,
     AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID)
 };
 
2 changes: 2 additions & 0 deletions source/s3.c
@@ -42,6 +42,8 @@ static struct aws_error_info s_errors[] = {
     AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, "RequestTimeTooSkewed error received from S3."),
     AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_FILE_MODIFIED, "The file was modified during upload."),
     AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, "Request was not created due to used memory exceeding memory limit."),
+    AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, "Specified memory configuration is invalid for the system. "
+        "Memory limit should be at least 1GiB. Part size and max part size should be smaller than memory limit."),
 };
 /* clang-format on */
 
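
As a usage sketch (hypothetical caller code, not part of this commit), the error-info registration above lets callers turn the new code into a readable message with aws-c-common's aws_error_str():

    /* assumes <stdio.h> and aws/common/error.h are included */
    int err = aws_last_error();
    if (err == AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG) {
        fprintf(stderr, "S3 memory limit misconfigured: %s\n", aws_error_str(err));
    }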
50 changes: 36 additions & 14 deletions source/s3_buffer_pool.c
@@ -50,8 +50,19 @@ static size_t s_block_list_initial_capacity = 5;
  * client as well as any allocations overruns due to memory waste in the pool. */
 static const size_t s_buffer_pool_reserved_mem = MB_TO_BYTES(128);
 
+/*
+ * How many chunks make up a block in primary storage.
+ */
 static const size_t s_chunks_per_block = 16;
 
+/*
+ * Max size of chunks in primary.
+ * Effectively if client part size is above the following number, primary
+ * storage along with buffer reuse is disabled and all buffers are allocated
+ * directly using allocator.
+ */
+static const size_t s_max_chunk_size_for_buffer_reuse = MB_TO_BYTES(64);
+
 struct aws_s3_buffer_pool {
     struct aws_allocator *base_allocator;
     struct aws_mutex mutex;
@@ -124,29 +135,39 @@ struct aws_s3_buffer_pool *aws_s3_buffer_pool_new(
 
     if (mem_limit < GB_TO_BYTES(1)) {
         AWS_LOGF_ERROR(
-            AWS_LS_S3_CLIENT, "Failed to initialize buffer pool. Min supported value for Memory Limit is 1GB.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+            AWS_LS_S3_CLIENT,
+            "Failed to initialize buffer pool. "
+            "Minimum supported value for Memory Limit is 1GB.");
+        aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG);
         return NULL;
     }
 
-    if (!(chunk_size == 0 || (chunk_size > (1024) && chunk_size % 1024 == 0))) {
-        AWS_LOGF_ERROR(
+    if (chunk_size < (1024) || chunk_size % (4 * 1024) != 0) {
+        AWS_LOGF_WARN(
             AWS_LS_S3_CLIENT,
-            "Failed to initialize buffer pool. Chunk size must be either 0 or more than 1 KB and size must be 1 KB "
-            "aligned.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-        return NULL;
+            "Part size specified on the client can lead to suboptimal performance. "
+            "Consider specifying size in multiples of 4KiB. Ideal part size for most transfers is "
+            "a 1MiB multiple between 8MiB and 16MiB. Note: the client will automatically scale part size "
+            "if it's not sufficient to transfer data within the maximum number of parts.");
     }
 
     size_t adjusted_mem_lim = mem_limit - s_buffer_pool_reserved_mem;
 
-    if (chunk_size * s_chunks_per_block > adjusted_mem_lim) {
-        AWS_LOGF_ERROR(
+    /*
+     * TODO: There are several things we can consider tweaking here:
+     * - if chunk size is a weird number of bytes, force it to the closest page size?
+     * - grow chunk size max based on overall mem lim (ex. for 4gb it might be
+     *   64mb, but for 8gb it can be 128mb)
+     * - align chunk size to better fill available mem? some chunk sizes can
+     *   result in memory being wasted because overall limit does not divide
+     *   nicely into chunks
+     */
+    if (chunk_size > s_max_chunk_size_for_buffer_reuse || chunk_size * s_chunks_per_block > adjusted_mem_lim) {
+        AWS_LOGF_WARN(
             AWS_LS_S3_CLIENT,
-            "Failed to initialize buffer pool. Chunk size is too large for the memory limit. "
-            "Consider adjusting memory limit or part size.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
-        return NULL;
+            "Part size specified on the client is too large for automatic buffer reuse. "
+            "Consider specifying a smaller part size to improve performance and memory utilization.");
+        chunk_size = 0;
     }
 
     struct aws_s3_buffer_pool *buffer_pool = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_buffer_pool));
@@ -405,6 +426,7 @@ struct aws_s3_buffer_pool_usage_stats aws_s3_buffer_pool_get_usage(struct aws_s3
 
     struct aws_s3_buffer_pool_usage_stats ret = (struct aws_s3_buffer_pool_usage_stats){
         .mem_limit = buffer_pool->mem_limit,
+        .primary_cutoff = buffer_pool->primary_size_cutoff,
         .primary_allocated = buffer_pool->primary_allocated,
         .primary_used = buffer_pool->primary_used,
         .primary_reserved = buffer_pool->primary_reserved,
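
To make the new fallback path concrete, a worked example using the constants from this diff (128MiB reserved overhead, 16 chunks per block, 64MiB reuse cutoff); the values mirror the large-chunk test added below:

    /* mem_limit = 2GiB and chunk_size = 65MiB (derived from part size) */
    size_t adjusted_mem_lim = GB_TO_BYTES(2) - MB_TO_BYTES(128); /* 1920MiB */
    /* 65MiB * 16 = 1040MiB fits under 1920MiB, but 65MiB exceeds the
     * 64MiB s_max_chunk_size_for_buffer_reuse cutoff, so the pool now
     * logs a warning and forces chunk_size = 0: primary storage is
     * skipped and all buffers come straight from the base allocator,
     * whereas before this change such chunks were pooled in primary. */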
2 changes: 1 addition & 1 deletion source/s3_client.c
@@ -365,7 +365,7 @@ struct aws_s3_client *aws_s3_client_new(
             AWS_LS_S3_CLIENT,
             "Cannot create client from client_config; configured max part size should not exceed memory limit."
             "size.");
-        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        aws_raise_error(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG);
         goto on_early_fail;
     }
 
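
For illustration (a hedged sketch, not code from this commit; the config field names memory_limit_in_bytes and max_part_size are assumptions about aws_s3_client_config at the time of this change), a configuration that would now surface the new error instead of the generic AWS_ERROR_INVALID_ARGUMENT:

    struct aws_s3_client_config config;
    AWS_ZERO_STRUCT(config); /* other required fields omitted for brevity */
    config.memory_limit_in_bytes = GB_TO_BYTES(2); /* assumed field name */
    config.max_part_size = GB_TO_BYTES(3);         /* exceeds the memory limit */

    struct aws_s3_client *client = aws_s3_client_new(allocator, &config);
    /* expected: client == NULL and
     * aws_last_error() == AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG */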
2 changes: 2 additions & 0 deletions tests/CMakeLists.txt
@@ -318,9 +318,11 @@ add_test_case(parallel_read_stream_from_file_sanity_test)
 add_test_case(parallel_read_stream_from_large_file_test)
 
 add_test_case(test_s3_buffer_pool_threaded_allocs_and_frees)
+add_test_case(test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees)
 add_test_case(test_s3_buffer_pool_limits)
 add_test_case(test_s3_buffer_pool_trim)
 add_test_case(test_s3_buffer_pool_reservation_hold)
+add_test_case(test_s3_buffer_pool_too_small)
 add_net_test_case(test_s3_put_object_buffer_pool_trim)
 
 add_net_test_case(client_update_upload_part_timeout)
31 changes: 31 additions & 0 deletions tests/s3_buffer_pool_tests.c
@@ -73,6 +73,25 @@ static int s_test_s3_buffer_pool_threaded_allocs_and_frees(struct aws_allocator
 }
 AWS_TEST_CASE(test_s3_buffer_pool_threaded_allocs_and_frees, s_test_s3_buffer_pool_threaded_allocs_and_frees)
 
+static int s_test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees(struct aws_allocator *allocator, void *ctx) {
+    (void)allocator;
+    (void)ctx;
+
+    struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(65), GB_TO_BYTES(2));
+
+    struct aws_s3_buffer_pool_usage_stats stats = aws_s3_buffer_pool_get_usage(buffer_pool);
+    ASSERT_INT_EQUALS(0, stats.primary_cutoff);
+
+    s_thread_test(allocator, s_threaded_alloc_worker, buffer_pool);
+
+    aws_s3_buffer_pool_destroy(buffer_pool);
+
+    return 0;
+}
+AWS_TEST_CASE(
+    test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees,
+    s_test_s3_buffer_pool_large_chunk_threaded_allocs_and_frees)
+
 static int s_test_s3_buffer_pool_limits(struct aws_allocator *allocator, void *ctx) {
     (void)allocator;
     (void)ctx;
@@ -186,3 +205,15 @@ static int s_test_s3_buffer_pool_reservation_hold(struct aws_allocator *allocato
     return 0;
 };
 AWS_TEST_CASE(test_s3_buffer_pool_reservation_hold, s_test_s3_buffer_pool_reservation_hold)
+
+static int s_test_s3_buffer_pool_too_small(struct aws_allocator *allocator, void *ctx) {
+    (void)allocator;
+    (void)ctx;
+
+    struct aws_s3_buffer_pool *buffer_pool = aws_s3_buffer_pool_new(allocator, MB_TO_BYTES(8), MB_TO_BYTES(512));
+    ASSERT_NULL(buffer_pool);
+    ASSERT_INT_EQUALS(AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, aws_last_error());
+
+    return 0;
+};
+AWS_TEST_CASE(test_s3_buffer_pool_too_small, s_test_s3_buffer_pool_too_small)
