This repository has been archived by the owner on Oct 9, 2024. It is now read-only.

MERGEME: Add new option: USE_ARRAY_BASED_BLOCK
shintaro-iwasaki committed Aug 29, 2018
1 parent 082c7d1 commit e1e32d8
Showing 1 changed file with 66 additions and 6 deletions.
72 changes: 66 additions & 6 deletions src/mem/zm_pool.c
@@ -65,6 +65,16 @@

#endif // USE_PAGE

// Define USE_ARRAY_BASED_BLOCK to use array-based block management.
// Array-based block management
// Pros: No additional data segment is needed per element.
// Cons: Blocks are large, so copying blocks is costly.
// Linked list-based block management
// Pros: Blocks are small, so copying blocks is lightweight.
// Cons: Each element needs an additional data segment for the linked list.

#define USE_ARRAY_BASED_BLOCK
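
The trade-off described in the comment above can be made concrete with a minimal standalone sketch (not the pool's code; the names and SKETCH_BLOCKSIZE constant below are made up for illustration): an array-based block stores its pointers inline and pushes/pops at the end, while a list-based block threads elements through a next pointer embedded in each element.

/* Illustrative sketch only; not the pool's code. */
#define SKETCH_BLOCKSIZE 4

/* Array-based: the block itself holds the pointers; elements carry no header. */
struct sketch_array_block {
    int num_elements;
    void *elements[SKETCH_BLOCKSIZE];
};

void array_push(struct sketch_array_block *b, void *e) {
    b->elements[b->num_elements++] = e;    /* store at the current count, then count it */
}

void *array_pop(struct sketch_array_block *b) {
    return b->elements[--b->num_elements]; /* uncount, then read the last stored pointer */
}

/* Linked list-based: each element begins with a next pointer (the extra data segment). */
struct sketch_list_element {
    struct sketch_list_element *next;
    /* payload bytes follow */
};

struct sketch_list_block {
    int num_elements;
    struct sketch_list_element *head;
    struct sketch_list_element *tail;
};

void list_push(struct sketch_list_block *b, struct sketch_list_element *e) {
    e->next = b->head;
    b->head = e;
    if (b->num_elements == 0)
        b->tail = e;
    b->num_elements++;
}

struct sketch_list_element *list_pop(struct sketch_list_block *b) {
    struct sketch_list_element *e = b->head;
    b->head = e->next;
    b->num_elements--;
    return e;
}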

////////////////////////////////////////////////////////////////////////////////
// Utility
////////////////////////////////////////////////////////////////////////////////
@@ -400,8 +410,10 @@ static void write_extensible_array(struct extensible_array *p_array, int index,
////////////////////////////////////////////////////////////////////////////////

struct element {
#ifndef USE_ARRAY_BASED_BLOCK
struct element *next;
// struct element occupies the first sizeof(struct element) bytes of each pool element.
#endif
};

#ifdef USE_PAGE
@@ -413,17 +425,21 @@ struct memory_bulk {

struct block {
int num_elements;
#ifdef USE_ARRAY_BASED_BLOCK
struct element *elements[BLOCKSIZE]; // This array is part of the block, so it is never freed separately.
#else
struct element *head;
struct element *tail;
#endif
};

struct local_pool {
int num_elements;
struct block *blocks;
#ifdef USE_PAGE
size_t extra_mem_size;
void *p_extra_mem_ptr;
#endif
struct block blocks[LOCALPOOL_NUM_BLOCKS];
} __attribute__((aligned(ZM_CACHELINE_SIZE)));
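
With USE_ARRAY_BASED_BLOCK, every struct block embeds BLOCKSIZE element pointers, so a local pool that keeps its blocks inline (struct block blocks[LOCALPOOL_NUM_BLOCKS]) grows by BLOCKSIZE * sizeof(void *) bytes per block compared with the list-based layout. A minimal standalone check of the two block footprints (the constant below is illustrative, not the pool's real BLOCKSIZE):

#include <stdio.h>

/* Standalone size check with an illustrative constant; the pool's real BLOCKSIZE
 * is defined elsewhere in this file and may differ. */
#define BLOCKSIZE_ILLUSTRATIVE 64

struct block_array { int num_elements; void *elements[BLOCKSIZE_ILLUSTRATIVE]; };
struct block_list  { int num_elements; void *head; void *tail; };

int main(void) {
    printf("array-based block: %zu bytes\n", sizeof(struct block_array)); /* 520 on LP64 */
    printf("list-based block:  %zu bytes\n", sizeof(struct block_list));  /* 24 on LP64 */
    return 0;
}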

struct global_pool {
@@ -453,23 +469,30 @@ static inline void atomic_insert_bulk(struct global_pool *p_global_pool, struct
while (1) {
struct memory_bulk *cur_bulk = p_global_pool->bulk;
bulk->next = cur_bulk;
if (zm_atomic_compare_exchange_weak(&p_global_pool->bulk, &cur_bulk, bulk, zm_memord_acq_rel, zm_memord_release))
if (zm_atomic_compare_exchange_weak(&p_global_pool->bulk, &cur_bulk, bulk, zm_memord_acq_rel, zm_memord_acquire))
return;
}
}
#endif

static inline void *element_to_ptr(struct element *element) {
#ifdef USE_ARRAY_BASED_BLOCK
return (void *)element;
#else
return (void *)(((char *)element) + sizeof(struct element));
#endif
}

static inline struct element *ptr_to_element(void *ptr) {
#ifdef USE_ARRAY_BASED_BLOCK
return (struct element *)ptr;
#else
return (struct element *)(((char *)ptr) - sizeof(struct element));
#endif
}
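
Both conversions above are inverses of each other: with USE_ARRAY_BASED_BLOCK the user pointer and the element pointer are the same address, while in the list-based layout the struct element header sits immediately before the payload handed to the caller. A minimal round-trip sketch of the header-before-payload case (illustrative names, not the pool's code):

#include <assert.h>
#include <stdlib.h>

struct hdr { struct hdr *next; };

void *hdr_to_ptr(struct hdr *h) { return (char *)h + sizeof(struct hdr); }
struct hdr *ptr_to_hdr(void *p)  { return (struct hdr *)((char *)p - sizeof(struct hdr)); }

int main(void) {
    struct hdr *h = malloc(sizeof(struct hdr) + 32);  /* header plus 32 payload bytes */
    void *user = hdr_to_ptr(h);                       /* what an allocator would hand out */
    assert(ptr_to_hdr(user) == h);                    /* and how the free path recovers the header */
    free(h);
    return 0;
}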

static struct local_pool *local_pool_create() {
struct local_pool *local_pool = (struct local_pool *)aligned_calloc(sizeof(struct local_pool));
local_pool->blocks = (struct block *)aligned_calloc(sizeof(struct block) * LOCALPOOL_NUM_BLOCKS);
return local_pool;
}

@@ -563,7 +586,11 @@ int zm_pool_alloc(zm_pool_t handle, void **ptr) {
}
}
if (GLOBAL_TO_LOCAL_NUM_BLOCKS != num_taken_blocks) {
element_size = ((sizeof(struct element) + global_pool->element_size + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT) * MALLOC_ALIGNMENT;
#ifdef USE_ARRAY_BASED_BLOCK
element_size = (global_pool->element_size + MALLOC_ALIGNMENT - 1) & (~(MALLOC_ALIGNMENT - 1));
#else
element_size = (sizeof(struct element) + global_pool->element_size + MALLOC_ALIGNMENT - 1) & (~(MALLOC_ALIGNMENT - 1));
#endif
}
}
lock_release(&global_pool->lock);
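
The rounding above uses the usual power-of-two trick: (n + MALLOC_ALIGNMENT - 1) & ~(MALLOC_ALIGNMENT - 1) rounds n up to the next multiple of MALLOC_ALIGNMENT, assuming MALLOC_ALIGNMENT is a power of two, and is equivalent to the divide-then-multiply form it replaces; in the array-based case sizeof(struct element) is no longer added because elements carry no per-element header. A tiny standalone check (hypothetical helper name):

#include <assert.h>
#include <stddef.h>

/* Round n up to a multiple of align; align must be a power of two. */
size_t round_up(size_t n, size_t align) {
    return (n + align - 1) & ~(align - 1);
}

int main(void) {
    assert(round_up(40, 16) == 48);  /* 40 rounds up to 48 */
    assert(round_up(48, 16) == 48);  /* an exact multiple stays put */
    return 0;
}
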
@@ -581,8 +608,12 @@ int zm_pool_alloc(zm_pool_t handle, void **ptr) {

struct block *block = &blocks[block_i];
int block_num_elements = 0;
#ifdef USE_ARRAY_BASED_BLOCK
struct element **block_elements = block->elements;
#else
struct element *block_head = NULL;
struct element *block_tail = NULL;
#endif
struct memory_bulk *new_bulk = NULL;

while (num_remaining_elements) {
@@ -601,23 +632,31 @@ int zm_pool_alloc(zm_pool_t handle, void **ptr) {
struct element *new_element = (struct element *)p_extra_mem_ptr;
p_extra_mem_ptr += element_size;
extra_mem_size -= element_size;
#ifdef USE_ARRAY_BASED_BLOCK
block_elements[block_num_elements] = new_element;
#else
new_element->next = NULL;
// Append it to the tail of the block.
if (block_num_elements == 0) {
block_head = new_element;
block_tail = new_element;
} else {
block_tail->next = new_element;
block_tail = new_element;
}
#endif
block_num_elements++;
if (block_num_elements == BLOCKSIZE) {
#ifndef USE_ARRAY_BASED_BLOCK
block->head = block_head;
block->tail = block_tail;
#endif
block->num_elements = block_num_elements;
block_i++;
block = &blocks[block_i];
block_num_elements = 0;
#ifdef USE_ARRAY_BASED_BLOCK
block_elements = block->elements;
#endif
}
num_remaining_elements--;
}
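
The loop above is essentially a bump allocator: a larger chunk (leftover extra memory or a freshly allocated bulk) is consumed from the front in element_size steps, and every BLOCKSIZE-th element closes the current block and starts the next one. A minimal sketch of that carving pattern (illustrative names, not the pool's code):

#include <stddef.h>

/* Carve up to `count` elements of `elem_size` bytes from the front of a chunk. */
size_t carve(char *chunk, size_t chunk_size, size_t elem_size,
             void **out, size_t count) {
    size_t n = 0;
    while (n < count && chunk_size >= elem_size) {
        out[n++] = chunk;        /* hand out the front of the chunk */
        chunk += elem_size;      /* bump past it */
        chunk_size -= elem_size;
    }
    return n;                    /* number of elements actually carved */
}
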
@@ -636,28 +675,41 @@ int zm_pool_alloc(zm_pool_t handle, void **ptr) {
struct block *blocks = local_pool->blocks;
struct block *block = &blocks[block_i];
int block_num_elements = 0;
#ifdef USE_ARRAY_BASED_BLOCK
struct element **block_elements = block->elements;
#else
struct element *block_head = NULL;
struct element *block_tail = NULL;
#endif
for (int i = 0; i < num_remaining_elements; i++) {
// Allocate the remaining elements one by one.
struct element *new_element = (struct element *)aligned_malloc(element_size);
new_element->next = NULL;
// Append it to the tail of the block.
#ifdef USE_ARRAY_BASED_BLOCK
block_elements[block_num_elements] = new_element;
#else
new_element->next = NULL;
if (block_num_elements == 0) {
block_head = new_element;
block_tail = new_element;
} else {
block_tail->next = new_element;
block_tail = new_element;
}
#endif
block_num_elements++;
if (block_num_elements == BLOCKSIZE) {
#ifndef USE_ARRAY_BASED_BLOCK
block->head = block_head;
block->tail = block_tail;
#endif
block->num_elements = block_num_elements;
block_i++;
block = &blocks[block_i];
block_num_elements = 0;
#ifdef USE_ARRAY_BASED_BLOCK
block_elements = block->elements;
#endif
}
}
assert(block_num_elements == 0);
@@ -670,10 +722,14 @@ int zm_pool_alloc(zm_pool_t handle, void **ptr) {
local_pool->num_elements -= 1;
int block_i = local_pool->num_elements >> BLOCKSIZE_LOG;
struct block *block = &local_pool->blocks[block_i];
#ifdef USE_ARRAY_BASED_BLOCK
struct element *element = block->elements[--block->num_elements];
#else
struct element *element = block->head;
struct element *next = element->next;
block->head = next;
block->num_elements -= 1;
#endif
*ptr = element_to_ptr(element);
}
return 0;
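
The pop path above locates its block with a shift: num_elements >> BLOCKSIZE_LOG is the block index, which works on the assumption that BLOCKSIZE is defined elsewhere in this file as 1 << BLOCKSIZE_LOG. A tiny check with illustrative constants:

#include <assert.h>

/* Block index by shift; assumes SKETCH_BLOCKSIZE == 1 << SKETCH_BLOCKSIZE_LOG. */
#define SKETCH_BLOCKSIZE_LOG 6
#define SKETCH_BLOCKSIZE (1 << SKETCH_BLOCKSIZE_LOG)

int main(void) {
    int num_elements = 130;                        /* element count after the decrement */
    int block_i = num_elements >> SKETCH_BLOCKSIZE_LOG;
    assert(block_i == 130 / SKETCH_BLOCKSIZE);     /* same as dividing by the block size */
    assert(block_i == 2);
    return 0;
}
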
@@ -734,6 +790,9 @@ int zm_pool_free(zm_pool_t handle, void *ptr) {
local_pool->num_elements += 1;
struct block *block = &local_pool->blocks[block_i];
struct element *element = ptr_to_element(ptr);
#ifdef USE_ARRAY_BASED_BLOCK
block->elements[block->num_elements++] = element; // store at the current count, then increment
#else
if (zm_unlikely(block->num_elements == 0)) {
block->head = element;
block->tail = element;
Expand All @@ -743,6 +802,7 @@ int zm_pool_free(zm_pool_t handle, void *ptr) {
block->head = element;
}
block->num_elements += 1;
#endif
}
return 0;
}
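
Within a block, the array-based layout behaves as a small LIFO stack: zm_pool_alloc pops with elements[--num_elements], so zm_pool_free must store at the current count and then increment for the two to stay in step. A minimal standalone check of that invariant (illustrative types, not the pool's code):

#include <assert.h>

#define SKETCH_BLOCKSIZE 4
struct sketch_block { int num_elements; void *elements[SKETCH_BLOCKSIZE]; };

int main(void) {
    struct sketch_block b = { 0 };
    int x, y;
    b.elements[b.num_elements++] = &x;           /* push: store at count, then increment */
    b.elements[b.num_elements++] = &y;
    assert(b.elements[--b.num_elements] == &y);  /* pop returns the most recent push */
    assert(b.elements[--b.num_elements] == &x);
    assert(b.num_elements == 0);
    return 0;
}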
