Skip to content

Commit

Permalink
optimize: only lookup thread_local once per reservation
Browse files Browse the repository at this point in the history
Signed-off-by: Greg Greenway <ggreenway@apple.com>
  • Loading branch information
ggreenway committed Jan 26, 2021
1 parent 6bc1fc3 commit 9f3a717
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 20 deletions.
2 changes: 1 addition & 1 deletion source/common/buffer/buffer_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ Reservation OwnedImpl::reserveWithMaxLength(uint64_t max_length) {
break;
}

Slice slice(size);
Slice slice(size, slices_owner->free_list_);
reservation_slices.push_back(slice.reserve(size));
slices_owner->owned_slices_.emplace_back(std::move(slice));
ASSERT(slice.dataSize() == 0);
Expand Down
72 changes: 53 additions & 19 deletions source/common/buffer/buffer_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,15 @@ class Slice {
using Reservation = RawSlice;
using StoragePtr = std::unique_ptr<uint8_t[]>;

// Maximum number of reusable storage buffers kept in the per-thread free list;
// sized to the largest number of slices a single Reservation can use.
static constexpr uint32_t free_list_max_ = Buffer::Reservation::MAX_SLICES_;
using FreeListType = absl::InlinedVector<StoragePtr, free_list_max_>;
// Opaque handle wrapping a reference to the thread_local free list. Callers
// resolve it once via Slice::freeList() and pass it into Slice operations,
// so the thread_local lookup happens once per reservation instead of once
// per slice (see the commit message: "only lookup thread_local once per
// reservation"). The constructor is private and only Slice is a friend, so
// instances can only be created through Slice::freeList().
class FreeListReference {
private:
FreeListReference(FreeListType& free_list) : free_list_(free_list) {}
// The wrapped (thread_local) list of reusable slice storage buffers.
FreeListType& free_list_;
friend class Slice;
};

/**
* Create an empty Slice with 0 capacity.
*/
Expand All @@ -45,9 +54,9 @@ class Slice {
* @param min_capacity number of bytes of space the slice should have. Actual capacity is rounded
* up to the next multiple of 4kb.
*/
// NOTE(review): this is a scraped diff — the removed one-argument constructor
// and the added two-argument constructor appear back to back; only the
// two-argument version exists after this commit.
Slice(uint64_t min_capacity)
: capacity_(sliceSize(min_capacity)), storage_(newStorage(capacity_)), base_(storage_.get()),
data_(0), reservable_(0) {}
// Create a mutable Slice with at least min_capacity bytes (rounded up by
// sliceSize()). When free_list is supplied, newStorage() may reuse a
// previously released buffer from that list instead of allocating.
Slice(uint64_t min_capacity, absl::optional<FreeListReference> free_list = absl::nullopt)
: capacity_(sliceSize(min_capacity)), storage_(newStorage(capacity_, free_list)),
base_(storage_.get()), data_(0), reservable_(0) {}

/**
* Create an immutable Slice that refers to an external buffer fragment.
Expand Down Expand Up @@ -100,6 +109,11 @@ class Slice {
freeStorage(std::move(storage_), capacity_);
}

// Release this slice's storage, offering it to the supplied free list for
// reuse. Takes an already-resolved FreeListReference so callers that free
// many slices (e.g. OwnedImplReservationSlicesOwnerMultiple) resolve the
// thread_local only once, not once per slice.
void freeStorage(FreeListReference free_list) {
// Run and clear any drain trackers registered on this slice before the
// underlying storage is handed back.
callAndClearDrainTrackers();
freeStorage(std::move(storage_), capacity_, free_list);
}

/**
* @return true if the data in the slice is mutable
*/
Expand Down Expand Up @@ -292,6 +306,8 @@ class Slice {

static constexpr uint32_t default_slice_size_ = 16384;

// Resolve the calling thread's free list once. Callers should cache the
// returned handle for the duration of a reservation to avoid a repeated
// thread_local lookup per slice.
static FreeListReference freeList() { return FreeListReference(free_list_); }

protected:
/**
* Compute a slice size big enough to hold a specified amount of data.
Expand All @@ -304,40 +320,48 @@ class Slice {
return num_pages * PageSize;
}

// NOTE(review): scraped diff — the removed single-argument signature and its
// body are interleaved below with the added two-argument version; only the
// two-argument version exists after this commit.
static StoragePtr newStorage(uint64_t capacity) {
// Allocate (or reuse from free_list_opt) a storage buffer of exactly
// `capacity` bytes. `capacity` must be a value returned by sliceSize().
static StoragePtr newStorage(uint64_t capacity, absl::optional<FreeListReference> free_list_opt) {
ASSERT(sliceSize(default_slice_size_) == default_slice_size_,
"default_slice_size_ incompatible with sliceSize()");
ASSERT(sliceSize(capacity) == capacity,
"newStorage should only be called on values returned from sliceSize()");
// A supplied FreeListReference must wrap this thread's own free_list_.
ASSERT(!free_list_opt.has_value() || &free_list_opt->free_list_ == &free_list_);

StoragePtr storage;
// --- removed (old) implementation: read the thread_local directly ---
if (capacity == default_slice_size_ && !free_list_.empty()) {
storage = std::move(free_list_.back());
ASSERT(storage != nullptr);
ASSERT(free_list_.back() == nullptr);
free_list_.pop_back();
} else {
storage.reset(new uint8_t[capacity]);
// --- added (new) implementation: only default-sized buffers are pooled,
// and only when the caller passed in a pre-resolved free list ---
if (capacity == default_slice_size_ && free_list_opt.has_value()) {
FreeListType& free_list = free_list_opt->free_list_;
if (!free_list.empty()) {
// Reuse the most recently freed buffer (LIFO for cache warmth —
// presumably; confirm intent with the author).
storage = std::move(free_list.back());
ASSERT(storage != nullptr);
ASSERT(free_list.back() == nullptr);
free_list.pop_back();
return storage;
}
}

// Free list empty, wrong size, or no list supplied: allocate fresh.
storage.reset(new uint8_t[capacity]);
return storage;
}

// NOTE(review): scraped diff — the removed two-argument signature and its
// body are interleaved below with the added three-argument version; only the
// three-argument version exists after this commit.
static void freeStorage(StoragePtr storage, uint64_t capacity) {
// Dispose of a storage buffer: default-sized buffers are returned to the
// caller-supplied free list (up to free_list_max_ entries); everything else
// is deleted immediately.
static void freeStorage(StoragePtr storage, uint64_t capacity,
absl::optional<FreeListReference> free_list_opt = absl::nullopt) {
if (storage == nullptr) {
return;
}

// --- removed (old) implementation: touched the thread_local directly ---
if (capacity == default_slice_size_ && free_list_.size() < free_list_max_) {
free_list_.emplace_back(std::move(storage));
ASSERT(storage == nullptr);
} else {
storage.reset();
// --- added (new) implementation: pool only when a list was supplied and
// it still has room ---
if (capacity == default_slice_size_ && free_list_opt.has_value()) {
FreeListType& free_list = free_list_opt->free_list_;
if (free_list.size() < free_list_max_) {
free_list.emplace_back(std::move(storage));
ASSERT(storage == nullptr);
return;
}
}

// Not poolable: free the buffer now.
storage.reset();
}

static constexpr uint32_t free_list_max_ = Buffer::Reservation::MAX_SLICES_;
static thread_local absl::InlinedVector<StoragePtr, free_list_max_> free_list_;
static thread_local FreeListType free_list_;

/** Length of the byte array that base_ points to. This is also the offset in bytes from the start
* of the slice to the end of the Reservable section. */
Expand Down Expand Up @@ -711,8 +735,18 @@ class OwnedImpl : public LibEventInstance {
};

// Owner of the slices backing a multi-slice Reservation. Caches the
// thread_local free list handle at construction so every slice freed in the
// destructor reuses the same lookup.
struct OwnedImplReservationSlicesOwnerMultiple : public OwnedImplReservationSlicesOwner {
// Optimization: get the thread_local freeList() once per Reservation, outside the loop.
OwnedImplReservationSlicesOwnerMultiple() : free_list_(Slice::freeList()) {}

// Return each owned slice's storage to the cached free list so the buffers
// can be reused by later reservations on this thread.
~OwnedImplReservationSlicesOwnerMultiple() {
while (!owned_slices_.empty()) {
owned_slices_.back().freeStorage(free_list_);
owned_slices_.pop_back();
}
}
absl::Span<Slice> ownedSlices() override { return absl::MakeSpan(owned_slices_); }

// Resolved once in the constructor; used for every slice freed above.
Slice::FreeListReference free_list_;
absl::InlinedVector<Slice, Buffer::Reservation::MAX_SLICES_> owned_slices_;
};

Expand Down

0 comments on commit 9f3a717

Please sign in to comment.