Skip to content

Commit

Permalink
Revert RID_Owner safety (temp)
Browse files · Browse the repository at this point in the history
  • Loading branch information
RandomShaper committed Oct 21, 2024
1 parent 31cdf94 commit 439f294
Showing 1 changed file with 23 additions and 33 deletions.
56 changes: 23 additions & 33 deletions core/templates/rid_owner.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
#define RID_OWNER_H

#include "core/os/memory.h"
#include "core/os/spin_lock.h"
#include "core/os/mutex.h"
#include "core/string/print_string.h"
#include "core/templates/hash_set.h"
#include "core/templates/list.h"
Expand Down Expand Up @@ -83,18 +83,18 @@ class RID_Alloc : public RID_AllocBase {

const char *description = nullptr;

AcqRelSpinLock spin;
mutable Mutex mutex;

_FORCE_INLINE_ RID _allocate_rid() {
if constexpr (THREAD_SAFE) {
spin.lock();
mutex.lock();
}

if (alloc_count == max_alloc) {
//allocate a new chunk
uint32_t chunk_count = alloc_count == 0 ? 0 : (max_alloc / elements_in_chunk);
if (THREAD_SAFE && chunk_count == chunk_limit) {
spin.unlock();
mutex.unlock();
if (description != nullptr) {
ERR_FAIL_V_MSG(RID(), vformat("Element limit for RID of type '%s' reached.", String(description)));
} else {
Expand All @@ -120,8 +120,7 @@ class RID_Alloc : public RID_AllocBase {
free_list_chunks[chunk_count][i] = alloc_count + i;
}

// Store atomically to avoid data race with the load in get_or_null().
((std::atomic<uint32_t> *)&max_alloc)->store(max_alloc + elements_in_chunk, std::memory_order_relaxed);
max_alloc += elements_in_chunk;
}

uint32_t free_index = free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk];
Expand All @@ -141,7 +140,7 @@ class RID_Alloc : public RID_AllocBase {
alloc_count++;

if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}

return _make_from_id(id);
Expand Down Expand Up @@ -169,13 +168,9 @@ class RID_Alloc : public RID_AllocBase {
return nullptr;
}

spin.acquire();

uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
// Read atomically to avoid data race with the store in _allocate_rid().
uint32_t ma = ((std::atomic<uint32_t> *)&max_alloc)->load(std::memory_order_relaxed);
if (unlikely(idx >= ma)) {
if (unlikely(idx >= max_alloc)) {
return nullptr;
}

Expand Down Expand Up @@ -220,14 +215,14 @@ class RID_Alloc : public RID_AllocBase {

_FORCE_INLINE_ bool owns(const RID &p_rid) const {
if constexpr (THREAD_SAFE) {
spin.lock();
mutex.lock();
}

uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}
return false;
}
Expand All @@ -240,22 +235,22 @@ class RID_Alloc : public RID_AllocBase {
bool owned = (validator != 0x7FFFFFFF) && (chunks[idx_chunk][idx_element].validator & 0x7FFFFFFF) == validator;

if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}

return owned;
}

_FORCE_INLINE_ void free(const RID &p_rid) {
if constexpr (THREAD_SAFE) {
spin.lock();
mutex.lock();
}

uint64_t id = p_rid.get_id();
uint32_t idx = uint32_t(id & 0xFFFFFFFF);
if (unlikely(idx >= max_alloc)) {
if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}
ERR_FAIL();
}
Expand All @@ -266,12 +261,12 @@ class RID_Alloc : public RID_AllocBase {
uint32_t validator = uint32_t(id >> 32);
if (unlikely(chunks[idx_chunk][idx_element].validator & 0x80000000)) {
if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}
ERR_FAIL_MSG("Attempted to free an uninitialized or invalid RID");
} else if (unlikely(chunks[idx_chunk][idx_element].validator != validator)) {
if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}
ERR_FAIL();
}
Expand All @@ -283,7 +278,7 @@ class RID_Alloc : public RID_AllocBase {
free_list_chunks[alloc_count / elements_in_chunk][alloc_count % elements_in_chunk] = idx;

if constexpr (THREAD_SAFE) {
spin.unlock();
mutex.unlock();
}
}

Expand All @@ -292,35 +287,35 @@ class RID_Alloc : public RID_AllocBase {
}
// Collects the RIDs of all currently-live slots into p_owned.
// NOTE(review): this span is a rendered diff hunk — each pair of adjacent
// near-identical statements is the removed line followed by its replacement
// (this commit reverts a spin-lock scheme back to a mutex); they are not
// two sequential statements.
void get_owned_list(List<RID> *p_owned) const {
if constexpr (THREAD_SAFE) {
spin.lock(); // removed: pre-revert acq/rel spin lock
mutex.lock(); // added: mutex guards the chunk tables during the scan
}
// Scan every slot ever allocated; slots whose validator reads 0xFFFFFFFF
// are skipped (presumably the free-slot sentinel — confirm against
// _allocate_rid/free, which are not fully visible in this hunk).
for (size_t i = 0; i < max_alloc; i++) {
uint32_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator; // removed: 32-bit local, needs explicit widening cast below
uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator; // added: 64-bit local, shift below is already wide
if (validator != 0xFFFFFFFF) {
// RID id layout: validator in the high 32 bits, slot index in the low 32.
p_owned->push_back(_make_from_id(((uint64_t)validator << 32) | i)); // removed
p_owned->push_back(_make_from_id((validator << 32) | i)); // added
}
}
if constexpr (THREAD_SAFE) {
spin.unlock(); // removed
mutex.unlock(); // added
}
}

// Used for fast iteration over the owned elements / RIDs: writes every live
// RID into the caller-supplied buffer (caller must size it via get_rid_count()
// or similar — not visible in this hunk, so confirm the sizing contract).
// NOTE(review): rendered diff hunk — adjacent near-identical statements are
// removed/added pairs from the spin-lock -> mutex revert, not sequential code.
void fill_owned_buffer(RID *p_rid_buffer) const {
if constexpr (THREAD_SAFE) {
spin.lock(); // removed: pre-revert spin lock
mutex.lock(); // added: mutex guards the chunk tables during the scan
}
uint32_t idx = 0; // next write position in p_rid_buffer
for (size_t i = 0; i < max_alloc; i++) {
uint32_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator; // removed: 32-bit local
uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator; // added: 64-bit local, so the << 32 below does not overflow
if (validator != 0xFFFFFFFF) {
// RID id layout: validator in the high 32 bits, slot index in the low 32.
p_rid_buffer[idx] = _make_from_id((validator << 32) | i);
idx++;
}
}

if constexpr (THREAD_SAFE) {
spin.unlock(); // removed
mutex.unlock(); // added
}
}

Expand All @@ -334,21 +329,16 @@ class RID_Alloc : public RID_AllocBase {
chunk_limit = (p_maximum_number_of_elements / elements_in_chunk) + 1;
chunks = (Chunk **)memalloc(sizeof(Chunk *) * chunk_limit);
free_list_chunks = (uint32_t **)memalloc(sizeof(uint32_t *) * chunk_limit);
spin.release();
}
}

~RID_Alloc() {
if constexpr (THREAD_SAFE) {
spin.lock();
}

if (alloc_count) {
print_error(vformat("ERROR: %d RID allocations of type '%s' were leaked at exit.",
alloc_count, description ? description : typeid(T).name()));

for (size_t i = 0; i < max_alloc; i++) {
uint32_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
uint64_t validator = chunks[i / elements_in_chunk][i % elements_in_chunk].validator;
if (validator & 0x80000000) {
continue; //uninitialized
}
Expand Down

0 comments on commit 439f294

Please sign in to comment.