Skip to content

Commit

Permalink
[proxy](1/n) start self-vendored STL with atomic (#715)
Browse files Browse the repository at this point in the history
* [proxy] start self-vendored STL

* address CR
  • Loading branch information
SchrodingerZhu authored Jan 3, 2025
1 parent fb29a5e commit feff2e8
Show file tree
Hide file tree
Showing 34 changed files with 469 additions and 131 deletions.
47 changes: 47 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -504,6 +504,53 @@ jobs:
- name: Test
run: ${{github.workspace}}/build/fuzzing/snmalloc-fuzzer

self-vendored:
strategy:
matrix:
include:
- os: windows-2022
cxx: clang-cl
cc: clang-cl
- os: ubuntu-latest
cxx: clang++
cc: clang
- os: ubuntu-latest
cxx: g++
cc: gcc
- os: macos-latest
cxx: clang++
cc: clang
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- name: Prepare Windows
if: runner.os == 'Windows'
run: |
choco install ninja
- name: Prepare macOS
if: runner.os == 'macOS'
run: |
brew install ninja
- name: Prepare Ubuntu
if: runner.os == 'Linux'
run: |
sudo apt install ninja-build
- name: Configure CMake
run: >
cmake
-B ${{github.workspace}}/build
-DSNMALLOC_USE_SELF_VENDORED_STL=ON
-GNinja
-DCMAKE_BUILD_TYPE=RelWithDebInfo
-DCMAKE_CXX_COMPILER=${{ matrix.cxx }}
-DCMAKE_C_COMPILER=${{ matrix.cc }}
- name: Build
run: cmake --build ${{github.workspace}}/build --parallel
- name: Test
run: |
cd ${{github.workspace}}/build
ctest --parallel
all-checks:
# Currently FreeBSD and NetBSD CI are not working, so we do not require them to pass.
    # Add fuzzing back when the memmove issue is fixed.
Expand Down
7 changes: 7 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ option(SNMALLOC_BENCHMARK_INDIVIDUAL_MITIGATIONS "Build tests and ld_preload for
option(SNMALLOC_ENABLE_DYNAMIC_LOADING "Build such that snmalloc can be dynamically loaded. This is not required for LD_PRELOAD, and will harm performance if enabled." OFF)
option(SNMALLOC_ENABLE_WAIT_ON_ADDRESS "Use wait on address backoff strategy if it is available" ON)
option(SNMALLOC_ENABLE_FUZZING "Enable fuzzing instrumentation tests" OFF)
option(SNMALLOC_USE_SELF_VENDORED_STL "Avoid using system STL" OFF)
# Options that apply only if we're not building the header-only library
cmake_dependent_option(SNMALLOC_RUST_SUPPORT "Build static library for rust" OFF "NOT SNMALLOC_HEADER_ONLY_LIBRARY" OFF)
cmake_dependent_option(SNMALLOC_STATIC_LIBRARY "Build static libraries" ON "NOT SNMALLOC_HEADER_ONLY_LIBRARY" OFF)
Expand Down Expand Up @@ -205,6 +206,12 @@ else()
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_WAIT_ON_ADDRESS=0)
endif()

if(SNMALLOC_USE_SELF_VENDORED_STL)
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_SELF_VENDORED_STL=1)
else()
target_compile_definitions(snmalloc INTERFACE SNMALLOC_USE_SELF_VENDORED_STL=0)
endif()

# https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus
if(MSVC)
target_compile_options(snmalloc INTERFACE "/Zc:__cplusplus")
Expand Down
6 changes: 3 additions & 3 deletions src/snmalloc/backend/globalconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ namespace snmalloc
* Specifies if the Configuration has been initialised.
*/
SNMALLOC_REQUIRE_CONSTINIT
inline static std::atomic<bool> initialised{false};
inline static stl::Atomic<bool> initialised{false};

/**
* Used to prevent two threads attempting to initialise the configuration
Expand Down Expand Up @@ -126,7 +126,7 @@ namespace snmalloc
Authmap::init();
}

initialised.store(true, std::memory_order_release);
initialised.store(true, stl::memory_order_release);
});
}

Expand All @@ -146,7 +146,7 @@ namespace snmalloc
// and concurrency safe.
SNMALLOC_FAST_PATH static void ensure_init()
{
if (SNMALLOC_LIKELY(initialised.load(std::memory_order_acquire)))
if (SNMALLOC_LIKELY(initialised.load(stl::memory_order_acquire)))
return;

ensure_init_slow();
Expand Down
2 changes: 1 addition & 1 deletion src/snmalloc/backend_helpers/pagemap.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@

#include "../ds/ds.h"
#include "../mem/mem.h"
#include "snmalloc/stl/atomic.h"

#include <atomic>
#include <utility>

namespace snmalloc
Expand Down
7 changes: 3 additions & 4 deletions src/snmalloc/backend_helpers/statsrange.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,7 @@

#include "empty_range.h"
#include "range_helpers.h"

#include <atomic>
#include "snmalloc/stl/atomic.h"

namespace snmalloc
{
Expand All @@ -17,8 +16,8 @@ namespace snmalloc
{
using ContainsParent<ParentRange>::parent;

static inline std::atomic<size_t> current_usage{};
static inline std::atomic<size_t> peak_usage{};
static inline stl::Atomic<size_t> current_usage{};
static inline stl::Atomic<size_t> peak_usage{};

public:
static constexpr bool Aligned = ParentRange::Aligned;
Expand Down
32 changes: 16 additions & 16 deletions src/snmalloc/ds/aba.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,8 @@ namespace snmalloc

struct Independent
{
std::atomic<T*> ptr{nullptr};
std::atomic<uintptr_t> aba{0};
stl::Atomic<T*> ptr{nullptr};
stl::Atomic<uintptr_t> aba{0};
};

static_assert(
Expand All @@ -49,7 +49,7 @@ namespace snmalloc
private:
union
{
alignas(2 * sizeof(std::size_t)) std::atomic<Linked> linked;
alignas(2 * sizeof(std::size_t)) stl::Atomic<Linked> linked;
Independent independent;
};

Expand All @@ -58,8 +58,8 @@ namespace snmalloc

void init(T* x)
{
independent.ptr.store(x, std::memory_order_relaxed);
independent.aba.store(0, std::memory_order_relaxed);
independent.ptr.store(x, stl::memory_order_relaxed);
independent.aba.store(0, stl::memory_order_relaxed);
}

struct Cmp;
Expand All @@ -72,8 +72,8 @@ namespace snmalloc
operation_in_flight = true;
# endif
return Cmp{
{independent.ptr.load(std::memory_order_relaxed),
independent.aba.load(std::memory_order_relaxed)},
{independent.ptr.load(stl::memory_order_relaxed),
independent.aba.load(stl::memory_order_relaxed)},
this};
}

Expand Down Expand Up @@ -109,10 +109,10 @@ namespace snmalloc
# endif

Linked xchg{value, old.aba + 1};
std::atomic<Linked>& addr = parent->linked;
stl::Atomic<Linked>& addr = parent->linked;

auto result = addr.compare_exchange_weak(
old, xchg, std::memory_order_acq_rel, std::memory_order_relaxed);
old, xchg, stl::memory_order_acq_rel, stl::memory_order_relaxed);
# endif
return result;
}
Expand All @@ -131,7 +131,7 @@ namespace snmalloc
// This method is used in Verona
T* peek()
{
return independent.ptr.load(std::memory_order_relaxed);
return independent.ptr.load(stl::memory_order_relaxed);
}
};
#else
Expand All @@ -141,21 +141,21 @@ namespace snmalloc
template<typename T, Construction c = RequiresInit>
class ABA
{
std::atomic<T*> ptr = nullptr;
std::atomic<bool> lock{false};
stl::Atomic<T*> ptr = nullptr;
stl::Atomic<bool> lock{false};

public:
// This method is used in Verona
void init(T* x)
{
ptr.store(x, std::memory_order_relaxed);
ptr.store(x, stl::memory_order_relaxed);
}

struct Cmp;

Cmp read()
{
while (lock.exchange(true, std::memory_order_acquire))
while (lock.exchange(true, stl::memory_order_acquire))
Aal::pause();

# if !defined(NDEBUG) && !defined(SNMALLOC_DISABLE_ABA_VERIFY)
Expand Down Expand Up @@ -185,7 +185,7 @@ namespace snmalloc

~Cmp()
{
parent->lock.store(false, std::memory_order_release);
parent->lock.store(false, stl::memory_order_release);
# if !defined(NDEBUG) && !defined(SNMALLOC_DISABLE_ABA_VERIFY)
operation_in_flight = false;
# endif
Expand All @@ -195,7 +195,7 @@ namespace snmalloc
// This method is used in Verona
T* peek()
{
return ptr.load(std::memory_order_relaxed);
return ptr.load(stl::memory_order_relaxed);
}
};
#endif
Expand Down
Loading

0 comments on commit feff2e8

Please sign in to comment.