From a5caeeb664e5fb8d0141786585d3dc790c9c4b72 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pedro=20J=2E=20Est=C3=A9banez?=
Date: Wed, 13 Nov 2024 10:45:43 +0100
Subject: [PATCH] SpinLock: Overhaul false sharing avoidance

---
 core/os/spin_lock.h | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

diff --git a/core/os/spin_lock.h b/core/os/spin_lock.h
index 8c2d5667ffd3..eb7b1721d742 100644
--- a/core/os/spin_lock.h
+++ b/core/os/spin_lock.h
@@ -31,8 +31,15 @@
 #ifndef SPIN_LOCK_H
 #define SPIN_LOCK_H
 
+#include "core/os/thread.h"
 #include "core/typedefs.h"
 
+#ifdef THREADS_ENABLED
+
+// Note the implementations below avoid false sharing by ensuring their
+// sizes match the assumed cache line. We can't use align attributes
+// because these objects may end up unaligned in semi-tightly packed arrays.
+
 #ifdef _MSC_VER
 #include <intrin.h>
 #endif
@@ -42,7 +49,10 @@
 #include <os/lock.h>
 
 class SpinLock {
-	mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+	union {
+		mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
@@ -54,9 +64,7 @@ class SpinLock {
 	}
 };
 
-#else
-
-#include "core/os/thread.h"
+#else // __APPLE__
 
 #include <atomic>
 
@@ -84,8 +92,11 @@ _ALWAYS_INLINE_ static void _cpu_pause() {
 
 static_assert(std::atomic_bool::is_always_lock_free);
 
-class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
-	mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+class SpinLock {
+	union {
+		mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
@@ -107,4 +118,14 @@ class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
 
 #endif // __APPLE__
 
+#else // THREADS_ENABLED
+
+class SpinLock {
+public:
+	void lock() const {}
+	void unlock() const {}
+};
+
+#endif // THREADS_ENABLED
+
 #endif // SPIN_LOCK_H
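
Note (not part of the patch): the idea stated in the added comment is to bound false sharing by making each lock's *size* equal to the cache line, via a union with a padding array, instead of relying on alignas(), which is not honored once instances land in semi-tightly packed arrays. The sketch below illustrates that idea under assumptions: PaddedSpinLock and the CACHE_LINE_BYTES constant are stand-ins for the engine's SpinLock and Thread::CACHE_LINE_BYTES, and 64 bytes is an assumed cache line size.

#include <atomic>
#include <cstddef>

inline constexpr std::size_t CACHE_LINE_BYTES = 64; // assumed cache line size

class PaddedSpinLock {
	union {
		mutable std::atomic<bool> locked{ false };
		char aligner[CACHE_LINE_BYTES]; // pads the object out to one cache line
	};

public:
	void lock() const {
		// Spin until we observe the lock free and take it (acquire ordering).
		while (locked.exchange(true, std::memory_order_acquire)) {
			// Busy-wait; a real implementation would also issue a CPU pause hint here.
		}
	}
	void unlock() const {
		locked.store(false, std::memory_order_release);
	}
};

// Because each element spans a full cache line's worth of bytes, adjacent locks
// in an array never fully share a line (and share none at all when the array
// itself happens to be cache-line aligned) -- a guarantee that holds even in
// packed storage, where alignas() on the type would not.
static_assert(sizeof(PaddedSpinLock) == CACHE_LINE_BYTES);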