SpinLock: Overhaul false sharing avoidance
RandomShaper committed Nov 13, 2024
1 parent cb411fa commit a5caeeb
Showing 1 changed file with 27 additions and 6 deletions.
33 changes: 27 additions & 6 deletions core/os/spin_lock.h
@@ -31,8 +31,15 @@
 #ifndef SPIN_LOCK_H
 #define SPIN_LOCK_H
 
+#include "core/os/thread.h"
 #include "core/typedefs.h"
 
+#ifdef THREADS_ENABLED
+
+// Note the implementations below avoid false sharing by ensuring their
+// sizes match the assumed cache line. We can't use align attributes
+// because these objects may end up unaligned in semi-tightly packed arrays.
+
 #ifdef _MSC_VER
 #include <intrin.h>
 #endif
@@ -42,7 +49,10 @@
 #include <os/lock.h>
 
 class SpinLock {
-	mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+	union {
+		mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
@@ -54,9 +64,7 @@ class SpinLock {
 	}
 };
 
-#else
-
-#include "core/os/thread.h"
+#else // __APPLE__
 
 #include <atomic>
 
@@ -84,8 +92,11 @@ _ALWAYS_INLINE_ static void _cpu_pause() {
 
 static_assert(std::atomic_bool::is_always_lock_free);
 
-class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
-	mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+class SpinLock {
+	union {
+		mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
+		char aligner[Thread::CACHE_LINE_BYTES];
+	};
 
 public:
 	_ALWAYS_INLINE_ void lock() const {
@@ -107,4 +118,14 @@ class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
 
 #endif // __APPLE__
 
+#else // THREADS_ENABLED
+
+class SpinLock {
+public:
+	void lock() const {}
+	void unlock() const {}
+};
+
+#endif // THREADS_ENABLED
+
 #endif // SPIN_LOCK_H
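
For illustration, here is a minimal, self-contained sketch of the padding technique the new comment describes. This is not Godot's actual code: PaddedSpinLock, cpu_pause(), and the hard-coded CACHE_LINE_BYTES = 64 are hypothetical stand-ins for SpinLock, _cpu_pause(), and Thread::CACHE_LINE_BYTES, and the lock body is a generic test-and-test-and-set loop, since the diff elides the real one.

#include <atomic>
#include <cstddef>

#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#include <intrin.h> // _mm_pause
#endif

// Hypothetical stand-in for Thread::CACHE_LINE_BYTES (an assumed line size).
inline constexpr std::size_t CACHE_LINE_BYTES = 64;

// Rough equivalent of the diff's _cpu_pause(): a spin-wait hint to the CPU.
// Real code would cover more targets; unknown ones simply get no hint.
static inline void cpu_pause() {
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
	_mm_pause();
#elif defined(__i386__) || defined(__x86_64__)
	__builtin_ia32_pause();
#elif defined(__aarch64__)
	asm volatile("yield");
#endif
}

class PaddedSpinLock {
	// The anonymous union overlays the one-byte flag with a cache-line-sized
	// byte array. This pads the object's *size* to a full assumed cache line
	// without imposing any alignment requirement, so it still works when the
	// object sits unaligned inside a semi-tightly packed array.
	union {
		mutable std::atomic<bool> locked{false};
		char aligner[CACHE_LINE_BYTES];
	};

public:
	void lock() const {
		// Test-and-set to acquire; on contention, spin on plain loads so
		// waiters don't keep pulling the cache line in exclusive mode.
		while (locked.exchange(true, std::memory_order_acquire)) {
			while (locked.load(std::memory_order_relaxed)) {
				cpu_pause();
			}
		}
	}

	void unlock() const {
		locked.store(false, std::memory_order_release);
	}
};

// The property the union buys: consecutive array elements start a full
// (assumed) cache line apart, so their flags can never share one line.
static_assert(sizeof(PaddedSpinLock) == CACHE_LINE_BYTES);

The design trade-off versus the old alignas(Thread::CACHE_LINE_BYTES) approach: alignas puts each lock on a line boundary only when the allocator and the enclosing object honor it, whereas size padding guarantees that the hot flags of adjacent elements sit exactly one assumed cache line apart, so they can never share a line no matter where the array lands in memory.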
