pythongh-109693: Update _gil_runtime_state.locked to use pyatomic.h (p…
corona10 authored and Glyphack committed Jan 27, 2024
1 parent f445d3b commit 8bd0584
Showing 8 changed files with 86 additions and 18 deletions.
6 changes: 6 additions & 0 deletions Include/cpython/pyatomic.h
@@ -463,6 +463,12 @@ _Py_atomic_load_ptr_acquire(const void *obj);
static inline void
_Py_atomic_store_ptr_release(void *obj, void *value);

static inline void
_Py_atomic_store_int_release(int *obj, int value);

static inline int
_Py_atomic_load_int_acquire(const int *obj);


// --- _Py_atomic_fence ------------------------------------------------------

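Note (not part of the diff): the two declarations above add a release-ordered store and an acquire-ordered load for plain int, next to the existing pointer variants. A minimal standalone sketch of the intended pairing, using C11 stdatomic and POSIX threads in place of CPython's wrappers (mirroring the pyatomic_std.h backend below): a release store publishes everything written before it to any thread whose acquire load sees the stored value.

/* Illustrative sketch only: models _Py_atomic_store_int_release /
 * _Py_atomic_load_int_acquire with C11 stdatomic. Build with -pthread. */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;   /* ordinary data published via the flag */
static int ready;     /* plain int, accessed only through the helpers below */

static void store_int_release(int *obj, int value)
{ atomic_store_explicit((_Atomic(int)*)obj, value, memory_order_release); }

static int load_int_acquire(const int *obj)
{ return atomic_load_explicit((const _Atomic(int)*)obj, memory_order_acquire); }

static void *producer(void *arg)
{
    (void)arg;
    payload = 42;                  /* written before the release store ...        */
    store_int_release(&ready, 1);  /* ... so visible to whoever acquires the flag */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    while (load_int_acquire(&ready) == 0) {
        /* spin until the producer publishes the flag */
    }
    assert(payload == 42);
    pthread_join(t, NULL);
    printf("payload=%d\n", payload);
    return 0;
}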
8 changes: 8 additions & 0 deletions Include/cpython/pyatomic_gcc.h
@@ -487,6 +487,14 @@ static inline void
_Py_atomic_store_ptr_release(void *obj, void *value)
{ __atomic_store_n((void **)obj, value, __ATOMIC_RELEASE); }

static inline void
_Py_atomic_store_int_release(int *obj, int value)
{ __atomic_store_n(obj, value, __ATOMIC_RELEASE); }

static inline int
_Py_atomic_load_int_acquire(const int *obj)
{ return __atomic_load_n(obj, __ATOMIC_ACQUIRE); }


// --- _Py_atomic_fence ------------------------------------------------------

26 changes: 26 additions & 0 deletions Include/cpython/pyatomic_msc.h
@@ -912,6 +912,32 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
#endif
}

static inline void
_Py_atomic_store_int_release(int *obj, int value)
{
#if defined(_M_X64) || defined(_M_IX86)
*(int volatile *)obj = value;
#elif defined(_M_ARM64)
_Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
__stlr32((unsigned __int32 volatile *)obj, (unsigned __int32)value);
#else
# error "no implementation of _Py_atomic_store_int_release"
#endif
}

static inline int
_Py_atomic_load_int_acquire(const int *obj)
{
#if defined(_M_X64) || defined(_M_IX86)
return *(int volatile *)obj;
#elif defined(_M_ARM64)
_Py_atomic_ASSERT_ARG_TYPE(unsigned __int32);
return (int)__ldar32((unsigned __int32 volatile *)obj);
#else
# error "no implementation of _Py_atomic_load_int_acquire"
#endif
}


// --- _Py_atomic_fence ------------------------------------------------------

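Note (not part of the diff): on _M_X64/_M_IX86 the strongly ordered x86 memory model already gives an ordinary aligned store/load release/acquire behaviour, so a plain volatile access is enough, while _M_ARM64 needs the __stlr32/__ldar32 intrinsics. Both ARM64 branches reinterpret int* as a 32-bit unsigned type, which is what _Py_atomic_ASSERT_ARG_TYPE(unsigned __int32) guards; the same assumption, stated portably in a tiny sketch:

#include <assert.h>

int main(void)
{
    /* Assumption behind the 32-bit intrinsics used above: int is 32 bits wide. */
    static_assert(sizeof(int) == 4, "__stlr32/__ldar32 paths assume a 32-bit int");
    return 0;
}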
17 changes: 17 additions & 0 deletions Include/cpython/pyatomic_std.h
@@ -854,6 +854,23 @@ _Py_atomic_store_ptr_release(void *obj, void *value)
memory_order_release);
}

static inline void
_Py_atomic_store_int_release(int *obj, int value)
{
_Py_USING_STD;
atomic_store_explicit((_Atomic(int)*)obj, value,
memory_order_release);
}

static inline int
_Py_atomic_load_int_acquire(const int *obj)
{
_Py_USING_STD;
return atomic_load_explicit((const _Atomic(int)*)obj,
memory_order_acquire);
}



// --- _Py_atomic_fence ------------------------------------------------------

3 changes: 1 addition & 2 deletions Include/internal/pycore_gil.h
@@ -8,7 +8,6 @@ extern "C" {
# error "this header requires Py_BUILD_CORE define"
#endif

#include "pycore_atomic.h" // _Py_atomic_int
#include "pycore_condvar.h" // PyCOND_T

#ifndef Py_HAVE_CONDVAR
@@ -28,7 +27,7 @@ struct _gil_runtime_state {
PyThreadState* last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
/* This condition variable allows one or several threads to wait
16 changes: 16 additions & 0 deletions Modules/_testcapi/pyatomic.c
@@ -140,6 +140,21 @@ test_atomic_release_acquire(PyObject *self, PyObject *obj) {
Py_RETURN_NONE;
}

static PyObject *
test_atomic_load_store_int_release_acquire(PyObject *self, PyObject *obj) {
int x = 0;
int y = 1;
int z = 2;
assert(_Py_atomic_load_int_acquire(&x) == 0);
_Py_atomic_store_int_release(&x, y);
assert(x == y);
assert(_Py_atomic_load_int_acquire(&x) == y);
_Py_atomic_store_int_release(&x, z);
assert(x == z);
assert(_Py_atomic_load_int_acquire(&x) == z);
Py_RETURN_NONE;
}

// NOTE: all tests should start with "test_atomic_" to be included
// in test_pyatomic.py

@@ -162,6 +177,7 @@ static PyMethodDef test_methods[] = {
FOR_BITWISE_TYPES(BIND_TEST_AND_OR)
{"test_atomic_fences", test_atomic_fences, METH_NOARGS},
{"test_atomic_release_acquire", test_atomic_release_acquire, METH_NOARGS},
{"test_atomic_load_store_int_release_acquire", test_atomic_load_store_int_release_acquire, METH_NOARGS},
{NULL, NULL} /* sentinel */
};

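Note (not part of the diff): the new test follows the shape of test_atomic_release_acquire directly above it. A standalone equivalent of what it checks, with C11 stdatomic standing in for the _Py_atomic_* wrappers (as in pyatomic_std.h):

/* Illustrative sketch of test_atomic_load_store_int_release_acquire. */
#include <assert.h>
#include <stdatomic.h>

static void store_int_release(int *obj, int value)
{ atomic_store_explicit((_Atomic(int)*)obj, value, memory_order_release); }

static int load_int_acquire(const int *obj)
{ return atomic_load_explicit((const _Atomic(int)*)obj, memory_order_acquire); }

int main(void)
{
    int x = 0, y = 1, z = 2;
    assert(load_int_acquire(&x) == 0);
    store_int_release(&x, y);
    assert(x == y);
    assert(load_int_acquire(&x) == y);
    store_int_release(&x, z);
    assert(x == z);
    assert(load_int_acquire(&x) == z);
    return 0;
}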
27 changes: 11 additions & 16 deletions Python/ceval_gil.c
@@ -1,6 +1,6 @@

#include "Python.h"
#include "pycore_atomic.h" // _Py_atomic_int
#include "pycore_atomic.h" // _Py_ANNOTATE_RWLOCK_CREATE
#include "pycore_ceval.h" // _PyEval_SignalReceived()
#include "pycore_initconfig.h" // _PyStatus_OK()
#include "pycore_interp.h" // _Py_RunGC()
@@ -120,9 +120,6 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
#include <stdlib.h>
#include <errno.h>

#include "pycore_atomic.h"


#include "condvar.h"

#define MUTEX_INIT(mut) \
@@ -166,8 +163,7 @@ UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)

static void _gil_initialize(struct _gil_runtime_state *gil)
{
_Py_atomic_int uninitialized = {-1};
gil->locked = uninitialized;
gil->locked = -1;
gil->interval = DEFAULT_INTERVAL;
}

@@ -176,7 +172,7 @@ static int gil_created(struct _gil_runtime_state *gil)
if (gil == NULL) {
return 0;
}
return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
return (_Py_atomic_load_int_acquire(&gil->locked) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
@@ -191,7 +187,7 @@ static void create_gil(struct _gil_runtime_state *gil)
#endif
_Py_atomic_store_ptr_relaxed(&gil->last_holder, 0);
_Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
_Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
_Py_atomic_store_int_release(&gil->locked, 0);
}

static void destroy_gil(struct _gil_runtime_state *gil)
@@ -205,8 +201,7 @@ static void destroy_gil(struct _gil_runtime_state *gil)
COND_FINI(gil->switch_cond);
MUTEX_FINI(gil->switch_mutex);
#endif
_Py_atomic_store_explicit(&gil->locked, -1,
_Py_memory_order_release);
_Py_atomic_store_int_release(&gil->locked, -1);
_Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

@@ -247,7 +242,7 @@ drop_gil(PyInterpreterState *interp, PyThreadState *tstate)

MUTEX_LOCK(gil->mutex);
_Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
_Py_atomic_store_relaxed(&gil->locked, 0);
_Py_atomic_store_int_relaxed(&gil->locked, 0);
COND_SIGNAL(gil->cond);
MUTEX_UNLOCK(gil->mutex);

@@ -313,12 +308,12 @@ take_gil(PyThreadState *tstate)

MUTEX_LOCK(gil->mutex);

if (!_Py_atomic_load_relaxed(&gil->locked)) {
if (!_Py_atomic_load_int_relaxed(&gil->locked)) {
goto _ready;
}

int drop_requested = 0;
while (_Py_atomic_load_relaxed(&gil->locked)) {
while (_Py_atomic_load_int_relaxed(&gil->locked)) {
unsigned long saved_switchnum = gil->switch_number;

unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
@@ -328,7 +323,7 @@ take_gil(PyThreadState *tstate)
/* If we timed out and no switch occurred in the meantime, it is time
to ask the GIL-holding thread to drop it. */
if (timed_out &&
_Py_atomic_load_relaxed(&gil->locked) &&
_Py_atomic_load_int_relaxed(&gil->locked) &&
gil->switch_number == saved_switchnum)
{
if (_PyThreadState_MustExit(tstate)) {
@@ -358,7 +353,7 @@ take_gil(PyThreadState *tstate)
MUTEX_LOCK(gil->switch_mutex);
#endif
/* We now hold the GIL */
_Py_atomic_store_relaxed(&gil->locked, 1);
_Py_atomic_store_int_relaxed(&gil->locked, 1);
_Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

if (tstate != (PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) {
@@ -437,7 +432,7 @@ current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
if (((PyThreadState*)_Py_atomic_load_ptr_relaxed(&gil->last_holder)) != tstate) {
return 0;
}
return _Py_atomic_load_relaxed(&gil->locked);
return _Py_atomic_load_int_relaxed(&gil->locked);
}

static void
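Note (not part of the diff): the ceval_gil.c changes keep the existing protocol but route it through the typed helpers: _gil_initialize() sets the flag to -1, create_gil() publishes 0 with a release store, gil_created() tests >= 0 with an acquire load, and take_gil()/drop_gil() flip the flag between 1 and 0 with relaxed stores while gil->mutex is held. A compressed, single-threaded sketch of that lifecycle, with stdatomic standing in for the pyatomic.h helpers:

/* Illustrative lifecycle of gil->locked: -1 = uninitialized, 0 = free, 1 = held. */
#include <assert.h>
#include <stdatomic.h>

#define STORE_RELEASE(p, v) atomic_store_explicit((_Atomic(int)*)(p), (v), memory_order_release)
#define STORE_RELAXED(p, v) atomic_store_explicit((_Atomic(int)*)(p), (v), memory_order_relaxed)
#define LOAD_ACQUIRE(p)     atomic_load_explicit((const _Atomic(int)*)(p), memory_order_acquire)
#define LOAD_RELAXED(p)     atomic_load_explicit((const _Atomic(int)*)(p), memory_order_relaxed)

static int locked;   /* plain int, as in struct _gil_runtime_state */

int main(void)
{
    locked = -1;                              /* _gil_initialize()                */
    assert(!(LOAD_ACQUIRE(&locked) >= 0));    /* gil_created() -> false           */

    STORE_RELEASE(&locked, 0);                /* create_gil()                     */
    assert(LOAD_ACQUIRE(&locked) >= 0);       /* gil_created() -> true            */

    STORE_RELAXED(&locked, 1);                /* take_gil(), under gil->mutex     */
    assert(LOAD_RELAXED(&locked) == 1);       /* current_thread_holds_gil() check */

    STORE_RELAXED(&locked, 0);                /* drop_gil(), under gil->mutex     */
    STORE_RELEASE(&locked, -1);               /* destroy_gil()                    */
    return 0;
}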
1 change: 1 addition & 0 deletions Python/thread_pthread.h
@@ -1,5 +1,6 @@
#include "pycore_interp.h" // _PyInterpreterState.threads.stacksize
#include "pycore_pythread.h" // _POSIX_SEMAPHORES
#include "pycore_atomic.h" // _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX

/* Posix threads interface */

