From 1c0a104eca189a932e0b44ca9bef46cce3d0b801 Mon Sep 17 00:00:00 2001
From: Eric Snow
Date: Tue, 19 Nov 2024 12:59:19 -0700
Subject: gh-126914: Store the Preallocated Thread State's Pointer in a
 PyInterpreterState Field (gh-126989)

This approach eliminates the originally reported race. It also gets rid
of the deadlock reported in gh-96071, so we can remove the workaround
added then.
---
 Python/pystate.c | 93 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 46 insertions(+), 47 deletions(-)

(limited to 'Python/pystate.c')

diff --git a/Python/pystate.c b/Python/pystate.c
index a209a26f16f..01e54fc745d 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -629,6 +629,8 @@ init_interpreter(PyInterpreterState *interp,
     assert(next != NULL || (interp == runtime->interpreters.main));
     interp->next = next;
 
+    interp->threads.preallocated = &interp->_initial_thread;
+
     // We would call _PyObject_InitState() at this point
     // if interp->feature_flags were alredy set.
 
@@ -766,7 +768,6 @@ PyInterpreterState_New(void)
     return interp;
 }
 
-
 static void
 interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
 {
@@ -910,6 +911,9 @@ interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
     // XXX Once we have one allocator per interpreter (i.e.
     // per-interpreter GC) we must ensure that all of the interpreter's
     // objects have been cleaned up at the point.
+
+    // We could clear interp->threads.freelist here
+    // if it held more than just the initial thread state.
 }
 
 
@@ -1386,22 +1390,45 @@ allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
     return res;
 }
 
+static void
+reset_threadstate(_PyThreadStateImpl *tstate)
+{
+    // Set to _PyThreadState_INIT directly?
+    memcpy(tstate,
+           &initial._main_interpreter._initial_thread,
+           sizeof(*tstate));
+}
+
 static _PyThreadStateImpl *
-alloc_threadstate(void)
+alloc_threadstate(PyInterpreterState *interp)
 {
-    return PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
+    _PyThreadStateImpl *tstate;
+
+    // Try the preallocated tstate first.
+    tstate = _Py_atomic_exchange_ptr(&interp->threads.preallocated, NULL);
+
+    // Fall back to the allocator.
+    if (tstate == NULL) {
+        tstate = PyMem_RawCalloc(1, sizeof(_PyThreadStateImpl));
+        if (tstate == NULL) {
+            return NULL;
+        }
+        reset_threadstate(tstate);
+    }
+    return tstate;
 }
 
 static void
 free_threadstate(_PyThreadStateImpl *tstate)
 {
+    PyInterpreterState *interp = tstate->base.interp;
     // The initial thread state of the interpreter is allocated
     // as part of the interpreter state so should not be freed.
-    if (tstate == &tstate->base.interp->_initial_thread) {
-        // Restore to _PyThreadState_INIT.
-        memcpy(tstate,
-               &initial._main_interpreter._initial_thread,
-               sizeof(*tstate));
+    if (tstate == &interp->_initial_thread) {
+        // Make it available again.
+        reset_threadstate(tstate);
+        assert(interp->threads.preallocated == NULL);
+        _Py_atomic_store_ptr(&interp->threads.preallocated, tstate);
     }
     else {
         PyMem_RawFree(tstate);
@@ -1492,66 +1519,38 @@ add_threadstate(PyInterpreterState *interp, PyThreadState *tstate,
 static PyThreadState *
 new_threadstate(PyInterpreterState *interp, int whence)
 {
-    _PyThreadStateImpl *tstate;
-    _PyRuntimeState *runtime = interp->runtime;
-    // We don't need to allocate a thread state for the main interpreter
-    // (the common case), but doing it later for the other case revealed a
-    // reentrancy problem (deadlock). So for now we always allocate before
-    // taking the interpreters lock. See GH-96071.
-    _PyThreadStateImpl *new_tstate = alloc_threadstate();
-    int used_newtstate;
-    if (new_tstate == NULL) {
+    // Allocate the thread state.
+    _PyThreadStateImpl *tstate = alloc_threadstate(interp);
+    if (tstate == NULL) {
         return NULL;
     }
+
 #ifdef Py_GIL_DISABLED
     Py_ssize_t qsbr_idx = _Py_qsbr_reserve(interp);
     if (qsbr_idx < 0) {
-        PyMem_RawFree(new_tstate);
+        free_threadstate(tstate);
         return NULL;
     }
     int32_t tlbc_idx = _Py_ReserveTLBCIndex(interp);
     if (tlbc_idx < 0) {
-        PyMem_RawFree(new_tstate);
+        free_threadstate(tstate);
         return NULL;
     }
 #endif
 
     /* We serialize concurrent creation to protect global state. */
-    HEAD_LOCK(runtime);
+    HEAD_LOCK(interp->runtime);
 
+    // Initialize the new thread state.
     interp->threads.next_unique_id += 1;
     uint64_t id = interp->threads.next_unique_id;
+    init_threadstate(tstate, interp, id, whence);
 
-    // Allocate the thread state and add it to the interpreter.
+    // Add the new thread state to the interpreter.
     PyThreadState *old_head = interp->threads.head;
-    if (old_head == NULL) {
-        // It's the interpreter's initial thread state.
-        used_newtstate = 0;
-        tstate = &interp->_initial_thread;
-    }
-    // XXX Re-use interp->_initial_thread if not in use?
-    else {
-        // Every valid interpreter must have at least one thread.
-        assert(id > 1);
-        assert(old_head->prev == NULL);
-        used_newtstate = 1;
-        tstate = new_tstate;
-        // Set to _PyThreadState_INIT.
-        memcpy(tstate,
-               &initial._main_interpreter._initial_thread,
-               sizeof(*tstate));
-    }
-
-    init_threadstate(tstate, interp, id, whence);
     add_threadstate(interp, (PyThreadState *)tstate, old_head);
-
-    HEAD_UNLOCK(runtime);
-    if (!used_newtstate) {
-        // Must be called with lock unlocked to avoid re-entrancy deadlock.
-        PyMem_RawFree(new_tstate);
-    }
-    else {
-    }
+
+    HEAD_UNLOCK(interp->runtime);
 
 #ifdef Py_GIL_DISABLED
     // Must be called with lock unlocked to avoid lock ordering deadlocks.
--
cgit v1.2.3
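For readers who want to see the shape of the change outside the CPython tree, below is a minimal, self-contained sketch of the claim/release pattern the patch adopts. It uses standard C11 atomics in place of CPython's internal _Py_atomic_exchange_ptr()/_Py_atomic_store_ptr() helpers, and the Interp, ThreadStateBuf, alloc_tstate, and free_tstate names are hypothetical stand-ins, not CPython API.

/* Sketch only: a per-"interpreter" preallocated buffer claimed via an
 * atomic pointer exchange, with a heap fallback. Compile with -std=c11. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char data[64];                  /* stand-in for _PyThreadStateImpl */
} ThreadStateBuf;

typedef struct {
    ThreadStateBuf initial;         /* embedded, like interp->_initial_thread */
    _Atomic(ThreadStateBuf *) preallocated;  /* like interp->threads.preallocated */
} Interp;

static void
interp_init(Interp *interp)
{
    memset(&interp->initial, 0, sizeof(interp->initial));
    /* Publish the embedded buffer so the first allocation can claim it. */
    atomic_store(&interp->preallocated, &interp->initial);
}

/* The exchange hands the embedded buffer to at most one caller, which is
 * what removes the race over who gets the interpreter's initial tstate. */
static ThreadStateBuf *
alloc_tstate(Interp *interp)
{
    ThreadStateBuf *buf = atomic_exchange(&interp->preallocated, NULL);
    if (buf == NULL) {
        buf = calloc(1, sizeof(*buf));  /* may return NULL on OOM */
    }
    return buf;
}

/* Heap buffers are freed; the embedded buffer is wiped and republished,
 * mirroring free_threadstate() in the patch. */
static void
free_tstate(Interp *interp, ThreadStateBuf *buf)
{
    if (buf == &interp->initial) {
        memset(buf, 0, sizeof(*buf));
        atomic_store(&interp->preallocated, buf);
    }
    else {
        free(buf);
    }
}

int
main(void)
{
    Interp interp;
    interp_init(&interp);

    ThreadStateBuf *a = alloc_tstate(&interp);   /* claims the embedded buffer */
    ThreadStateBuf *b = alloc_tstate(&interp);   /* falls back to the heap */
    printf("a is embedded: %d\n", a == &interp.initial);   /* prints 1 */
    printf("b is embedded: %d\n", b == &interp.initial);   /* prints 0 */

    free_tstate(&interp, b);
    free_tstate(&interp, a);    /* embedded buffer becomes claimable again */
    return 0;
}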
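Seen this way, the design choice in the patch is that the only cross-thread coordination over the preallocated slot is a single pointer exchange performed before HEAD_LOCK is taken. The old special-casing of the initial thread state inside the critical section disappears, along with the unconditional heap allocation and the deferred PyMem_RawFree() after HEAD_UNLOCK that gh-96071 required, and in the common case of an interpreter's first thread state no heap allocation happens at all.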