diff options
author | Dino Viehland <dinoviehland@meta.com> | 2024-11-21 08:41:19 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-11-21 10:41:19 -0600 |
commit | bf542f8bb9f12f0df9481f2222b21545806dd9e1 (patch) | |
tree | 41db15281aa5a0e25e63a46e80cbe50ef80cb7f6 /Python/gc_free_threading.c | |
parent | 3926842117feffe5d2c9727e1899bea5ae2adb28 (diff) | |
download | cpython-bf542f8bb9f12f0df9481f2222b21545806dd9e1.tar.gz cpython-bf542f8bb9f12f0df9481f2222b21545806dd9e1.zip |
gh-124470: Fix crash when reading from object instance dictionary while replacing it (#122489)
Delay free a dictionary when replacing it
Diffstat (limited to 'Python/gc_free_threading.c')
-rw-r--r-- | Python/gc_free_threading.c | 49 |
1 file changed, 29 insertions, 20 deletions
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c index a6e0022340b..0920c616c3c 100644 --- a/Python/gc_free_threading.c +++ b/Python/gc_free_threading.c @@ -394,6 +394,23 @@ gc_visit_thread_stacks(PyInterpreterState *interp) } static void +queue_untracked_obj_decref(PyObject *op, struct collection_state *state) +{ + if (!_PyObject_GC_IS_TRACKED(op)) { + // GC objects with zero refcount are handled subsequently by the + // GC as if they were cyclic trash, but we have to handle dead + // non-GC objects here. Add one to the refcount so that we can + // decref and deallocate the object once we start the world again. + op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT); +#ifdef Py_REF_DEBUG + _Py_IncRefTotal(_PyThreadState_GET()); +#endif + worklist_push(&state->objs_to_decref, op); + } + +} + +static void merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state) { struct _brc_thread_state *brc = &tstate->brc; @@ -404,22 +421,20 @@ merge_queued_objects(_PyThreadStateImpl *tstate, struct collection_state *state) // Subtract one when merging because the queue had a reference. Py_ssize_t refcount = merge_refcount(op, -1); - if (!_PyObject_GC_IS_TRACKED(op) && refcount == 0) { - // GC objects with zero refcount are handled subsequently by the - // GC as if they were cyclic trash, but we have to handle dead - // non-GC objects here. Add one to the refcount so that we can - // decref and deallocate the object once we start the world again. 
- op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT); -#ifdef Py_REF_DEBUG - _Py_IncRefTotal(_PyThreadState_GET()); -#endif - worklist_push(&state->objs_to_decref, op); + if (refcount == 0) { + queue_untracked_obj_decref(op, state); } } } static void -process_delayed_frees(PyInterpreterState *interp) +queue_freed_object(PyObject *obj, void *arg) +{ + queue_untracked_obj_decref(obj, arg); +} + +static void +process_delayed_frees(PyInterpreterState *interp, struct collection_state *state) { // While we are in a "stop the world" pause, we can observe the latest // write sequence by advancing the write sequence immediately. @@ -438,7 +453,7 @@ process_delayed_frees(PyInterpreterState *interp) } HEAD_UNLOCK(&_PyRuntime); - _PyMem_ProcessDelayed((PyThreadState *)current_tstate); + _PyMem_ProcessDelayedNoDealloc((PyThreadState *)current_tstate, queue_freed_object, state); } // Subtract an incoming reference from the computed "gc_refs" refcount. @@ -1231,7 +1246,7 @@ gc_collect_internal(PyInterpreterState *interp, struct collection_state *state, } HEAD_UNLOCK(&_PyRuntime); - process_delayed_frees(interp); + process_delayed_frees(interp, state); // Find unreachable objects int err = deduce_unreachable_heap(interp, state); @@ -1910,13 +1925,7 @@ PyObject_GC_Del(void *op) } record_deallocation(_PyThreadState_GET()); - PyObject *self = (PyObject *)op; - if (_PyObject_GC_IS_SHARED_INLINE(self)) { - _PyObject_FreeDelayed(((char *)op)-presize); - } - else { - PyObject_Free(((char *)op)-presize); - } + PyObject_Free(((char *)op)-presize); } int |