path: root/Python/gc_free_threading.c
author    Sam Gross <colesbury@gmail.com>  2024-04-12 13:36:20 -0400
committer GitHub <noreply@github.com>  2024-04-12 17:36:20 +0000
commit    4ad8f090cce03c24fd4279ec8198a099b2d0cf97 (patch)
tree      da9e5dc5aac6b0b1121eaf4a35c286783d41d2ff /Python/gc_free_threading.c
parent    c50cb6dd09d5a1bfdd1b896cc31ccdc96c72e561 (diff)
gh-117376: Partial implementation of deferred reference counting (#117696)
This marks objects that use deferred reference counting via the `ob_gc_bits` field in the free-threaded build and collects those objects during GC.
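As context for the patch below: the new disable_deferred_refcounting() clears the _PyGC_BITS_DEFERRED bit and subtracts one shared reference, which implies that enabling deferred reference counting is the mirror-image operation. A minimal sketch, assuming that symmetry (the function name here is hypothetical; this is not part of the patch):

/* Hypothetical mirror of disable_deferred_refcounting() below:
 * mark the object as using deferred reference counting and account
 * for one extra shared reference so it is not freed prematurely. */
static void
enable_deferred_refcounting(PyObject *op)
{
    if (!_PyObject_HasDeferredRefcount(op)) {
        op->ob_gc_bits |= _PyGC_BITS_DEFERRED;
        op->ob_ref_shared += (1 << _Py_REF_SHARED_SHIFT);
    }
}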
Diffstat (limited to 'Python/gc_free_threading.c')
-rw-r--r--  Python/gc_free_threading.c  19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index 111632ffb77..9cf0e989d09 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -159,6 +159,15 @@ gc_decref(PyObject *op)
op->ob_tid -= 1;
}
+static void
+disable_deferred_refcounting(PyObject *op)
+{
+ if (_PyObject_HasDeferredRefcount(op)) {
+ op->ob_gc_bits &= ~_PyGC_BITS_DEFERRED;
+ op->ob_ref_shared -= (1 << _Py_REF_SHARED_SHIFT);
+ }
+}
+
static Py_ssize_t
merge_refcount(PyObject *op, Py_ssize_t extra)
{
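The patch tests _PyObject_HasDeferredRefcount() in several places. In the free-threaded build this is plausibly just a bit test on ob_gc_bits; a sketch under that assumption (not taken from this patch):

/* Sketch: read the deferred bit that
 * disable_deferred_refcounting() clears above. */
static inline int
_PyObject_HasDeferredRefcount(PyObject *op)
{
    return (op->ob_gc_bits & _PyGC_BITS_DEFERRED) != 0;
}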
@@ -375,9 +384,10 @@ update_refs(const mi_heap_t *heap, const mi_heap_area_t *area,
}
Py_ssize_t refcount = Py_REFCNT(op);
+ refcount -= _PyObject_HasDeferredRefcount(op);
_PyObject_ASSERT(op, refcount >= 0);
- if (refcount > 0) {
+ if (refcount > 0 && !_PyObject_HasDeferredRefcount(op)) {
// Untrack tuples and dicts as necessary in this pass, but not objects
// with zero refcount, which we will want to collect.
if (PyTuple_CheckExact(op)) {
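The net effect of the hunk above can be read as a small helper (hypothetical name, for illustration only): subtract the single reference contributed by deferred reference counting before deciding whether an object may be untracked or must be kept for collection.

/* Illustrative helper: the refcount the GC should reason about,
 * excluding the deferred reference. _PyObject_HasDeferredRefcount()
 * is 0 or 1, so the subtraction removes at most one reference. */
static Py_ssize_t
gc_visible_refcount(PyObject *op)
{
    return Py_REFCNT(op) - _PyObject_HasDeferredRefcount(op);
}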
@@ -466,6 +476,9 @@ mark_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
return true;
}
+ _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(op) >= 0,
+ "refcount is too small");
+
if (gc_is_unreachable(op) && gc_get_refs(op) != 0) {
// Object is reachable but currently marked as unreachable.
// Mark it as reachable and traverse its pointers to find
@@ -499,6 +512,10 @@ scan_heap_visitor(const mi_heap_t *heap, const mi_heap_area_t *area,
struct collection_state *state = (struct collection_state *)args;
if (gc_is_unreachable(op)) {
+ // Disable deferred refcounting for unreachable objects so that they
+ // are collected immediately after finalization.
+ disable_deferred_refcounting(op);
+
// Merge and add one to the refcount to prevent deallocation while we
// are holding on to it in a worklist.
merge_refcount(op, 1);
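merge_refcount(), whose signature appears as context in the first hunk, is what keeps an unreachable object alive while it sits on a worklist. A plausible sketch under the free-threaded split-refcount layout (field and macro names follow this diff; the real implementation may differ):

/* Sketch: fold the thread-local refcount into the shared refcount,
 * add `extra` (here 1, to hold the object on the worklist), and mark
 * the refcount as merged so the local count is no longer used. */
static Py_ssize_t
merge_refcount_sketch(PyObject *op, Py_ssize_t extra)
{
    Py_ssize_t refcount = Py_REFCNT(op) + extra;
    op->ob_tid = 0;        /* no owning thread after merging */
    op->ob_ref_local = 0;
    op->ob_ref_shared = _Py_REF_SHARED(refcount, _Py_REF_MERGED);
    return refcount;
}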