author      Mark Shannon <mark@hotpy.org>      2024-03-20 08:54:42 +0000
committer   GitHub <noreply@github.com>        2024-03-20 08:54:42 +0000
commit      15309329b65a285cb7b3071f0f08ac964b61411b (patch)
tree        83b5be564755d7ea396c76eda29e6d33faf535d9 /Python/gc_free_threading.c
parent      d5ebf8b71fd18d7a1f2f6b670a2c18749dc2b55e (diff)
GH-108362: Incremental Cycle GC (GH-116206)
Diffstat (limited to 'Python/gc_free_threading.c')
-rw-r--r--    Python/gc_free_threading.c | 23
1 file changed, 12 insertions(+), 11 deletions(-)
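The patch below renames the GC counters for the incremental collector: the former gcstate->generations[0] becomes gcstate->young, and generations[1..] become old[0..]. A minimal sketch of the layout this implies follows; the authoritative definitions live in Include/internal/pycore_gc.h, and the struct and type names here other than young, old, count and threshold are assumptions for illustration, not taken from the patch.

/* Sketch only: the generation layout implied by the renames below.
   Names other than young, old, count and threshold are assumed. */
#define NUM_GENERATIONS 3                  /* assumed value; matches the traditional 3-generation scheme */

struct gc_counts_sketch {
    int threshold;                         /* collect once count exceeds this (0 disables) */
    int count;                             /* net allocations since the last collection */
};

struct gc_state_sketch {
    /* before: struct gc_generation generations[NUM_GENERATIONS]; */
    struct gc_counts_sketch young;                     /* was generations[0] */
    struct gc_counts_sketch old[NUM_GENERATIONS - 1];  /* was generations[1] and generations[2] */
};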
diff --git a/Python/gc_free_threading.c b/Python/gc_free_threading.c
index 2b13d1f005d..52c79c02099 100644
--- a/Python/gc_free_threading.c
+++ b/Python/gc_free_threading.c
@@ -675,7 +675,7 @@ void
_PyGC_InitState(GCState *gcstate)
{
// TODO: move to pycore_runtime_init.h once the incremental GC lands.
- gcstate->generations[0].threshold = 2000;
+ gcstate->young.threshold = 2000;
}
@@ -970,8 +970,8 @@ cleanup_worklist(struct worklist *worklist)
static bool
gc_should_collect(GCState *gcstate)
{
- int count = _Py_atomic_load_int_relaxed(&gcstate->generations[0].count);
- int threshold = gcstate->generations[0].threshold;
+ int count = _Py_atomic_load_int_relaxed(&gcstate->young.count);
+ int threshold = gcstate->young.threshold;
if (count <= threshold || threshold == 0 || !gcstate->enabled) {
return false;
}
@@ -979,7 +979,7 @@ gc_should_collect(GCState *gcstate)
// objects. A few tests rely on immediate scheduling of the GC so we ignore
// the scaled threshold if generations[1].threshold is set to zero.
return (count > gcstate->long_lived_total / 4 ||
- gcstate->generations[1].threshold == 0);
+ gcstate->old[0].threshold == 0);
}
static void
@@ -993,7 +993,7 @@ record_allocation(PyThreadState *tstate)
if (gc->alloc_count >= LOCAL_ALLOC_COUNT_THRESHOLD) {
// TODO: Use Py_ssize_t for the generation count.
GCState *gcstate = &tstate->interp->gc;
- _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
+ _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
gc->alloc_count = 0;
if (gc_should_collect(gcstate) &&
@@ -1012,7 +1012,7 @@ record_deallocation(PyThreadState *tstate)
gc->alloc_count--;
if (gc->alloc_count <= -LOCAL_ALLOC_COUNT_THRESHOLD) {
GCState *gcstate = &tstate->interp->gc;
- _Py_atomic_add_int(&gcstate->generations[0].count, (int)gc->alloc_count);
+ _Py_atomic_add_int(&gcstate->young.count, (int)gc->alloc_count);
gc->alloc_count = 0;
}
}
@@ -1137,10 +1137,11 @@ gc_collect_main(PyThreadState *tstate, int generation, _PyGC_Reason reason)
/* update collection and allocation counters */
if (generation+1 < NUM_GENERATIONS) {
- gcstate->generations[generation+1].count += 1;
+ gcstate->old[generation].count += 1;
}
- for (i = 0; i <= generation; i++) {
- gcstate->generations[i].count = 0;
+ gcstate->young.count = 0;
+ for (i = 1; i <= generation; i++) {
+ gcstate->old[i-1].count = 0;
}
PyInterpreterState *interp = tstate->interp;
@@ -1463,7 +1464,7 @@ _PyGC_Collect(PyThreadState *tstate, int generation, _PyGC_Reason reason)
return gc_collect_main(tstate, generation, reason);
}
-Py_ssize_t
+void
_PyGC_CollectNoFail(PyThreadState *tstate)
{
/* Ideally, this function is only called on interpreter shutdown,
@@ -1472,7 +1473,7 @@ _PyGC_CollectNoFail(PyThreadState *tstate)
during interpreter shutdown (and then never finish it).
See http://bugs.python.org/issue8713#msg195178 for an example.
*/
- return gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_SHUTDOWN);
+ gc_collect_main(tstate, NUM_GENERATIONS - 1, _Py_GC_REASON_SHUTDOWN);
}
void
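For readers following the gc_collect_main hunk above, the rewritten counter bookkeeping can be read as the standalone sketch below. It only illustrates the new indexing (young is generation 0, and old[g-1] stands in for the former generations[g]); it is not code copied from the tree, and the helper name is hypothetical.

/* Illustration only: how the counters advance after collecting `generation`,
   mirroring the gc_collect_main hunk above. Assumes GCState has the
   young/old layout sketched earlier. */
static void
update_counts_sketch(GCState *gcstate, int generation)
{
    if (generation + 1 < NUM_GENERATIONS) {
        /* credit the next-older space (the former generations[generation+1]) */
        gcstate->old[generation].count += 1;
    }
    /* the young space is always part of a collection, so its count resets */
    gcstate->young.count = 0;
    /* reset every older space included in this collection */
    for (int i = 1; i <= generation; i++) {
        gcstate->old[i - 1].count = 0;
    }
}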