aboutsummaryrefslogtreecommitdiffstatshomepage
path: root/Python/specialize.c
diff options
context:
space:
mode:
authorT. Wouters <thomas@python.org>2025-03-12 16:21:46 +0100
committerGitHub <noreply@github.com>2025-03-12 16:21:46 +0100
commitde2f7da77d9e8dc6758430249e7179d37b3fee44 (patch)
tree13a20c58828b74a4851373476635382fc4aba797 /Python/specialize.c
parentdb27aee2fe253855fc57b118658f4f4718819382 (diff)
downloadcpython-de2f7da77d9e8dc6758430249e7179d37b3fee44.tar.gz
cpython-de2f7da77d9e8dc6758430249e7179d37b3fee44.zip
gh-115999: Add free-threaded specialization for FOR_ITER (#128798)
Add free-threaded versions of existing specialization for FOR_ITER (list, tuples, fast range iterators and generators), without significantly affecting their thread-safety. (Iterating over shared lists/tuples/ranges should be fine like before. Reusing iterators between threads is not fine, like before. Sharing generators between threads is a recipe for significant crashes, like before.)
Diffstat (limited to 'Python/specialize.c')
-rw-r--r--Python/specialize.c53
1 file changed, 32 insertions, 21 deletions
diff --git a/Python/specialize.c b/Python/specialize.c
index c741c4f93f3..0466b5bee90 100644
--- a/Python/specialize.c
+++ b/Python/specialize.c
@@ -2826,45 +2826,56 @@ int
void
_Py_Specialize_ForIter(_PyStackRef iter, _Py_CODEUNIT *instr, int oparg)
{
- assert(ENABLE_SPECIALIZATION);
+ assert(ENABLE_SPECIALIZATION_FT);
assert(_PyOpcode_Caches[FOR_ITER] == INLINE_CACHE_ENTRIES_FOR_ITER);
- _PyForIterCache *cache = (_PyForIterCache *)(instr + 1);
PyObject *iter_o = PyStackRef_AsPyObjectBorrow(iter);
PyTypeObject *tp = Py_TYPE(iter_o);
+#ifdef Py_GIL_DISABLED
+ // Only specialize for uniquely referenced iterators, so that we know
+ // they're only referenced by this one thread. This is more limiting
+ // than we need (even `it = iter(mylist); for item in it:` won't get
+ // specialized) but we don't have a way to check whether we're the only
+ // _thread_ who has access to the object.
+ if (!_PyObject_IsUniquelyReferenced(iter_o))
+ goto failure;
+#endif
if (tp == &PyListIter_Type) {
- instr->op.code = FOR_ITER_LIST;
- goto success;
+#ifdef Py_GIL_DISABLED
+ _PyListIterObject *it = (_PyListIterObject *)iter_o;
+ if (!_Py_IsOwnedByCurrentThread((PyObject *)it->it_seq) &&
+ !_PyObject_GC_IS_SHARED(it->it_seq)) {
+ // Maybe this should just set GC_IS_SHARED in a critical
+ // section, instead of leaving it to the first iteration?
+ goto failure;
+ }
+#endif
+ specialize(instr, FOR_ITER_LIST);
+ return;
}
else if (tp == &PyTupleIter_Type) {
- instr->op.code = FOR_ITER_TUPLE;
- goto success;
+ specialize(instr, FOR_ITER_TUPLE);
+ return;
}
else if (tp == &PyRangeIter_Type) {
- instr->op.code = FOR_ITER_RANGE;
- goto success;
+ specialize(instr, FOR_ITER_RANGE);
+ return;
}
else if (tp == &PyGen_Type && oparg <= SHRT_MAX) {
+ // Generators are very much not thread-safe, so don't worry about
+ // the specialization not being thread-safe.
assert(instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == END_FOR ||
instr[oparg + INLINE_CACHE_ENTRIES_FOR_ITER + 1].op.code == INSTRUMENTED_END_FOR
);
/* Don't specialize if PEP 523 is active */
- if (_PyInterpreterState_GET()->eval_frame) {
- SPECIALIZATION_FAIL(FOR_ITER, SPEC_FAIL_OTHER);
+ if (_PyInterpreterState_GET()->eval_frame)
goto failure;
- }
- instr->op.code = FOR_ITER_GEN;
- goto success;
+ specialize(instr, FOR_ITER_GEN);
+ return;
}
+failure:
SPECIALIZATION_FAIL(FOR_ITER,
_PySpecialization_ClassifyIterator(iter_o));
-failure:
- STAT_INC(FOR_ITER, failure);
- instr->op.code = FOR_ITER;
- cache->counter = adaptive_counter_backoff(cache->counter);
- return;
-success:
- STAT_INC(FOR_ITER, success);
- cache->counter = adaptive_counter_cooldown();
+ unspecialize(instr);
}
void