about summary refs log tree commit diff stats homepage
path: root/Objects/tupleobject.c
diff options
context:
space:
mode:
author: T. Wouters <thomas@python.org> 2025-02-18 16:52:46 -0800
committer: GitHub <noreply@github.com> 2025-02-18 16:52:46 -0800
commit: 388e1ca9f08ee5caefd1dd946dc6e236ce73d46f (patch)
tree: 90baf87109187b4c25d0b7e6a7db2e29476bee4b /Objects/tupleobject.c
parent: 736ad664e0c3be05fad76a764b68ffb700afb612 (diff)
download: cpython-388e1ca9f08ee5caefd1dd946dc6e236ce73d46f.tar.gz
download: cpython-388e1ca9f08ee5caefd1dd946dc6e236ce73d46f.zip
gh-115999: Make list and tuple iteration more thread-safe. (#128637)
Make tuple iteration more thread-safe, and actually test concurrent iteration of tuple, range and list. (This is prep work for enabling specialization of FOR_ITER in free-threaded builds.) The basic premise is: Iterating over a shared iterable (list, tuple or range) should be safe, not involve data races, and behave like iteration normally does. Using a shared iterator should not crash or involve data races, and should only produce items regular iteration would produce. It is not guaranteed to produce all items, or produce each item only once. (This is not the case for range iteration even after this PR.) Providing stronger guarantees is possible for some of these iterators, but it's not always straight-forward and can significantly hamper the common case. Since iterators in general aren't shared between threads, and it's simply impossible to concurrently use many iterators (like generators), better to make sharing iterators without explicit synchronization clearly wrong. Specific issues fixed in order to make the tests pass: - List iteration could occasionally fail an assertion when a shared list was shrunk and an item past the new end was retrieved concurrently. There's still some unsafety when deleting/inserting multiple items through for example slice assignment, which uses memmove/memcpy. - Tuple iteration could occasionally crash when the iterator's reference to the tuple was cleared on exhaustion. Like with list iteration, in free-threaded builds we can't safely and efficiently clear the iterator's reference to the iterable (doing it safely would mean extra, slow refcount operations), so just keep the iterable reference around.
Diffstat (limited to 'Objects/tupleobject.c')
-rw-r--r--  Objects/tupleobject.c  29
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/Objects/tupleobject.c b/Objects/tupleobject.c
index 60af9e40e3f..b7416a5a1c5 100644
--- a/Objects/tupleobject.c
+++ b/Objects/tupleobject.c
@@ -1014,18 +1014,23 @@ tupleiter_next(PyObject *self)
assert(it != NULL);
seq = it->it_seq;
+#ifndef Py_GIL_DISABLED
if (seq == NULL)
return NULL;
+#endif
assert(PyTuple_Check(seq));
- if (it->it_index < PyTuple_GET_SIZE(seq)) {
- item = PyTuple_GET_ITEM(seq, it->it_index);
- ++it->it_index;
+ Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
+ if (index < PyTuple_GET_SIZE(seq)) {
+ FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index + 1);
+ item = PyTuple_GET_ITEM(seq, index);
return Py_NewRef(item);
}
+#ifndef Py_GIL_DISABLED
it->it_seq = NULL;
Py_DECREF(seq);
+#endif
return NULL;
}
@@ -1034,8 +1039,15 @@ tupleiter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
{
_PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
Py_ssize_t len = 0;
+#ifdef Py_GIL_DISABLED
+ Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
+ Py_ssize_t seq_len = PyTuple_GET_SIZE(it->it_seq);
+ if (idx < seq_len)
+ len = seq_len - idx;
+#else
if (it->it_seq)
len = PyTuple_GET_SIZE(it->it_seq) - it->it_index;
+#endif
return PyLong_FromSsize_t(len);
}
@@ -1051,10 +1063,15 @@ tupleiter_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
* see issue #101765 */
_PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
+#ifdef Py_GIL_DISABLED
+ Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
+ if (idx < PyTuple_GET_SIZE(it->it_seq))
+ return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
+#else
if (it->it_seq)
return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
- else
- return Py_BuildValue("N(())", iter);
+#endif
+ return Py_BuildValue("N(())", iter);
}
static PyObject *
@@ -1069,7 +1086,7 @@ tupleiter_setstate(PyObject *self, PyObject *state)
index = 0;
else if (index > PyTuple_GET_SIZE(it->it_seq))
index = PyTuple_GET_SIZE(it->it_seq); /* exhausted iterator */
- it->it_index = index;
+ FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
}
Py_RETURN_NONE;
}