author     Peter Hawkins <phawkins@google.com>   2025-05-13 13:38:57 -0400
committer  GitHub <noreply@github.com>           2025-05-13 17:38:57 +0000
commit     9ad0c7b0f14c5fcda6bfae6692c88abb95502d38 (patch)
tree       8d818a816699d0363098e527fd6f3bfad7cdccc4
parent     35f47d05893e012e9f2b145b934c1d8c61d2bb7d (diff)
download   cpython-9ad0c7b0f14c5fcda6bfae6692c88abb95502d38.tar.gz
           cpython-9ad0c7b0f14c5fcda6bfae6692c88abb95502d38.zip
gh-132641: fix race in `lru_cache` under free-threading (#133787)
Fix a race in `lru_cache` by acquiring a critical section on the cache object itself and calling the lock-held variants of the dict functions that modify the underlying dict.
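
The pattern behind the fix, in brief: mutations of the bounded cache's dict happen while the caller already holds the critical section of the lru_cache object, so they go through the lock-held dict variants instead of the public entry points (which would take the per-dict lock themselves). The snippet below is a condensed illustration of that pattern, not code from this commit; the cache_object struct and cache_store() helper are hypothetical.

/* Condensed illustration of the locking pattern described above; the
 * cache_object struct and cache_store() helper are hypothetical, and the
 * internal header requires building inside CPython (Py_BUILD_CORE). */
#include "Python.h"
#include "pycore_dict.h"          // _PyDict_SetItem_KnownHash_LockHeld()

typedef struct {
    PyObject_HEAD
    PyObject *cache;              /* private dict, only touched via this object */
} cache_object;

static int
cache_store(cache_object *self, PyObject *key, PyObject *value, Py_hash_t hash)
{
    int res;
    /* Lock the owning object, not the dict: every reader and writer of
     * self->cache agrees on this single lock, so two threads can never
     * interleave partially completed updates of the cache state. */
    Py_BEGIN_CRITICAL_SECTION(self);
    /* Lock-held variant: does not try to take the per-dict lock again. */
    res = _PyDict_SetItem_KnownHash_LockHeld((PyDictObject *)self->cache,
                                             key, value, hash);
    Py_END_CRITICAL_SECTION();
    return res;
}
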
-rw-r--r--  Include/internal/pycore_dict.h                                            2
-rw-r--r--  Lib/test/test_free_threading/test_functools.py                           75
-rw-r--r--  Misc/NEWS.d/next/Library/2025-05-09-20-59-24.gh-issue-132641.3qTw44.rst   1
-rw-r--r--  Modules/_functoolsmodule.c                                               16
-rw-r--r--  Objects/dictobject.c                                                      5
5 files changed, 94 insertions(+), 5 deletions(-)
diff --git a/Include/internal/pycore_dict.h b/Include/internal/pycore_dict.h
index 754eb88a85c..25bb224921a 100644
--- a/Include/internal/pycore_dict.h
+++ b/Include/internal/pycore_dict.h
@@ -150,6 +150,8 @@ extern int _PyDict_Pop_KnownHash(
Py_hash_t hash,
PyObject **result);
+extern void _PyDict_Clear_LockHeld(PyObject *op);
+
#ifdef Py_GIL_DISABLED
PyAPI_FUNC(void) _PyDict_EnsureSharedOnRead(PyDictObject *mp);
#endif
diff --git a/Lib/test/test_free_threading/test_functools.py b/Lib/test/test_free_threading/test_functools.py
new file mode 100644
index 00000000000..a442fe056ce
--- /dev/null
+++ b/Lib/test/test_free_threading/test_functools.py
@@ -0,0 +1,75 @@
+import random
+import unittest
+
+from functools import lru_cache
+from threading import Barrier, Thread
+
+from test.support import threading_helper
+
+@threading_helper.requires_working_threading()
+class TestLRUCache(unittest.TestCase):
+
+    def _test_concurrent_operations(self, maxsize):
+        num_threads = 10
+        b = Barrier(num_threads)
+        @lru_cache(maxsize=maxsize)
+        def func(arg=0):
+            return object()
+
+
+        def thread_func():
+            b.wait()
+            for i in range(1000):
+                r = random.randint(0, 1000)
+                if i < 800:
+                    func(i)
+                elif i < 900:
+                    func.cache_info()
+                else:
+                    func.cache_clear()
+
+        threads = []
+        for i in range(num_threads):
+            t = Thread(target=thread_func)
+            threads.append(t)
+
+        with threading_helper.start_threads(threads):
+            pass
+
+    def test_concurrent_operations_unbounded(self):
+        self._test_concurrent_operations(maxsize=None)
+
+    def test_concurrent_operations_bounded(self):
+        self._test_concurrent_operations(maxsize=128)
+
+    def _test_reentrant_cache_clear(self, maxsize):
+        num_threads = 10
+        b = Barrier(num_threads)
+        @lru_cache(maxsize=maxsize)
+        def func(arg=0):
+            func.cache_clear()
+            return object()
+
+
+        def thread_func():
+            b.wait()
+            for i in range(1000):
+                func(random.randint(0, 10000))
+
+        threads = []
+        for i in range(num_threads):
+            t = Thread(target=thread_func)
+            threads.append(t)
+
+        with threading_helper.start_threads(threads):
+            pass
+
+    def test_reentrant_cache_clear_unbounded(self):
+        self._test_reentrant_cache_clear(maxsize=None)
+
+    def test_reentrant_cache_clear_bounded(self):
+        self._test_reentrant_cache_clear(maxsize=128)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/Misc/NEWS.d/next/Library/2025-05-09-20-59-24.gh-issue-132641.3qTw44.rst b/Misc/NEWS.d/next/Library/2025-05-09-20-59-24.gh-issue-132641.3qTw44.rst
new file mode 100644
index 00000000000..419ff19d9c0
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-09-20-59-24.gh-issue-132641.3qTw44.rst
@@ -0,0 +1 @@
+Fixed a race in :func:`functools.lru_cache` under free-threading.
diff --git a/Modules/_functoolsmodule.c b/Modules/_functoolsmodule.c
index 899eef50ecc..354dbad84b5 100644
--- a/Modules/_functoolsmodule.c
+++ b/Modules/_functoolsmodule.c
@@ -1383,8 +1383,8 @@ bounded_lru_cache_update_lock_held(lru_cache_object *self,
this same key, then this setitem call will update the cache dict
with this new link, leaving the old link as an orphan (i.e. not
having a cache dict entry that refers to it). */
- if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
- hash) < 0) {
+ if (_PyDict_SetItem_KnownHash_LockHeld((PyDictObject *)self->cache, key,
+ (PyObject *)link, hash) < 0) {
Py_DECREF(link);
return NULL;
}
@@ -1453,8 +1453,8 @@ bounded_lru_cache_update_lock_held(lru_cache_object *self,
for successful insertion in the cache dict before adding the
link to the linked list. Otherwise, the potentially reentrant
__eq__ call could cause the then orphan link to be visited. */
- if (_PyDict_SetItem_KnownHash(self->cache, key, (PyObject *)link,
- hash) < 0) {
+ if (_PyDict_SetItem_KnownHash_LockHeld((PyDictObject *)self->cache, key,
+ (PyObject *)link, hash) < 0) {
/* Somehow the cache dict update failed. We no longer can
restore the old link. Let the error propagate upward and
leave the cache short one link. */
@@ -1689,7 +1689,13 @@ _functools__lru_cache_wrapper_cache_clear_impl(PyObject *self)
     lru_list_elem *list = lru_cache_unlink_list(_self);
     FT_ATOMIC_STORE_SSIZE_RELAXED(_self->hits, 0);
     FT_ATOMIC_STORE_SSIZE_RELAXED(_self->misses, 0);
-    PyDict_Clear(_self->cache);
+    if (_self->wrapper == bounded_lru_cache_wrapper) {
+        /* The critical section on the lru cache itself protects the dictionary
+           for bounded_lru_cache instances. */
+        _PyDict_Clear_LockHeld(_self->cache);
+    } else {
+        PyDict_Clear(_self->cache);
+    }
     lru_cache_clear_list(list);
     Py_RETURN_NONE;
 }
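
The cache_clear change above hinges on who owns the lock: the bounded wrapper runs with the critical section of the lru_cache object held, so its dict must be cleared with the lock-held helper, while the unbounded cache lets PyDict_Clear take the dict's own critical section. A hypothetical sketch of that choice (clear_cache_dict() and owner_lock_held are illustrative names, not part of this commit):

/* Hypothetical sketch of the branch above. */
static void
clear_cache_dict(PyObject *dict, int owner_lock_held)
{
    if (owner_lock_held) {
        /* Caller already serializes access via the owner's critical
         * section; taking the dict's lock here would be redundant. */
        _PyDict_Clear_LockHeld(dict);
    }
    else {
        /* Public entry point acquires the dict's own critical section. */
        PyDict_Clear(dict);
    }
}
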
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index ce27e47dabf..fd8ccf56324 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -2916,6 +2916,11 @@ clear_lock_held(PyObject *op)
}
void
+_PyDict_Clear_LockHeld(PyObject *op) {
+ clear_lock_held(op);
+}
+
+void
PyDict_Clear(PyObject *op)
{
Py_BEGIN_CRITICAL_SECTION(op);
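
For reference, the dictobject.c addition follows the file's usual split between a static lock-held worker and a public wrapper that takes the critical section; the new export simply hands internal callers the worker directly. A rough sketch of that convention with made-up example_* names, not the actual dictobject.c source:

/* Sketch of the wrapper/worker convention (made-up example_* names). */
#include "Python.h"

static void
example_clear_lock_held(PyObject *op)
{
    /* ... mutate the dict; assumes the caller holds a suitable lock ... */
}

void
example_Clear_LockHeld(PyObject *op)
{
    /* For internal callers that already hold a lock covering the dict. */
    example_clear_lock_held(op);
}

void
example_Clear(PyObject *op)
{
    /* Public entry point: lock the dict itself, then do the work. */
    Py_BEGIN_CRITICAL_SECTION(op);
    example_clear_lock_held(op);
    Py_END_CRITICAL_SECTION();
}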