aboutsummaryrefslogtreecommitdiffstatshomepage
diff options
context:
space:
mode:
-rw-r--r--Doc/howto/free-threading-extensions.rst2
-rw-r--r--Doc/howto/free-threading-python.rst2
-rw-r--r--Doc/library/asyncio-eventloop.rst25
-rw-r--r--Doc/library/asyncio-task.rst21
-rw-r--r--Doc/library/multiprocessing.rst10
-rw-r--r--Doc/library/stdtypes.rst22
-rw-r--r--Doc/using/windows.rst47
-rw-r--r--Lib/asyncio/base_subprocess.py7
-rw-r--r--Lib/asyncio/tools.py12
-rw-r--r--Lib/http/server.py15
-rw-r--r--Lib/ipaddress.py11
-rw-r--r--Lib/multiprocessing/context.py2
-rw-r--r--Lib/pdb.py3
-rw-r--r--Lib/test/_test_multiprocessing.py22
-rw-r--r--Lib/test/support/interpreters/channels.py2
-rw-r--r--Lib/test/test__interpreters.py25
-rw-r--r--Lib/test/test_ast/test_ast.py1
-rw-r--r--Lib/test/test_capi/test_object.py1
-rw-r--r--Lib/test/test_collections.py2
-rw-r--r--Lib/test/test_httpservers.py70
-rw-r--r--Lib/test/test_interpreters/test_api.py23
-rw-r--r--Lib/test/test_ipaddress.py11
-rw-r--r--Lib/test/test_json/test_recursion.py1
-rw-r--r--Lib/test/test_memoryview.py20
-rw-r--r--Lib/test/test_statistics.py1
-rw-r--r--Lib/test/test_unittest/test_result.py37
-rw-r--r--Lib/test/test_unittest/test_runner.py52
-rw-r--r--Lib/test/test_zstd.py61
-rw-r--r--Lib/types.py2
-rw-r--r--Lib/unittest/case.py4
-rw-r--r--Lib/unittest/suite.py20
-rw-r--r--Misc/NEWS.d/next/Build/2025-05-21-22-13-30.gh-issue-134486.yvdL6f.rst3
-rw-r--r--Misc/NEWS.d/next/Core_and_Builtins/2025-05-22-14-48-19.gh-issue-134381.2BXhth.rst1
-rw-r--r--Misc/NEWS.d/next/Library/2024-06-06-17-49-07.gh-issue-120170.DUxhmT.rst3
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-18-13-23-29.gh-issue-134168.hgx3Xg.rst2
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-22-13-10-32.gh-issue-114177.3TYUJ3.rst1
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-22-14-12-53.gh-issue-134451.M1rD-j.rst1
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-22-18-14-13.gh-issue-134546.fjLVzK.rst1
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-23-10-15-36.gh-issue-134565.zmb66C.rst3
-rw-r--r--Misc/NEWS.d/next/Library/2025-05-24-03-10-36.gh-issue-80334.z21cMa.rst2
-rw-r--r--Misc/NEWS.d/next/Security/2025-01-14-11-19-07.gh-issue-128840.M1doZW.rst2
-rw-r--r--Modules/_ctypes/callbacks.c9
-rw-r--r--Modules/_ctypes/callproc.c8
-rw-r--r--Modules/_ctypes/ctypes.h14
-rw-r--r--Modules/_interpretersmodule.c294
-rw-r--r--Modules/_pickle.c4
-rw-r--r--Modules/_threadmodule.c6
-rw-r--r--Modules/_zstd/_zstdmodule.c118
-rw-r--r--Modules/_zstd/buffer.h9
-rw-r--r--Modules/_zstd/clinic/decompressor.c.h11
-rw-r--r--Modules/_zstd/clinic/zstddict.c.h39
-rw-r--r--Modules/_zstd/compressor.c205
-rw-r--r--Modules/_zstd/decompressor.c187
-rw-r--r--Modules/_zstd/zstddict.c39
-rw-r--r--Modules/_zstd/zstddict.h3
-rw-r--r--Python/optimizer_analysis.c3
56 files changed, 1014 insertions, 488 deletions
diff --git a/Doc/howto/free-threading-extensions.rst b/Doc/howto/free-threading-extensions.rst
index 5a3970f15d5..175bb5dc831 100644
--- a/Doc/howto/free-threading-extensions.rst
+++ b/Doc/howto/free-threading-extensions.rst
@@ -396,7 +396,7 @@ The wheels, shared libraries, and binaries are indicated by a ``t`` suffix.
free-threaded build, with the ``t`` suffix, such as ``python3.13t``.
* `pypa/cibuildwheel <https://github.com/pypa/cibuildwheel>`_ supports the
free-threaded build if you set
- `CIBW_FREE_THREADED_SUPPORT <https://cibuildwheel.pypa.io/en/stable/options/#free-threaded-support>`_.
+ `CIBW_ENABLE to cpython-freethreading <https://cibuildwheel.pypa.io/en/stable/options/#enable>`_.
Limited C API and Stable ABI
............................
diff --git a/Doc/howto/free-threading-python.rst b/Doc/howto/free-threading-python.rst
index f7a894ac2cd..c33cef2c8e9 100644
--- a/Doc/howto/free-threading-python.rst
+++ b/Doc/howto/free-threading-python.rst
@@ -32,7 +32,7 @@ optionally support installing free-threaded Python binaries. The installers
are available at https://www.python.org/downloads/.
For information on other platforms, see the `Installing a Free-Threaded Python
-<https://py-free-threading.github.io/installing_cpython/>`_, a
+<https://py-free-threading.github.io/installing-cpython/>`_, a
community-maintained installation guide for installing free-threaded Python.
When building CPython from source, the :option:`--disable-gil` configure option
diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst
index 21f7d0547af..91970c28239 100644
--- a/Doc/library/asyncio-eventloop.rst
+++ b/Doc/library/asyncio-eventloop.rst
@@ -361,7 +361,7 @@ Creating Futures and Tasks
.. versionadded:: 3.5.2
-.. method:: loop.create_task(coro, *, name=None, context=None, eager_start=None)
+.. method:: loop.create_task(coro, *, name=None, context=None, eager_start=None, **kwargs)
Schedule the execution of :ref:`coroutine <coroutine>` *coro*.
Return a :class:`Task` object.
@@ -370,6 +370,10 @@ Creating Futures and Tasks
for interoperability. In this case, the result type is a subclass
of :class:`Task`.
+ The full function signature is largely the same as that of the
+ :class:`Task` constructor (or factory) - all of the keyword arguments to
+ this function are passed through to that interface.
+
If the *name* argument is provided and not ``None``, it is set as
the name of the task using :meth:`Task.set_name`.
@@ -388,8 +392,15 @@ Creating Futures and Tasks
.. versionchanged:: 3.11
Added the *context* parameter.
+ .. versionchanged:: 3.13.3
+ Added ``kwargs`` which passes on arbitrary extra parameters, including ``name`` and ``context``.
+
+ .. versionchanged:: 3.13.4
+ Rolled back the change that passes on *name* and *context* (if it is None),
+ while still passing on other arbitrary keyword arguments (to avoid breaking backwards compatibility with 3.13.3).
+
.. versionchanged:: 3.14
- Added the *eager_start* parameter.
+ All *kwargs* are now passed on. The *eager_start* parameter works with eager task factories.
.. method:: loop.set_task_factory(factory)
@@ -402,6 +413,16 @@ Creating Futures and Tasks
event loop, and *coro* is a coroutine object. The callable
must pass on all *kwargs*, and return a :class:`asyncio.Task`-compatible object.
+ .. versionchanged:: 3.13.3
+ Required that all *kwargs* are passed on to :class:`asyncio.Task`.
+
+ .. versionchanged:: 3.13.4
+ *name* is no longer passed to task factories. *context* is no longer passed
+ to task factories if it is ``None``.
+
+ .. versionchanged:: 3.14
+ *name* and *context* are now unconditionally passed on to task factories again.
+
.. method:: loop.get_task_factory()
Return a task factory or ``None`` if the default one is in use.
diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst
index 59acce1990a..b19ffa8213a 100644
--- a/Doc/library/asyncio-task.rst
+++ b/Doc/library/asyncio-task.rst
@@ -238,18 +238,24 @@ Creating Tasks
-----------------------------------------------
-.. function:: create_task(coro, *, name=None, context=None)
+.. function:: create_task(coro, *, name=None, context=None, eager_start=None, **kwargs)
Wrap the *coro* :ref:`coroutine <coroutine>` into a :class:`Task`
and schedule its execution. Return the Task object.
- If *name* is not ``None``, it is set as the name of the task using
- :meth:`Task.set_name`.
+ The full function signature is largely the same as that of the
+ :class:`Task` constructor (or factory) - all of the keyword arguments to
+ this function are passed through to that interface.
An optional keyword-only *context* argument allows specifying a
custom :class:`contextvars.Context` for the *coro* to run in.
The current context copy is created when no *context* is provided.
+ An optional keyword-only *eager_start* argument allows specifying
+ if the task should execute eagerly during the call to create_task,
+ or be scheduled later. If *eager_start* is not passed the mode set
+ by :meth:`loop.set_task_factory` will be used.
+
The task is executed in the loop returned by :func:`get_running_loop`,
:exc:`RuntimeError` is raised if there is no running loop in
current thread.
@@ -290,6 +296,9 @@ Creating Tasks
.. versionchanged:: 3.11
Added the *context* parameter.
+ .. versionchanged:: 3.14
+ Added the *eager_start* parameter by passing on all *kwargs*.
+
Task Cancellation
=================
@@ -330,7 +339,7 @@ and reliable way to wait for all tasks in the group to finish.
.. versionadded:: 3.11
- .. method:: create_task(coro, *, name=None, context=None)
+ .. method:: create_task(coro, *, name=None, context=None, eager_start=None, **kwargs)
Create a task in this task group.
The signature matches that of :func:`asyncio.create_task`.
@@ -342,6 +351,10 @@ and reliable way to wait for all tasks in the group to finish.
Close the given coroutine if the task group is not active.
+ .. versionchanged:: 3.14
+
+ Passes on all *kwargs* to :meth:`loop.create_task`
+
Example::
async def main():
diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst
index 80e33c4a1df..fc3c1134f97 100644
--- a/Doc/library/multiprocessing.rst
+++ b/Doc/library/multiprocessing.rst
@@ -1081,7 +1081,7 @@ Miscellaneous
.. function:: freeze_support()
Add support for when a program which uses :mod:`multiprocessing` has been
- frozen to produce a Windows executable. (Has been tested with **py2exe**,
+ frozen to produce an executable. (Has been tested with **py2exe**,
**PyInstaller** and **cx_Freeze**.)
One needs to call this function straight after the ``if __name__ ==
@@ -1099,10 +1099,10 @@ Miscellaneous
If the ``freeze_support()`` line is omitted then trying to run the frozen
executable will raise :exc:`RuntimeError`.
- Calling ``freeze_support()`` has no effect when invoked on any operating
- system other than Windows. In addition, if the module is being run
- normally by the Python interpreter on Windows (the program has not been
- frozen), then ``freeze_support()`` has no effect.
+ Calling ``freeze_support()`` has no effect when the start method is not
+ *spawn*. In addition, if the module is being run normally by the Python
+ interpreter (the program has not been frozen), then ``freeze_support()``
+ has no effect.
.. function:: get_all_start_methods()
diff --git a/Doc/library/stdtypes.rst b/Doc/library/stdtypes.rst
index 3486a18b5cb..31d71031bca 100644
--- a/Doc/library/stdtypes.rst
+++ b/Doc/library/stdtypes.rst
@@ -1788,8 +1788,14 @@ expression support in the :mod:`re` module).
Return centered in a string of length *width*. Padding is done using the
specified *fillchar* (default is an ASCII space). The original string is
- returned if *width* is less than or equal to ``len(s)``.
+ returned if *width* is less than or equal to ``len(s)``. For example::
+ >>> 'Python'.center(10)
+ ' Python '
+ >>> 'Python'.center(10, '-')
+ '--Python--'
+ >>> 'Python'.center(4)
+ 'Python'
.. method:: str.count(sub[, start[, end]])
@@ -1799,8 +1805,18 @@ expression support in the :mod:`re` module).
interpreted as in slice notation.
If *sub* is empty, returns the number of empty strings between characters
- which is the length of the string plus one.
-
+ which is the length of the string plus one. For example::
+
+ >>> 'spam, spam, spam'.count('spam')
+ 3
+ >>> 'spam, spam, spam'.count('spam', 5)
+ 2
+ >>> 'spam, spam, spam'.count('spam', 5, 10)
+ 1
+ >>> 'spam, spam, spam'.count('eggs')
+ 0
+ >>> 'spam, spam, spam'.count('')
+ 17
.. method:: str.encode(encoding="utf-8", errors="strict")
diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst
index cd22148b531..9628da3d2f6 100644
--- a/Doc/using/windows.rst
+++ b/Doc/using/windows.rst
@@ -504,6 +504,14 @@ configuration option.
installing and uninstalling.
+.. _Add-AppxPackage: https://learn.microsoft.com/powershell/module/appx/add-appxpackage
+
+.. _Remove-AppxPackage: https://learn.microsoft.com/powershell/module/appx/remove-appxpackage
+
+.. _Add-AppxProvisionedPackage: https://learn.microsoft.com/powershell/module/dism/add-appxprovisionedpackage
+
+.. _PackageManager: https://learn.microsoft.com/uwp/api/windows.management.deployment.packagemanager
+
.. _pymanager-advancedinstall:
Advanced Installation
@@ -559,12 +567,10 @@ downloaded MSIX can be installed by launching or using the commands below.
.. code-block:: powershell
- $> winget download 9NQ7512CXL7T -e --skip-license --accept-package-agreements --disable-interactivity
+ $> winget download 9NQ7512CXL7T -e --skip-license --accept-package-agreements --accept-source-agreements
To programmatically install or uninstall an MSIX using only PowerShell, the
-`Add-AppxPackage <https://learn.microsoft.com/powershell/module/appx/add-appxpackage>`_
-and `Remove-AppxPackage <https://learn.microsoft.com/powershell/module/appx/remove-appxpackage>`_
-PowerShell cmdlets are recommended:
+`Add-AppxPackage`_ and `Remove-AppxPackage`_ PowerShell cmdlets are recommended:
.. code-block:: powershell
@@ -572,18 +578,33 @@ PowerShell cmdlets are recommended:
...
$> Get-AppxPackage PythonSoftwareFoundation.PythonManager | Remove-AppxPackage
-The native APIs for package management may be found on the Windows
-`PackageManager <https://learn.microsoft.com/uwp/api/windows.management.deployment.packagemanager>`_
-class. The :func:`!AddPackageAsync` method installs for the current user, or use
-:func:`!StagePackageAsync` followed by :func:`!ProvisionPackageForAllUsersAsync`
-to install the Python install manager for all users from the MSIX package. Users
-will still need to install their own copies of Python itself, as there is no way
-to trigger those installs without being a logged in user.
+The latest release can be downloaded and installed by Windows by passing the
+AppInstaller file to the Add-AppxPackage command. This installs using the MSIX
+on python.org, and is only recommended for cases where installing via the Store
+(interactively or using WinGet) is not possible.
+
+.. code-block:: powershell
+
+ $> Add-AppxPackage -AppInstallerFile https://www.python.org/ftp/python/pymanager/pymanager.appinstaller
+
+Other tools and APIs may also be used to provision an MSIX package for all users
+on a machine, but Python does not consider this a supported scenario. We suggest
+looking into the PowerShell `Add-AppxProvisionedPackage`_ cmdlet, the native
+Windows `PackageManager`_ class, or the documentation and support for your
+deployment tool.
+
+Regardless of the install method, users will still need to install their own
+copies of Python itself, as there is no way to trigger those installs without
+being a logged in user. When using the MSIX, the latest version of Python will
+be available for all users to install without network access.
Note that the MSIX downloadable from the Store and from the Python website are
subtly different and cannot be installed at the same time. Wherever possible,
-we suggest using the above commands to download the package from the Store to
-reduce the risk of setting up conflicting installs.
+we suggest using the above WinGet commands to download the package from the
+Store to reduce the risk of setting up conflicting installs. There are no
+licensing restrictions on the Python install manager that would prevent using
+the Store package in this way.
+
.. _pymanager-admin-config:
diff --git a/Lib/asyncio/base_subprocess.py b/Lib/asyncio/base_subprocess.py
index 9c2ba679ce2..d40af422e61 100644
--- a/Lib/asyncio/base_subprocess.py
+++ b/Lib/asyncio/base_subprocess.py
@@ -104,7 +104,12 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
for proto in self._pipes.values():
if proto is None:
continue
- proto.pipe.close()
+ # See gh-114177
+ # skip closing the pipe if loop is already closed
+ # this can happen e.g. when loop is closed immediately after
+ # process is killed
+ if self._loop and not self._loop.is_closed():
+ proto.pipe.close()
if (self._proc is not None and
# has the child process finished?
diff --git a/Lib/asyncio/tools.py b/Lib/asyncio/tools.py
index bf1cb5e64cb..b2da7d2f6ba 100644
--- a/Lib/asyncio/tools.py
+++ b/Lib/asyncio/tools.py
@@ -13,11 +13,17 @@ class NodeType(Enum):
TASK = 2
-@dataclass(frozen=True)
class CycleFoundException(Exception):
"""Raised when there is a cycle when drawing the call tree."""
- cycles: list[list[int]]
- id2name: dict[int, str]
+ def __init__(
+ self,
+ cycles: list[list[int]],
+ id2name: dict[int, str],
+ ) -> None:
+ super().__init__(cycles, id2name)
+ self.cycles = cycles
+ self.id2name = id2name
+
# ─── indexing helpers ───────────────────────────────────────────
diff --git a/Lib/http/server.py b/Lib/http/server.py
index f6d1b998f42..ef10d185932 100644
--- a/Lib/http/server.py
+++ b/Lib/http/server.py
@@ -980,8 +980,8 @@ def test(HandlerClass=BaseHTTPRequestHandler,
HandlerClass.protocol_version = protocol
if tls_cert:
- server = ThreadingHTTPSServer(addr, HandlerClass, certfile=tls_cert,
- keyfile=tls_key, password=tls_password)
+ server = ServerClass(addr, HandlerClass, certfile=tls_cert,
+ keyfile=tls_key, password=tls_password)
else:
server = ServerClass(addr, HandlerClass)
@@ -1041,7 +1041,7 @@ def _main(args=None):
parser.error(f"Failed to read TLS password file: {e}")
# ensure dual-stack is not disabled; ref #38907
- class DualStackServer(ThreadingHTTPServer):
+ class DualStackServerMixin:
def server_bind(self):
# suppress exception when protocol is IPv4
@@ -1054,9 +1054,16 @@ def _main(args=None):
self.RequestHandlerClass(request, client_address, self,
directory=args.directory)
+ class HTTPDualStackServer(DualStackServerMixin, ThreadingHTTPServer):
+ pass
+ class HTTPSDualStackServer(DualStackServerMixin, ThreadingHTTPSServer):
+ pass
+
+ ServerClass = HTTPSDualStackServer if args.tls_cert else HTTPDualStackServer
+
test(
HandlerClass=SimpleHTTPRequestHandler,
- ServerClass=DualStackServer,
+ ServerClass=ServerClass,
port=args.port,
bind=args.bind,
protocol=args.protocol,
diff --git a/Lib/ipaddress.py b/Lib/ipaddress.py
index d8a84f33264..69e933a6541 100644
--- a/Lib/ipaddress.py
+++ b/Lib/ipaddress.py
@@ -1660,8 +1660,16 @@ class _BaseV6:
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
+ if len(ip_str) > 39:
+ msg = ("At most 39 characters expected in "
+ f"{ip_str[:14]!r}({len(ip_str)-28} chars elided){ip_str[-14:]!r}")
+ raise AddressValueError(msg)
- parts = ip_str.split(':')
+ # We want to allow more parts than the max to be 'split'
+ # to preserve the correct error message when there are
+ # too many parts combined with '::'
+ _max_parts = cls._HEXTET_COUNT + 1
+ parts = ip_str.split(':', maxsplit=_max_parts)
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
@@ -1681,7 +1689,6 @@ class _BaseV6:
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
- _max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (_max_parts-1, ip_str)
raise AddressValueError(msg)
diff --git a/Lib/multiprocessing/context.py b/Lib/multiprocessing/context.py
index d0a3ad00e53..051d567d457 100644
--- a/Lib/multiprocessing/context.py
+++ b/Lib/multiprocessing/context.py
@@ -145,7 +145,7 @@ class BaseContext(object):
'''Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
- if sys.platform == 'win32' and getattr(sys, 'frozen', False):
+ if self.get_start_method() == 'spawn' and getattr(sys, 'frozen', False):
from .spawn import freeze_support
freeze_support()
diff --git a/Lib/pdb.py b/Lib/pdb.py
index 78ee35f61bb..fc83728fb6d 100644
--- a/Lib/pdb.py
+++ b/Lib/pdb.py
@@ -75,6 +75,7 @@ import dis
import code
import glob
import json
+import stat
import token
import types
import atexit
@@ -3418,6 +3419,8 @@ def attach(pid, commands=()):
)
)
connect_script.close()
+ orig_mode = os.stat(connect_script.name).st_mode
+ os.chmod(connect_script.name, orig_mode | stat.S_IROTH | stat.S_IRGRP)
sys.remote_exec(pid, connect_script.name)
# TODO Add a timeout? Or don't bother since the user can ^C?
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 6a20a1eb03e..75f31d858d3 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -6844,6 +6844,28 @@ class MiscTestCase(unittest.TestCase):
self.assertEqual("332833500", out.decode('utf-8').strip())
self.assertFalse(err, msg=err.decode('utf-8'))
+ def test_forked_thread_not_started(self):
+ # gh-134381: Ensure that a thread that has not been started yet in
+ # the parent process can be started within a forked child process.
+
+ if multiprocessing.get_start_method() != "fork":
+ self.skipTest("fork specific test")
+
+ q = multiprocessing.Queue()
+ t = threading.Thread(target=lambda: q.put("done"), daemon=True)
+
+ def child():
+ t.start()
+ t.join()
+
+ p = multiprocessing.Process(target=child)
+ p.start()
+ p.join(support.SHORT_TIMEOUT)
+
+ self.assertEqual(p.exitcode, 0)
+ self.assertEqual(q.get_nowait(), "done")
+ close_queue(q)
+
#
# Mixins
diff --git a/Lib/test/support/interpreters/channels.py b/Lib/test/support/interpreters/channels.py
index 3b6e0f0effd..7a2bd7d63f8 100644
--- a/Lib/test/support/interpreters/channels.py
+++ b/Lib/test/support/interpreters/channels.py
@@ -69,7 +69,7 @@ def list_all():
if not hasattr(send, '_unboundop'):
send._set_unbound(unboundop)
else:
- assert send._unbound[0] == unboundop
+ assert send._unbound[0] == op
channels.append(chan)
return channels
diff --git a/Lib/test/test__interpreters.py b/Lib/test/test__interpreters.py
index ad3ebbfdff6..63fdaad8de7 100644
--- a/Lib/test/test__interpreters.py
+++ b/Lib/test/test__interpreters.py
@@ -474,15 +474,13 @@ class CommonTests(TestBase):
def test_signatures(self):
# See https://github.com/python/cpython/issues/126654
- msg = r'_interpreters.exec\(\) argument 3 must be dict, not int'
+ msg = "expected 'shared' to be a dict"
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', 1)
with self.assertRaisesRegex(TypeError, msg):
_interpreters.exec(self.id, 'a', shared=1)
- msg = r'_interpreters.run_string\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_string(self.id, 'a', shared=1)
- msg = r'_interpreters.run_func\(\) argument 3 must be dict, not int'
with self.assertRaisesRegex(TypeError, msg):
_interpreters.run_func(self.id, lambda: None, shared=1)
@@ -954,8 +952,7 @@ class RunFailedTests(TestBase):
""")
with self.subTest('script'):
- with self.assertRaises(SyntaxError):
- _interpreters.run_string(self.id, script)
+ self.assert_run_failed(SyntaxError, script)
with self.subTest('module'):
modname = 'spam_spam_spam'
@@ -1022,19 +1019,12 @@ class RunFuncTests(TestBase):
with open(w, 'w', encoding="utf-8") as spipe:
with contextlib.redirect_stdout(spipe):
print('it worked!', end='')
- failed = None
def f():
- nonlocal failed
- try:
- _interpreters.set___main___attrs(self.id, dict(w=w))
- _interpreters.run_func(self.id, script)
- except Exception as exc:
- failed = exc
+ _interpreters.set___main___attrs(self.id, dict(w=w))
+ _interpreters.run_func(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
- if failed:
- raise Exception from failed
with open(r, encoding="utf-8") as outfile:
out = outfile.read()
@@ -1063,16 +1053,19 @@ class RunFuncTests(TestBase):
spam = True
def script():
assert spam
- with self.assertRaises(ValueError):
+
+ with self.assertRaises(TypeError):
_interpreters.run_func(self.id, script)
+ # XXX This hasn't been fixed yet.
+ @unittest.expectedFailure
def test_return_value(self):
def script():
return 'spam'
with self.assertRaises(ValueError):
_interpreters.run_func(self.id, script)
-# @unittest.skip("we're not quite there yet")
+ @unittest.skip("we're not quite there yet")
def test_args(self):
with self.subTest('args'):
def script(a, b=0):
diff --git a/Lib/test/test_ast/test_ast.py b/Lib/test/test_ast/test_ast.py
index 1479a8eafd6..46745cfa8f8 100644
--- a/Lib/test/test_ast/test_ast.py
+++ b/Lib/test/test_ast/test_ast.py
@@ -3292,6 +3292,7 @@ class CommandLineTests(unittest.TestCase):
expect = self.text_normalize(expect)
self.assertEqual(res, expect)
+ @support.requires_resource('cpu')
def test_invocation(self):
# test various combinations of parameters
base_flags = (
diff --git a/Lib/test/test_capi/test_object.py b/Lib/test/test_capi/test_object.py
index cd772120bde..d4056727d07 100644
--- a/Lib/test/test_capi/test_object.py
+++ b/Lib/test/test_capi/test_object.py
@@ -221,6 +221,7 @@ class CAPITest(unittest.TestCase):
"""
self.check_negative_refcount(code)
+ @support.requires_resource('cpu')
def test_decref_delayed(self):
# gh-130519: Test that _PyObject_XDecRefDelayed() and QSBR code path
# handles destructors that are possibly re-entrant or trigger a GC.
diff --git a/Lib/test/test_collections.py b/Lib/test/test_collections.py
index 1e93530398b..d9d61e5c205 100644
--- a/Lib/test/test_collections.py
+++ b/Lib/test/test_collections.py
@@ -542,6 +542,8 @@ class TestNamedTuple(unittest.TestCase):
self.assertEqual(Dot(1)._replace(d=999), (999,))
self.assertEqual(Dot(1)._fields, ('d',))
+ @support.requires_resource('cpu')
+ def test_large_size(self):
n = support.exceeds_recursion_limit()
names = list(set(''.join([choice(string.ascii_letters)
for j in range(10)]) for i in range(n)))
diff --git a/Lib/test/test_httpservers.py b/Lib/test/test_httpservers.py
index 0af1c45ecb2..2548a7c5f29 100644
--- a/Lib/test/test_httpservers.py
+++ b/Lib/test/test_httpservers.py
@@ -21,6 +21,7 @@ import email.utils
import html
import http, http.client
import urllib.parse
+import urllib.request
import tempfile
import time
import datetime
@@ -33,6 +34,8 @@ from test import support
from test.support import (
is_apple, import_helper, os_helper, threading_helper
)
+from test.support.script_helper import kill_python, spawn_python
+from test.support.socket_helper import find_unused_port
try:
import ssl
@@ -1452,6 +1455,73 @@ class CommandLineTestCase(unittest.TestCase):
self.assertIn('error', stderr.getvalue())
+class CommandLineRunTimeTestCase(unittest.TestCase):
+ served_data = os.urandom(32)
+ served_filename = 'served_filename'
+ tls_cert = certdata_file('ssl_cert.pem')
+ tls_key = certdata_file('ssl_key.pem')
+ tls_password = b'somepass'
+ tls_password_file = 'ssl_key_password'
+
+ def setUp(self):
+ super().setUp()
+ server_dir_context = os_helper.temp_cwd()
+ server_dir = self.enterContext(server_dir_context)
+ with open(self.served_filename, 'wb') as f:
+ f.write(self.served_data)
+ with open(self.tls_password_file, 'wb') as f:
+ f.write(self.tls_password)
+
+ def fetch_file(self, path, context=None):
+ req = urllib.request.Request(path, method='GET')
+ with urllib.request.urlopen(req, context=context) as res:
+ return res.read()
+
+ def parse_cli_output(self, output):
+ match = re.search(r'Serving (HTTP|HTTPS) on (.+) port (\d+)', output)
+ if match is None:
+ return None, None, None
+ return match.group(1).lower(), match.group(2), int(match.group(3))
+
+ def wait_for_server(self, proc, protocol, bind, port):
+ """Check that the server has been successfully started."""
+ line = proc.stdout.readline().strip()
+ if support.verbose:
+ print()
+ print('python -m http.server: ', line)
+ return self.parse_cli_output(line) == (protocol, bind, port)
+
+ def test_http_client(self):
+ bind, port = '127.0.0.1', find_unused_port()
+ proc = spawn_python('-u', '-m', 'http.server', str(port), '-b', bind,
+ bufsize=1, text=True)
+ self.addCleanup(kill_python, proc)
+ self.addCleanup(proc.terminate)
+ self.assertTrue(self.wait_for_server(proc, 'http', bind, port))
+ res = self.fetch_file(f'http://{bind}:{port}/{self.served_filename}')
+ self.assertEqual(res, self.served_data)
+
+ @unittest.skipIf(ssl is None, "requires ssl")
+ def test_https_client(self):
+ context = ssl.create_default_context()
+ # allow self-signed certificates
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+
+ bind, port = '127.0.0.1', find_unused_port()
+ proc = spawn_python('-u', '-m', 'http.server', str(port), '-b', bind,
+ '--tls-cert', self.tls_cert,
+ '--tls-key', self.tls_key,
+ '--tls-password-file', self.tls_password_file,
+ bufsize=1, text=True)
+ self.addCleanup(kill_python, proc)
+ self.addCleanup(proc.terminate)
+ self.assertTrue(self.wait_for_server(proc, 'https', bind, port))
+ url = f'https://{bind}:{port}/{self.served_filename}'
+ res = self.fetch_file(url, context=context)
+ self.assertEqual(res, self.served_data)
+
+
def setUpModule():
unittest.addModuleCleanup(os.chdir, os.getcwd())
diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py
index 165949167ce..1e2d572b1cb 100644
--- a/Lib/test/test_interpreters/test_api.py
+++ b/Lib/test/test_interpreters/test_api.py
@@ -839,16 +839,9 @@ class TestInterpreterExec(TestBase):
interp.exec(10)
def test_bytes_for_script(self):
- r, w = self.pipe()
- RAN = b'R'
- DONE = b'D'
interp = interpreters.create()
- interp.exec(f"""if True:
- import os
- os.write({w}, {RAN!r})
- """)
- os.write(w, DONE)
- self.assertEqual(os.read(r, 1), RAN)
+ with self.assertRaises(TypeError):
+ interp.exec(b'print("spam")')
def test_with_background_threads_still_running(self):
r_interp, w_interp = self.pipe()
@@ -1017,6 +1010,8 @@ class TestInterpreterCall(TestBase):
for i, (callable, args, kwargs) in enumerate([
(call_func_noop, (), {}),
+ (call_func_return_shareable, (), {}),
+ (call_func_return_not_shareable, (), {}),
(Spam.noop, (), {}),
]):
with self.subTest(f'success case #{i+1}'):
@@ -1041,8 +1036,6 @@ class TestInterpreterCall(TestBase):
(call_func_complex, ('custom', 'spam!'), {}),
(call_func_complex, ('custom-inner', 'eggs!'), {}),
(call_func_complex, ('???',), {'exc': ValueError('spam')}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
]):
with self.subTest(f'invalid case #{i+1}'):
with self.assertRaises(Exception):
@@ -1058,6 +1051,8 @@ class TestInterpreterCall(TestBase):
for i, (callable, args, kwargs) in enumerate([
(call_func_noop, (), {}),
+ (call_func_return_shareable, (), {}),
+ (call_func_return_not_shareable, (), {}),
(Spam.noop, (), {}),
]):
with self.subTest(f'success case #{i+1}'):
@@ -1084,8 +1079,6 @@ class TestInterpreterCall(TestBase):
(call_func_complex, ('custom', 'spam!'), {}),
(call_func_complex, ('custom-inner', 'eggs!'), {}),
(call_func_complex, ('???',), {'exc': ValueError('spam')}),
- (call_func_return_shareable, (), {}),
- (call_func_return_not_shareable, (), {}),
]):
with self.subTest(f'invalid case #{i+1}'):
if args or kwargs:
@@ -1625,8 +1618,8 @@ class LowLevelTests(TestBase):
def test_call(self):
with self.subTest('no args'):
interpid = _interpreters.create()
- with self.assertRaises(ValueError):
- _interpreters.call(interpid, call_func_return_shareable)
+ exc = _interpreters.call(interpid, call_func_return_shareable)
+ self.assertIs(exc, None)
with self.subTest('uncaught exception'):
interpid = _interpreters.create()
diff --git a/Lib/test/test_ipaddress.py b/Lib/test/test_ipaddress.py
index a06608d0016..ee95454e64b 100644
--- a/Lib/test/test_ipaddress.py
+++ b/Lib/test/test_ipaddress.py
@@ -397,6 +397,17 @@ class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
# A trailing IPv4 address is two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42%scope")
+ def test_bad_address_split_v6_too_long(self):
+ def assertBadSplit(addr):
+ msg = r"At most 39 characters expected in %s"
+ with self.assertAddressError(msg, repr(re.escape(addr[:14]))):
+ ipaddress.IPv6Address(addr)
+
+ # Long IPv6 address
+ long_addr = ("0:" * 10000) + "0"
+ assertBadSplit(long_addr)
+ assertBadSplit(long_addr + "%zoneid")
+
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
diff --git a/Lib/test/test_json/test_recursion.py b/Lib/test/test_json/test_recursion.py
index 8f0e5e078ed..5d7b56ff9ad 100644
--- a/Lib/test/test_json/test_recursion.py
+++ b/Lib/test/test_json/test_recursion.py
@@ -86,6 +86,7 @@ class TestRecursion:
@support.skip_wasi_stack_overflow()
@support.skip_emscripten_stack_overflow()
+ @support.requires_resource('cpu')
def test_highly_nested_objects_encoding(self):
# See #12051
l, d = [], {}
diff --git a/Lib/test/test_memoryview.py b/Lib/test/test_memoryview.py
index 61b068c630c..64f440f180b 100644
--- a/Lib/test/test_memoryview.py
+++ b/Lib/test/test_memoryview.py
@@ -743,19 +743,21 @@ class RacingTest(unittest.TestCase):
from multiprocessing.managers import SharedMemoryManager
except ImportError:
self.skipTest("Test requires multiprocessing")
- from threading import Thread
+ from threading import Thread, Event
- n = 100
+ start = Event()
with SharedMemoryManager() as smm:
obj = smm.ShareableList(range(100))
- threads = []
- for _ in range(n):
- # Issue gh-127085, the `ShareableList.count` is just a convenient way to mess the `exports`
- # counter of `memoryview`, this issue has no direct relation with `ShareableList`.
- threads.append(Thread(target=obj.count, args=(1,)))
-
+ def test():
+ # Issue gh-127085, the `ShareableList.count` is just a
+ # convenient way to mess the `exports` counter of `memoryview`,
+ # this issue has no direct relation with `ShareableList`.
+ start.wait(support.SHORT_TIMEOUT)
+ for i in range(10):
+ obj.count(1)
+ threads = [Thread(target=test) for _ in range(10)]
with threading_helper.start_threads(threads):
- pass
+ start.set()
del obj
diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py
index 5980f939185..0dd619dd7c8 100644
--- a/Lib/test/test_statistics.py
+++ b/Lib/test/test_statistics.py
@@ -2346,6 +2346,7 @@ class TestGeometricMean(unittest.TestCase):
class TestKDE(unittest.TestCase):
+ @support.requires_resource('cpu')
def test_kde(self):
kde = statistics.kde
StatisticsError = statistics.StatisticsError
diff --git a/Lib/test/test_unittest/test_result.py b/Lib/test/test_unittest/test_result.py
index 9ac4c52449c..3f44e617303 100644
--- a/Lib/test/test_unittest/test_result.py
+++ b/Lib/test/test_unittest/test_result.py
@@ -1282,14 +1282,22 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 1)
+ self.assertEqual(len(result.errors), 2)
description = 'tearDownModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[1]
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
def testBufferSetUpModule_DoModuleCleanups(self):
with captured_stdout() as stdout:
result = unittest.TestResult()
@@ -1313,22 +1321,34 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\nset up module\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 2)
+ self.assertEqual(len(result.errors), 3)
description = 'setUpModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn('\nStdout:\nset up module\n', formatted_exc)
+
test_case, formatted_exc = result.errors[1]
self.assertIn(expected_out, formatted_exc)
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[2]
+ self.assertIn(expected_out, formatted_exc)
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ZeroDivisionError', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
def testBufferTearDownModule_DoModuleCleanups(self):
with captured_stdout() as stdout:
result = unittest.TestResult()
@@ -1355,21 +1375,32 @@ class TestOutputBuffering(unittest.TestCase):
suite(result)
expected_out = '\nStdout:\ntear down module\ndo cleanup2\ndo cleanup1\n'
self.assertEqual(stdout.getvalue(), expected_out)
- self.assertEqual(len(result.errors), 2)
+ self.assertEqual(len(result.errors), 3)
description = 'tearDownModule (Module)'
test_case, formatted_exc = result.errors[0]
self.assertEqual(test_case.description, description)
self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ValueError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn('\nStdout:\ntear down module\n', formatted_exc)
+
test_case, formatted_exc = result.errors[1]
self.assertEqual(test_case.description, description)
self.assertIn('ValueError: bad cleanup2', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
self.assertNotIn('ZeroDivisionError', formatted_exc)
self.assertNotIn('TypeError', formatted_exc)
self.assertIn(expected_out, formatted_exc)
+ test_case, formatted_exc = result.errors[2]
+ self.assertEqual(test_case.description, description)
+ self.assertIn('TypeError: bad cleanup1', formatted_exc)
+ self.assertNotIn('ExceptionGroup', formatted_exc)
+ self.assertNotIn('ZeroDivisionError', formatted_exc)
+ self.assertNotIn('ValueError', formatted_exc)
+ self.assertIn(expected_out, formatted_exc)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Lib/test/test_unittest/test_runner.py b/Lib/test/test_unittest/test_runner.py
index 4d3cfd60b8d..a47e2ebb59d 100644
--- a/Lib/test/test_unittest/test_runner.py
+++ b/Lib/test/test_unittest/test_runner.py
@@ -13,6 +13,7 @@ from test.test_unittest.support import (
LoggingResult,
ResultWithNoStartTestRunStopTestRun,
)
+from test.support.testcase import ExceptionIsLikeMixin
def resultFactory(*_):
@@ -604,7 +605,7 @@ class TestClassCleanup(unittest.TestCase):
@support.force_not_colorized_test_class
-class TestModuleCleanUp(unittest.TestCase):
+class TestModuleCleanUp(ExceptionIsLikeMixin, unittest.TestCase):
def test_add_and_do_ModuleCleanup(self):
module_cleanups = []
@@ -646,11 +647,50 @@ class TestModuleCleanUp(unittest.TestCase):
[(module_cleanup_good, (1, 2, 3),
dict(four='hello', five='goodbye')),
(module_cleanup_bad, (), {})])
- with self.assertRaises(CustomError) as e:
+ with self.assertRaises(Exception) as e:
unittest.case.doModuleCleanups()
- self.assertEqual(str(e.exception), 'CleanUpExc')
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed',
+ [CustomError('CleanUpExc')]))
self.assertEqual(unittest.case._module_cleanups, [])
+ def test_doModuleCleanup_with_multiple_errors_in_addModuleCleanup(self):
+ def module_cleanup_bad1():
+ raise TypeError('CleanUpExc1')
+
+ def module_cleanup_bad2():
+ raise ValueError('CleanUpExc2')
+
+ class Module:
+ unittest.addModuleCleanup(module_cleanup_bad1)
+ unittest.addModuleCleanup(module_cleanup_bad2)
+ with self.assertRaises(ExceptionGroup) as e:
+ unittest.case.doModuleCleanups()
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ]))
+
+ def test_doModuleCleanup_with_exception_group_in_addModuleCleanup(self):
+ def module_cleanup_bad():
+ raise ExceptionGroup('CleanUpExc', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ])
+
+ class Module:
+ unittest.addModuleCleanup(module_cleanup_bad)
+ with self.assertRaises(ExceptionGroup) as e:
+ unittest.case.doModuleCleanups()
+ self.assertExceptionIsLike(e.exception,
+ ExceptionGroup('module cleanup failed', [
+ ExceptionGroup('CleanUpExc', [
+ ValueError('CleanUpExc2'),
+ TypeError('CleanUpExc1'),
+ ]),
+ ]))
+
def test_addModuleCleanup_arg_errors(self):
cleanups = []
def cleanup(*args, **kwargs):
@@ -871,9 +911,11 @@ class TestModuleCleanUp(unittest.TestCase):
ordering = []
blowUp = True
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestableTest)
- with self.assertRaises(CustomError) as cm:
+ with self.assertRaises(Exception) as cm:
suite.debug()
- self.assertEqual(str(cm.exception), 'CleanUpExc')
+ self.assertExceptionIsLike(cm.exception,
+ ExceptionGroup('module cleanup failed',
+ [CustomError('CleanUpExc')]))
self.assertEqual(ordering, ['setUpModule', 'setUpClass', 'test',
'tearDownClass', 'tearDownModule', 'cleanup_exc'])
self.assertEqual(unittest.case._module_cleanups, [])
diff --git a/Lib/test/test_zstd.py b/Lib/test/test_zstd.py
index 53ca592ea38..34c7c721b1a 100644
--- a/Lib/test/test_zstd.py
+++ b/Lib/test/test_zstd.py
@@ -1424,11 +1424,12 @@ class FileTestCase(unittest.TestCase):
with self.assertRaises(ValueError):
ZstdFile(io.BytesIO(COMPRESSED_100_PLUS_32KB), "rw")
- with self.assertRaisesRegex(TypeError, r"NOT be CompressionParameter"):
+ with self.assertRaisesRegex(TypeError,
+ r"NOT be a CompressionParameter"):
ZstdFile(io.BytesIO(), 'rb',
options={CompressionParameter.compression_level:5})
with self.assertRaisesRegex(TypeError,
- r"NOT be DecompressionParameter"):
+ r"NOT be a DecompressionParameter"):
ZstdFile(io.BytesIO(), 'wb',
options={DecompressionParameter.window_log_max:21})
@@ -2430,10 +2431,8 @@ class OpenTestCase(unittest.TestCase):
self.assertEqual(f.write(arr), LENGTH)
self.assertEqual(f.tell(), LENGTH)
-@unittest.skip("it fails for now, see gh-133885")
class FreeThreadingMethodTests(unittest.TestCase):
- @unittest.skipUnless(Py_GIL_DISABLED, 'this test can only possibly fail with GIL disabled')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_compress_locking(self):
@@ -2470,7 +2469,6 @@ class FreeThreadingMethodTests(unittest.TestCase):
actual = b''.join(output) + rest2
self.assertEqual(expected, actual)
- @unittest.skipUnless(Py_GIL_DISABLED, 'this test can only possibly fail with GIL disabled')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
def test_decompress_locking(self):
@@ -2506,6 +2504,59 @@ class FreeThreadingMethodTests(unittest.TestCase):
actual = b''.join(output)
self.assertEqual(expected, actual)
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_compress_shared_dict(self):
+ num_threads = 8
+
+ def run_method(b):
+ level = threading.get_ident() % 4
+ # sync threads to increase chance of contention on
+ # capsule storing dictionary levels
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_digested_dict)
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_undigested_dict)
+ b.wait()
+ ZstdCompressor(level=level,
+ zstd_dict=TRAINED_DICT.as_prefix)
+ threads = []
+
+ b = threading.Barrier(num_threads)
+ for i in range(num_threads):
+ thread = threading.Thread(target=run_method, args=(b,))
+
+ threads.append(thread)
+
+ with threading_helper.start_threads(threads):
+ pass
+
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_decompress_shared_dict(self):
+ num_threads = 8
+
+ def run_method(b):
+ # sync threads to increase chance of contention on
+ # decompression dictionary
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_digested_dict)
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_undigested_dict)
+ b.wait()
+ ZstdDecompressor(zstd_dict=TRAINED_DICT.as_prefix)
+ threads = []
+
+ b = threading.Barrier(num_threads)
+ for i in range(num_threads):
+ thread = threading.Thread(target=run_method, args=(b,))
+
+ threads.append(thread)
+
+ with threading_helper.start_threads(threads):
+ pass
if __name__ == "__main__":
diff --git a/Lib/types.py b/Lib/types.py
index 6efac339434..cf0549315a7 100644
--- a/Lib/types.py
+++ b/Lib/types.py
@@ -250,7 +250,6 @@ class DynamicClassAttribute:
class _GeneratorWrapper:
- # TODO: Implement this in C.
def __init__(self, gen):
self.__wrapped = gen
self.__isgen = gen.__class__ is GeneratorType
@@ -305,7 +304,6 @@ def coroutine(func):
# Check if 'func' is a generator function.
# (0x20 == CO_GENERATOR)
if co_flags & 0x20:
- # TODO: Implement this in C.
co = func.__code__
# 0x100 == CO_ITERABLE_COROUTINE
func.__code__ = co.replace(co_flags=co.co_flags | 0x100)
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 884fc1b21f6..db10de68e4a 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -149,9 +149,7 @@ def doModuleCleanups():
except Exception as exc:
exceptions.append(exc)
if exceptions:
- # Swallows all but first exception. If a multi-exception handler
- # gets written we should use that here instead.
- raise exceptions[0]
+ raise ExceptionGroup('module cleanup failed', exceptions)
def skip(reason):
diff --git a/Lib/unittest/suite.py b/Lib/unittest/suite.py
index 6f45b6fe5f6..ae9ca2d615d 100644
--- a/Lib/unittest/suite.py
+++ b/Lib/unittest/suite.py
@@ -223,6 +223,11 @@ class TestSuite(BaseTestSuite):
if result._moduleSetUpFailed:
try:
case.doModuleCleanups()
+ except ExceptionGroup as eg:
+ for e in eg.exceptions:
+ self._createClassOrModuleLevelException(result, e,
+ 'setUpModule',
+ currentModule)
except Exception as e:
self._createClassOrModuleLevelException(result, e,
'setUpModule',
@@ -235,15 +240,15 @@ class TestSuite(BaseTestSuite):
errorName = f'{method_name} ({parent})'
self._addClassOrModuleLevelException(result, exc, errorName, info)
- def _addClassOrModuleLevelException(self, result, exception, errorName,
+ def _addClassOrModuleLevelException(self, result, exc, errorName,
info=None):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
- if addSkip is not None and isinstance(exception, case.SkipTest):
- addSkip(error, str(exception))
+ if addSkip is not None and isinstance(exc, case.SkipTest):
+ addSkip(error, str(exc))
else:
if not info:
- result.addError(error, sys.exc_info())
+ result.addError(error, (type(exc), exc, exc.__traceback__))
else:
result.addError(error, info)
@@ -273,6 +278,13 @@ class TestSuite(BaseTestSuite):
previousModule)
try:
case.doModuleCleanups()
+ except ExceptionGroup as eg:
+ if isinstance(result, _DebugResult):
+ raise
+ for e in eg.exceptions:
+ self._createClassOrModuleLevelException(result, e,
+ 'tearDownModule',
+ previousModule)
except Exception as e:
if isinstance(result, _DebugResult):
raise
diff --git a/Misc/NEWS.d/next/Build/2025-05-21-22-13-30.gh-issue-134486.yvdL6f.rst b/Misc/NEWS.d/next/Build/2025-05-21-22-13-30.gh-issue-134486.yvdL6f.rst
new file mode 100644
index 00000000000..2754e61f018
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2025-05-21-22-13-30.gh-issue-134486.yvdL6f.rst
@@ -0,0 +1,3 @@
+The :mod:`ctypes` module now performs a more portable test for the
+definition of :manpage:`alloca(3)`, fixing a compilation failure on
+NetBSD.
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-05-22-14-48-19.gh-issue-134381.2BXhth.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-05-22-14-48-19.gh-issue-134381.2BXhth.rst
new file mode 100644
index 00000000000..aa8900296ae
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-05-22-14-48-19.gh-issue-134381.2BXhth.rst
@@ -0,0 +1 @@
+Fix :exc:`RuntimeError` when using a not-started :class:`threading.Thread` after calling :func:`os.fork`.
diff --git a/Misc/NEWS.d/next/Library/2024-06-06-17-49-07.gh-issue-120170.DUxhmT.rst b/Misc/NEWS.d/next/Library/2024-06-06-17-49-07.gh-issue-120170.DUxhmT.rst
new file mode 100644
index 00000000000..ce7d874aa20
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-06-06-17-49-07.gh-issue-120170.DUxhmT.rst
@@ -0,0 +1,3 @@
+Fix an issue in the :mod:`!_pickle` extension module in which importing
+:mod:`multiprocessing` could change how pickle identifies which module an
+object belongs to, potentially breaking the unpickling of those objects.
diff --git a/Misc/NEWS.d/next/Library/2025-05-18-13-23-29.gh-issue-134168.hgx3Xg.rst b/Misc/NEWS.d/next/Library/2025-05-18-13-23-29.gh-issue-134168.hgx3Xg.rst
new file mode 100644
index 00000000000..5a0e20005db
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-18-13-23-29.gh-issue-134168.hgx3Xg.rst
@@ -0,0 +1,2 @@
+:mod:`http.server`: Fix IPv6 address binding and
+:option:`--directory <http.server --directory>` handling when using HTTPS.
diff --git a/Misc/NEWS.d/next/Library/2025-05-22-13-10-32.gh-issue-114177.3TYUJ3.rst b/Misc/NEWS.d/next/Library/2025-05-22-13-10-32.gh-issue-114177.3TYUJ3.rst
new file mode 100644
index 00000000000..c98fde5fb04
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-22-13-10-32.gh-issue-114177.3TYUJ3.rst
@@ -0,0 +1 @@
+Fix :mod:`asyncio` to not close subprocess pipes which would otherwise error out when the event loop is already closed.
diff --git a/Misc/NEWS.d/next/Library/2025-05-22-14-12-53.gh-issue-134451.M1rD-j.rst b/Misc/NEWS.d/next/Library/2025-05-22-14-12-53.gh-issue-134451.M1rD-j.rst
new file mode 100644
index 00000000000..3c8339f8842
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-22-14-12-53.gh-issue-134451.M1rD-j.rst
@@ -0,0 +1 @@
+Converted ``asyncio.tools.CycleFoundException`` from a dataclass to a regular exception type.
diff --git a/Misc/NEWS.d/next/Library/2025-05-22-18-14-13.gh-issue-134546.fjLVzK.rst b/Misc/NEWS.d/next/Library/2025-05-22-18-14-13.gh-issue-134546.fjLVzK.rst
new file mode 100644
index 00000000000..eea897f5918
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-22-18-14-13.gh-issue-134546.fjLVzK.rst
@@ -0,0 +1 @@
+Ensure :mod:`pdb` remote debugging script is readable by remote Python process.
diff --git a/Misc/NEWS.d/next/Library/2025-05-23-10-15-36.gh-issue-134565.zmb66C.rst b/Misc/NEWS.d/next/Library/2025-05-23-10-15-36.gh-issue-134565.zmb66C.rst
new file mode 100644
index 00000000000..17d2b23b62d
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-23-10-15-36.gh-issue-134565.zmb66C.rst
@@ -0,0 +1,3 @@
+:func:`unittest.doModuleCleanups` no longer swallows all but the first exception
+raised in the cleanup code, but raises an :exc:`ExceptionGroup` if multiple
+errors occurred.
diff --git a/Misc/NEWS.d/next/Library/2025-05-24-03-10-36.gh-issue-80334.z21cMa.rst b/Misc/NEWS.d/next/Library/2025-05-24-03-10-36.gh-issue-80334.z21cMa.rst
new file mode 100644
index 00000000000..228429516db
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-05-24-03-10-36.gh-issue-80334.z21cMa.rst
@@ -0,0 +1,2 @@
+:func:`multiprocessing.freeze_support` now checks for work on any "spawn"
+start method platform rather than only on Windows.
diff --git a/Misc/NEWS.d/next/Security/2025-01-14-11-19-07.gh-issue-128840.M1doZW.rst b/Misc/NEWS.d/next/Security/2025-01-14-11-19-07.gh-issue-128840.M1doZW.rst
new file mode 100644
index 00000000000..b57ec3e70dc
--- /dev/null
+++ b/Misc/NEWS.d/next/Security/2025-01-14-11-19-07.gh-issue-128840.M1doZW.rst
@@ -0,0 +1,2 @@
+Short-circuit the processing of long IPv6 addresses early in :mod:`ipaddress` to prevent excessive
+memory consumption and a minor denial-of-service.
diff --git a/Modules/_ctypes/callbacks.c b/Modules/_ctypes/callbacks.c
index ec113e41d16..fd508ae61f2 100644
--- a/Modules/_ctypes/callbacks.c
+++ b/Modules/_ctypes/callbacks.c
@@ -11,18 +11,9 @@
#include "pycore_call.h" // _PyObject_CallNoArgs()
#include "pycore_runtime.h" // _Py_ID()
-#ifdef MS_WIN32
-# include <malloc.h>
-#endif
-
#include <ffi.h>
#include "ctypes.h"
-#ifdef HAVE_ALLOCA_H
-/* AIX needs alloca.h for alloca() */
-#include <alloca.h>
-#endif
-
/**************************************************************/
static int
diff --git a/Modules/_ctypes/callproc.c b/Modules/_ctypes/callproc.c
index 9f5ad9f57b7..65a0f14e2b0 100644
--- a/Modules/_ctypes/callproc.c
+++ b/Modules/_ctypes/callproc.c
@@ -77,16 +77,8 @@ module _ctypes
#include <mach-o/dyld.h>
#endif
-#ifdef MS_WIN32
-#include <malloc.h>
-#endif
-
#include <ffi.h>
#include "ctypes.h"
-#ifdef HAVE_ALLOCA_H
-/* AIX needs alloca.h for alloca() */
-#include <alloca.h>
-#endif
#ifdef _Py_MEMORY_SANITIZER
#include <sanitizer/msan_interface.h>
diff --git a/Modules/_ctypes/ctypes.h b/Modules/_ctypes/ctypes.h
index 2d859ed63e1..6a45c11e61a 100644
--- a/Modules/_ctypes/ctypes.h
+++ b/Modules/_ctypes/ctypes.h
@@ -1,5 +1,17 @@
-#if defined (__SVR4) && defined (__sun)
+/* Get a definition of alloca(). */
+#if (defined (__SVR4) && defined (__sun)) || defined(HAVE_ALLOCA_H)
# include <alloca.h>
+#elif defined(MS_WIN32)
+# include <malloc.h>
+#endif
+
+/* If the system does not define alloca(), we have to hope for a compiler builtin. */
+#ifndef alloca
+# if defined __GNUC__ || (__clang_major__ >= 4)
+# define alloca __builtin_alloca
+# else
+# error "Could not define alloca() on your platform."
+# endif
#endif
#include <stdbool.h>
diff --git a/Modules/_interpretersmodule.c b/Modules/_interpretersmodule.c
index 376517ab923..f4807fd214b 100644
--- a/Modules/_interpretersmodule.c
+++ b/Modules/_interpretersmodule.c
@@ -9,6 +9,7 @@
#include "pycore_code.h" // _PyCode_HAS_EXECUTORS()
#include "pycore_crossinterp.h" // _PyXIData_t
#include "pycore_pyerrors.h" // _PyErr_GetRaisedException()
+#include "pycore_function.h" // _PyFunction_VerifyStateless()
#include "pycore_interp.h" // _PyInterpreterState_IDIncref()
#include "pycore_modsupport.h" // _PyArg_BadArgument()
#include "pycore_namespace.h" // _PyNamespace_New()
@@ -360,6 +361,81 @@ _get_current_xibufferview_type(void)
}
+/* Python code **************************************************************/
+
+static const char *
+check_code_str(PyUnicodeObject *text)
+{
+ assert(text != NULL);
+ if (PyUnicode_GET_LENGTH(text) == 0) {
+ return "too short";
+ }
+
+ // XXX Verify that it parses?
+
+ return NULL;
+}
+
+#ifndef NDEBUG
+static int
+code_has_args(PyCodeObject *code)
+{
+ assert(code != NULL);
+ return (code->co_argcount > 0
+ || code->co_posonlyargcount > 0
+ || code->co_kwonlyargcount > 0
+ || code->co_flags & (CO_VARARGS | CO_VARKEYWORDS));
+}
+#endif
+
+#define RUN_TEXT 1
+#define RUN_CODE 2
+
+static const char *
+get_code_str(PyObject *arg, Py_ssize_t *len_p, PyObject **bytes_p, int *flags_p)
+{
+ const char *codestr = NULL;
+ Py_ssize_t len = -1;
+ PyObject *bytes_obj = NULL;
+ int flags = 0;
+
+ if (PyUnicode_Check(arg)) {
+ assert(PyUnicode_Check(arg)
+ && (check_code_str((PyUnicodeObject *)arg) == NULL));
+ codestr = PyUnicode_AsUTF8AndSize(arg, &len);
+ if (codestr == NULL) {
+ return NULL;
+ }
+ if (strlen(codestr) != (size_t)len) {
+ PyErr_SetString(PyExc_ValueError,
+ "source code string cannot contain null bytes");
+ return NULL;
+ }
+ flags = RUN_TEXT;
+ }
+ else {
+ assert(PyCode_Check(arg));
+ assert(_PyCode_VerifyStateless(
+ PyThreadState_Get(), (PyCodeObject *)arg, NULL, NULL, NULL) == 0);
+ assert(!code_has_args((PyCodeObject *)arg));
+ flags = RUN_CODE;
+
+ // Serialize the code object.
+ bytes_obj = PyMarshal_WriteObjectToString(arg, Py_MARSHAL_VERSION);
+ if (bytes_obj == NULL) {
+ return NULL;
+ }
+ codestr = PyBytes_AS_STRING(bytes_obj);
+ len = PyBytes_GET_SIZE(bytes_obj);
+ }
+
+ *flags_p = flags;
+ *bytes_p = bytes_obj;
+ *len_p = len;
+ return codestr;
+}
+
+
/* interpreter-specific code ************************************************/
static int
@@ -423,14 +499,22 @@ config_from_object(PyObject *configobj, PyInterpreterConfig *config)
static int
-_run_script(_PyXIData_t *script, PyObject *ns)
+_run_script(PyObject *ns, const char *codestr, Py_ssize_t codestrlen, int flags)
{
- PyObject *code = _PyXIData_NewObject(script);
- if (code == NULL) {
- return -1;
+ PyObject *result = NULL;
+ if (flags & RUN_TEXT) {
+ result = PyRun_StringFlags(codestr, Py_file_input, ns, ns, NULL);
+ }
+ else if (flags & RUN_CODE) {
+ PyObject *code = PyMarshal_ReadObjectFromString(codestr, codestrlen);
+ if (code != NULL) {
+ result = PyEval_EvalCode(code, ns, ns);
+ Py_DECREF(code);
+ }
+ }
+ else {
+ Py_UNREACHABLE();
}
- PyObject *result = PyEval_EvalCode(code, ns, ns);
- Py_DECREF(code);
if (result == NULL) {
return -1;
}
@@ -439,11 +523,12 @@ _run_script(_PyXIData_t *script, PyObject *ns)
}
static int
-_exec_in_interpreter(PyThreadState *tstate, PyInterpreterState *interp,
- _PyXIData_t *script, PyObject *shareables,
+_run_in_interpreter(PyInterpreterState *interp,
+ const char *codestr, Py_ssize_t codestrlen,
+ PyObject *shareables, int flags,
PyObject **p_excinfo)
{
- assert(!_PyErr_Occurred(tstate));
+ assert(!PyErr_Occurred());
_PyXI_session *session = _PyXI_NewSession();
if (session == NULL) {
return -1;
@@ -451,7 +536,7 @@ _exec_in_interpreter(PyThreadState *tstate, PyInterpreterState *interp,
// Prep and switch interpreters.
if (_PyXI_Enter(session, interp, shareables) < 0) {
- if (_PyErr_Occurred(tstate)) {
+ if (PyErr_Occurred()) {
// If an error occured at this step, it means that interp
// was not prepared and switched.
_PyXI_FreeSession(session);
@@ -473,7 +558,7 @@ _exec_in_interpreter(PyThreadState *tstate, PyInterpreterState *interp,
if (mainns == NULL) {
goto finally;
}
- res = _run_script(script, mainns);
+ res = _run_script(mainns, codestr, codestrlen, flags);
finally:
// Clean up and switch back.
@@ -867,23 +952,104 @@ PyDoc_STRVAR(set___main___attrs_doc,
Bind the given attributes in the interpreter's __main__ module.");
-static void
-unwrap_not_shareable(PyThreadState *tstate)
+static PyUnicodeObject *
+convert_script_arg(PyThreadState *tstate,
+ PyObject *arg, const char *fname, const char *displayname,
+ const char *expected)
{
- PyObject *exctype = _PyXIData_GetNotShareableErrorType(tstate);
- if (!_PyErr_ExceptionMatches(tstate, exctype)) {
- return;
+ PyUnicodeObject *str = NULL;
+ if (PyUnicode_CheckExact(arg)) {
+ str = (PyUnicodeObject *)Py_NewRef(arg);
}
- PyObject *exc = _PyErr_GetRaisedException(tstate);
- PyObject *cause = PyException_GetCause(exc);
- if (cause != NULL) {
- Py_DECREF(exc);
- exc = cause;
+ else if (PyUnicode_Check(arg)) {
+ // XXX str = PyUnicode_FromObject(arg);
+ str = (PyUnicodeObject *)Py_NewRef(arg);
}
else {
- assert(PyException_GetContext(exc) == NULL);
+ _PyArg_BadArgument(fname, displayname, expected, arg);
+ return NULL;
}
+
+ const char *err = check_code_str(str);
+ if (err != NULL) {
+ Py_DECREF(str);
+ _PyErr_Format(tstate, PyExc_ValueError,
+ "%.200s(): bad script text (%s)", fname, err);
+ return NULL;
+ }
+
+ return str;
+}
+
+static PyCodeObject *
+convert_code_arg(PyThreadState *tstate,
+ PyObject *arg, const char *fname, const char *displayname,
+ const char *expected)
+{
+ PyObject *cause;
+ PyCodeObject *code = NULL;
+ if (PyFunction_Check(arg)) {
+ // For now we allow globals, so we can't use
+ // _PyFunction_VerifyStateless().
+ PyObject *codeobj = PyFunction_GetCode(arg);
+ if (_PyCode_VerifyStateless(
+ tstate, (PyCodeObject *)codeobj, NULL, NULL, NULL) < 0) {
+ goto chained;
+ }
+ code = (PyCodeObject *)Py_NewRef(codeobj);
+ }
+ else if (PyCode_Check(arg)) {
+ if (_PyCode_VerifyStateless(
+ tstate, (PyCodeObject *)arg, NULL, NULL, NULL) < 0) {
+ goto chained;
+ }
+ code = (PyCodeObject *)Py_NewRef(arg);
+ }
+ else {
+ _PyArg_BadArgument(fname, displayname, expected, arg);
+ return NULL;
+ }
+
+ return code;
+
+chained:
+ cause = _PyErr_GetRaisedException(tstate);
+ assert(cause != NULL);
+ _PyArg_BadArgument(fname, displayname, expected, arg);
+ PyObject *exc = _PyErr_GetRaisedException(tstate);
+ PyException_SetCause(exc, cause);
_PyErr_SetRaisedException(tstate, exc);
+ return NULL;
+}
+
+static int
+_interp_exec(PyObject *self, PyInterpreterState *interp,
+ PyObject *code_arg, PyObject *shared_arg, PyObject **p_excinfo)
+{
+ if (shared_arg != NULL && !PyDict_CheckExact(shared_arg)) {
+ PyErr_SetString(PyExc_TypeError, "expected 'shared' to be a dict");
+ return -1;
+ }
+
+ // Extract code.
+ Py_ssize_t codestrlen = -1;
+ PyObject *bytes_obj = NULL;
+ int flags = 0;
+ const char *codestr = get_code_str(code_arg,
+ &codestrlen, &bytes_obj, &flags);
+ if (codestr == NULL) {
+ return -1;
+ }
+
+ // Run the code in the interpreter.
+ int res = _run_in_interpreter(interp, codestr, codestrlen,
+ shared_arg, flags, p_excinfo);
+ Py_XDECREF(bytes_obj);
+ if (res < 0) {
+ return -1;
+ }
+
+ return 0;
}
static PyObject *
@@ -896,9 +1062,8 @@ interp_exec(PyObject *self, PyObject *args, PyObject *kwds)
PyObject *shared = NULL;
int restricted = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "OO|O!$p:" FUNCNAME, kwlist,
- &id, &code, &PyDict_Type, &shared,
- &restricted))
+ "OO|O$p:" FUNCNAME, kwlist,
+ &id, &code, &shared, &restricted))
{
return NULL;
}
@@ -910,17 +1075,22 @@ interp_exec(PyObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- // We don't need the script to be "pure", which means it can use
- // global variables. They will be resolved against __main__.
- _PyXIData_t xidata = {0};
- if (_PyCode_GetScriptXIData(tstate, code, &xidata) < 0) {
- unwrap_not_shareable(tstate);
+ const char *expected = "a string, a function, or a code object";
+ if (PyUnicode_Check(code)) {
+ code = (PyObject *)convert_script_arg(tstate, code, FUNCNAME,
+ "argument 2", expected);
+ }
+ else {
+ code = (PyObject *)convert_code_arg(tstate, code, FUNCNAME,
+ "argument 2", expected);
+ }
+ if (code == NULL) {
return NULL;
}
PyObject *excinfo = NULL;
- int res = _exec_in_interpreter(tstate, interp, &xidata, shared, &excinfo);
- _PyXIData_Release(&xidata);
+ int res = _interp_exec(self, interp, code, shared, &excinfo);
+ Py_DECREF(code);
if (res < 0) {
assert((excinfo == NULL) != (PyErr_Occurred() == NULL));
return excinfo;
@@ -956,9 +1126,8 @@ interp_run_string(PyObject *self, PyObject *args, PyObject *kwds)
PyObject *shared = NULL;
int restricted = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "OU|O!$p:" FUNCNAME, kwlist,
- &id, &script, &PyDict_Type, &shared,
- &restricted))
+ "OU|O$p:" FUNCNAME, kwlist,
+ &id, &script, &shared, &restricted))
{
return NULL;
}
@@ -970,20 +1139,15 @@ interp_run_string(PyObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- if (PyFunction_Check(script) || PyCode_Check(script)) {
- _PyArg_BadArgument(FUNCNAME, "argument 2", "a string", script);
- return NULL;
- }
-
- _PyXIData_t xidata = {0};
- if (_PyCode_GetScriptXIData(tstate, script, &xidata) < 0) {
- unwrap_not_shareable(tstate);
+ script = (PyObject *)convert_script_arg(tstate, script, FUNCNAME,
+ "argument 2", "a string");
+ if (script == NULL) {
return NULL;
}
PyObject *excinfo = NULL;
- int res = _exec_in_interpreter(tstate, interp, &xidata, shared, &excinfo);
- _PyXIData_Release(&xidata);
+ int res = _interp_exec(self, interp, script, shared, &excinfo);
+ Py_DECREF(script);
if (res < 0) {
assert((excinfo == NULL) != (PyErr_Occurred() == NULL));
return excinfo;
@@ -1009,9 +1173,8 @@ interp_run_func(PyObject *self, PyObject *args, PyObject *kwds)
PyObject *shared = NULL;
int restricted = 0;
if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "OO|O!$p:" FUNCNAME, kwlist,
- &id, &func, &PyDict_Type, &shared,
- &restricted))
+ "OO|O$p:" FUNCNAME, kwlist,
+ &id, &func, &shared, &restricted))
{
return NULL;
}
@@ -1023,29 +1186,16 @@ interp_run_func(PyObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- // We don't worry about checking globals. They will be resolved
- // against __main__.
- PyObject *code;
- if (PyFunction_Check(func)) {
- code = PyFunction_GET_CODE(func);
- }
- else if (PyCode_Check(func)) {
- code = func;
- }
- else {
- _PyArg_BadArgument(FUNCNAME, "argument 2", "a function", func);
- return NULL;
- }
-
- _PyXIData_t xidata = {0};
- if (_PyCode_GetScriptXIData(tstate, code, &xidata) < 0) {
- unwrap_not_shareable(tstate);
+ PyCodeObject *code = convert_code_arg(tstate, func, FUNCNAME,
+ "argument 2",
+ "a function or a code object");
+ if (code == NULL) {
return NULL;
}
PyObject *excinfo = NULL;
- int res = _exec_in_interpreter(tstate, interp, &xidata, shared, &excinfo);
- _PyXIData_Release(&xidata);
+ int res = _interp_exec(self, interp, (PyObject *)code, shared, &excinfo);
+ Py_DECREF(code);
if (res < 0) {
assert((excinfo == NULL) != (PyErr_Occurred() == NULL));
return excinfo;
@@ -1098,15 +1248,15 @@ interp_call(PyObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- _PyXIData_t xidata = {0};
- if (_PyCode_GetPureScriptXIData(tstate, callable, &xidata) < 0) {
- unwrap_not_shareable(tstate);
+ PyObject *code = (PyObject *)convert_code_arg(tstate, callable, FUNCNAME,
+ "argument 2", "a function");
+ if (code == NULL) {
return NULL;
}
PyObject *excinfo = NULL;
- int res = _exec_in_interpreter(tstate, interp, &xidata, NULL, &excinfo);
- _PyXIData_Release(&xidata);
+ int res = _interp_exec(self, interp, code, NULL, &excinfo);
+ Py_DECREF(code);
if (res < 0) {
assert((excinfo == NULL) != (PyErr_Occurred() == NULL));
return excinfo;
diff --git a/Modules/_pickle.c b/Modules/_pickle.c
index d260f1a68f8..29ef0cb0c2e 100644
--- a/Modules/_pickle.c
+++ b/Modules/_pickle.c
@@ -1879,6 +1879,10 @@ _checkmodule(PyObject *module_name, PyObject *module,
_PyUnicode_EqualToASCIIString(module_name, "__main__")) {
return -1;
}
+ if (PyUnicode_Check(module_name) &&
+ _PyUnicode_EqualToASCIIString(module_name, "__mp_main__")) {
+ return -1;
+ }
PyObject *candidate = getattribute(module, dotted_path, 0);
if (candidate == NULL) {
diff --git a/Modules/_threadmodule.c b/Modules/_threadmodule.c
index 74b972b201a..cc83be4b5ff 100644
--- a/Modules/_threadmodule.c
+++ b/Modules/_threadmodule.c
@@ -296,6 +296,12 @@ _PyThread_AfterFork(struct _pythread_runtime_state *state)
continue;
}
+ // Keep handles for threads that have not been started yet. They are
+ // safe to start in the child process.
+ if (handle->state == THREAD_HANDLE_NOT_STARTED) {
+ continue;
+ }
+
// Mark all threads as done. Any attempts to join or detach the
// underlying OS thread (if any) could crash. We are the only thread;
// it's safe to set this non-atomically.
diff --git a/Modules/_zstd/_zstdmodule.c b/Modules/_zstd/_zstdmodule.c
index 17d3bff1e98..56ad999e5cd 100644
--- a/Modules/_zstd/_zstdmodule.c
+++ b/Modules/_zstd/_zstdmodule.c
@@ -28,41 +28,42 @@ set_zstd_error(const _zstd_state* const state,
char *msg;
assert(ZSTD_isError(zstd_ret));
- switch (type)
- {
- case ERR_DECOMPRESS:
- msg = "Unable to decompress Zstandard data: %s";
- break;
- case ERR_COMPRESS:
- msg = "Unable to compress Zstandard data: %s";
- break;
-
- case ERR_LOAD_D_DICT:
- msg = "Unable to load Zstandard dictionary or prefix for decompression: %s";
- break;
- case ERR_LOAD_C_DICT:
- msg = "Unable to load Zstandard dictionary or prefix for compression: %s";
- break;
-
- case ERR_GET_C_BOUNDS:
- msg = "Unable to get zstd compression parameter bounds: %s";
- break;
- case ERR_GET_D_BOUNDS:
- msg = "Unable to get zstd decompression parameter bounds: %s";
- break;
- case ERR_SET_C_LEVEL:
- msg = "Unable to set zstd compression level: %s";
- break;
-
- case ERR_TRAIN_DICT:
- msg = "Unable to train the Zstandard dictionary: %s";
- break;
- case ERR_FINALIZE_DICT:
- msg = "Unable to finalize the Zstandard dictionary: %s";
- break;
-
- default:
- Py_UNREACHABLE();
+ switch (type) {
+ case ERR_DECOMPRESS:
+ msg = "Unable to decompress Zstandard data: %s";
+ break;
+ case ERR_COMPRESS:
+ msg = "Unable to compress Zstandard data: %s";
+ break;
+
+ case ERR_LOAD_D_DICT:
+ msg = "Unable to load Zstandard dictionary or prefix for "
+ "decompression: %s";
+ break;
+ case ERR_LOAD_C_DICT:
+ msg = "Unable to load Zstandard dictionary or prefix for "
+ "compression: %s";
+ break;
+
+ case ERR_GET_C_BOUNDS:
+ msg = "Unable to get zstd compression parameter bounds: %s";
+ break;
+ case ERR_GET_D_BOUNDS:
+ msg = "Unable to get zstd decompression parameter bounds: %s";
+ break;
+ case ERR_SET_C_LEVEL:
+ msg = "Unable to set zstd compression level: %s";
+ break;
+
+ case ERR_TRAIN_DICT:
+ msg = "Unable to train the Zstandard dictionary: %s";
+ break;
+ case ERR_FINALIZE_DICT:
+ msg = "Unable to finalize the Zstandard dictionary: %s";
+ break;
+
+ default:
+ Py_UNREACHABLE();
}
PyErr_Format(state->ZstdError, msg, ZSTD_getErrorName(zstd_ret));
}
@@ -183,7 +184,7 @@ calculate_samples_stats(PyBytesObject *samples_bytes, PyObject *samples_sizes,
chunks_number = Py_SIZE(samples_sizes);
if ((size_t) chunks_number > UINT32_MAX) {
PyErr_Format(PyExc_ValueError,
- "The number of samples should be <= %u.", UINT32_MAX);
+ "The number of samples should be <= %u.", UINT32_MAX);
return -1;
}
@@ -200,8 +201,8 @@ calculate_samples_stats(PyBytesObject *samples_bytes, PyObject *samples_sizes,
(*chunk_sizes)[i] = PyLong_AsSize_t(size);
if ((*chunk_sizes)[i] == (size_t)-1 && PyErr_Occurred()) {
PyErr_Format(PyExc_ValueError,
- "Items in samples_sizes should be an int "
- "object, with a value between 0 and %u.", SIZE_MAX);
+ "Items in samples_sizes should be an int "
+ "object, with a value between 0 and %u.", SIZE_MAX);
return -1;
}
sizes_sum += (*chunk_sizes)[i];
@@ -209,7 +210,8 @@ calculate_samples_stats(PyBytesObject *samples_bytes, PyObject *samples_sizes,
if (sizes_sum != Py_SIZE(samples_bytes)) {
PyErr_SetString(PyExc_ValueError,
- "The samples size tuple doesn't match the concatenation's size.");
+ "The samples size tuple doesn't match the "
+ "concatenation's size.");
return -1;
}
return chunks_number;
@@ -242,15 +244,15 @@ _zstd_train_dict_impl(PyObject *module, PyBytesObject *samples_bytes,
/* Check arguments */
if (dict_size <= 0) {
- PyErr_SetString(PyExc_ValueError, "dict_size argument should be positive number.");
+ PyErr_SetString(PyExc_ValueError,
+ "dict_size argument should be positive number.");
return NULL;
}
/* Check that the samples are valid and get their sizes */
chunks_number = calculate_samples_stats(samples_bytes, samples_sizes,
&chunk_sizes);
- if (chunks_number < 0)
- {
+ if (chunks_number < 0) {
goto error;
}
@@ -271,7 +273,7 @@ _zstd_train_dict_impl(PyObject *module, PyBytesObject *samples_bytes,
/* Check Zstandard dict error */
if (ZDICT_isError(zstd_ret)) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
set_zstd_error(mod_state, ERR_TRAIN_DICT, zstd_ret);
goto error;
}
@@ -324,15 +326,15 @@ _zstd_finalize_dict_impl(PyObject *module, PyBytesObject *custom_dict_bytes,
/* Check arguments */
if (dict_size <= 0) {
- PyErr_SetString(PyExc_ValueError, "dict_size argument should be positive number.");
+ PyErr_SetString(PyExc_ValueError,
+ "dict_size argument should be positive number.");
return NULL;
}
/* Check that the samples are valid and get their sizes */
chunks_number = calculate_samples_stats(samples_bytes, samples_sizes,
&chunk_sizes);
- if (chunks_number < 0)
- {
+ if (chunks_number < 0) {
goto error;
}
@@ -355,14 +357,15 @@ _zstd_finalize_dict_impl(PyObject *module, PyBytesObject *custom_dict_bytes,
Py_BEGIN_ALLOW_THREADS
zstd_ret = ZDICT_finalizeDictionary(
PyBytes_AS_STRING(dst_dict_bytes), dict_size,
- PyBytes_AS_STRING(custom_dict_bytes), Py_SIZE(custom_dict_bytes),
+ PyBytes_AS_STRING(custom_dict_bytes),
+ Py_SIZE(custom_dict_bytes),
PyBytes_AS_STRING(samples_bytes), chunk_sizes,
(uint32_t)chunks_number, params);
Py_END_ALLOW_THREADS
/* Check Zstandard dict error */
if (ZDICT_isError(zstd_ret)) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
set_zstd_error(mod_state, ERR_FINALIZE_DICT, zstd_ret);
goto error;
}
@@ -402,7 +405,7 @@ _zstd_get_param_bounds_impl(PyObject *module, int parameter, int is_compress)
if (is_compress) {
bound = ZSTD_cParam_getBounds(parameter);
if (ZSTD_isError(bound.error)) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
set_zstd_error(mod_state, ERR_GET_C_BOUNDS, bound.error);
return NULL;
}
@@ -410,7 +413,7 @@ _zstd_get_param_bounds_impl(PyObject *module, int parameter, int is_compress)
else {
bound = ZSTD_dParam_getBounds(parameter);
if (ZSTD_isError(bound.error)) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
set_zstd_error(mod_state, ERR_GET_D_BOUNDS, bound.error);
return NULL;
}
@@ -435,9 +438,10 @@ _zstd_get_frame_size_impl(PyObject *module, Py_buffer *frame_buffer)
{
size_t frame_size;
- frame_size = ZSTD_findFrameCompressedSize(frame_buffer->buf, frame_buffer->len);
+ frame_size = ZSTD_findFrameCompressedSize(frame_buffer->buf,
+ frame_buffer->len);
if (ZSTD_isError(frame_size)) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
PyErr_Format(mod_state->ZstdError,
"Error when finding the compressed size of a Zstandard frame. "
"Ensure the frame_buffer argument starts from the "
@@ -473,7 +477,7 @@ _zstd_get_frame_info_impl(PyObject *module, Py_buffer *frame_buffer)
/* #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) */
if (decompressed_size == ZSTD_CONTENTSIZE_ERROR) {
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
PyErr_SetString(mod_state->ZstdError,
"Error when getting information from the header of "
"a Zstandard frame. Ensure the frame_buffer argument "
@@ -508,7 +512,7 @@ _zstd_set_parameter_types_impl(PyObject *module, PyObject *c_parameter_type,
PyObject *d_parameter_type)
/*[clinic end generated code: output=f3313b1294f19502 input=75d7a953580fae5f]*/
{
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
if (!PyType_Check(c_parameter_type) || !PyType_Check(d_parameter_type)) {
PyErr_SetString(PyExc_ValueError,
@@ -568,7 +572,7 @@ do { \
Py_DECREF(v); \
} while (0)
- _zstd_state* const mod_state = get_zstd_state(m);
+ _zstd_state* mod_state = get_zstd_state(m);
/* Reusable objects & variables */
mod_state->CParameter_type = NULL;
@@ -674,7 +678,7 @@ do { \
static int
_zstd_traverse(PyObject *module, visitproc visit, void *arg)
{
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
Py_VISIT(mod_state->ZstdDict_type);
Py_VISIT(mod_state->ZstdCompressor_type);
@@ -691,7 +695,7 @@ _zstd_traverse(PyObject *module, visitproc visit, void *arg)
static int
_zstd_clear(PyObject *module)
{
- _zstd_state* const mod_state = get_zstd_state(module);
+ _zstd_state* mod_state = get_zstd_state(module);
Py_CLEAR(mod_state->ZstdDict_type);
Py_CLEAR(mod_state->ZstdCompressor_type);
diff --git a/Modules/_zstd/buffer.h b/Modules/_zstd/buffer.h
index bff3a81d8aa..4c885fa0d72 100644
--- a/Modules/_zstd/buffer.h
+++ b/Modules/_zstd/buffer.h
@@ -19,7 +19,8 @@ _OutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer, ZSTD_outBuffer *ob,
/* Ensure .list was set to NULL */
assert(buffer->list == NULL);
- Py_ssize_t res = _BlocksOutputBuffer_InitAndGrow(buffer, max_length, &ob->dst);
+ Py_ssize_t res = _BlocksOutputBuffer_InitAndGrow(buffer, max_length,
+ &ob->dst);
if (res < 0) {
return -1;
}
@@ -34,8 +35,7 @@ _OutputBuffer_InitAndGrow(_BlocksOutputBuffer *buffer, ZSTD_outBuffer *ob,
Return -1 on failure */
static inline int
_OutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer, ZSTD_outBuffer *ob,
- Py_ssize_t max_length,
- Py_ssize_t init_size)
+ Py_ssize_t max_length, Py_ssize_t init_size)
{
Py_ssize_t block_size;
@@ -50,7 +50,8 @@ _OutputBuffer_InitWithSize(_BlocksOutputBuffer *buffer, ZSTD_outBuffer *ob,
block_size = init_size;
}
- Py_ssize_t res = _BlocksOutputBuffer_InitWithSize(buffer, block_size, &ob->dst);
+ Py_ssize_t res = _BlocksOutputBuffer_InitWithSize(buffer, block_size,
+ &ob->dst);
if (res < 0) {
return -1;
}
diff --git a/Modules/_zstd/clinic/decompressor.c.h b/Modules/_zstd/clinic/decompressor.c.h
index 4ecb19e9bde..c6fdae74ab0 100644
--- a/Modules/_zstd/clinic/decompressor.c.h
+++ b/Modules/_zstd/clinic/decompressor.c.h
@@ -7,7 +7,6 @@ preserve
# include "pycore_runtime.h" // _Py_ID()
#endif
#include "pycore_abstract.h" // _PyNumber_Index()
-#include "pycore_critical_section.h"// Py_BEGIN_CRITICAL_SECTION()
#include "pycore_modsupport.h" // _PyArg_UnpackKeywords()
PyDoc_STRVAR(_zstd_ZstdDecompressor_new__doc__,
@@ -114,13 +113,7 @@ _zstd_ZstdDecompressor_unused_data_get_impl(ZstdDecompressor *self);
static PyObject *
_zstd_ZstdDecompressor_unused_data_get(PyObject *self, void *Py_UNUSED(context))
{
- PyObject *return_value = NULL;
-
- Py_BEGIN_CRITICAL_SECTION(self);
- return_value = _zstd_ZstdDecompressor_unused_data_get_impl((ZstdDecompressor *)self);
- Py_END_CRITICAL_SECTION();
-
- return return_value;
+ return _zstd_ZstdDecompressor_unused_data_get_impl((ZstdDecompressor *)self);
}
PyDoc_STRVAR(_zstd_ZstdDecompressor_decompress__doc__,
@@ -227,4 +220,4 @@ exit:
return return_value;
}
-/*[clinic end generated code: output=7a4d278f9244e684 input=a9049054013a1b77]*/
+/*[clinic end generated code: output=30c12ef047027ede input=a9049054013a1b77]*/
diff --git a/Modules/_zstd/clinic/zstddict.c.h b/Modules/_zstd/clinic/zstddict.c.h
index 34e0e4b3ecf..810befdaf71 100644
--- a/Modules/_zstd/clinic/zstddict.c.h
+++ b/Modules/_zstd/clinic/zstddict.c.h
@@ -6,7 +6,6 @@ preserve
# include "pycore_gc.h" // PyGC_Head
# include "pycore_runtime.h" // _Py_ID()
#endif
-#include "pycore_critical_section.h"// Py_BEGIN_CRITICAL_SECTION()
#include "pycore_modsupport.h" // _PyArg_UnpackKeywords()
PyDoc_STRVAR(_zstd_ZstdDict_new__doc__,
@@ -90,7 +89,9 @@ exit:
PyDoc_STRVAR(_zstd_ZstdDict_as_digested_dict__doc__,
"Load as a digested dictionary to compressor.\n"
"\n"
-"Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_digested_dict)\n"
+"Pass this attribute as zstd_dict argument:\n"
+"compress(dat, zstd_dict=zd.as_digested_dict)\n"
+"\n"
"1. Some advanced compression parameters of compressor may be overridden\n"
" by parameters of digested dictionary.\n"
"2. ZstdDict has a digested dictionaries cache for each compression level.\n"
@@ -118,19 +119,15 @@ _zstd_ZstdDict_as_digested_dict_get_impl(ZstdDict *self);
static PyObject *
_zstd_ZstdDict_as_digested_dict_get(PyObject *self, void *Py_UNUSED(context))
{
- PyObject *return_value = NULL;
-
- Py_BEGIN_CRITICAL_SECTION(self);
- return_value = _zstd_ZstdDict_as_digested_dict_get_impl((ZstdDict *)self);
- Py_END_CRITICAL_SECTION();
-
- return return_value;
+ return _zstd_ZstdDict_as_digested_dict_get_impl((ZstdDict *)self);
}
PyDoc_STRVAR(_zstd_ZstdDict_as_undigested_dict__doc__,
"Load as an undigested dictionary to compressor.\n"
"\n"
-"Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_undigested_dict)\n"
+"Pass this attribute as zstd_dict argument:\n"
+"compress(dat, zstd_dict=zd.as_undigested_dict)\n"
+"\n"
"1. The advanced compression parameters of compressor will not be overridden.\n"
"2. Loading an undigested dictionary is costly. If load an undigested dictionary\n"
" multiple times, consider reusing a compressor object.\n"
@@ -156,19 +153,15 @@ _zstd_ZstdDict_as_undigested_dict_get_impl(ZstdDict *self);
static PyObject *
_zstd_ZstdDict_as_undigested_dict_get(PyObject *self, void *Py_UNUSED(context))
{
- PyObject *return_value = NULL;
-
- Py_BEGIN_CRITICAL_SECTION(self);
- return_value = _zstd_ZstdDict_as_undigested_dict_get_impl((ZstdDict *)self);
- Py_END_CRITICAL_SECTION();
-
- return return_value;
+ return _zstd_ZstdDict_as_undigested_dict_get_impl((ZstdDict *)self);
}
PyDoc_STRVAR(_zstd_ZstdDict_as_prefix__doc__,
"Load as a prefix to compressor/decompressor.\n"
"\n"
-"Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_prefix)\n"
+"Pass this attribute as zstd_dict argument:\n"
+"compress(dat, zstd_dict=zd.as_prefix)\n"
+"\n"
"1. Prefix is compatible with long distance matching, while dictionary is not.\n"
"2. It only works for the first frame, then the compressor/decompressor will\n"
" return to no prefix state.\n"
@@ -194,12 +187,6 @@ _zstd_ZstdDict_as_prefix_get_impl(ZstdDict *self);
static PyObject *
_zstd_ZstdDict_as_prefix_get(PyObject *self, void *Py_UNUSED(context))
{
- PyObject *return_value = NULL;
-
- Py_BEGIN_CRITICAL_SECTION(self);
- return_value = _zstd_ZstdDict_as_prefix_get_impl((ZstdDict *)self);
- Py_END_CRITICAL_SECTION();
-
- return return_value;
+ return _zstd_ZstdDict_as_prefix_get_impl((ZstdDict *)self);
}
-/*[clinic end generated code: output=bfb31c1187477afd input=a9049054013a1b77]*/
+/*[clinic end generated code: output=47b12b5848b53ed8 input=a9049054013a1b77]*/
diff --git a/Modules/_zstd/compressor.c b/Modules/_zstd/compressor.c
index 38baee2be1e..31cb8c535c0 100644
--- a/Modules/_zstd/compressor.c
+++ b/Modules/_zstd/compressor.c
@@ -17,6 +17,7 @@ class _zstd.ZstdCompressor "ZstdCompressor *" "&zstd_compressor_type_spec"
#include "_zstdmodule.h"
#include "buffer.h"
#include "zstddict.h"
+#include "internal/pycore_lock.h" // PyMutex_IsLocked
#include <stddef.h> // offsetof()
#include <zstd.h> // ZSTD_*()
@@ -38,6 +39,9 @@ typedef struct {
/* Compression level */
int compression_level;
+
+ /* Lock to protect the compression context */
+ PyMutex lock;
} ZstdCompressor;
#define ZstdCompressor_CAST(op) ((ZstdCompressor *)op)
@@ -49,7 +53,7 @@ _zstd_set_c_parameters(ZstdCompressor *self, PyObject *level_or_options,
const char *arg_name, const char* arg_type)
{
size_t zstd_ret;
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state == NULL) {
return -1;
}
@@ -59,8 +63,8 @@ _zstd_set_c_parameters(ZstdCompressor *self, PyObject *level_or_options,
int level = PyLong_AsInt(level_or_options);
if (level == -1 && PyErr_Occurred()) {
PyErr_Format(PyExc_ValueError,
- "Compression level should be an int value between %d and %d.",
- ZSTD_minCLevel(), ZSTD_maxCLevel());
+ "Compression level should be an int value between "
+ "%d and %d.", ZSTD_minCLevel(), ZSTD_maxCLevel());
return -1;
}
@@ -89,24 +93,23 @@ _zstd_set_c_parameters(ZstdCompressor *self, PyObject *level_or_options,
/* Check key type */
if (Py_TYPE(key) == mod_state->DParameter_type) {
PyErr_SetString(PyExc_TypeError,
- "Key of compression option dict should "
- "NOT be DecompressionParameter.");
+ "Key of compression options dict should "
+ "NOT be a DecompressionParameter attribute.");
return -1;
}
int key_v = PyLong_AsInt(key);
if (key_v == -1 && PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
- "Key of options dict should be a CompressionParameter attribute.");
+ "Key of options dict should be either a "
+ "CompressionParameter attribute or an int.");
return -1;
}
- // TODO(emmatyping): check bounds when there is a value error here for better
- // error message?
int value_v = PyLong_AsInt(value);
if (value_v == -1 && PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
- "Value of option dict should be an int.");
+ "Value of options dict should be an int.");
return -1;
}
@@ -135,7 +138,8 @@ _zstd_set_c_parameters(ZstdCompressor *self, PyObject *level_or_options,
}
return 0;
}
- PyErr_Format(PyExc_TypeError, "Invalid type for %s. Expected %s", arg_name, arg_type);
+ PyErr_Format(PyExc_TypeError,
+ "Invalid type for %s. Expected %s", arg_name, arg_type);
return -1;
}
@@ -149,12 +153,12 @@ capsule_free_cdict(PyObject *capsule)
ZSTD_CDict *
_get_CDict(ZstdDict *self, int compressionLevel)
{
+ assert(PyMutex_IsLocked(&self->lock));
PyObject *level = NULL;
- PyObject *capsule;
+ PyObject *capsule = NULL;
ZSTD_CDict *cdict;
+ int ret;
- // TODO(emmatyping): refactor critical section code into a lock_held function
- Py_BEGIN_CRITICAL_SECTION(self);
/* int level object */
level = PyLong_FromLong(compressionLevel);
@@ -163,12 +167,11 @@ _get_CDict(ZstdDict *self, int compressionLevel)
}
/* Get PyCapsule object from self->c_dicts */
- capsule = PyDict_GetItemWithError(self->c_dicts, level);
+ ret = PyDict_GetItemRef(self->c_dicts, level, &capsule);
+ if (ret < 0) {
+ goto error;
+ }
if (capsule == NULL) {
- if (PyErr_Occurred()) {
- goto error;
- }
-
/* Create ZSTD_CDict instance */
char *dict_buffer = PyBytes_AS_STRING(self->dict_content);
Py_ssize_t dict_len = Py_SIZE(self->dict_content);
@@ -179,7 +182,7 @@ _get_CDict(ZstdDict *self, int compressionLevel)
Py_END_ALLOW_THREADS
if (cdict == NULL) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
PyErr_SetString(mod_state->ZstdError,
"Failed to create a ZSTD_CDict instance from "
@@ -196,11 +199,10 @@ _get_CDict(ZstdDict *self, int compressionLevel)
}
/* Add PyCapsule object to self->c_dicts */
- if (PyDict_SetItem(self->c_dicts, level, capsule) < 0) {
- Py_DECREF(capsule);
+ ret = PyDict_SetItem(self->c_dicts, level, capsule);
+ if (ret < 0) {
goto error;
}
- Py_DECREF(capsule);
}
else {
/* ZSTD_CDict instance already exists */
@@ -212,16 +214,56 @@ error:
cdict = NULL;
success:
Py_XDECREF(level);
- Py_END_CRITICAL_SECTION();
+ Py_XDECREF(capsule);
return cdict;
}
static int
-_zstd_load_c_dict(ZstdCompressor *self, PyObject *dict)
+_zstd_load_impl(ZstdCompressor *self, ZstdDict *zd,
+ _zstd_state *mod_state, int type)
{
-
size_t zstd_ret;
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ if (type == DICT_TYPE_DIGESTED) {
+ /* Get ZSTD_CDict */
+ ZSTD_CDict *c_dict = _get_CDict(zd, self->compression_level);
+ if (c_dict == NULL) {
+ return -1;
+ }
+ /* Reference a prepared dictionary.
+ It overrides some compression context's parameters. */
+ zstd_ret = ZSTD_CCtx_refCDict(self->cctx, c_dict);
+ }
+ else if (type == DICT_TYPE_UNDIGESTED) {
+ /* Load a dictionary.
+ It doesn't override compression context's parameters. */
+ zstd_ret = ZSTD_CCtx_loadDictionary(
+ self->cctx,
+ PyBytes_AS_STRING(zd->dict_content),
+ Py_SIZE(zd->dict_content));
+ }
+ else if (type == DICT_TYPE_PREFIX) {
+ /* Load a prefix */
+ zstd_ret = ZSTD_CCtx_refPrefix(
+ self->cctx,
+ PyBytes_AS_STRING(zd->dict_content),
+ Py_SIZE(zd->dict_content));
+ }
+ else {
+ Py_UNREACHABLE();
+ }
+
+ /* Check error */
+ if (ZSTD_isError(zstd_ret)) {
+ set_zstd_error(mod_state, ERR_LOAD_C_DICT, zstd_ret);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+_zstd_load_c_dict(ZstdCompressor *self, PyObject *dict)
+{
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state == NULL) {
return -1;
}
@@ -237,7 +279,10 @@ _zstd_load_c_dict(ZstdCompressor *self, PyObject *dict)
/* When compressing, use undigested dictionary by default. */
zd = (ZstdDict*)dict;
type = DICT_TYPE_UNDIGESTED;
- goto load;
+ PyMutex_Lock(&zd->lock);
+ ret = _zstd_load_impl(self, zd, mod_state, type);
+ PyMutex_Unlock(&zd->lock);
+ return ret;
}
/* Check (ZstdDict, type) */
@@ -251,13 +296,16 @@ _zstd_load_c_dict(ZstdCompressor *self, PyObject *dict)
else if (ret > 0) {
/* type == -1 may indicate an error. */
type = PyLong_AsInt(PyTuple_GET_ITEM(dict, 1));
- if (type == DICT_TYPE_DIGESTED ||
- type == DICT_TYPE_UNDIGESTED ||
- type == DICT_TYPE_PREFIX)
+ if (type == DICT_TYPE_DIGESTED
+ || type == DICT_TYPE_UNDIGESTED
+ || type == DICT_TYPE_PREFIX)
{
assert(type >= 0);
zd = (ZstdDict*)PyTuple_GET_ITEM(dict, 0);
- goto load;
+ PyMutex_Lock(&zd->lock);
+ ret = _zstd_load_impl(self, zd, mod_state, type);
+ PyMutex_Unlock(&zd->lock);
+ return ret;
}
}
}
@@ -266,49 +314,6 @@ _zstd_load_c_dict(ZstdCompressor *self, PyObject *dict)
PyErr_SetString(PyExc_TypeError,
"zstd_dict argument should be ZstdDict object.");
return -1;
-
-load:
- if (type == DICT_TYPE_DIGESTED) {
- /* Get ZSTD_CDict */
- ZSTD_CDict *c_dict = _get_CDict(zd, self->compression_level);
- if (c_dict == NULL) {
- return -1;
- }
- /* Reference a prepared dictionary.
- It overrides some compression context's parameters. */
- Py_BEGIN_CRITICAL_SECTION(self);
- zstd_ret = ZSTD_CCtx_refCDict(self->cctx, c_dict);
- Py_END_CRITICAL_SECTION();
- }
- else if (type == DICT_TYPE_UNDIGESTED) {
- /* Load a dictionary.
- It doesn't override compression context's parameters. */
- Py_BEGIN_CRITICAL_SECTION2(self, zd);
- zstd_ret = ZSTD_CCtx_loadDictionary(
- self->cctx,
- PyBytes_AS_STRING(zd->dict_content),
- Py_SIZE(zd->dict_content));
- Py_END_CRITICAL_SECTION2();
- }
- else if (type == DICT_TYPE_PREFIX) {
- /* Load a prefix */
- Py_BEGIN_CRITICAL_SECTION2(self, zd);
- zstd_ret = ZSTD_CCtx_refPrefix(
- self->cctx,
- PyBytes_AS_STRING(zd->dict_content),
- Py_SIZE(zd->dict_content));
- Py_END_CRITICAL_SECTION2();
- }
- else {
- Py_UNREACHABLE();
- }
-
- /* Check error */
- if (ZSTD_isError(zstd_ret)) {
- set_zstd_error(mod_state, ERR_LOAD_C_DICT, zstd_ret);
- return -1;
- }
- return 0;
}
/*[clinic input]
@@ -339,11 +344,12 @@ _zstd_ZstdCompressor_new_impl(PyTypeObject *type, PyObject *level,
self->use_multithread = 0;
self->dict = NULL;
+ self->lock = (PyMutex){0};
/* Compression context */
self->cctx = ZSTD_createCCtx();
if (self->cctx == NULL) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
PyErr_SetString(mod_state->ZstdError,
"Unable to create ZSTD_CCtx instance.");
@@ -355,7 +361,8 @@ _zstd_ZstdCompressor_new_impl(PyTypeObject *type, PyObject *level,
self->last_mode = ZSTD_e_end;
if (level != Py_None && options != Py_None) {
- PyErr_SetString(PyExc_RuntimeError, "Only one of level or options should be used.");
+ PyErr_SetString(PyExc_RuntimeError,
+ "Only one of level or options should be used.");
goto error;
}
@@ -403,6 +410,8 @@ ZstdCompressor_dealloc(PyObject *ob)
ZSTD_freeCCtx(self->cctx);
}
+ assert(!PyMutex_IsLocked(&self->lock));
+
/* Py_XDECREF the dict after free the compression context */
Py_CLEAR(self->dict);
@@ -412,9 +421,10 @@ ZstdCompressor_dealloc(PyObject *ob)
}
static PyObject *
-compress_impl(ZstdCompressor *self, Py_buffer *data,
- ZSTD_EndDirective end_directive)
+compress_lock_held(ZstdCompressor *self, Py_buffer *data,
+ ZSTD_EndDirective end_directive)
{
+ assert(PyMutex_IsLocked(&self->lock));
ZSTD_inBuffer in;
ZSTD_outBuffer out;
_BlocksOutputBuffer buffer = {.list = NULL};
@@ -441,7 +451,7 @@ compress_impl(ZstdCompressor *self, Py_buffer *data,
}
if (_OutputBuffer_InitWithSize(&buffer, &out, -1,
- (Py_ssize_t) output_buffer_size) < 0) {
+ (Py_ssize_t) output_buffer_size) < 0) {
goto error;
}
@@ -454,7 +464,7 @@ compress_impl(ZstdCompressor *self, Py_buffer *data,
/* Check error */
if (ZSTD_isError(zstd_ret)) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
set_zstd_error(mod_state, ERR_COMPRESS, zstd_ret);
}
@@ -495,8 +505,9 @@ mt_continue_should_break(ZSTD_inBuffer *in, ZSTD_outBuffer *out)
#endif
static PyObject *
-compress_mt_continue_impl(ZstdCompressor *self, Py_buffer *data)
+compress_mt_continue_lock_held(ZstdCompressor *self, Py_buffer *data)
{
+ assert(PyMutex_IsLocked(&self->lock));
ZSTD_inBuffer in;
ZSTD_outBuffer out;
_BlocksOutputBuffer buffer = {.list = NULL};
@@ -516,20 +527,23 @@ compress_mt_continue_impl(ZstdCompressor *self, Py_buffer *data)
while (1) {
Py_BEGIN_ALLOW_THREADS
do {
- zstd_ret = ZSTD_compressStream2(self->cctx, &out, &in, ZSTD_e_continue);
- } while (out.pos != out.size && in.pos != in.size && !ZSTD_isError(zstd_ret));
+ zstd_ret = ZSTD_compressStream2(self->cctx, &out, &in,
+ ZSTD_e_continue);
+ } while (out.pos != out.size
+ && in.pos != in.size
+ && !ZSTD_isError(zstd_ret));
Py_END_ALLOW_THREADS
/* Check error */
if (ZSTD_isError(zstd_ret)) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
set_zstd_error(mod_state, ERR_COMPRESS, zstd_ret);
}
goto error;
}
- /* Like compress_impl(), output as much as possible. */
+ /* Like compress_lock_held(), output as much as possible. */
if (out.pos == out.size) {
if (_OutputBuffer_Grow(&buffer, &out) < 0) {
goto error;
@@ -588,14 +602,14 @@ _zstd_ZstdCompressor_compress_impl(ZstdCompressor *self, Py_buffer *data,
}
/* Thread-safe code */
- Py_BEGIN_CRITICAL_SECTION(self);
+ PyMutex_Lock(&self->lock);
/* Compress */
if (self->use_multithread && mode == ZSTD_e_continue) {
- ret = compress_mt_continue_impl(self, data);
+ ret = compress_mt_continue_lock_held(self, data);
}
else {
- ret = compress_impl(self, data, mode);
+ ret = compress_lock_held(self, data, mode);
}
if (ret) {
@@ -607,7 +621,7 @@ _zstd_ZstdCompressor_compress_impl(ZstdCompressor *self, Py_buffer *data,
/* Resetting cctx's session never fail */
ZSTD_CCtx_reset(self->cctx, ZSTD_reset_session_only);
}
- Py_END_CRITICAL_SECTION();
+ PyMutex_Unlock(&self->lock);
return ret;
}
@@ -642,8 +656,9 @@ _zstd_ZstdCompressor_flush_impl(ZstdCompressor *self, int mode)
}
/* Thread-safe code */
- Py_BEGIN_CRITICAL_SECTION(self);
- ret = compress_impl(self, NULL, mode);
+ PyMutex_Lock(&self->lock);
+
+ ret = compress_lock_held(self, NULL, mode);
if (ret) {
self->last_mode = mode;
@@ -654,7 +669,7 @@ _zstd_ZstdCompressor_flush_impl(ZstdCompressor *self, int mode)
/* Resetting cctx's session never fail */
ZSTD_CCtx_reset(self->cctx, ZSTD_reset_session_only);
}
- Py_END_CRITICAL_SECTION();
+ PyMutex_Unlock(&self->lock);
return ret;
}
@@ -668,12 +683,12 @@ static PyMethodDef ZstdCompressor_methods[] = {
PyDoc_STRVAR(ZstdCompressor_last_mode_doc,
"The last mode used to this compressor object, its value can be .CONTINUE,\n"
".FLUSH_BLOCK, .FLUSH_FRAME. Initialized to .FLUSH_FRAME.\n\n"
-"It can be used to get the current state of a compressor, such as, data flushed,\n"
-"a frame ended.");
+"It can be used to get the current state of a compressor, such as, data\n"
+"flushed, or a frame ended.");
static PyMemberDef ZstdCompressor_members[] = {
{"last_mode", Py_T_INT, offsetof(ZstdCompressor, last_mode),
- Py_READONLY, ZstdCompressor_last_mode_doc},
+ Py_READONLY, ZstdCompressor_last_mode_doc},
{NULL}
};
diff --git a/Modules/_zstd/decompressor.c b/Modules/_zstd/decompressor.c
index 58f9c9f804e..d084f0847c7 100644
--- a/Modules/_zstd/decompressor.c
+++ b/Modules/_zstd/decompressor.c
@@ -17,6 +17,7 @@ class _zstd.ZstdDecompressor "ZstdDecompressor *" "&zstd_decompressor_type_spec"
#include "_zstdmodule.h"
#include "buffer.h"
#include "zstddict.h"
+#include "internal/pycore_lock.h" // PyMutex_IsLocked
#include <stdbool.h> // bool
#include <stddef.h> // offsetof()
@@ -45,6 +46,9 @@ typedef struct {
/* For ZstdDecompressor, 0 or 1.
1 means the end of the first frame has been reached. */
bool eof;
+
+ /* Lock to protect the decompression context */
+ PyMutex lock;
} ZstdDecompressor;
#define ZstdDecompressor_CAST(op) ((ZstdDecompressor *)op)
@@ -54,6 +58,7 @@ typedef struct {
static inline ZSTD_DDict *
_get_DDict(ZstdDict *self)
{
+ assert(PyMutex_IsLocked(&self->lock));
ZSTD_DDict *ret;
/* Already created */
@@ -61,18 +66,17 @@ _get_DDict(ZstdDict *self)
return self->d_dict;
}
- Py_BEGIN_CRITICAL_SECTION(self);
if (self->d_dict == NULL) {
/* Create ZSTD_DDict instance from dictionary content */
char *dict_buffer = PyBytes_AS_STRING(self->dict_content);
Py_ssize_t dict_len = Py_SIZE(self->dict_content);
Py_BEGIN_ALLOW_THREADS
- self->d_dict = ZSTD_createDDict(dict_buffer,
- dict_len);
+ ret = ZSTD_createDDict(dict_buffer, dict_len);
Py_END_ALLOW_THREADS
+ self->d_dict = ret;
if (self->d_dict == NULL) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
PyErr_SetString(mod_state->ZstdError,
"Failed to create a ZSTD_DDict instance from "
@@ -81,11 +85,7 @@ _get_DDict(ZstdDict *self)
}
}
- /* Don't lose any exception */
- ret = self->d_dict;
- Py_END_CRITICAL_SECTION();
-
- return ret;
+ return self->d_dict;
}
/* Set decompression parameters to decompression context */
@@ -95,7 +95,7 @@ _zstd_set_d_parameters(ZstdDecompressor *self, PyObject *options)
size_t zstd_ret;
PyObject *key, *value;
Py_ssize_t pos;
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state == NULL) {
return -1;
}
@@ -112,7 +112,7 @@ _zstd_set_d_parameters(ZstdDecompressor *self, PyObject *options)
if (Py_TYPE(key) == mod_state->CParameter_type) {
PyErr_SetString(PyExc_TypeError,
"Key of decompression options dict should "
- "NOT be CompressionParameter.");
+ "NOT be a CompressionParameter attribute.");
return -1;
}
@@ -120,12 +120,11 @@ _zstd_set_d_parameters(ZstdDecompressor *self, PyObject *options)
int key_v = PyLong_AsInt(key);
if (key_v == -1 && PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
- "Key of options dict should be a DecompressionParameter attribute.");
+ "Key of options dict should be either a "
+ "DecompressionParameter attribute or an int.");
return -1;
}
- // TODO(emmatyping): check bounds when there is a value error here for better
- // error message?
int value_v = PyLong_AsInt(value);
if (value_v == -1 && PyErr_Occurred()) {
PyErr_SetString(PyExc_ValueError,
@@ -134,9 +133,7 @@ _zstd_set_d_parameters(ZstdDecompressor *self, PyObject *options)
}
/* Set parameter to compression context */
- Py_BEGIN_CRITICAL_SECTION(self);
zstd_ret = ZSTD_DCtx_setParameter(self->dctx, key_v, value_v);
- Py_END_CRITICAL_SECTION();
/* Check error */
if (ZSTD_isError(zstd_ret)) {
@@ -147,12 +144,54 @@ _zstd_set_d_parameters(ZstdDecompressor *self, PyObject *options)
return 0;
}
+static int
+_zstd_load_impl(ZstdDecompressor *self, ZstdDict *zd,
+ _zstd_state *mod_state, int type)
+{
+ size_t zstd_ret;
+ if (type == DICT_TYPE_DIGESTED) {
+ /* Get ZSTD_DDict */
+ ZSTD_DDict *d_dict = _get_DDict(zd);
+ if (d_dict == NULL) {
+ return -1;
+ }
+ /* Reference a prepared dictionary */
+ zstd_ret = ZSTD_DCtx_refDDict(self->dctx, d_dict);
+ }
+ else if (type == DICT_TYPE_UNDIGESTED) {
+ /* Load a dictionary */
+ zstd_ret = ZSTD_DCtx_loadDictionary(
+ self->dctx,
+ PyBytes_AS_STRING(zd->dict_content),
+ Py_SIZE(zd->dict_content));
+ }
+ else if (type == DICT_TYPE_PREFIX) {
+ /* Load a prefix */
+ zstd_ret = ZSTD_DCtx_refPrefix(
+ self->dctx,
+ PyBytes_AS_STRING(zd->dict_content),
+ Py_SIZE(zd->dict_content));
+ }
+ else {
+ /* Impossible code path */
+ PyErr_SetString(PyExc_SystemError,
+ "load_d_dict() impossible code path");
+ return -1;
+ }
+
+ /* Check error */
+ if (ZSTD_isError(zstd_ret)) {
+ set_zstd_error(mod_state, ERR_LOAD_D_DICT, zstd_ret);
+ return -1;
+ }
+ return 0;
+}
+
/* Load dictionary or prefix to decompression context */
static int
_zstd_load_d_dict(ZstdDecompressor *self, PyObject *dict)
{
- size_t zstd_ret;
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state == NULL) {
return -1;
}
@@ -168,7 +207,10 @@ _zstd_load_d_dict(ZstdDecompressor *self, PyObject *dict)
/* When decompressing, use digested dictionary by default. */
zd = (ZstdDict*)dict;
type = DICT_TYPE_DIGESTED;
- goto load;
+ PyMutex_Lock(&zd->lock);
+ ret = _zstd_load_impl(self, zd, mod_state, type);
+ PyMutex_Unlock(&zd->lock);
+ return ret;
}
/* Check (ZstdDict, type) */
@@ -182,13 +224,16 @@ _zstd_load_d_dict(ZstdDecompressor *self, PyObject *dict)
else if (ret > 0) {
/* type == -1 may indicate an error. */
type = PyLong_AsInt(PyTuple_GET_ITEM(dict, 1));
- if (type == DICT_TYPE_DIGESTED ||
- type == DICT_TYPE_UNDIGESTED ||
- type == DICT_TYPE_PREFIX)
+ if (type == DICT_TYPE_DIGESTED
+ || type == DICT_TYPE_UNDIGESTED
+ || type == DICT_TYPE_PREFIX)
{
assert(type >= 0);
zd = (ZstdDict*)PyTuple_GET_ITEM(dict, 0);
- goto load;
+ PyMutex_Lock(&zd->lock);
+ ret = _zstd_load_impl(self, zd, mod_state, type);
+ PyMutex_Unlock(&zd->lock);
+ return ret;
}
}
}
@@ -197,50 +242,6 @@ _zstd_load_d_dict(ZstdDecompressor *self, PyObject *dict)
PyErr_SetString(PyExc_TypeError,
"zstd_dict argument should be ZstdDict object.");
return -1;
-
-load:
- if (type == DICT_TYPE_DIGESTED) {
- /* Get ZSTD_DDict */
- ZSTD_DDict *d_dict = _get_DDict(zd);
- if (d_dict == NULL) {
- return -1;
- }
- /* Reference a prepared dictionary */
- Py_BEGIN_CRITICAL_SECTION(self);
- zstd_ret = ZSTD_DCtx_refDDict(self->dctx, d_dict);
- Py_END_CRITICAL_SECTION();
- }
- else if (type == DICT_TYPE_UNDIGESTED) {
- /* Load a dictionary */
- Py_BEGIN_CRITICAL_SECTION2(self, zd);
- zstd_ret = ZSTD_DCtx_loadDictionary(
- self->dctx,
- PyBytes_AS_STRING(zd->dict_content),
- Py_SIZE(zd->dict_content));
- Py_END_CRITICAL_SECTION2();
- }
- else if (type == DICT_TYPE_PREFIX) {
- /* Load a prefix */
- Py_BEGIN_CRITICAL_SECTION2(self, zd);
- zstd_ret = ZSTD_DCtx_refPrefix(
- self->dctx,
- PyBytes_AS_STRING(zd->dict_content),
- Py_SIZE(zd->dict_content));
- Py_END_CRITICAL_SECTION2();
- }
- else {
- /* Impossible code path */
- PyErr_SetString(PyExc_SystemError,
- "load_d_dict() impossible code path");
- return -1;
- }
-
- /* Check error */
- if (ZSTD_isError(zstd_ret)) {
- set_zstd_error(mod_state, ERR_LOAD_D_DICT, zstd_ret);
- return -1;
- }
- return 0;
}
/*
@@ -268,8 +269,8 @@ load:
Note, decompressing "an empty input" in any case will make it > 0.
*/
static PyObject *
-decompress_impl(ZstdDecompressor *self, ZSTD_inBuffer *in,
- Py_ssize_t max_length)
+decompress_lock_held(ZstdDecompressor *self, ZSTD_inBuffer *in,
+ Py_ssize_t max_length)
{
size_t zstd_ret;
ZSTD_outBuffer out;
@@ -290,7 +291,7 @@ decompress_impl(ZstdDecompressor *self, ZSTD_inBuffer *in,
/* Check error */
if (ZSTD_isError(zstd_ret)) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
set_zstd_error(mod_state, ERR_DECOMPRESS, zstd_ret);
}
@@ -339,10 +340,9 @@ error:
}
static void
-decompressor_reset_session(ZstdDecompressor *self)
+decompressor_reset_session_lock_held(ZstdDecompressor *self)
{
- // TODO(emmatyping): use _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED here
- // and ensure lock is always held
+ assert(PyMutex_IsLocked(&self->lock));
/* Reset variables */
self->in_begin = 0;
@@ -359,15 +359,18 @@ decompressor_reset_session(ZstdDecompressor *self)
}
static PyObject *
-stream_decompress(ZstdDecompressor *self, Py_buffer *data, Py_ssize_t max_length)
+stream_decompress_lock_held(ZstdDecompressor *self, Py_buffer *data,
+ Py_ssize_t max_length)
{
+ assert(PyMutex_IsLocked(&self->lock));
ZSTD_inBuffer in;
PyObject *ret = NULL;
int use_input_buffer;
/* Check .eof flag */
if (self->eof) {
- PyErr_SetString(PyExc_EOFError, "Already at the end of a Zstandard frame.");
+ PyErr_SetString(PyExc_EOFError,
+ "Already at the end of a Zstandard frame.");
assert(ret == NULL);
return NULL;
}
@@ -456,7 +459,7 @@ stream_decompress(ZstdDecompressor *self, Py_buffer *data, Py_ssize_t max_length
assert(in.pos == 0);
/* Decompress */
- ret = decompress_impl(self, &in, max_length);
+ ret = decompress_lock_held(self, &in, max_length);
if (ret == NULL) {
goto error;
}
@@ -484,8 +487,8 @@ stream_decompress(ZstdDecompressor *self, Py_buffer *data, Py_ssize_t max_length
if (!use_input_buffer) {
/* Discard buffer if it's too small
(resizing it may needlessly copy the current contents) */
- if (self->input_buffer != NULL &&
- self->input_buffer_size < data_size)
+ if (self->input_buffer != NULL
+ && self->input_buffer_size < data_size)
{
PyMem_Free(self->input_buffer);
self->input_buffer = NULL;
@@ -517,7 +520,7 @@ stream_decompress(ZstdDecompressor *self, Py_buffer *data, Py_ssize_t max_length
error:
/* Reset decompressor's states/session */
- decompressor_reset_session(self);
+ decompressor_reset_session_lock_held(self);
Py_CLEAR(ret);
return NULL;
@@ -555,6 +558,7 @@ _zstd_ZstdDecompressor_new_impl(PyTypeObject *type, PyObject *zstd_dict,
self->unused_data = NULL;
self->eof = 0;
self->dict = NULL;
+ self->lock = (PyMutex){0};
/* needs_input flag */
self->needs_input = 1;
@@ -562,7 +566,7 @@ _zstd_ZstdDecompressor_new_impl(PyTypeObject *type, PyObject *zstd_dict,
/* Decompression context */
self->dctx = ZSTD_createDCtx();
if (self->dctx == NULL) {
- _zstd_state* const mod_state = PyType_GetModuleState(Py_TYPE(self));
+ _zstd_state* mod_state = PyType_GetModuleState(Py_TYPE(self));
if (mod_state != NULL) {
PyErr_SetString(mod_state->ZstdError,
"Unable to create ZSTD_DCtx instance.");
@@ -608,6 +612,8 @@ ZstdDecompressor_dealloc(PyObject *ob)
ZSTD_freeDCtx(self->dctx);
}
+ assert(!PyMutex_IsLocked(&self->lock));
+
/* Py_CLEAR the dict after free decompression context */
Py_CLEAR(self->dict);
@@ -623,7 +629,6 @@ ZstdDecompressor_dealloc(PyObject *ob)
}
/*[clinic input]
-@critical_section
@getter
_zstd.ZstdDecompressor.unused_data
@@ -635,11 +640,14 @@ decompressed, unused input data after the frame. Otherwise this will be b''.
static PyObject *
_zstd_ZstdDecompressor_unused_data_get_impl(ZstdDecompressor *self)
-/*[clinic end generated code: output=f3a20940f11b6b09 input=5233800bef00df04]*/
+/*[clinic end generated code: output=f3a20940f11b6b09 input=54d41ecd681a3444]*/
{
PyObject *ret;
+ PyMutex_Lock(&self->lock);
+
if (!self->eof) {
+ PyMutex_Unlock(&self->lock);
return Py_GetConstant(Py_CONSTANT_EMPTY_BYTES);
}
else {
@@ -656,6 +664,7 @@ _zstd_ZstdDecompressor_unused_data_get_impl(ZstdDecompressor *self)
}
}
+ PyMutex_Unlock(&self->lock);
return ret;
}
@@ -693,10 +702,9 @@ _zstd_ZstdDecompressor_decompress_impl(ZstdDecompressor *self,
{
PyObject *ret;
/* Thread-safe code */
- Py_BEGIN_CRITICAL_SECTION(self);
-
- ret = stream_decompress(self, data, max_length);
- Py_END_CRITICAL_SECTION();
+ PyMutex_Lock(&self->lock);
+ ret = stream_decompress_lock_held(self, data, max_length);
+ PyMutex_Unlock(&self->lock);
return ret;
}
@@ -710,9 +718,10 @@ PyDoc_STRVAR(ZstdDecompressor_eof_doc,
"after that, an EOFError exception will be raised.");
PyDoc_STRVAR(ZstdDecompressor_needs_input_doc,
-"If the max_length output limit in .decompress() method has been reached, and\n"
-"the decompressor has (or may has) unconsumed input data, it will be set to\n"
-"False. In this case, pass b'' to .decompress() method may output further data.");
+"If the max_length output limit in .decompress() method has been reached,\n"
+"and the decompressor has (or may have) unconsumed input data, it will be set\n"
+"to False. In this case, passing b'' to the .decompress() method may output\n"
+"further data.");
static PyMemberDef ZstdDecompressor_members[] = {
{"eof", Py_T_BOOL, offsetof(ZstdDecompressor, eof),
diff --git a/Modules/_zstd/zstddict.c b/Modules/_zstd/zstddict.c
index 7df187a6fa6..e3e9e5d0645 100644
--- a/Modules/_zstd/zstddict.c
+++ b/Modules/_zstd/zstddict.c
@@ -17,6 +17,7 @@ class _zstd.ZstdDict "ZstdDict *" "&zstd_dict_type_spec"
#include "_zstdmodule.h"
#include "zstddict.h"
#include "clinic/zstddict.c.h"
+#include "internal/pycore_lock.h" // PyMutex_IsLocked
#include <zstd.h> // ZSTD_freeDDict(), ZSTD_getDictID_fromDict()
@@ -53,6 +54,7 @@ _zstd_ZstdDict_new_impl(PyTypeObject *type, PyObject *dict_content,
self->dict_content = NULL;
self->d_dict = NULL;
self->dict_id = 0;
+ self->lock = (PyMutex){0};
/* ZSTD_CDict dict */
self->c_dicts = PyDict_New();
@@ -72,13 +74,15 @@ _zstd_ZstdDict_new_impl(PyTypeObject *type, PyObject *dict_content,
at least 8 bytes */
if (Py_SIZE(self->dict_content) < 8) {
PyErr_SetString(PyExc_ValueError,
- "Zstandard dictionary content should at least 8 bytes.");
+                        "Zstandard dictionary content should be at least "
+ "8 bytes.");
goto error;
}
/* Get dict_id, 0 means "raw content" dictionary. */
- self->dict_id = ZSTD_getDictID_fromDict(PyBytes_AS_STRING(self->dict_content),
- Py_SIZE(self->dict_content));
+ self->dict_id = ZSTD_getDictID_fromDict(
+ PyBytes_AS_STRING(self->dict_content),
+ Py_SIZE(self->dict_content));
/* Check validity for ordinary dictionary */
if (!is_raw && self->dict_id == 0) {
@@ -109,6 +113,8 @@ ZstdDict_dealloc(PyObject *ob)
ZSTD_freeDDict(self->d_dict);
}
+ assert(!PyMutex_IsLocked(&self->lock));
+
/* Release dict_content after Free ZSTD_CDict/ZSTD_DDict instances */
Py_CLEAR(self->dict_content);
Py_CLEAR(self->c_dicts);
@@ -137,19 +143,22 @@ ZstdDict_str(PyObject *ob)
}
static PyMemberDef ZstdDict_members[] = {
- {"dict_id", Py_T_UINT, offsetof(ZstdDict, dict_id), Py_READONLY, ZstdDict_dictid_doc},
- {"dict_content", Py_T_OBJECT_EX, offsetof(ZstdDict, dict_content), Py_READONLY, ZstdDict_dictcontent_doc},
+ {"dict_id", Py_T_UINT, offsetof(ZstdDict, dict_id), Py_READONLY,
+ ZstdDict_dictid_doc},
+ {"dict_content", Py_T_OBJECT_EX, offsetof(ZstdDict, dict_content),
+ Py_READONLY, ZstdDict_dictcontent_doc},
{NULL}
};
/*[clinic input]
-@critical_section
@getter
_zstd.ZstdDict.as_digested_dict
Load as a digested dictionary to compressor.
-Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_digested_dict)
+Pass this attribute as zstd_dict argument:
+compress(dat, zstd_dict=zd.as_digested_dict)
+
1. Some advanced compression parameters of compressor may be overridden
by parameters of digested dictionary.
2. ZstdDict has a digested dictionaries cache for each compression level.
@@ -160,19 +169,20 @@ Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_digeste
static PyObject *
_zstd_ZstdDict_as_digested_dict_get_impl(ZstdDict *self)
-/*[clinic end generated code: output=09b086e7a7320dbb input=585448c79f31f74a]*/
+/*[clinic end generated code: output=09b086e7a7320dbb input=ee45e1b4a48f6f2c]*/
{
return Py_BuildValue("Oi", self, DICT_TYPE_DIGESTED);
}
/*[clinic input]
-@critical_section
@getter
_zstd.ZstdDict.as_undigested_dict
Load as an undigested dictionary to compressor.
-Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_undigested_dict)
+Pass this attribute as zstd_dict argument:
+compress(dat, zstd_dict=zd.as_undigested_dict)
+
1. The advanced compression parameters of compressor will not be overridden.
2. Loading an undigested dictionary is costly. If load an undigested dictionary
multiple times, consider reusing a compressor object.
@@ -181,19 +191,20 @@ Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_undiges
static PyObject *
_zstd_ZstdDict_as_undigested_dict_get_impl(ZstdDict *self)
-/*[clinic end generated code: output=43c7a989e6d4253a input=022b0829ffb1c220]*/
+/*[clinic end generated code: output=43c7a989e6d4253a input=d39210eedec76fed]*/
{
return Py_BuildValue("Oi", self, DICT_TYPE_UNDIGESTED);
}
/*[clinic input]
-@critical_section
@getter
_zstd.ZstdDict.as_prefix
Load as a prefix to compressor/decompressor.
-Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_prefix)
+Pass this attribute as zstd_dict argument:
+compress(dat, zstd_dict=zd.as_prefix)
+
1. Prefix is compatible with long distance matching, while dictionary is not.
2. It only works for the first frame, then the compressor/decompressor will
return to no prefix state.
@@ -202,7 +213,7 @@ Pass this attribute as zstd_dict argument: compress(dat, zstd_dict=zd.as_prefix)
static PyObject *
_zstd_ZstdDict_as_prefix_get_impl(ZstdDict *self)
-/*[clinic end generated code: output=6f7130c356595a16 input=09fb82a6a5407e87]*/
+/*[clinic end generated code: output=6f7130c356595a16 input=d59757b0b5a9551a]*/
{
return Py_BuildValue("Oi", self, DICT_TYPE_PREFIX);
}
diff --git a/Modules/_zstd/zstddict.h b/Modules/_zstd/zstddict.h
index e8a55a3670b..dcba0f21852 100644
--- a/Modules/_zstd/zstddict.h
+++ b/Modules/_zstd/zstddict.h
@@ -19,6 +19,9 @@ typedef struct {
PyObject *dict_content;
/* Dictionary id */
uint32_t dict_id;
+
+ /* Lock to protect the digested dictionaries */
+ PyMutex lock;
} ZstdDict;
#endif // !ZSTD_DICT_H
diff --git a/Python/optimizer_analysis.c b/Python/optimizer_analysis.c
index 851e1efa049..6a7df233819 100644
--- a/Python/optimizer_analysis.c
+++ b/Python/optimizer_analysis.c
@@ -558,6 +558,7 @@ const uint16_t op_without_push[MAX_UOP_ID + 1] = {
const bool op_skip[MAX_UOP_ID + 1] = {
[_NOP] = true,
[_CHECK_VALIDITY] = true,
+ [_CHECK_PERIODIC] = true,
[_SET_IP] = true,
};
@@ -617,7 +618,7 @@ remove_unneeded_uops(_PyUOpInstruction *buffer, int buffer_size)
while (op_skip[last->opcode]) {
last--;
}
- if (op_without_push[last->opcode]) {
+ if (op_without_push[last->opcode] && op_without_pop[opcode]) {
last->opcode = op_without_push[last->opcode];
opcode = buffer[pc].opcode = op_without_pop[opcode];
if (op_without_pop[last->opcode]) {