70 files changed, 1100 insertions, 495 deletions
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 775d9c63260..63a28490043 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -281,9 +281,13 @@ Doc/howto/clinic.rst @erlend-aasland # Subinterpreters **/*interpreteridobject.* @ericsnowcurrently **/*crossinterp* @ericsnowcurrently -Lib/test/support/interpreters/ @ericsnowcurrently Modules/_interp*module.c @ericsnowcurrently +Lib/test/test__interp*.py @ericsnowcurrently +Lib/concurrent/interpreters/ @ericsnowcurrently +Lib/test/support/channels.py @ericsnowcurrently +Doc/library/concurrent.interpreters.rst @ericsnowcurrently Lib/test/test_interpreters/ @ericsnowcurrently +Lib/concurrent/futures/interpreter.py @ericsnowcurrently # Android **/*Android* @mhsmith @freakboy3742 diff --git a/Doc/c-api/init.rst b/Doc/c-api/init.rst index 9c866438b48..3106bf9808f 100644 --- a/Doc/c-api/init.rst +++ b/Doc/c-api/init.rst @@ -492,17 +492,8 @@ Initializing and finalizing the interpreter strings other than those passed in (however, the contents of the strings pointed to by the argument list are not modified). - The return value will be ``0`` if the interpreter exits normally (i.e., - without an exception), ``1`` if the interpreter exits due to an exception, - or ``2`` if the argument list does not represent a valid Python command - line. - - Note that if an otherwise unhandled :exc:`SystemExit` is raised, this - function will not return ``1``, but exit the process, as long as - ``Py_InspectFlag`` is not set. If ``Py_InspectFlag`` is set, execution will - drop into the interactive Python prompt, at which point a second otherwise - unhandled :exc:`SystemExit` will still exit the process, while any other - means of exiting will set the return value as described above. + The return value is ``2`` if the argument list does not represent a valid + Python command line, and otherwise the same as :c:func:`Py_RunMain`. In terms of the CPython runtime configuration APIs documented in the :ref:`runtime configuration <init-config>` section (and without accounting @@ -539,23 +530,18 @@ Initializing and finalizing the interpreter If :c:member:`PyConfig.inspect` is not set (the default), the return value will be ``0`` if the interpreter exits normally (that is, without raising - an exception), or ``1`` if the interpreter exits due to an exception. If an - otherwise unhandled :exc:`SystemExit` is raised, the function will immediately - exit the process instead of returning ``1``. + an exception), the exit status of an unhandled :exc:`SystemExit`, or ``1`` + for any other unhandled exception. If :c:member:`PyConfig.inspect` is set (such as when the :option:`-i` option is used), rather than returning when the interpreter exits, execution will instead resume in an interactive Python prompt (REPL) using the ``__main__`` module's global namespace. If the interpreter exited with an exception, it is immediately raised in the REPL session. The function return value is - then determined by the way the *REPL session* terminates: returning ``0`` - if the session terminates without raising an unhandled exception, exiting - immediately for an unhandled :exc:`SystemExit`, and returning ``1`` for - any other unhandled exception. - - This function always finalizes the Python interpreter regardless of whether - it returns a value or immediately exits the process due to an unhandled - :exc:`SystemExit` exception. + then determined by the way the *REPL session* terminates: ``0``, ``1``, or + the status of a :exc:`SystemExit`, as specified above. 
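The same exit-status rules can be observed from the command line, since the ``python`` executable is built on these entry points. A minimal sketch using :mod:`subprocess` (it only illustrates the documented statuses and does not exercise the embedding API directly)::

    import subprocess
    import sys

    # Normal exit -> 0
    assert subprocess.run([sys.executable, '-c', 'pass']).returncode == 0

    # An unhandled SystemExit carries its status -> 3
    assert subprocess.run(
        [sys.executable, '-c', 'raise SystemExit(3)']).returncode == 3

    # Any other unhandled exception -> 1
    assert subprocess.run(
        [sys.executable, '-c', 'raise RuntimeError("boom")'],
        stderr=subprocess.DEVNULL).returncode == 1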
+ + This function always finalizes the Python interpreter before it returns. See :ref:`Python Configuration <init-python-config>` for an example of a customized Python that always runs in isolated mode using diff --git a/Doc/library/concurrency.rst b/Doc/library/concurrency.rst index 5be1a1106b0..18f9443cbfe 100644 --- a/Doc/library/concurrency.rst +++ b/Doc/library/concurrency.rst @@ -18,6 +18,7 @@ multitasking). Here's an overview: multiprocessing.shared_memory.rst concurrent.rst concurrent.futures.rst + concurrent.interpreters.rst subprocess.rst sched.rst queue.rst diff --git a/Doc/library/concurrent.interpreters.rst b/Doc/library/concurrent.interpreters.rst new file mode 100644 index 00000000000..8860418e87a --- /dev/null +++ b/Doc/library/concurrent.interpreters.rst @@ -0,0 +1,198 @@ +:mod:`!concurrent.interpreters` --- Multiple interpreters in the same process +============================================================================= + +.. module:: concurrent.interpreters + :synopsis: Multiple interpreters in the same process + +.. moduleauthor:: Eric Snow <ericsnowcurrently@gmail.com> +.. sectionauthor:: Eric Snow <ericsnowcurrently@gmail.com> + +.. versionadded:: 3.14 + +**Source code:** :source:`Lib/concurrent/interpreters.py` + +-------------- + + +Introduction +------------ + +The :mod:`!concurrent.interpreters` module constructs higher-level +interfaces on top of the lower level :mod:`!_interpreters` module. + +.. XXX Add references to the upcoming HOWTO docs in the seealso block. + +.. seealso:: + + :ref:`isolating-extensions-howto` + how to update an extension module to support multiple interpreters + + :pep:`554` + + :pep:`734` + + :pep:`684` + +.. XXX Why do we disallow multiple interpreters on WASM? + +.. include:: ../includes/wasm-notavail.rst + + +Key details +----------- + +Before we dive into examples, there are a small number of details +to keep in mind about using multiple interpreters: + +* isolated, by default +* no implicit threads +* not all PyPI packages support use in multiple interpreters yet + +.. XXX Are there other relevant details to list? + +In the context of multiple interpreters, "isolated" means that +different interpreters do not share any state. In practice, there is some +process-global data they all share, but that is managed by the runtime. + + +Reference +--------- + +This module defines the following functions: + +.. function:: list_all() + + Return a :class:`list` of :class:`Interpreter` objects, + one for each existing interpreter. + +.. function:: get_current() + + Return an :class:`Interpreter` object for the currently running + interpreter. + +.. function:: get_main() + + Return an :class:`Interpreter` object for the main interpreter. + +.. function:: create() + + Initialize a new (idle) Python interpreter + and return a :class:`Interpreter` object for it. + + +Interpreter objects +^^^^^^^^^^^^^^^^^^^ + +.. class:: Interpreter(id) + + A single interpreter in the current process. + + Generally, :class:`Interpreter` shouldn't be called directly. + Instead, use :func:`create` or one of the other module functions. + + .. attribute:: id + + (read-only) + + The interpreter's ID. + + .. attribute:: whence + + (read-only) + + A string describing where the interpreter came from. + + .. method:: is_running() + + Return ``True`` if the interpreter is currently executing code + in its :mod:`!__main__` module and ``False`` otherwise. + + .. method:: close() + + Finalize and destroy the interpreter. + + .. 
method:: prepare_main(ns=None, **kwargs) + + Bind "shareable" objects in the interpreter's + :mod:`!__main__` module. + + .. method:: exec(code, /, dedent=True) + + Run the given source code in the interpreter (in the current thread). + + .. method:: call(callable, /, *args, **kwargs) + + Return the result of calling running the given function in the + interpreter (in the current thread). + + .. method:: call_in_thread(callable, /, *args, **kwargs) + + Run the given function in the interpreter (in a new thread). + +Exceptions +^^^^^^^^^^ + +.. exception:: InterpreterError + + This exception, a subclass of :exc:`Exception`, is raised when + an interpreter-related error happens. + +.. exception:: InterpreterNotFoundError + + This exception, a subclass of :exc:`InterpreterError`, is raised when + the targeted interpreter no longer exists. + +.. exception:: ExecutionFailed + + This exception, a subclass of :exc:`InterpreterError`, is raised when + the running code raised an uncaught exception. + + .. attribute:: excinfo + + A basic snapshot of the exception raised in the other interpreter. + +.. XXX Document the excinfoattrs? + +.. exception:: NotShareableError + + This exception, a subclass of :exc:`TypeError`, is raised when + an object cannot be sent to another interpreter. + + +.. XXX Add functions for communicating between interpreters. + + +Basic usage +----------- + +Creating an interpreter and running code in it:: + + from concurrent import interpreters + + interp = interpreters.create() + + # Run in the current OS thread. + + interp.exec('print("spam!")') + + interp.exec("""if True: + print('spam!') + """) + + from textwrap import dedent + interp.exec(dedent(""" + print('spam!') + """)) + + def run(): + print('spam!') + + interp.call(run) + + # Run in new OS thread. + + t = interp.call_in_thread(run) + t.join() + + +.. XXX Explain about object "sharing". diff --git a/Doc/library/concurrent.rst b/Doc/library/concurrent.rst index 8caea78bbb5..748c72c733b 100644 --- a/Doc/library/concurrent.rst +++ b/Doc/library/concurrent.rst @@ -1,6 +1,7 @@ The :mod:`!concurrent` package ============================== -Currently, there is only one module in this package: +This package contains the following modules: * :mod:`concurrent.futures` -- Launching parallel tasks +* :mod:`concurrent.interpreters` -- Multiple interpreters in the same process diff --git a/Doc/library/dataclasses.rst b/Doc/library/dataclasses.rst index f18c7cc9c02..299c8aa399c 100644 --- a/Doc/library/dataclasses.rst +++ b/Doc/library/dataclasses.rst @@ -121,8 +121,11 @@ Module contents :meth:`!__le__`, :meth:`!__gt__`, or :meth:`!__ge__`, then :exc:`TypeError` is raised. - - *unsafe_hash*: If ``False`` (the default), a :meth:`~object.__hash__` method - is generated according to how *eq* and *frozen* are set. + - *unsafe_hash*: If true, force ``dataclasses`` to create a + :meth:`~object.__hash__` method, even though it may not be safe to do so. + Otherwise, generate a :meth:`~object.__hash__` method according to how + *eq* and *frozen* are set. + The default value is ``False``. :meth:`!__hash__` is used by built-in :meth:`hash`, and when objects are added to hashed collections such as dictionaries and sets. Having a diff --git a/Doc/library/logging.config.rst b/Doc/library/logging.config.rst index 0e9dc33ae21..f8c71005a53 100644 --- a/Doc/library/logging.config.rst +++ b/Doc/library/logging.config.rst @@ -548,7 +548,7 @@ mnemonic that the corresponding value is a callable. 
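As an illustration of the ``'()'`` factory convention (and the ``'.'`` attribute key described just below), here is a minimal :func:`logging.config.dictConfig` sketch; the ``TaggedFilter`` class and the ``region`` attribute are made up for the example::

    import logging
    import logging.config

    class TaggedFilter(logging.Filter):
        """User-defined filter; ``region`` is attached via the special '.' key."""

    LOGGING = {
        'version': 1,
        'filters': {
            'tagged': {
                '()': TaggedFilter,          # value is a callable: used as the factory
                'name': 'app',               # remaining keys become keyword arguments
                '.': {'region': 'eu-west'},  # attributes set on the constructed object
            },
        },
        'handlers': {
            'console': {'class': 'logging.StreamHandler', 'filters': ['tagged']},
        },
        'root': {'handlers': ['console'], 'level': 'INFO'},
    }

    logging.config.dictConfig(LOGGING)
    logging.getLogger('app').info('configured via dictConfig')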
The ``filters`` member of ``handlers`` and ``loggers`` can take filter instances in addition to ids. -You can also specify a special key ``'.'`` whose value is a dictionary is a +You can also specify a special key ``'.'`` whose value is a mapping of attribute names to values. If found, the specified attributes will be set on the user-defined object before it is returned. Thus, with the following configuration:: diff --git a/Doc/library/python.rst b/Doc/library/python.rst index c2c231af7c3..c5c762e11b9 100644 --- a/Doc/library/python.rst +++ b/Doc/library/python.rst @@ -27,3 +27,8 @@ overview: inspect.rst annotationlib.rst site.rst + +.. seealso:: + + * See the :mod:`concurrent.interpreters` module, which similarly + exposes core runtime functionality. diff --git a/Doc/whatsnew/3.14.rst b/Doc/whatsnew/3.14.rst index 45e68aea5fb..ca330a32b33 100644 --- a/Doc/whatsnew/3.14.rst +++ b/Doc/whatsnew/3.14.rst @@ -83,6 +83,7 @@ and improvements in user-friendliness and correctness. .. PEP-sized items next. * :ref:`PEP 649 and 749: deferred evaluation of annotations <whatsnew314-pep649>` +* :ref:`PEP 734: Multiple Interpreters in the Stdlib <whatsnew314-pep734>` * :ref:`PEP 741: Python Configuration C API <whatsnew314-pep741>` * :ref:`PEP 750: Template strings <whatsnew314-pep750>` * :ref:`PEP 758: Allow except and except* expressions without parentheses <whatsnew314-pep758>` @@ -123,6 +124,101 @@ of Python. See :ref:`below <whatsnew314-refcount>` for details. New features ============ +.. _whatsnew314-pep734: + +PEP 734: Multiple Interpreters in the Stdlib +-------------------------------------------- + +The CPython runtime supports running multiple copies of Python in the +same process simultaneously and has done so for over 20 years. +Each of these separate copies is called an "interpreter". +However, the feature had been available only through the C-API. + +That limitation is removed in the 3.14 release, +with the new :mod:`concurrent.interpreters` module. + +There are at least two notable reasons why using multiple interpreters +is worth considering: + +* they support a new (to Python), human-friendly concurrency model +* true multi-core parallelism + +For some use cases, concurrency in software enables efficiency and +can simplify software, at a high level. At the same time, implementing +and maintaining all but the simplest concurrency is often a struggle +for the human brain. That especially applies to plain threads +(for example, :mod:`threading`), where all memory is shared between all threads. + +With multiple isolated interpreters, you can take advantage of a class +of concurrency models, like CSP or the actor model, that have found +success in other programming languages, like Smalltalk, Erlang, +Haskell, and Go. Think of multiple interpreters like threads +but with opt-in sharing. + +Regarding multi-core parallelism: as of the 3.12 release, interpreters +are now sufficiently isolated from one another to be used in parallel. +(See :pep:`684`.) This unlocks a variety of CPU-intensive use cases +for Python that were limited by the :term:`GIL`. + +Using multiple interpreters is similar in many ways to +:mod:`multiprocessing`, in that they both provide isolated logical +"processes" that can run in parallel, with no sharing by default. +However, when using multiple interpreters, an application will use +fewer system resources and will operate more efficiently (since it +stays within the same process). 
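For example, the executor added alongside this work (InterpreterPoolExecutor, mentioned below) gives the familiar pool interface on top of interpreters; a rough sketch, assuming the submitted callable can be shared with the worker interpreters (a plain stdlib function is used here to keep that simple)::

    import math
    from concurrent.futures import InterpreterPoolExecutor

    # Each worker is a subinterpreter inside this process, so CPU-bound tasks
    # can run on multiple cores without spawning child processes.
    with InterpreterPoolExecutor(max_workers=4) as pool:
        results = list(pool.map(math.factorial, range(2_000, 2_010)))
    print(len(results))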
Think of multiple interpreters as +having the isolation of processes with the efficiency of threads. + +.. XXX Add an example or two. +.. XXX Link to the not-yet-added HOWTO doc. + +While the feature has been around for decades, multiple interpreters +have not been used widely, due to low awareness and the lack of a stdlib +module. Consequently, they currently have several notable limitations, +which will improve significantly now that the feature is finally +going mainstream. + +Current limitations: + +* starting each interpreter has not been optimized yet +* each interpreter uses more memory than necessary + (we will be working next on extensive internal sharing between + interpreters) +* there aren't many options *yet* for truly sharing objects or other + data between interpreters (other than :type:`memoryview`) +* many extension modules on PyPI are not compatible with multiple + interpreters yet (stdlib extension modules *are* compatible) +* the approach to writing applications that use multiple isolated + interpreters is mostly unfamiliar to Python users, for now + +The impact of these limitations will depend on future CPython +improvements, how interpreters are used, and what the community solves +through PyPI packages. Depending on the use case, the limitations may +not have much impact, so try it out! + +Furthermore, future CPython releases will reduce or eliminate overhead +and provide utilities that are less appropriate on PyPI. In the +meantime, most of the limitations can also be addressed through +extension modules, meaning PyPI packages can fill any gap for 3.14, and +even back to 3.12 where interpreters were finally properly isolated and +stopped sharing the :term:`GIL`. Likewise, we expect to slowly see +libraries on PyPI for high-level abstractions on top of interpreters. + +Regarding extension modules, work is in progress to update some PyPI +projects, as well as tools like Cython, pybind11, nanobind, and PyO3. +The steps for isolating an extension module are found at +:ref:`isolating-extensions-howto`. Isolating a module has a lot of +overlap with what is required to support +:ref:`free-threading <whatsnew314-free-threaded-cpython>`, +so the ongoing work in the community in that area will help accelerate +support for multiple interpreters. + +Also added in 3.14: :ref:`concurrent.futures.InterpreterPoolExecutor +<whatsnew314-concurrent-futures-interp-pool>`. + +.. seealso:: + :pep:`734`. + + .. _whatsnew314-pep750: PEP 750: Template strings @@ -1109,6 +1205,8 @@ calendar concurrent.futures ------------------ +.. _whatsnew314-concurrent-futures-interp-pool: + * Add :class:`~concurrent.futures.InterpreterPoolExecutor`, which exposes "subinterpreters" (multiple Python interpreters in the same process) to Python code. This is separate from the proposed API diff --git a/Doc/whatsnew/3.15.rst b/Doc/whatsnew/3.15.rst index 88e7462f688..9f327cf904d 100644 --- a/Doc/whatsnew/3.15.rst +++ b/Doc/whatsnew/3.15.rst @@ -134,6 +134,13 @@ shelve (Contributed by Andrea Oliveri in :gh:`134004`.) +sqlite3 +------- + +* Support SQL keyword completion in the :mod:`sqlite3` command-line interface. + (Contributed by Long Tan in :gh:`133393`.) + + ssl --- diff --git a/Include/Python.h b/Include/Python.h index f34d581f0b4..64be8014589 100644 --- a/Include/Python.h +++ b/Include/Python.h @@ -59,14 +59,6 @@ # include <intrin.h> // __readgsqword() #endif -// Suppress known warnings in Python header files. -#if defined(_MSC_VER) -// Warning that alignas behaviour has changed. 
Doesn't affect us, because we -// never relied on the old behaviour. -#pragma warning(push) -#pragma warning(disable: 5274) -#endif - // Include Python header files #include "pyport.h" #include "pymacro.h" @@ -146,9 +138,4 @@ #include "cpython/pyfpe.h" #include "cpython/tracemalloc.h" -// Restore warning filter -#ifdef _MSC_VER -#pragma warning(pop) -#endif - #endif /* !Py_PYTHON_H */ diff --git a/Include/cpython/unicodeobject.h b/Include/cpython/unicodeobject.h index 7c1aac9696d..86c502730f4 100644 --- a/Include/cpython/unicodeobject.h +++ b/Include/cpython/unicodeobject.h @@ -47,6 +47,63 @@ static inline Py_UCS4 Py_UNICODE_LOW_SURROGATE(Py_UCS4 ch) { /* --- Unicode Type ------------------------------------------------------- */ +struct _PyUnicodeObject_state { + /* If interned is non-zero, the two references from the + dictionary to this object are *not* counted in ob_refcnt. + The possible values here are: + 0: Not Interned + 1: Interned + 2: Interned and Immortal + 3: Interned, Immortal, and Static + This categorization allows the runtime to determine the right + cleanup mechanism at runtime shutdown. */ +#ifdef Py_GIL_DISABLED + // Needs to be accessed atomically, so can't be a bit field. + unsigned char interned; +#else + unsigned int interned:2; +#endif + /* Character size: + + - PyUnicode_1BYTE_KIND (1): + + * character type = Py_UCS1 (8 bits, unsigned) + * all characters are in the range U+0000-U+00FF (latin1) + * if ascii is set, all characters are in the range U+0000-U+007F + (ASCII), otherwise at least one character is in the range + U+0080-U+00FF + + - PyUnicode_2BYTE_KIND (2): + + * character type = Py_UCS2 (16 bits, unsigned) + * all characters are in the range U+0000-U+FFFF (BMP) + * at least one character is in the range U+0100-U+FFFF + + - PyUnicode_4BYTE_KIND (4): + + * character type = Py_UCS4 (32 bits, unsigned) + * all characters are in the range U+0000-U+10FFFF + * at least one character is in the range U+10000-U+10FFFF + */ + unsigned int kind:3; + /* Compact is with respect to the allocation scheme. Compact unicode + objects only require one memory block while non-compact objects use + one block for the PyUnicodeObject struct and another for its data + buffer. */ + unsigned int compact:1; + /* The string only contains characters in the range U+0000-U+007F (ASCII) + and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is + set, use the PyASCIIObject structure. */ + unsigned int ascii:1; + /* The object is statically allocated. */ + unsigned int statically_allocated:1; +#ifndef Py_GIL_DISABLED + /* Historical: padding to ensure that PyUnicode_DATA() is always aligned to + 4 bytes (see issue gh-63736 on m68k) */ + unsigned int :24; +#endif +}; + /* ASCII-only strings created through PyUnicode_New use the PyASCIIObject structure. state.ascii and state.compact are set, and the data immediately follow the structure. utf8_length can be found @@ -99,67 +156,8 @@ typedef struct { PyObject_HEAD Py_ssize_t length; /* Number of code points in the string */ Py_hash_t hash; /* Hash value; -1 if not set */ -#ifdef Py_GIL_DISABLED - /* Ensure 4 byte alignment for PyUnicode_DATA(), see gh-63736 on m68k. - In the non-free-threaded build, we'll use explicit padding instead */ - _Py_ALIGN_AS(4) -#endif - struct { - /* If interned is non-zero, the two references from the - dictionary to this object are *not* counted in ob_refcnt. 
- The possible values here are: - 0: Not Interned - 1: Interned - 2: Interned and Immortal - 3: Interned, Immortal, and Static - This categorization allows the runtime to determine the right - cleanup mechanism at runtime shutdown. */ -#ifdef Py_GIL_DISABLED - // Needs to be accessed atomically, so can't be a bit field. - unsigned char interned; -#else - unsigned int interned:2; -#endif - /* Character size: - - - PyUnicode_1BYTE_KIND (1): - - * character type = Py_UCS1 (8 bits, unsigned) - * all characters are in the range U+0000-U+00FF (latin1) - * if ascii is set, all characters are in the range U+0000-U+007F - (ASCII), otherwise at least one character is in the range - U+0080-U+00FF - - - PyUnicode_2BYTE_KIND (2): - - * character type = Py_UCS2 (16 bits, unsigned) - * all characters are in the range U+0000-U+FFFF (BMP) - * at least one character is in the range U+0100-U+FFFF - - - PyUnicode_4BYTE_KIND (4): - - * character type = Py_UCS4 (32 bits, unsigned) - * all characters are in the range U+0000-U+10FFFF - * at least one character is in the range U+10000-U+10FFFF - */ - unsigned int kind:3; - /* Compact is with respect to the allocation scheme. Compact unicode - objects only require one memory block while non-compact objects use - one block for the PyUnicodeObject struct and another for its data - buffer. */ - unsigned int compact:1; - /* The string only contains characters in the range U+0000-U+007F (ASCII) - and the kind is PyUnicode_1BYTE_KIND. If ascii is set and compact is - set, use the PyASCIIObject structure. */ - unsigned int ascii:1; - /* The object is statically allocated. */ - unsigned int statically_allocated:1; -#ifndef Py_GIL_DISABLED - /* Padding to ensure that PyUnicode_DATA() is always aligned to - 4 bytes (see issue gh-63736 on m68k) */ - unsigned int :24; -#endif - } state; + /* Ensure 4 byte alignment for PyUnicode_DATA(), see gh-63736 on m68k. */ + _Py_ALIGNED_DEF(4, struct _PyUnicodeObject_state) state; } PyASCIIObject; /* Non-ASCII strings allocated through PyUnicode_New use the diff --git a/Include/internal/pycore_interp_structs.h b/Include/internal/pycore_interp_structs.h index f25f5847b3b..f1f427d99de 100644 --- a/Include/internal/pycore_interp_structs.h +++ b/Include/internal/pycore_interp_structs.h @@ -159,10 +159,11 @@ struct atexit_state { typedef struct { // Tagged pointer to next object in the list. // 0 means the object is not tracked - uintptr_t _gc_next; + _Py_ALIGNED_DEF(_PyObject_MIN_ALIGNMENT, uintptr_t) _gc_next; // Tagged pointer to previous object in the list. // Lowest two bits are used for flags documented later. + // Those bits are made available by the struct's minimum alignment. uintptr_t _gc_prev; } PyGC_Head; diff --git a/Include/internal/pycore_stackref.h b/Include/internal/pycore_stackref.h index 87914767252..10e7199269e 100644 --- a/Include/internal/pycore_stackref.h +++ b/Include/internal/pycore_stackref.h @@ -264,6 +264,32 @@ PyStackRef_IsNullOrInt(_PyStackRef ref); static const _PyStackRef PyStackRef_ERROR = { .bits = Py_TAG_INVALID }; +/* Wrap a pointer in a stack ref. + * The resulting stack reference is not safe and should only be used + * in the interpreter to pass values from one uop to another. + * The GC should never see one of these stack refs. 
*/ +static inline _PyStackRef +PyStackRef_Wrap(void *ptr) +{ + assert(ptr != NULL); +#ifdef Py_DEBUG + return (_PyStackRef){ .bits = ((uintptr_t)ptr) | Py_TAG_INVALID }; +#else + return (_PyStackRef){ .bits = (uintptr_t)ptr }; +#endif +} + +static inline void * +PyStackRef_Unwrap(_PyStackRef ref) +{ +#ifdef Py_DEBUG + assert ((ref.bits & Py_TAG_BITS) == Py_TAG_INVALID); + return (void *)(ref.bits & ~Py_TAG_BITS); +#else + return (void *)(ref.bits); +#endif +} + static inline bool PyStackRef_IsError(_PyStackRef ref) { diff --git a/Include/object.h b/Include/object.h index 42aed614d4a..c75e9db0cbd 100644 --- a/Include/object.h +++ b/Include/object.h @@ -101,6 +101,12 @@ whose size is determined when the object is allocated. #define PyObject_VAR_HEAD PyVarObject ob_base; #define Py_INVALID_SIZE (Py_ssize_t)-1 +/* PyObjects are given a minimum alignment so that the least significant bits + * of an object pointer become available for other purposes. + * This must be an integer literal with the value (1 << _PyGC_PREV_SHIFT), number of bytes. + */ +#define _PyObject_MIN_ALIGNMENT 4 + /* Nothing is actually declared to be a PyObject, but every pointer to * a Python object can be cast to a PyObject*. This is inheritance built * by hand. Similarly every pointer to a variable-size Python object can, @@ -136,6 +142,7 @@ struct _object { #else Py_ssize_t ob_refcnt; #endif + _Py_ALIGNED_DEF(_PyObject_MIN_ALIGNMENT, char) _aligner; }; #ifdef _MSC_VER __pragma(warning(pop)) @@ -153,7 +160,7 @@ struct _object { // ob_tid stores the thread id (or zero). It is also used by the GC and the // trashcan mechanism as a linked list pointer and by the GC to store the // computed "gc_refs" refcount. - uintptr_t ob_tid; + _Py_ALIGNED_DEF(_PyObject_MIN_ALIGNMENT, uintptr_t) ob_tid; uint16_t ob_flags; PyMutex ob_mutex; // per-object lock uint8_t ob_gc_bits; // gc-related state diff --git a/Include/pymacro.h b/Include/pymacro.h index d410645034d..bfe660e8303 100644 --- a/Include/pymacro.h +++ b/Include/pymacro.h @@ -24,44 +24,66 @@ #endif -// _Py_ALIGN_AS: this compiler's spelling of `alignas` keyword, -// We currently use alignas for free-threaded builds only; additional compat -// checking would be great before we add it to the default build. -// Standards/compiler support: +// _Py_ALIGNED_DEF(N, T): Define a variable/member with increased alignment +// +// `N`: the desired minimum alignment, an integer literal, number of bytes +// `T`: the type of the defined variable +// (or a type with at least the defined variable's alignment) +// +// May not be used on a struct definition. +// +// Standards/compiler support for `alignas` alternatives: // - `alignas` is a keyword in C23 and C++11. // - `_Alignas` is a keyword in C11 // - GCC & clang has __attribute__((aligned)) // (use that for older standards in pedantic mode) // - MSVC has __declspec(align) // - `_Alignas` is common C compiler extension -// Older compilers may name it differently; to allow compilation on such -// unsupported platforms, we don't redefine _Py_ALIGN_AS if it's already +// Older compilers may name `alignas` differently; to allow compilation on such +// unsupported platforms, we don't redefine _Py_ALIGNED_DEF if it's already // defined. Note that defining it wrong (including defining it to nothing) will // cause ABI incompatibilities. 
-#ifdef Py_GIL_DISABLED -# ifndef _Py_ALIGN_AS -# ifdef __cplusplus -# if __cplusplus >= 201103L -# define _Py_ALIGN_AS(V) alignas(V) -# elif defined(__GNUC__) || defined(__clang__) -# define _Py_ALIGN_AS(V) __attribute__((aligned(V))) -# elif defined(_MSC_VER) -# define _Py_ALIGN_AS(V) __declspec(align(V)) -# else -# define _Py_ALIGN_AS(V) alignas(V) -# endif -# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L -# define _Py_ALIGN_AS(V) alignas(V) -# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L -# define _Py_ALIGN_AS(V) _Alignas(V) -# elif (defined(__GNUC__) || defined(__clang__)) -# define _Py_ALIGN_AS(V) __attribute__((aligned(V))) -# elif defined(_MSC_VER) -# define _Py_ALIGN_AS(V) __declspec(align(V)) -# else -# define _Py_ALIGN_AS(V) _Alignas(V) -# endif -# endif +// +// Behavior of `alignas` alternatives: +// - `alignas` & `_Alignas`: +// - Can be used multiple times; the greatest alignment applies. +// - It is an *error* if the combined effect of all `alignas` modifiers would +// decrease the alignment. +// - Takes types or numbers. +// - May not be used on a struct definition, unless also defining a variable. +// - `__declspec(align)`: +// - Has no effect if it would decrease alignment. +// - Only takes an integer literal. +// - May be used on struct or variable definitions. +// However, when defining both the struct and the variable at once, +// `declspec(aligned)` causes compiler warning 5274 and possible ABI +// incompatibility. +// - ` __attribute__((aligned))`: +// - Has no effect if it would decrease alignment. +// - Takes types or numbers +// - May be used on struct or variable definitions. +#ifndef _Py_ALIGNED_DEF +# ifdef __cplusplus +# if __cplusplus >= 201103L +# define _Py_ALIGNED_DEF(N, T) alignas(N) alignas(T) T +# elif defined(__GNUC__) || defined(__clang__) +# define _Py_ALIGNED_DEF(N, T) __attribute__((aligned(N))) T +# elif defined(_MSC_VER) +# define _Py_ALIGNED_DEF(N, T) __declspec(align(N)) T +# else +# define _Py_ALIGNED_DEF(N, T) alignas(N) alignas(T) T +# endif +# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L +# define _Py_ALIGNED_DEF(N, T) alignas(N) alignas(T) T +# elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L +# define _Py_ALIGNED_DEF(N, T) _Alignas(N) _Alignas(T) T +# elif (defined(__GNUC__) || defined(__clang__)) +# define _Py_ALIGNED_DEF(N, T) __attribute__((aligned(N))) T +# elif defined(_MSC_VER) +# define _Py_ALIGNED_DEF(N, T) __declspec(align(N)) T +# else +# define _Py_ALIGNED_DEF(N, T) _Alignas(N) _Alignas(T) T +# endif #endif /* Minimum value between x and y */ diff --git a/Lib/concurrent/futures/interpreter.py b/Lib/concurrent/futures/interpreter.py index a2c4fbfd3fb..f12b4ac33cd 100644 --- a/Lib/concurrent/futures/interpreter.py +++ b/Lib/concurrent/futures/interpreter.py @@ -167,7 +167,7 @@ class WorkerContext(_thread.WorkerContext): except _interpqueues.QueueError: continue except ModuleNotFoundError: - # interpreters.queues doesn't exist, which means + # interpreters._queues doesn't exist, which means # QueueEmpty doesn't. Act as though it does. 
continue else: diff --git a/Lib/test/support/interpreters/__init__.py b/Lib/concurrent/interpreters/__init__.py index 6d1b0690805..0fd661249a2 100644 --- a/Lib/test/support/interpreters/__init__.py +++ b/Lib/concurrent/interpreters/__init__.py @@ -9,6 +9,10 @@ from _interpreters import ( InterpreterError, InterpreterNotFoundError, NotShareableError, is_shareable, ) +from ._queues import ( + create as create_queue, + Queue, QueueEmpty, QueueFull, +) __all__ = [ @@ -20,21 +24,6 @@ __all__ = [ ] -_queuemod = None - -def __getattr__(name): - if name in ('Queue', 'QueueEmpty', 'QueueFull', 'create_queue'): - global create_queue, Queue, QueueEmpty, QueueFull - ns = globals() - from .queues import ( - create as create_queue, - Queue, QueueEmpty, QueueFull, - ) - return ns[name] - else: - raise AttributeError(name) - - _EXEC_FAILURE_STR = """ {superstr} diff --git a/Lib/test/support/interpreters/_crossinterp.py b/Lib/concurrent/interpreters/_crossinterp.py index 544e197ba4c..f47eb693ac8 100644 --- a/Lib/test/support/interpreters/_crossinterp.py +++ b/Lib/concurrent/interpreters/_crossinterp.py @@ -61,7 +61,7 @@ class UnboundItem: def __repr__(self): return f'{self._MODULE}.{self._NAME}' -# return f'interpreters.queues.UNBOUND' +# return f'interpreters._queues.UNBOUND' UNBOUND = object.__new__(UnboundItem) diff --git a/Lib/test/support/interpreters/queues.py b/Lib/concurrent/interpreters/_queues.py index 99987f2f692..99987f2f692 100644 --- a/Lib/test/support/interpreters/queues.py +++ b/Lib/concurrent/interpreters/_queues.py diff --git a/Lib/locale.py b/Lib/locale.py index 2feb10e59c9..dfedc6386cb 100644 --- a/Lib/locale.py +++ b/Lib/locale.py @@ -883,6 +883,10 @@ del k, v # updated 'sr@latn' -> 'sr_CS.UTF-8@latin' to 'sr_RS.UTF-8@latin' # removed 'univ' # removed 'universal' +# +# SS 2025-06-10: +# Remove 'c.utf8' -> 'en_US.UTF-8' because 'en_US.UTF-8' does not exist +# on all platforms. locale_alias = { 'a3': 'az_AZ.KOI8-C', @@ -962,7 +966,6 @@ locale_alias = { 'c.ascii': 'C', 'c.en': 'C', 'c.iso88591': 'en_US.ISO8859-1', - 'c.utf8': 'en_US.UTF-8', 'c_c': 'C', 'c_c.c': 'C', 'ca': 'ca_ES.ISO8859-1', diff --git a/Lib/sqlite3/__main__.py b/Lib/sqlite3/__main__.py index c2fa23c46cf..9e74b49ee82 100644 --- a/Lib/sqlite3/__main__.py +++ b/Lib/sqlite3/__main__.py @@ -12,6 +12,8 @@ from code import InteractiveConsole from textwrap import dedent from _colorize import get_theme, theme_no_color +from ._completer import completer + def execute(c, sql, suppress_errors=True, theme=theme_no_color): """Helper that wraps execution of SQL code. @@ -136,12 +138,9 @@ def main(*args): execute(con, args.sql, suppress_errors=False, theme=theme) else: # No SQL provided; start the REPL. 
- console = SqliteInteractiveConsole(con, use_color=True) - try: - import readline # noqa: F401 - except ImportError: - pass - console.interact(banner, exitmsg="") + with completer(): + console = SqliteInteractiveConsole(con, use_color=True) + console.interact(banner, exitmsg="") finally: con.close() diff --git a/Lib/sqlite3/_completer.py b/Lib/sqlite3/_completer.py new file mode 100644 index 00000000000..f21ef69cad6 --- /dev/null +++ b/Lib/sqlite3/_completer.py @@ -0,0 +1,42 @@ +from contextlib import contextmanager + +try: + from _sqlite3 import SQLITE_KEYWORDS +except ImportError: + SQLITE_KEYWORDS = () + +_completion_matches = [] + + +def _complete(text, state): + global _completion_matches + + if state == 0: + text_upper = text.upper() + _completion_matches = [c for c in SQLITE_KEYWORDS if c.startswith(text_upper)] + try: + return _completion_matches[state] + " " + except IndexError: + return None + + +@contextmanager +def completer(): + try: + import readline + except ImportError: + yield + return + + old_completer = readline.get_completer() + try: + readline.set_completer(_complete) + if readline.backend == "editline": + # libedit uses "^I" instead of "tab" + command_string = "bind ^I rl_complete" + else: + command_string = "tab: complete" + readline.parse_and_bind(command_string) + yield + finally: + readline.set_completer(old_completer) diff --git a/Lib/test/pickletester.py b/Lib/test/pickletester.py index 9d6ae3e4d00..9a3a26a8400 100644 --- a/Lib/test/pickletester.py +++ b/Lib/test/pickletester.py @@ -1100,6 +1100,11 @@ class AbstractUnpickleTests: self.check_unpickling_error((pickle.UnpicklingError, OverflowError), dumped) + def test_large_binstring(self): + errmsg = 'BINSTRING pickle has negative byte count' + with self.assertRaisesRegex(pickle.UnpicklingError, errmsg): + self.loads(b'T\0\0\0\x80') + def test_get(self): pickled = b'((lp100000\ng100000\nt.' unpickled = self.loads(pickled) diff --git a/Lib/test/support/interpreters/channels.py b/Lib/test/support/channels.py index 1724759b75a..b2de24d9d3e 100644 --- a/Lib/test/support/interpreters/channels.py +++ b/Lib/test/support/channels.py @@ -2,14 +2,14 @@ import time import _interpchannels as _channels -from . 
import _crossinterp +from concurrent.interpreters import _crossinterp # aliases: from _interpchannels import ( ChannelError, ChannelNotFoundError, ChannelClosedError, # noqa: F401 ChannelEmptyError, ChannelNotEmptyError, # noqa: F401 ) -from ._crossinterp import ( +from concurrent.interpreters._crossinterp import ( UNBOUND_ERROR, UNBOUND_REMOVE, ) diff --git a/Lib/test/test__interpchannels.py b/Lib/test/test__interpchannels.py index 88eee03a3de..858d31a73cf 100644 --- a/Lib/test/test__interpchannels.py +++ b/Lib/test/test__interpchannels.py @@ -9,7 +9,7 @@ import unittest from test.support import import_helper, skip_if_sanitizer _channels = import_helper.import_module('_interpchannels') -from test.support.interpreters import _crossinterp +from concurrent.interpreters import _crossinterp from test.test__interpreters import ( _interpreters, _run_output, diff --git a/Lib/test/test_concurrent_futures/test_interpreter_pool.py b/Lib/test/test_concurrent_futures/test_interpreter_pool.py index f6c62ae4b20..5fd5684e103 100644 --- a/Lib/test/test_concurrent_futures/test_interpreter_pool.py +++ b/Lib/test/test_concurrent_futures/test_interpreter_pool.py @@ -8,10 +8,10 @@ import unittest from concurrent.futures.interpreter import ( ExecutionFailed, BrokenInterpreterPool, ) +from concurrent.interpreters import _queues as queues import _interpreters from test import support import test.test_asyncio.utils as testasyncio_utils -from test.support.interpreters import queues from .executor import ExecutorTest, mul from .util import BaseTestCase, InterpreterPoolMixin, setup_module diff --git a/Lib/test/test_cprofile.py b/Lib/test/test_cprofile.py index 192c8eab26e..57e818b1c68 100644 --- a/Lib/test/test_cprofile.py +++ b/Lib/test/test_cprofile.py @@ -125,21 +125,22 @@ class CProfileTest(ProfileTest): """ gh-106152 generator.throw() should trigger a call in cProfile - In the any() call below, there should be two entries for the generator: - * one for the call to __next__ which gets a True and terminates any - * one when the generator is garbage collected which will effectively - do a throw. 
""" + + def gen(): + yield + pr = self.profilerclass() pr.enable() - any(a == 1 for a in (1, 2)) + g = gen() + try: + g.throw(SyntaxError) + except SyntaxError: + pass pr.disable() pr.create_stats() - for func, (cc, nc, _, _, _) in pr.stats.items(): - if func[2] == "<genexpr>": - self.assertEqual(cc, 1) - self.assertEqual(nc, 1) + self.assertTrue(any("throw" in func[2] for func in pr.stats.keys())), def test_bad_descriptor(self): # gh-132250 diff --git a/Lib/test/test_generated_cases.py b/Lib/test/test_generated_cases.py index 37046d8e1c0..6411e4318b6 100644 --- a/Lib/test/test_generated_cases.py +++ b/Lib/test/test_generated_cases.py @@ -56,14 +56,14 @@ class TestEffects(unittest.TestCase): def test_effect_sizes(self): stack = Stack() inputs = [ - x := StackItem("x", None, "1"), - y := StackItem("y", None, "oparg"), - z := StackItem("z", None, "oparg*2"), + x := StackItem("x", "1"), + y := StackItem("y", "oparg"), + z := StackItem("z", "oparg*2"), ] outputs = [ - StackItem("x", None, "1"), - StackItem("b", None, "oparg*4"), - StackItem("c", None, "1"), + StackItem("x", "1"), + StackItem("b", "oparg*4"), + StackItem("c", "1"), ] null = CWriter.null() stack.pop(z, null) @@ -1103,32 +1103,6 @@ class TestGeneratedCases(unittest.TestCase): """ self.run_cases_test(input, output) - def test_pointer_to_stackref(self): - input = """ - inst(OP, (arg: _PyStackRef * -- out)) { - out = *arg; - DEAD(arg); - } - """ - output = """ - TARGET(OP) { - #if Py_TAIL_CALL_INTERP - int opcode = OP; - (void)(opcode); - #endif - frame->instr_ptr = next_instr; - next_instr += 1; - INSTRUCTION_STATS(OP); - _PyStackRef *arg; - _PyStackRef out; - arg = (_PyStackRef *)stack_pointer[-1].bits; - out = *arg; - stack_pointer[-1] = out; - DISPATCH(); - } - """ - self.run_cases_test(input, output) - def test_unused_cached_value(self): input = """ op(FIRST, (arg1 -- out)) { diff --git a/Lib/test/test_interpreters/test_api.py b/Lib/test/test_interpreters/test_api.py index b3c9ef8efba..1403cd145b6 100644 --- a/Lib/test/test_interpreters/test_api.py +++ b/Lib/test/test_interpreters/test_api.py @@ -13,11 +13,11 @@ from test.support import script_helper from test.support import import_helper # Raise SkipTest if subinterpreters not supported. 
_interpreters = import_helper.import_module('_interpreters') +from concurrent import interpreters from test.support import Py_GIL_DISABLED -from test.support import interpreters from test.support import force_not_colorized import test._crossinterp_definitions as defs -from test.support.interpreters import ( +from concurrent.interpreters import ( InterpreterError, InterpreterNotFoundError, ExecutionFailed, ) from .utils import ( @@ -133,7 +133,7 @@ class CreateTests(TestBase): main, = interpreters.list_all() interp = interpreters.create() out = _run_output(interp, dedent(""" - from test.support import interpreters + from concurrent import interpreters interp = interpreters.create() print(interp.id) """)) @@ -196,7 +196,7 @@ class GetCurrentTests(TestBase): main = interpreters.get_main() interp = interpreters.create() out = _run_output(interp, dedent(""" - from test.support import interpreters + from concurrent import interpreters cur = interpreters.get_current() print(cur.id) """)) @@ -213,7 +213,7 @@ class GetCurrentTests(TestBase): with self.subTest('subinterpreter'): interp = interpreters.create() out = _run_output(interp, dedent(""" - from test.support import interpreters + from concurrent import interpreters cur = interpreters.get_current() print(id(cur)) cur = interpreters.get_current() @@ -225,7 +225,7 @@ class GetCurrentTests(TestBase): with self.subTest('per-interpreter'): interp = interpreters.create() out = _run_output(interp, dedent(""" - from test.support import interpreters + from concurrent import interpreters cur = interpreters.get_current() print(id(cur)) """)) @@ -582,7 +582,7 @@ class TestInterpreterClose(TestBase): main, = interpreters.list_all() interp = interpreters.create() out = _run_output(interp, dedent(f""" - from test.support import interpreters + from concurrent import interpreters interp = interpreters.Interpreter({interp.id}) try: interp.close() @@ -599,7 +599,7 @@ class TestInterpreterClose(TestBase): self.assertEqual(set(interpreters.list_all()), {main, interp1, interp2}) interp1.exec(dedent(f""" - from test.support import interpreters + from concurrent import interpreters interp2 = interpreters.Interpreter({interp2.id}) interp2.close() interp3 = interpreters.create() @@ -806,7 +806,7 @@ class TestInterpreterExec(TestBase): ham() """) scriptfile = self.make_script('script.py', tempdir, text=""" - from test.support import interpreters + from concurrent import interpreters def script(): import spam @@ -827,7 +827,7 @@ class TestInterpreterExec(TestBase): ~~~~~~~~~~~^^^^^^^^ {interpmod_line.strip()} raise ExecutionFailed(excinfo) - test.support.interpreters.ExecutionFailed: RuntimeError: uh-oh! + concurrent.interpreters.ExecutionFailed: RuntimeError: uh-oh! Uncaught in the interpreter: @@ -1281,7 +1281,7 @@ class TestInterpreterCall(TestBase): # no module indirection with self.subTest('no indirection'): text = run(f""" - from test.support import interpreters + from concurrent import interpreters def spam(): # This a global var... 
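These tests drive :meth:`!Interpreter.call` from spawned scripts; the same documented calls can be sketched directly, with the names here purely illustrative::

    from concurrent import interpreters

    interp = interpreters.create()

    # Bind a shareable object into the interpreter's __main__ module,
    # then run source code that uses it.
    interp.prepare_main(who='subinterpreter')
    interp.exec('print(f"hello from the {who}")')

    def greet():
        return 'hi!'

    # Run a callable in the interpreter (in the current thread) and get the result.
    print(interp.call(greet))

    interp.close()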
@@ -1301,7 +1301,7 @@ class TestInterpreterCall(TestBase): """) with self.subTest('indirect as func, direct interp'): text = run(f""" - from test.support import interpreters + from concurrent import interpreters import mymod def spam(): @@ -1317,7 +1317,7 @@ class TestInterpreterCall(TestBase): # indirect as func, indirect interp new_mod('mymod', f""" - from test.support import interpreters + from concurrent import interpreters def run(func): interp = interpreters.create() return interp.call(func) diff --git a/Lib/test/test_interpreters/test_channels.py b/Lib/test/test_interpreters/test_channels.py index 0c027b17cea..109ddf34453 100644 --- a/Lib/test/test_interpreters/test_channels.py +++ b/Lib/test/test_interpreters/test_channels.py @@ -8,8 +8,8 @@ import time from test.support import import_helper # Raise SkipTest if subinterpreters not supported. _channels = import_helper.import_module('_interpchannels') -from test.support import interpreters -from test.support.interpreters import channels +from concurrent import interpreters +from test.support import channels from .utils import _run_output, TestBase @@ -171,7 +171,7 @@ class TestSendRecv(TestBase): def test_send_recv_same_interpreter(self): interp = interpreters.create() interp.exec(dedent(""" - from test.support.interpreters import channels + from test.support import channels r, s = channels.create() orig = b'spam' s.send_nowait(orig) @@ -244,7 +244,7 @@ class TestSendRecv(TestBase): def test_send_recv_nowait_same_interpreter(self): interp = interpreters.create() interp.exec(dedent(""" - from test.support.interpreters import channels + from test.support import channels r, s = channels.create() orig = b'spam' s.send_nowait(orig) @@ -387,7 +387,7 @@ class TestSendRecv(TestBase): interp = interpreters.create() _run_output(interp, dedent(f""" - from test.support.interpreters import channels + from test.support import channels sch = channels.SendChannel({sch.id}) obj1 = b'spam' obj2 = b'eggs' @@ -482,7 +482,7 @@ class TestSendRecv(TestBase): self.assertEqual(_channels.get_count(rch.id), 0) _run_output(interp, dedent(f""" - from test.support.interpreters import channels + from test.support import channels sch = channels.SendChannel({sch.id}) sch.send_nowait(1, unbounditems=channels.UNBOUND) sch.send_nowait(2, unbounditems=channels.UNBOUND_ERROR) @@ -518,7 +518,7 @@ class TestSendRecv(TestBase): sch.send_nowait(1) _run_output(interp1, dedent(f""" - from test.support.interpreters import channels + from test.support import channels rch = channels.RecvChannel({rch.id}) sch = channels.SendChannel({sch.id}) obj1 = rch.recv() @@ -526,7 +526,7 @@ class TestSendRecv(TestBase): sch.send_nowait(obj1, unbounditems=channels.UNBOUND_REMOVE) """)) _run_output(interp2, dedent(f""" - from test.support.interpreters import channels + from test.support import channels rch = channels.RecvChannel({rch.id}) sch = channels.SendChannel({sch.id}) obj2 = rch.recv() diff --git a/Lib/test/test_interpreters/test_lifecycle.py b/Lib/test/test_interpreters/test_lifecycle.py index ac24f6568ac..15537ac6cc8 100644 --- a/Lib/test/test_interpreters/test_lifecycle.py +++ b/Lib/test/test_interpreters/test_lifecycle.py @@ -119,7 +119,7 @@ class StartupTests(TestBase): # The main interpreter's sys.path[0] should be used by subinterpreters. script = ''' import sys - from test.support import interpreters + from concurrent import interpreters orig = sys.path[0] @@ -170,7 +170,7 @@ class FinalizationTests(TestBase): # is reported, even when subinterpreters get cleaned up at the end. 
import subprocess argv = [sys.executable, '-c', '''if True: - from test.support import interpreters + from concurrent import interpreters interp = interpreters.create() raise Exception '''] diff --git a/Lib/test/test_interpreters/test_queues.py b/Lib/test/test_interpreters/test_queues.py index 757373904d7..3e982d76e86 100644 --- a/Lib/test/test_interpreters/test_queues.py +++ b/Lib/test/test_interpreters/test_queues.py @@ -7,8 +7,8 @@ import unittest from test.support import import_helper, Py_DEBUG # Raise SkipTest if subinterpreters not supported. _queues = import_helper.import_module('_interpqueues') -from test.support import interpreters -from test.support.interpreters import queues, _crossinterp +from concurrent import interpreters +from concurrent.interpreters import _queues as queues, _crossinterp from .utils import _run_output, TestBase as _TestBase @@ -126,7 +126,7 @@ class QueueTests(TestBase): interp = interpreters.create() interp.exec(dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue1 = queues.Queue({queue1.id}) """)); @@ -324,7 +324,7 @@ class TestQueueOps(TestBase): def test_put_get_same_interpreter(self): interp = interpreters.create() interp.exec(dedent(""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue = queues.create() """)) for methname in ('get', 'get_nowait'): @@ -351,7 +351,7 @@ class TestQueueOps(TestBase): out = _run_output( interp, dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue1 = queues.Queue({queue1.id}) queue2 = queues.Queue({queue2.id}) assert queue1.qsize() == 1, 'expected: queue1.qsize() == 1' @@ -390,7 +390,7 @@ class TestQueueOps(TestBase): interp = interpreters.create() _run_output(interp, dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue = queues.Queue({queue.id}) obj1 = b'spam' obj2 = b'eggs' @@ -468,7 +468,7 @@ class TestQueueOps(TestBase): queue = queues.create() interp = interpreters.create() _run_output(interp, dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue = queues.Queue({queue.id}) queue.put(1, unbounditems=queues.UNBOUND) queue.put(2, unbounditems=queues.UNBOUND_ERROR) @@ -504,14 +504,14 @@ class TestQueueOps(TestBase): queue.put(1) _run_output(interp1, dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue = queues.Queue({queue.id}) obj1 = queue.get() queue.put(2, unbounditems=queues.UNBOUND) queue.put(obj1, unbounditems=queues.UNBOUND_REMOVE) """)) _run_output(interp2, dedent(f""" - from test.support.interpreters import queues + from concurrent.interpreters import _queues as queues queue = queues.Queue({queue.id}) obj2 = queue.get() obj1 = queue.get() diff --git a/Lib/test/test_interpreters/test_stress.py b/Lib/test/test_interpreters/test_stress.py index fae2f38cb55..e25e67a0d4f 100644 --- a/Lib/test/test_interpreters/test_stress.py +++ b/Lib/test/test_interpreters/test_stress.py @@ -6,7 +6,7 @@ from test.support import import_helper from test.support import threading_helper # Raise SkipTest if subinterpreters not supported. 
import_helper.import_module('_interpreters') -from test.support import interpreters +from concurrent import interpreters from .utils import TestBase diff --git a/Lib/test/test_interpreters/utils.py b/Lib/test/test_interpreters/utils.py index c25e0fb7475..ae09aa457b4 100644 --- a/Lib/test/test_interpreters/utils.py +++ b/Lib/test/test_interpreters/utils.py @@ -21,7 +21,7 @@ try: import _interpreters except ImportError as exc: raise unittest.SkipTest(str(exc)) -from test.support import interpreters +from concurrent import interpreters try: diff --git a/Lib/test/test_locale.py b/Lib/test/test_locale.py index 455d2af37ef..55b502e52ca 100644 --- a/Lib/test/test_locale.py +++ b/Lib/test/test_locale.py @@ -387,6 +387,10 @@ class NormalizeTest(unittest.TestCase): self.check('c', 'C') self.check('posix', 'C') + def test_c_utf8(self): + self.check('c.utf8', 'C.UTF-8') + self.check('C.UTF-8', 'C.UTF-8') + def test_english(self): self.check('en', 'en_US.ISO8859-1') self.check('EN', 'en_US.ISO8859-1') diff --git a/Lib/test/test_random.py b/Lib/test/test_random.py index 54910cd8054..31ebcb3b8b0 100644 --- a/Lib/test/test_random.py +++ b/Lib/test/test_random.py @@ -14,6 +14,15 @@ from test import support from fractions import Fraction from collections import abc, Counter + +class MyIndex: + def __init__(self, value): + self.value = value + + def __index__(self): + return self.value + + class TestBasicOps: # Superclass with tests common to all generators. # Subclasses must arrange for self.gen to retrieve the Random instance @@ -809,6 +818,9 @@ class MersenneTwister_TestBasicOps(TestBasicOps, unittest.TestCase): self.gen.seed(1234567) self.assertEqual(self.gen.getrandbits(100), 97904845777343510404718956115) + self.gen.seed(1234567) + self.assertEqual(self.gen.getrandbits(MyIndex(100)), + 97904845777343510404718956115) def test_getrandbits_2G_bits(self): size = 2**31 diff --git a/Lib/test/test_sqlite3/test_cli.py b/Lib/test/test_sqlite3/test_cli.py index 37e0f74f688..d993e28c4bb 100644 --- a/Lib/test/test_sqlite3/test_cli.py +++ b/Lib/test/test_sqlite3/test_cli.py @@ -1,14 +1,22 @@ """sqlite3 CLI tests.""" import sqlite3 +import sys +import textwrap import unittest +import unittest.mock +import os from sqlite3.__main__ import main as cli +from test.support.import_helper import import_module from test.support.os_helper import TESTFN, unlink +from test.support.pty_helper import run_pty from test.support import ( captured_stdout, captured_stderr, captured_stdin, force_not_colorized_test_class, + requires_subprocess, + verbose, ) @@ -200,5 +208,108 @@ class InteractiveSession(unittest.TestCase): self.assertIn('\x1b[1;35mOperationalError (SQLITE_ERROR)\x1b[0m: ' '\x1b[35mnear "sel": syntax error\x1b[0m', err) + +@requires_subprocess() +@force_not_colorized_test_class +class Completion(unittest.TestCase): + PS1 = "sqlite> " + + @classmethod + def setUpClass(cls): + _sqlite3 = import_module("_sqlite3") + if not hasattr(_sqlite3, "SQLITE_KEYWORDS"): + raise unittest.SkipTest("unable to determine SQLite keywords") + + readline = import_module("readline") + if readline.backend == "editline": + raise unittest.SkipTest("libedit readline is not supported") + + def write_input(self, input_, env=None): + script = textwrap.dedent(""" + import readline + from sqlite3.__main__ import main + + readline.parse_and_bind("set colored-completion-prefix off") + main() + """) + return run_pty(script, input_, env) + + def test_complete_sql_keywords(self): + # List candidates starting with 'S', there should be multiple matches. 
+ input_ = b"S\t\tEL\t 1;\n.quit\n" + output = self.write_input(input_) + self.assertIn(b"SELECT", output) + self.assertIn(b"SET", output) + self.assertIn(b"SAVEPOINT", output) + self.assertIn(b"(1,)", output) + + # Keywords are completed in upper case for even lower case user input. + input_ = b"sel\t\t 1;\n.quit\n" + output = self.write_input(input_) + self.assertIn(b"SELECT", output) + self.assertIn(b"(1,)", output) + + @unittest.skipIf(sys.platform.startswith("freebsd"), + "Two actual tabs are inserted when there are no matching" + " completions in the pseudo-terminal opened by run_pty()" + " on FreeBSD") + def test_complete_no_match(self): + input_ = b"xyzzy\t\t\b\b\b\b\b\b\b.quit\n" + # Set NO_COLOR to disable coloring for self.PS1. + output = self.write_input(input_, env={**os.environ, "NO_COLOR": "1"}) + lines = output.decode().splitlines() + indices = ( + i for i, line in enumerate(lines, 1) + if line.startswith(f"{self.PS1}xyzzy") + ) + line_num = next(indices, -1) + self.assertNotEqual(line_num, -1) + # Completions occupy lines, assert no extra lines when there is nothing + # to complete. + self.assertEqual(line_num, len(lines)) + + def test_complete_no_input(self): + from _sqlite3 import SQLITE_KEYWORDS + + script = textwrap.dedent(""" + import readline + from sqlite3.__main__ import main + + # Configure readline to ...: + # - hide control sequences surrounding each candidate + # - hide "Display all xxx possibilities? (y or n)" + # - hide "--More--" + # - show candidates one per line + readline.parse_and_bind("set colored-completion-prefix off") + readline.parse_and_bind("set colored-stats off") + readline.parse_and_bind("set completion-query-items 0") + readline.parse_and_bind("set page-completions off") + readline.parse_and_bind("set completion-display-width 0") + readline.parse_and_bind("set show-all-if-ambiguous off") + readline.parse_and_bind("set show-all-if-unmodified off") + + main() + """) + input_ = b"\t\t.quit\n" + output = run_pty(script, input_, env={**os.environ, "NO_COLOR": "1"}) + try: + lines = output.decode().splitlines() + indices = [ + i for i, line in enumerate(lines) + if line.startswith(self.PS1) + ] + self.assertEqual(len(indices), 2) + start, end = indices + candidates = [l.strip() for l in lines[start+1:end]] + self.assertEqual(candidates, sorted(SQLITE_KEYWORDS)) + except: + if verbose: + print(' PTY output: '.center(30, '-')) + print(output.decode(errors='replace')) + print(' end PTY output '.center(30, '-')) + raise + + + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_sys.py b/Lib/test/test_sys.py index bf415894903..39e62027f03 100644 --- a/Lib/test/test_sys.py +++ b/Lib/test/test_sys.py @@ -24,7 +24,7 @@ from test.support import import_helper from test.support import force_not_colorized from test.support import SHORT_TIMEOUT try: - from test.support import interpreters + from concurrent import interpreters except ImportError: interpreters = None import textwrap diff --git a/Lib/test/test_threading.py b/Lib/test/test_threading.py index 59b3a749d2f..125c2744698 100644 --- a/Lib/test/test_threading.py +++ b/Lib/test/test_threading.py @@ -28,7 +28,7 @@ from test import lock_tests from test import support try: - from test.support import interpreters + from concurrent import interpreters except ImportError: interpreters = None diff --git a/Lib/test/test_types.py b/Lib/test/test_types.py index 9011e0e1962..a117413301b 100644 --- a/Lib/test/test_types.py +++ b/Lib/test/test_types.py @@ -2513,15 +2513,16 @@ class 
SubinterpreterTests(unittest.TestCase): def setUpClass(cls): global interpreters try: - from test.support import interpreters + from concurrent import interpreters except ModuleNotFoundError: raise unittest.SkipTest('subinterpreters required') - import test.support.interpreters.channels # noqa: F401 + from test.support import channels # noqa: F401 + cls.create_channel = staticmethod(channels.create) @cpython_only @no_rerun('channels (and queues) might have a refleak; see gh-122199') def test_static_types_inherited_slots(self): - rch, sch = interpreters.channels.create() + rch, sch = self.create_channel() script = textwrap.dedent(""" import test.support @@ -2547,7 +2548,7 @@ class SubinterpreterTests(unittest.TestCase): main_results = collate_results(raw) interp = interpreters.create() - interp.exec('from test.support import interpreters') + interp.exec('from concurrent import interpreters') interp.prepare_main(sch=sch) interp.exec(script) raw = rch.recv_nowait() diff --git a/Makefile.pre.in b/Makefile.pre.in index b5703fbe6ae..66b34b779f2 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -2514,7 +2514,7 @@ XMLLIBSUBDIRS= xml xml/dom xml/etree xml/parsers xml/sax LIBSUBDIRS= asyncio \ collections \ compression compression/_common compression/zstd \ - concurrent concurrent/futures \ + concurrent concurrent/futures concurrent/interpreters \ csv \ ctypes ctypes/macholib \ curses \ @@ -2573,7 +2573,6 @@ TESTSUBDIRS= idlelib/idle_test \ test/subprocessdata \ test/support \ test/support/_hypothesis_stubs \ - test/support/interpreters \ test/test_asyncio \ test/test_capi \ test/test_cext \ diff --git a/Misc/ACKS b/Misc/ACKS index 0be31560387..d4557a03eb5 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -1869,6 +1869,7 @@ Neil Tallim Geoff Talvola Anish Tambe Musashi Tamura +Long Tan William Tanksley Christian Tanzer Steven Taschuk diff --git a/Misc/NEWS.d/next/Build/2024-12-04-10-00-35.gh-issue-127545.t0THjE.rst b/Misc/NEWS.d/next/Build/2024-12-04-10-00-35.gh-issue-127545.t0THjE.rst new file mode 100644 index 00000000000..3667e2778b7 --- /dev/null +++ b/Misc/NEWS.d/next/Build/2024-12-04-10-00-35.gh-issue-127545.t0THjE.rst @@ -0,0 +1 @@ +Fix crash when building on Linux/m68k. diff --git a/Misc/NEWS.d/next/Library/2025-05-05-03-14-08.gh-issue-133390.AuTggn.rst b/Misc/NEWS.d/next/Library/2025-05-05-03-14-08.gh-issue-133390.AuTggn.rst new file mode 100644 index 00000000000..38d5c311b1d --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-05-05-03-14-08.gh-issue-133390.AuTggn.rst @@ -0,0 +1 @@ +Support keyword completion in the :mod:`sqlite3` command-line interface. diff --git a/Misc/NEWS.d/next/Library/2025-05-30-09-46-21.gh-issue-134939.Pu3nnm.rst b/Misc/NEWS.d/next/Library/2025-05-30-09-46-21.gh-issue-134939.Pu3nnm.rst new file mode 100644 index 00000000000..2bda69bff52 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-05-30-09-46-21.gh-issue-134939.Pu3nnm.rst @@ -0,0 +1 @@ +Add the :mod:`concurrent.interpreters` module. See :pep:`734`. diff --git a/Misc/NEWS.d/next/Library/2025-06-10-00-42-30.gh-issue-135321.UHh9jT.rst b/Misc/NEWS.d/next/Library/2025-06-10-00-42-30.gh-issue-135321.UHh9jT.rst new file mode 100644 index 00000000000..9e63d8e28b7 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-06-10-00-42-30.gh-issue-135321.UHh9jT.rst @@ -0,0 +1 @@ +Raise a correct exception for values greater than 0x7fffffff for the ``BINSTRING`` opcode in the C implementation of :mod:`pickle`. 
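The :mod:`concurrent.interpreters` entry above is the new public home of the subinterpreters API. A minimal usage sketch, assuming only the calls already exercised by the tests in this diff (``interpreters.create()`` and ``Interpreter.exec()``):

    from concurrent import interpreters  # was test.support.interpreters

    interp = interpreters.create()       # start a fresh subinterpreter
    interp.exec('print("hello from a subinterpreter")')

The rest of this diff applies the same import change throughout the test suite.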
diff --git a/Misc/NEWS.d/next/Library/2025-06-10-16-11-00.gh-issue-133967.P0c24q.rst b/Misc/NEWS.d/next/Library/2025-06-10-16-11-00.gh-issue-133967.P0c24q.rst new file mode 100644 index 00000000000..1976981727e --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-06-10-16-11-00.gh-issue-133967.P0c24q.rst @@ -0,0 +1 @@ +Do not normalize :mod:`locale` name 'C.UTF-8' to 'en_US.UTF-8'. diff --git a/Misc/NEWS.d/next/Library/2025-06-12-18-15-31.gh-issue-135429.mch75_.rst b/Misc/NEWS.d/next/Library/2025-06-12-18-15-31.gh-issue-135429.mch75_.rst new file mode 100644 index 00000000000..b5213520a95 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2025-06-12-18-15-31.gh-issue-135429.mch75_.rst @@ -0,0 +1 @@ +Fix the argument mismatch in ``_lsprof`` for the ``PY_THROW`` event. diff --git a/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst new file mode 100644 index 00000000000..25599a865b7 --- /dev/null +++ b/Misc/NEWS.d/next/Tools-Demos/2025-06-11-12-14-06.gh-issue-135379.25ttXq.rst @@ -0,0 +1,4 @@ +The cases generator no longer accepts type annotations on stack items. +Conversions to non-default types are now done explicitly in bytecodes.c and +optimizer_bytecodes.c. This will simplify code generation for top-of-stack +caching and other future features. diff --git a/Modules/_interpchannelsmodule.c b/Modules/_interpchannelsmodule.c index ea2e5f99dfa..ee5e2b005e0 100644 --- a/Modules/_interpchannelsmodule.c +++ b/Modules/_interpchannelsmodule.c @@ -220,6 +220,22 @@ wait_for_lock(PyThread_type_lock mutex, PY_TIMEOUT_T timeout) return 0; } +static int +ensure_highlevel_module_loaded(void) +{ + PyObject *highlevel = + PyImport_ImportModule("concurrent.interpreters._channels"); + if (highlevel == NULL) { + PyErr_Clear(); + highlevel = PyImport_ImportModule("test.support.channels"); + if (highlevel == NULL) { + return -1; + } + } + Py_DECREF(highlevel); + return 0; +} + /* module state *************************************************************/ @@ -2742,15 +2758,9 @@ _get_current_channelend_type(int end) } if (cls == NULL) { // Force the module to be loaded, to register the type. - PyObject *highlevel = PyImport_ImportModule("interpreters.channels"); - if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters.channels"); - if (highlevel == NULL) { - return NULL; - } + if (ensure_highlevel_module_loaded() < 0) { + return NULL; } - Py_DECREF(highlevel); if (end == CHANNEL_SEND) { cls = state->send_channel_type; } diff --git a/Modules/_interpqueuesmodule.c b/Modules/_interpqueuesmodule.c index 71d8fd8716c..e22709d5119 100644 --- a/Modules/_interpqueuesmodule.c +++ b/Modules/_interpqueuesmodule.c @@ -136,13 +136,10 @@ idarg_int64_converter(PyObject *arg, void *ptr) static int ensure_highlevel_module_loaded(void) { - PyObject *highlevel = PyImport_ImportModule("interpreters.queues"); + PyObject *highlevel = + PyImport_ImportModule("concurrent.interpreters._queues"); if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters.queues"); - if (highlevel == NULL) { - return -1; - } + return -1; } Py_DECREF(highlevel); return 0; @@ -299,7 +296,7 @@ add_QueueError(PyObject *mod) { module_state *state = get_module_state(mod); -#define PREFIX "test.support.interpreters." +#define PREFIX "concurrent.interpreters."
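+/* PREFIX is prepended to each exception name registered via ADD_EXCTYPE below, + so these types are now exposed under the concurrent.interpreters namespace. */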
#define ADD_EXCTYPE(NAME, BASE, DOC) \ assert(state->NAME == NULL); \ if (add_exctype(mod, &state->NAME, PREFIX #NAME, DOC, BASE) < 0) { \ diff --git a/Modules/_lsprof.c b/Modules/_lsprof.c index bbad5eb6903..d0074b2a0d1 100644 --- a/Modules/_lsprof.c +++ b/Modules/_lsprof.c @@ -632,6 +632,27 @@ _lsprof_Profiler__pystart_callback_impl(ProfilerObject *self, PyObject *code, } /*[clinic input] +_lsprof.Profiler._pythrow_callback + + code: object + instruction_offset: object + exception: object + / + +[clinic start generated code]*/ + +static PyObject * +_lsprof_Profiler__pythrow_callback_impl(ProfilerObject *self, PyObject *code, + PyObject *instruction_offset, + PyObject *exception) +/*[clinic end generated code: output=0a32988919dfb94c input=fd728fc2c074f5e6]*/ +{ + ptrace_enter_call((PyObject*)self, (void *)code, code); + + Py_RETURN_NONE; +} + +/*[clinic input] _lsprof.Profiler._pyreturn_callback code: object @@ -747,7 +768,7 @@ static const struct { } callback_table[] = { {PY_MONITORING_EVENT_PY_START, "_pystart_callback"}, {PY_MONITORING_EVENT_PY_RESUME, "_pystart_callback"}, - {PY_MONITORING_EVENT_PY_THROW, "_pystart_callback"}, + {PY_MONITORING_EVENT_PY_THROW, "_pythrow_callback"}, {PY_MONITORING_EVENT_PY_RETURN, "_pyreturn_callback"}, {PY_MONITORING_EVENT_PY_YIELD, "_pyreturn_callback"}, {PY_MONITORING_EVENT_PY_UNWIND, "_pyreturn_callback"}, @@ -1002,6 +1023,7 @@ static PyMethodDef profiler_methods[] = { _LSPROF_PROFILER_DISABLE_METHODDEF _LSPROF_PROFILER_CLEAR_METHODDEF _LSPROF_PROFILER__PYSTART_CALLBACK_METHODDEF + _LSPROF_PROFILER__PYTHROW_CALLBACK_METHODDEF _LSPROF_PROFILER__PYRETURN_CALLBACK_METHODDEF _LSPROF_PROFILER__CCALL_CALLBACK_METHODDEF _LSPROF_PROFILER__CRETURN_CALLBACK_METHODDEF diff --git a/Modules/_pickle.c b/Modules/_pickle.c index 86d8b38620c..cf3ceb43fb3 100644 --- a/Modules/_pickle.c +++ b/Modules/_pickle.c @@ -5543,17 +5543,16 @@ static int load_counted_binstring(PickleState *st, UnpicklerObject *self, int nbytes) { PyObject *obj; - Py_ssize_t size; + long size; char *s; if (_Unpickler_Read(self, st, &s, nbytes) < 0) return -1; - size = calc_binsize(s, nbytes); + size = calc_binint(s, nbytes); if (size < 0) { - PyErr_Format(st->UnpicklingError, - "BINSTRING exceeds system's maximum size of %zd bytes", - PY_SSIZE_T_MAX); + PyErr_SetString(st->UnpicklingError, + "BINSTRING pickle has negative byte count"); return -1; } diff --git a/Modules/_sqlite/module.c b/Modules/_sqlite/module.c index 909ddd1f990..5464fd1227a 100644 --- a/Modules/_sqlite/module.c +++ b/Modules/_sqlite/module.c @@ -32,6 +32,7 @@ #include "microprotocols.h" #include "row.h" #include "blob.h" +#include "util.h" #if SQLITE_VERSION_NUMBER < 3015002 #error "SQLite 3.15.2 or higher required" @@ -405,6 +406,40 @@ pysqlite_error_name(int rc) } static int +add_keyword_tuple(PyObject *module) +{ +#if SQLITE_VERSION_NUMBER >= 3024000 + int count = sqlite3_keyword_count(); + PyObject *keywords = PyTuple_New(count); + if (keywords == NULL) { + return -1; + } + for (int i = 0; i < count; i++) { + const char *keyword; + int size; + int result = sqlite3_keyword_name(i, &keyword, &size); + if (result != SQLITE_OK) { + pysqlite_state *state = pysqlite_get_state(module); + set_error_from_code(state, result); + goto error; + } + PyObject *kwd = PyUnicode_FromStringAndSize(keyword, size); + if (!kwd) { + goto error; + } + PyTuple_SET_ITEM(keywords, i, kwd); + } + return PyModule_Add(module, "SQLITE_KEYWORDS", keywords); + +error: + Py_DECREF(keywords); + return -1; +#else + return 0; +#endif +} + +static int 
add_integer_constants(PyObject *module) { #define ADD_INT(ival) \ do { \ @@ -702,6 +737,10 @@ module_exec(PyObject *module) goto error; } + if (add_keyword_tuple(module) < 0) { + goto error; + } + if (PyModule_AddStringConstant(module, "sqlite_version", sqlite3_libversion())) { goto error; } diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c index 845c218e679..804cb4e4d1c 100644 --- a/Modules/_testinternalcapi.c +++ b/Modules/_testinternalcapi.c @@ -1788,9 +1788,9 @@ finally: /* To run some code in a sub-interpreter. -Generally you can use test.support.interpreters, +Generally you can use the interpreters module, but we keep this helper as a distinct implementation. -That's especially important for testing test.support.interpreters. +That's especially important for testing the interpreters module. */ static PyObject * run_in_subinterp_with_config(PyObject *self, PyObject *args, PyObject *kwargs) diff --git a/Modules/clinic/_lsprof.c.h b/Modules/clinic/_lsprof.c.h index 2918a6bc7ab..c426cd6fe02 100644 --- a/Modules/clinic/_lsprof.c.h +++ b/Modules/clinic/_lsprof.c.h @@ -82,6 +82,39 @@ exit: return return_value; } +PyDoc_STRVAR(_lsprof_Profiler__pythrow_callback__doc__, +"_pythrow_callback($self, code, instruction_offset, exception, /)\n" +"--\n" +"\n"); + +#define _LSPROF_PROFILER__PYTHROW_CALLBACK_METHODDEF \ + {"_pythrow_callback", _PyCFunction_CAST(_lsprof_Profiler__pythrow_callback), METH_FASTCALL, _lsprof_Profiler__pythrow_callback__doc__}, + +static PyObject * +_lsprof_Profiler__pythrow_callback_impl(ProfilerObject *self, PyObject *code, + PyObject *instruction_offset, + PyObject *exception); + +static PyObject * +_lsprof_Profiler__pythrow_callback(PyObject *self, PyObject *const *args, Py_ssize_t nargs) +{ + PyObject *return_value = NULL; + PyObject *code; + PyObject *instruction_offset; + PyObject *exception; + + if (!_PyArg_CheckPositional("_pythrow_callback", nargs, 3, 3)) { + goto exit; + } + code = args[0]; + instruction_offset = args[1]; + exception = args[2]; + return_value = _lsprof_Profiler__pythrow_callback_impl((ProfilerObject *)self, code, instruction_offset, exception); + +exit: + return return_value; +} + PyDoc_STRVAR(_lsprof_Profiler__pyreturn_callback__doc__, "_pyreturn_callback($self, code, instruction_offset, retval, /)\n" "--\n" @@ -411,4 +444,4 @@ skip_optional_pos: exit: return return_value; } -/*[clinic end generated code: output=fe231309776df7a7 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=9e46985561166c37 input=a9049054013a1b77]*/ diff --git a/Python/bytecodes.c b/Python/bytecodes.c index c4b13da5db4..032e76f72af 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -985,12 +985,13 @@ dummy_func( STAT_INC(BINARY_OP, hit); } - op(_BINARY_OP_SUBSCR_INIT_CALL, (container, sub, getitem -- new_frame: _PyInterpreterFrame* )) { - new_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); - new_frame->localsplus[0] = container; - new_frame->localsplus[1] = sub; + op(_BINARY_OP_SUBSCR_INIT_CALL, (container, sub, getitem -- new_frame)) { + _PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); + pushed_frame->localsplus[0] = container; + pushed_frame->localsplus[1] = sub; INPUTS_DEAD(); frame->return_offset = INSTRUCTION_SIZE; + new_frame = PyStackRef_Wrap(pushed_frame); } macro(BINARY_OP_SUBSCR_GETITEM) = @@ -1296,20 +1297,21 @@ dummy_func( macro(SEND) = _SPECIALIZE_SEND + _SEND; - op(_SEND_GEN_FRAME, (receiver, v -- receiver, gen_frame: _PyInterpreterFrame *)) { + op(_SEND_GEN_FRAME, 
(receiver, v -- receiver, gen_frame)) { PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(receiver); DEOPT_IF(Py_TYPE(gen) != &PyGen_Type && Py_TYPE(gen) != &PyCoro_Type); DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING); STAT_INC(SEND, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_MakeHeapSafe(v)); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_MakeHeapSafe(v)); DEAD(v); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; assert(INSTRUCTION_SIZE + oparg <= UINT16_MAX); frame->return_offset = (uint16_t)(INSTRUCTION_SIZE + oparg); - gen_frame->previous = frame; + pushed_frame->previous = frame; + gen_frame = PyStackRef_Wrap(pushed_frame); } macro(SEND_GEN) = @@ -2463,7 +2465,7 @@ dummy_func( _LOAD_ATTR_CLASS + _PUSH_NULL_CONDITIONAL; - op(_LOAD_ATTR_PROPERTY_FRAME, (fget/4, owner -- new_frame: _PyInterpreterFrame *)) { + op(_LOAD_ATTR_PROPERTY_FRAME, (fget/4, owner -- new_frame)) { assert((oparg & 1) == 0); assert(Py_IS_TYPE(fget, &PyFunction_Type)); PyFunctionObject *f = (PyFunctionObject *)fget; @@ -2473,9 +2475,10 @@ dummy_func( DEOPT_IF(code->co_argcount != 1); DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize)); STAT_INC(LOAD_ATTR, hit); - new_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); - new_frame->localsplus[0] = owner; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); + pushed_frame->localsplus[0] = owner; DEAD(owner); + new_frame = PyStackRef_Wrap(pushed_frame); } macro(LOAD_ATTR_PROPERTY) = @@ -3344,7 +3347,7 @@ dummy_func( _ITER_JUMP_RANGE + _ITER_NEXT_RANGE; - op(_FOR_ITER_GEN_FRAME, (iter, null -- iter, null, gen_frame: _PyInterpreterFrame*)) { + op(_FOR_ITER_GEN_FRAME, (iter, null -- iter, null, gen_frame)) { PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter); DEOPT_IF(Py_TYPE(gen) != &PyGen_Type); #ifdef Py_GIL_DISABLED @@ -3356,14 +3359,15 @@ dummy_func( #endif DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING); STAT_INC(FOR_ITER, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_None); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_None); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; - gen_frame->previous = frame; + pushed_frame->previous = frame; // oparg is the return offset from the next instruction. 
frame->return_offset = (uint16_t)(INSTRUCTION_SIZE + oparg); + gen_frame = PyStackRef_Wrap(pushed_frame); } macro(FOR_ITER_GEN) = @@ -3715,7 +3719,7 @@ dummy_func( macro(CALL) = _SPECIALIZE_CALL + unused/2 + _MAYBE_EXPAND_METHOD + _DO_CALL + _CHECK_PERIODIC; macro(INSTRUMENTED_CALL) = unused/3 + _MAYBE_EXPAND_METHOD + _MONITOR_CALL + _DO_CALL + _CHECK_PERIODIC; - op(_PY_FRAME_GENERAL, (callable, self_or_null, args[oparg] -- new_frame: _PyInterpreterFrame*)) { + op(_PY_FRAME_GENERAL, (callable, self_or_null, args[oparg] -- new_frame)) { PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable); // oparg counts all of the args, but *not* self: @@ -3737,7 +3741,7 @@ dummy_func( if (temp == NULL) { ERROR_NO_POP(); } - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } op(_CHECK_FUNCTION_VERSION, (func_version/2, callable, unused, unused[oparg] -- callable, unused, unused[oparg])) { @@ -3874,27 +3878,26 @@ dummy_func( DEOPT_IF(tstate->py_recursion_remaining <= 1); } - replicate(5) pure op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _PyInterpreterFrame*)) { + replicate(5) pure op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame)) { int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } INPUTS_DEAD(); + new_frame = PyStackRef_Wrap(pushed_frame); } - op(_PUSH_FRAME, (new_frame: _PyInterpreterFrame* -- )) { - // Write it out explicitly because it's subtly different. - // Eventually this should be the only occurrence of this code. + op(_PUSH_FRAME, (new_frame -- )) { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); DEAD(new_frame); SYNC_SP(); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -4046,7 +4049,7 @@ dummy_func( PyStackRef_CLOSE(temp); } - op(_CREATE_INIT_FRAME, (init, self, args[oparg] -- init_frame: _PyInterpreterFrame *)) { + op(_CREATE_INIT_FRAME, (init, self, args[oparg] -- init_frame)) { _PyInterpreterFrame *shim = _PyFrame_PushTrampolineUnchecked( tstate, (PyCodeObject *)&_Py_InitCleanup, 1, frame); assert(_PyFrame_GetBytecode(shim)[0].op.code == EXIT_INIT_CHECK); @@ -4063,12 +4066,12 @@ dummy_func( _PyEval_FrameClearAndPop(tstate, shim); ERROR_NO_POP(); } - init_frame = temp; frame->return_offset = 1 + INLINE_CACHE_ENTRIES_CALL; /* Account for pushing the extra frame. 
* We don't check recursion depth here, * as it will be checked after start_frame */ tstate->py_recursion_remaining--; + init_frame = PyStackRef_Wrap(temp); } macro(CALL_ALLOC_AND_ENTER_INIT) = @@ -4594,7 +4597,7 @@ dummy_func( res = PyStackRef_FromPyObjectSteal(res_o); } - op(_PY_FRAME_KW, (callable, self_or_null, args[oparg], kwnames -- new_frame: _PyInterpreterFrame*)) { + op(_PY_FRAME_KW, (callable, self_or_null, args[oparg], kwnames -- new_frame)) { PyObject *callable_o = PyStackRef_AsPyObjectBorrow(callable); // oparg counts all of the args, but *not* self: @@ -4621,7 +4624,7 @@ dummy_func( DEAD(callable); SYNC_SP(); ERROR_IF(temp == NULL); - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } op(_CHECK_FUNCTION_VERSION_KW, (func_version/2, callable, unused, unused[oparg], unused -- callable, unused, unused[oparg], unused)) { diff --git a/Python/crossinterp_exceptions.h b/Python/crossinterp_exceptions.h index ca4ca1cf123..12cd61db1b6 100644 --- a/Python/crossinterp_exceptions.h +++ b/Python/crossinterp_exceptions.h @@ -24,7 +24,7 @@ _ensure_current_cause(PyThreadState *tstate, PyObject *cause) static PyTypeObject _PyExc_InterpreterError = { PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "interpreters.InterpreterError", + .tp_name = "concurrent.interpreters.InterpreterError", .tp_doc = PyDoc_STR("A cross-interpreter operation failed"), .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, //.tp_traverse = ((PyTypeObject *)PyExc_Exception)->tp_traverse, @@ -37,7 +37,7 @@ PyObject *PyExc_InterpreterError = (PyObject *)&_PyExc_InterpreterError; static PyTypeObject _PyExc_InterpreterNotFoundError = { PyVarObject_HEAD_INIT(NULL, 0) - .tp_name = "interpreters.InterpreterNotFoundError", + .tp_name = "concurrent.interpreters.InterpreterNotFoundError", .tp_doc = PyDoc_STR("An interpreter was not found"), .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, //.tp_traverse = ((PyTypeObject *)PyExc_Exception)->tp_traverse, @@ -51,7 +51,7 @@ PyObject *PyExc_InterpreterNotFoundError = (PyObject *)&_PyExc_InterpreterNotFou static int _init_notshareableerror(exceptions_t *state) { - const char *name = "interpreters.NotShareableError"; + const char *name = "concurrent.interpreters.NotShareableError"; PyObject *base = PyExc_TypeError; PyObject *ns = NULL; PyObject *exctype = PyErr_NewException(name, base, ns); diff --git a/Python/emscripten_trampoline.c b/Python/emscripten_trampoline.c index cc5047d6bda..975c28eec10 100644 --- a/Python/emscripten_trampoline.c +++ b/Python/emscripten_trampoline.c @@ -71,7 +71,16 @@ EM_JS(CountArgsFunc, _PyEM_GetCountArgsPtr, (), { // ) function getPyEMCountArgsPtr() { - let isIOS = globalThis.navigator && /iPad|iPhone|iPod/.test(navigator.platform); + // Starting with iOS 18.3.1, WebKit on iOS has an issue with the garbage + // collector that breaks the call trampoline. See #130418 and + // https://bugs.webkit.org/show_bug.cgi?id=293113 for details. + let isIOS = globalThis.navigator && ( + /iPad|iPhone|iPod/.test(navigator.userAgent) || + // Starting with iPadOS 13, iPads might send a platform string that looks like a desktop Mac. 
+ // To differentiate, we check if the platform is 'MacIntel' (common for Macs and newer iPads) + // AND if the device has multi-touch capabilities (navigator.maxTouchPoints > 1) + (navigator.platform === 'MacIntel' && typeof navigator.maxTouchPoints !== 'undefined' && navigator.maxTouchPoints > 1) + ) if (isIOS) { return 0; } diff --git a/Python/executor_cases.c.h b/Python/executor_cases.c.h index d19605169d5..4f772f916d1 100644 --- a/Python/executor_cases.c.h +++ b/Python/executor_cases.c.h @@ -1551,15 +1551,16 @@ _PyStackRef getitem; _PyStackRef sub; _PyStackRef container; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; getitem = stack_pointer[-1]; sub = stack_pointer[-2]; container = stack_pointer[-3]; - new_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); - new_frame->localsplus[0] = container; - new_frame->localsplus[1] = sub; + _PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); + pushed_frame->localsplus[0] = container; + pushed_frame->localsplus[1] = sub; frame->return_offset = 6 ; - stack_pointer[-3].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-3] = new_frame; stack_pointer += -2; assert(WITHIN_STACK_BOUNDS()); break; @@ -1907,7 +1908,7 @@ case _SEND_GEN_FRAME: { _PyStackRef v; _PyStackRef receiver; - _PyInterpreterFrame *gen_frame; + _PyStackRef gen_frame; oparg = CURRENT_OPARG(); v = stack_pointer[-1]; receiver = stack_pointer[-2]; @@ -1921,15 +1922,16 @@ JUMP_TO_JUMP_TARGET(); } STAT_INC(SEND, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_MakeHeapSafe(v)); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_MakeHeapSafe(v)); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; assert( 2 + oparg <= UINT16_MAX); frame->return_offset = (uint16_t)( 2 + oparg); - gen_frame->previous = frame; - stack_pointer[-1].bits = (uintptr_t)gen_frame; + pushed_frame->previous = frame; + gen_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-1] = gen_frame; break; } @@ -3471,7 +3473,7 @@ case _LOAD_ATTR_PROPERTY_FRAME: { _PyStackRef owner; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = CURRENT_OPARG(); owner = stack_pointer[-1]; PyObject *fget = (PyObject *)CURRENT_OPERAND0(); @@ -3496,9 +3498,10 @@ JUMP_TO_JUMP_TARGET(); } STAT_INC(LOAD_ATTR, hit); - new_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); - new_frame->localsplus[0] = owner; - stack_pointer[-1].bits = (uintptr_t)new_frame; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); + pushed_frame->localsplus[0] = owner; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-1] = new_frame; break; } @@ -4467,7 +4470,7 @@ case _FOR_ITER_GEN_FRAME: { _PyStackRef iter; - _PyInterpreterFrame *gen_frame; + _PyStackRef gen_frame; oparg = CURRENT_OPARG(); iter = stack_pointer[-2]; PyGenObject *gen = (PyGenObject *)PyStackRef_AsPyObjectBorrow(iter); @@ -4487,14 +4490,15 @@ JUMP_TO_JUMP_TARGET(); } STAT_INC(FOR_ITER, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_None); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_None); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; - gen_frame->previous = 
frame; + pushed_frame->previous = frame; frame->return_offset = (uint16_t)( 2 + oparg); - stack_pointer[0].bits = (uintptr_t)gen_frame; + gen_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[0] = gen_frame; stack_pointer += 1; assert(WITHIN_STACK_BOUNDS()); break; @@ -4775,7 +4779,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = CURRENT_OPARG(); args = &stack_pointer[-oparg]; self_or_null = stack_pointer[-1 - oparg]; @@ -4800,8 +4804,8 @@ if (temp == NULL) { JUMP_TO_ERROR(); } - new_frame = temp; - stack_pointer[0].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(temp); + stack_pointer[0] = new_frame; stack_pointer += 1; assert(WITHIN_STACK_BOUNDS()); break; @@ -5067,7 +5071,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = 0; assert(oparg == CURRENT_OPARG()); args = &stack_pointer[-oparg]; @@ -5075,13 +5079,14 @@ callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -5091,7 +5096,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = 1; assert(oparg == CURRENT_OPARG()); args = &stack_pointer[-oparg]; @@ -5099,13 +5104,14 @@ callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -5115,7 +5121,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = 2; assert(oparg == CURRENT_OPARG()); args = &stack_pointer[-oparg]; @@ -5123,13 +5129,14 @@ callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + 
_PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -5139,7 +5146,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = 3; assert(oparg == CURRENT_OPARG()); args = &stack_pointer[-oparg]; @@ -5147,13 +5154,14 @@ callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -5163,7 +5171,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = 4; assert(oparg == CURRENT_OPARG()); args = &stack_pointer[-oparg]; @@ -5171,13 +5179,14 @@ callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -5187,34 +5196,35 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = CURRENT_OPARG(); args = &stack_pointer[-oparg]; self_or_null = stack_pointer[-1 - oparg]; callable = stack_pointer[-2 - oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } - stack_pointer[-2 - oparg].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(pushed_frame); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; 
assert(WITHIN_STACK_BOUNDS()); break; } case _PUSH_FRAME: { - _PyInterpreterFrame *new_frame; - new_frame = (_PyInterpreterFrame *)stack_pointer[-1].bits; + _PyStackRef new_frame; + new_frame = stack_pointer[-1]; assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -1; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -5429,7 +5439,7 @@ _PyStackRef *args; _PyStackRef self; _PyStackRef init; - _PyInterpreterFrame *init_frame; + _PyStackRef init_frame; oparg = CURRENT_OPARG(); args = &stack_pointer[-oparg]; self = stack_pointer[-1 - oparg]; @@ -5453,10 +5463,10 @@ stack_pointer = _PyFrame_GetStackPointer(frame); JUMP_TO_ERROR(); } - init_frame = temp; frame->return_offset = 1 + INLINE_CACHE_ENTRIES_CALL; tstate->py_recursion_remaining--; - stack_pointer[0].bits = (uintptr_t)init_frame; + init_frame = PyStackRef_Wrap(temp); + stack_pointer[0] = init_frame; stack_pointer += 1; assert(WITHIN_STACK_BOUNDS()); break; @@ -6309,7 +6319,7 @@ _PyStackRef *args; _PyStackRef self_or_null; _PyStackRef callable; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; oparg = CURRENT_OPARG(); kwnames = stack_pointer[-1]; args = &stack_pointer[-1 - oparg]; @@ -6343,8 +6353,8 @@ if (temp == NULL) { JUMP_TO_ERROR(); } - new_frame = temp; - stack_pointer[0].bits = (uintptr_t)new_frame; + new_frame = PyStackRef_Wrap(temp); + stack_pointer[0] = new_frame; stack_pointer += 1; assert(WITHIN_STACK_BOUNDS()); break; diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index c8825df3ade..5ac519bb1b6 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -604,7 +604,7 @@ _PyStackRef container; _PyStackRef getitem; _PyStackRef sub; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 5 cache entries */ // _CHECK_PEP_523 { @@ -650,19 +650,20 @@ // _BINARY_OP_SUBSCR_INIT_CALL { sub = stack_pointer[-1]; - new_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); - new_frame->localsplus[0] = container; - new_frame->localsplus[1] = sub; + _PyInterpreterFrame* pushed_frame = _PyFrame_PushUnchecked(tstate, getitem, 2, frame); + pushed_frame->localsplus[0] = container; + pushed_frame->localsplus[1] = sub; frame->return_offset = 6 ; + new_frame = PyStackRef_Wrap(pushed_frame); } // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -2; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -1708,8 +1709,8 @@ _PyStackRef init; _PyStackRef self; _PyStackRef *args; - _PyInterpreterFrame *init_frame; - _PyInterpreterFrame *new_frame; + _PyStackRef init_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -1792,17 +1793,17 @@ stack_pointer = _PyFrame_GetStackPointer(frame); JUMP_TO_LABEL(error); } - init_frame = temp; frame->return_offset = 1 + 
INLINE_CACHE_ENTRIES_CALL; tstate->py_recursion_remaining--; + init_frame = PyStackRef_Wrap(temp); } // _PUSH_FRAME { new_frame = init_frame; assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -1828,7 +1829,7 @@ _PyStackRef null; _PyStackRef self_or_null; _PyStackRef *args; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -1921,12 +1922,13 @@ args = &stack_pointer[-oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } + new_frame = PyStackRef_Wrap(pushed_frame); } // _SAVE_RETURN_OFFSET { @@ -1940,11 +1942,11 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -2 - oparg; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -1970,7 +1972,7 @@ _PyStackRef null; _PyStackRef self_or_null; _PyStackRef *args; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -2056,7 +2058,7 @@ if (temp == NULL) { JUMP_TO_LABEL(error); } - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } // _SAVE_RETURN_OFFSET { @@ -2070,9 +2072,9 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -3040,7 +3042,7 @@ _PyStackRef self_or_null; _PyStackRef *args; _PyStackRef kwnames; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -3127,7 +3129,7 @@ if (temp == NULL) { JUMP_TO_LABEL(error); } - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } // _SAVE_RETURN_OFFSET { @@ -3141,9 +3143,9 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || 
temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -3304,7 +3306,7 @@ _PyStackRef self_or_null; _PyStackRef *args; _PyStackRef kwnames; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -3364,7 +3366,7 @@ if (temp == NULL) { JUMP_TO_LABEL(error); } - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } // _SAVE_RETURN_OFFSET { @@ -3378,9 +3380,9 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -4163,7 +4165,7 @@ _PyStackRef callable; _PyStackRef self_or_null; _PyStackRef *args; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -4227,12 +4229,13 @@ args = &stack_pointer[-oparg]; int has_self = !PyStackRef_IsNull(self_or_null); STAT_INC(CALL, hit); - new_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); - _PyStackRef *first_non_self_local = new_frame->localsplus + has_self; - new_frame->localsplus[0] = self_or_null; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, callable, oparg + has_self, frame); + _PyStackRef *first_non_self_local = pushed_frame->localsplus + has_self; + pushed_frame->localsplus[0] = self_or_null; for (int i = 0; i < oparg; i++) { first_non_self_local[i] = args[i]; } + new_frame = PyStackRef_Wrap(pushed_frame); } // _SAVE_RETURN_OFFSET { @@ -4246,11 +4249,11 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -2 - oparg; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -4275,7 +4278,7 @@ _PyStackRef callable; _PyStackRef self_or_null; _PyStackRef *args; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -4334,7 +4337,7 @@ if (temp == NULL) { JUMP_TO_LABEL(error); } - new_frame = temp; + new_frame = PyStackRef_Wrap(temp); } // _SAVE_RETURN_OFFSET { @@ -4348,9 +4351,9 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -5785,8 +5788,8 @@ INSTRUCTION_STATS(FOR_ITER_GEN); static_assert(INLINE_CACHE_ENTRIES_FOR_ITER == 1, "incorrect cache size"); _PyStackRef iter; - _PyInterpreterFrame *gen_frame; - _PyInterpreterFrame *new_frame; + _PyStackRef gen_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { 
@@ -5818,21 +5821,22 @@ JUMP_TO_PREDICTED(FOR_ITER); } STAT_INC(FOR_ITER, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_None); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_None); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; - gen_frame->previous = frame; + pushed_frame->previous = frame; frame->return_offset = (uint16_t)( 2 + oparg); + gen_frame = PyStackRef_Wrap(pushed_frame); } // _PUSH_FRAME { new_frame = gen_frame; assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -8650,7 +8654,7 @@ INSTRUCTION_STATS(LOAD_ATTR_PROPERTY); static_assert(INLINE_CACHE_ENTRIES_LOAD_ATTR == 9, "incorrect cache size"); _PyStackRef owner; - _PyInterpreterFrame *new_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -8701,8 +8705,9 @@ JUMP_TO_PREDICTED(LOAD_ATTR); } STAT_INC(LOAD_ATTR, hit); - new_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); - new_frame->localsplus[0] = owner; + _PyInterpreterFrame *pushed_frame = _PyFrame_PushUnchecked(tstate, PyStackRef_FromPyObjectNew(fget), 1, frame); + pushed_frame->localsplus[0] = owner; + new_frame = PyStackRef_Wrap(pushed_frame); } // _SAVE_RETURN_OFFSET { @@ -8716,11 +8721,11 @@ // _PUSH_FRAME { assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -1; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; @@ -10661,8 +10666,8 @@ static_assert(INLINE_CACHE_ENTRIES_SEND == 1, "incorrect cache size"); _PyStackRef receiver; _PyStackRef v; - _PyInterpreterFrame *gen_frame; - _PyInterpreterFrame *new_frame; + _PyStackRef gen_frame; + _PyStackRef new_frame; /* Skip 1 cache entry */ // _CHECK_PEP_523 { @@ -10688,24 +10693,25 @@ JUMP_TO_PREDICTED(SEND); } STAT_INC(SEND, hit); - gen_frame = &gen->gi_iframe; - _PyFrame_StackPush(gen_frame, PyStackRef_MakeHeapSafe(v)); + _PyInterpreterFrame *pushed_frame = &gen->gi_iframe; + _PyFrame_StackPush(pushed_frame, PyStackRef_MakeHeapSafe(v)); gen->gi_frame_state = FRAME_EXECUTING; gen->gi_exc_state.previous_item = tstate->exc_info; tstate->exc_info = &gen->gi_exc_state; assert( 2 + oparg <= UINT16_MAX); frame->return_offset = (uint16_t)( 2 + oparg); - gen_frame->previous = frame; + pushed_frame->previous = frame; + gen_frame = PyStackRef_Wrap(pushed_frame); } // _PUSH_FRAME { new_frame = gen_frame; assert(tstate->interp->eval_frame == NULL); - _PyInterpreterFrame *temp = new_frame; + _PyInterpreterFrame *temp = PyStackRef_Unwrap(new_frame); stack_pointer += -1; assert(WITHIN_STACK_BOUNDS()); _PyFrame_SetStackPointer(frame, stack_pointer); - assert(new_frame->previous == frame || new_frame->previous->previous == 
frame); + assert(temp->previous == frame || temp->previous->previous == frame); CALL_STAT_INC(inlined_py_calls); frame = tstate->current_frame = temp; tstate->py_recursion_remaining--; diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c index a9d5e92ca02..babd3e46b8d 100644 --- a/Python/optimizer_bytecodes.c +++ b/Python/optimizer_bytecodes.c @@ -373,7 +373,7 @@ dummy_func(void) { GETLOCAL(this_instr->operand0) = res; } - op(_BINARY_OP_SUBSCR_INIT_CALL, (container, sub, getitem -- new_frame: _Py_UOpsAbstractFrame *)) { + op(_BINARY_OP_SUBSCR_INIT_CALL, (container, sub, getitem -- new_frame)) { new_frame = NULL; ctx->done = true; } @@ -697,7 +697,7 @@ dummy_func(void) { self = owner; } - op(_LOAD_ATTR_PROPERTY_FRAME, (fget/4, owner -- new_frame: _Py_UOpsAbstractFrame *)) { + op(_LOAD_ATTR_PROPERTY_FRAME, (fget/4, owner -- new_frame)) { (void)fget; new_frame = NULL; ctx->done = true; @@ -735,7 +735,7 @@ dummy_func(void) { sym_set_type(callable, &PyMethod_Type); } - op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame: _Py_UOpsAbstractFrame *)) { + op(_INIT_CALL_PY_EXACT_ARGS, (callable, self_or_null, args[oparg] -- new_frame)) { int argcount = oparg; PyCodeObject *co = NULL; @@ -756,10 +756,9 @@ dummy_func(void) { } if (sym_is_null(self_or_null) || sym_is_not_null(self_or_null)) { - new_frame = frame_new(ctx, co, 0, args, argcount); + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, args, argcount); } else { - new_frame = frame_new(ctx, co, 0, NULL, 0); - + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, NULL, 0); } } @@ -769,7 +768,7 @@ dummy_func(void) { self_or_null = sym_new_not_null(ctx); } - op(_PY_FRAME_GENERAL, (callable, self_or_null, args[oparg] -- new_frame: _Py_UOpsAbstractFrame *)) { + op(_PY_FRAME_GENERAL, (callable, self_or_null, args[oparg] -- new_frame)) { PyCodeObject *co = NULL; assert((this_instr + 2)->opcode == _PUSH_FRAME); co = get_code_with_logging((this_instr + 2)); @@ -778,10 +777,10 @@ dummy_func(void) { break; } - new_frame = frame_new(ctx, co, 0, NULL, 0); + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, NULL, 0); } - op(_PY_FRAME_KW, (callable, self_or_null, args[oparg], kwnames -- new_frame: _Py_UOpsAbstractFrame *)) { + op(_PY_FRAME_KW, (callable, self_or_null, args[oparg], kwnames -- new_frame)) { new_frame = NULL; ctx->done = true; } @@ -793,7 +792,7 @@ dummy_func(void) { self_or_null = sym_new_not_null(ctx); } - op(_CREATE_INIT_FRAME, (init, self, args[oparg] -- init_frame: _Py_UOpsAbstractFrame *)) { + op(_CREATE_INIT_FRAME, (init, self, args[oparg] -- init_frame)) { init_frame = NULL; ctx->done = true; } @@ -860,13 +859,13 @@ dummy_func(void) { } } - op(_FOR_ITER_GEN_FRAME, (unused, unused -- unused, unused, gen_frame: _Py_UOpsAbstractFrame*)) { + op(_FOR_ITER_GEN_FRAME, (unused, unused -- unused, unused, gen_frame)) { gen_frame = NULL; /* We are about to hit the end of the trace */ ctx->done = true; } - op(_SEND_GEN_FRAME, (unused, unused -- unused, gen_frame: _Py_UOpsAbstractFrame *)) { + op(_SEND_GEN_FRAME, (unused, unused -- unused, gen_frame)) { gen_frame = NULL; // We are about to hit the end of the trace: ctx->done = true; @@ -884,12 +883,12 @@ dummy_func(void) { Py_UNREACHABLE(); } - op(_PUSH_FRAME, (new_frame: _Py_UOpsAbstractFrame * -- )) { + op(_PUSH_FRAME, (new_frame -- )) { SYNC_SP(); ctx->frame->stack_pointer = stack_pointer; - ctx->frame = new_frame; + ctx->frame = (_Py_UOpsAbstractFrame *)new_frame; ctx->curr_frame_depth++; - stack_pointer = new_frame->stack_pointer; + stack_pointer = 
ctx->frame->stack_pointer; co = get_code(this_instr); if (co == NULL) { // should be about to _EXIT_TRACE anyway diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h index 4780e492f61..adab110c5ce 100644 --- a/Python/optimizer_cases.c.h +++ b/Python/optimizer_cases.c.h @@ -715,10 +715,10 @@ } case _BINARY_OP_SUBSCR_INIT_CALL: { - _Py_UOpsAbstractFrame *new_frame; + JitOptSymbol *new_frame; new_frame = NULL; ctx->done = true; - stack_pointer[-3] = (JitOptSymbol *)new_frame; + stack_pointer[-3] = new_frame; stack_pointer += -2; assert(WITHIN_STACK_BOUNDS()); break; @@ -829,10 +829,10 @@ /* _SEND is not a viable micro-op for tier 2 */ case _SEND_GEN_FRAME: { - _Py_UOpsAbstractFrame *gen_frame; + JitOptSymbol *gen_frame; gen_frame = NULL; ctx->done = true; - stack_pointer[-1] = (JitOptSymbol *)gen_frame; + stack_pointer[-1] = gen_frame; break; } @@ -1323,12 +1323,12 @@ } case _LOAD_ATTR_PROPERTY_FRAME: { - _Py_UOpsAbstractFrame *new_frame; + JitOptSymbol *new_frame; PyObject *fget = (PyObject *)this_instr->operand0; (void)fget; new_frame = NULL; ctx->done = true; - stack_pointer[-1] = (JitOptSymbol *)new_frame; + stack_pointer[-1] = new_frame; break; } @@ -1685,10 +1685,10 @@ } case _FOR_ITER_GEN_FRAME: { - _Py_UOpsAbstractFrame *gen_frame; + JitOptSymbol *gen_frame; gen_frame = NULL; ctx->done = true; - stack_pointer[0] = (JitOptSymbol *)gen_frame; + stack_pointer[0] = gen_frame; stack_pointer += 1; assert(WITHIN_STACK_BOUNDS()); break; @@ -1857,7 +1857,7 @@ /* _MONITOR_CALL is not a viable micro-op for tier 2 */ case _PY_FRAME_GENERAL: { - _Py_UOpsAbstractFrame *new_frame; + JitOptSymbol *new_frame; PyCodeObject *co = NULL; assert((this_instr + 2)->opcode == _PUSH_FRAME); co = get_code_with_logging((this_instr + 2)); @@ -1865,8 +1865,8 @@ ctx->done = true; break; } - new_frame = frame_new(ctx, co, 0, NULL, 0); - stack_pointer[-2 - oparg] = (JitOptSymbol *)new_frame; + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, NULL, 0); + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -1970,7 +1970,7 @@ case _INIT_CALL_PY_EXACT_ARGS: { JitOptSymbol **args; JitOptSymbol *self_or_null; - _Py_UOpsAbstractFrame *new_frame; + JitOptSymbol *new_frame; args = &stack_pointer[-oparg]; self_or_null = stack_pointer[-1 - oparg]; int argcount = oparg; @@ -1988,25 +1988,25 @@ argcount++; } if (sym_is_null(self_or_null) || sym_is_not_null(self_or_null)) { - new_frame = frame_new(ctx, co, 0, args, argcount); + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, args, argcount); } else { - new_frame = frame_new(ctx, co, 0, NULL, 0); + new_frame = (JitOptSymbol *)frame_new(ctx, co, 0, NULL, 0); } - stack_pointer[-2 - oparg] = (JitOptSymbol *)new_frame; + stack_pointer[-2 - oparg] = new_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; } case _PUSH_FRAME: { - _Py_UOpsAbstractFrame *new_frame; - new_frame = (_Py_UOpsAbstractFrame *)stack_pointer[-1]; + JitOptSymbol *new_frame; + new_frame = stack_pointer[-1]; stack_pointer += -1; assert(WITHIN_STACK_BOUNDS()); ctx->frame->stack_pointer = stack_pointer; - ctx->frame = new_frame; + ctx->frame = (_Py_UOpsAbstractFrame *)new_frame; ctx->curr_frame_depth++; - stack_pointer = new_frame->stack_pointer; + stack_pointer = ctx->frame->stack_pointer; co = get_code(this_instr); if (co == NULL) { ctx->done = true; @@ -2159,10 +2159,10 @@ } case _CREATE_INIT_FRAME: { - _Py_UOpsAbstractFrame *init_frame; + JitOptSymbol *init_frame; init_frame = NULL; ctx->done = true; - stack_pointer[-2 
- oparg] = (JitOptSymbol *)init_frame; + stack_pointer[-2 - oparg] = init_frame; stack_pointer += -1 - oparg; assert(WITHIN_STACK_BOUNDS()); break; @@ -2326,10 +2326,10 @@ /* _DO_CALL_KW is not a viable micro-op for tier 2 */ case _PY_FRAME_KW: { - _Py_UOpsAbstractFrame *new_frame; + JitOptSymbol *new_frame; new_frame = NULL; ctx->done = true; - stack_pointer[-3 - oparg] = (JitOptSymbol *)new_frame; + stack_pointer[-3 - oparg] = new_frame; stack_pointer += -2 - oparg; assert(WITHIN_STACK_BOUNDS()); break; diff --git a/Tools/cases_generator/analyzer.py b/Tools/cases_generator/analyzer.py index 1447f365336..fca9b29f9eb 100644 --- a/Tools/cases_generator/analyzer.py +++ b/Tools/cases_generator/analyzer.py @@ -135,15 +135,13 @@ class Flush: @dataclass class StackItem: name: str - type: str | None size: str peek: bool = False used: bool = False def __str__(self) -> str: size = f"[{self.size}]" if self.size else "" - type = "" if self.type is None else f"{self.type} " - return f"{type}{self.name}{size} {self.peek}" + return f"{self.name}{size} {self.peek}" def is_array(self) -> bool: return self.size != "" @@ -345,7 +343,7 @@ def override_error( def convert_stack_item( item: parser.StackEffect, replace_op_arg_1: str | None ) -> StackItem: - return StackItem(item.name, item.type, item.size) + return StackItem(item.name, item.size) def check_unused(stack: list[StackItem], input_names: dict[str, lexer.Token]) -> None: "Unused items cannot be on the stack above used, non-peek items" @@ -683,6 +681,8 @@ NON_ESCAPING_FUNCTIONS = ( "PyStackRef_IsNullOrInt", "PyStackRef_IsError", "PyStackRef_IsValid", + "PyStackRef_Wrap", + "PyStackRef_Unwrap", ) @@ -811,7 +811,7 @@ def stack_effect_only_peeks(instr: parser.InstDef) -> bool: if len(stack_inputs) == 0: return False return all( - (s.name == other.name and s.type == other.type and s.size == other.size) + (s.name == other.name and s.size == other.size) for s, other in zip(stack_inputs, instr.outputs) ) diff --git a/Tools/cases_generator/generators_common.py b/Tools/cases_generator/generators_common.py index 02f9a952754..47de205c0e9 100644 --- a/Tools/cases_generator/generators_common.py +++ b/Tools/cases_generator/generators_common.py @@ -56,9 +56,7 @@ def root_relative_path(filename: str) -> str: def type_and_null(var: StackItem) -> tuple[str, str]: - if var.type: - return var.type, "NULL" - elif var.is_array(): + if var.is_array(): return "_PyStackRef *", "NULL" else: return "_PyStackRef", "PyStackRef_NULL" diff --git a/Tools/cases_generator/optimizer_generator.py b/Tools/cases_generator/optimizer_generator.py index fda022a44e5..75805dbd7f3 100644 --- a/Tools/cases_generator/optimizer_generator.py +++ b/Tools/cases_generator/optimizer_generator.py @@ -73,8 +73,6 @@ def validate_uop(override: Uop, uop: Uop) -> None: def type_name(var: StackItem) -> str: if var.is_array(): return "JitOptSymbol **" - if var.type: - return var.type return "JitOptSymbol *" @@ -230,7 +228,7 @@ def generate_abstract_interpreter( declare_variables(override, out, skip_inputs=False) else: declare_variables(uop, out, skip_inputs=True) - stack = Stack(extract_bits=False, cast_type="JitOptSymbol *") + stack = Stack() write_uop(override, uop, out, stack, debug, skip_inputs=(override is None)) out.start_line() out.emit("break;\n") diff --git a/Tools/cases_generator/parsing.py b/Tools/cases_generator/parsing.py index 9c9b0053a59..a6dac481875 100644 --- a/Tools/cases_generator/parsing.py +++ b/Tools/cases_generator/parsing.py @@ -247,12 +247,11 @@ class SimpleStmt(Stmt): @dataclass class 
diff --git a/Tools/cases_generator/parsing.py b/Tools/cases_generator/parsing.py
index 9c9b0053a59..a6dac481875 100644
--- a/Tools/cases_generator/parsing.py
+++ b/Tools/cases_generator/parsing.py
@@ -247,12 +247,11 @@ class SimpleStmt(Stmt):
 @dataclass
 class StackEffect(Node):
     name: str = field(compare=False)  # __eq__ only uses type, cond, size
-    type: str = ""  # Optional `:type`
     size: str = ""  # Optional `[size]`
     # Note: size cannot be combined with type or cond

     def __repr__(self) -> str:
-        items = [self.name, self.type, self.size]
+        items = [self.name, self.size]
         while items and items[-1] == "":
             del items[-1]
         return f"StackEffect({', '.join(repr(item) for item in items)})"
@@ -463,20 +462,13 @@ class Parser(PLexer):
         # IDENTIFIER [':' IDENTIFIER [TIMES]] ['if' '(' expression ')']
         # | IDENTIFIER '[' expression ']'
         if tkn := self.expect(lx.IDENTIFIER):
-            type_text = ""
-            if self.expect(lx.COLON):
-                type_text = self.require(lx.IDENTIFIER).text.strip()
-                if self.expect(lx.TIMES):
-                    type_text += " *"
             size_text = ""
             if self.expect(lx.LBRACKET):
-                if type_text:
-                    raise self.make_syntax_error("Unexpected [")
                 if not (size := self.expression()):
                     raise self.make_syntax_error("Expected expression")
                 self.require(lx.RBRACKET)
                 size_text = size.text.strip()
-            return StackEffect(tkn.text, type_text, size_text)
+            return StackEffect(tkn.text, size_text)
         return None

     @contextual
diff --git a/Tools/cases_generator/stack.py b/Tools/cases_generator/stack.py
index df168afa888..3a0e7e5d0d5 100644
--- a/Tools/cases_generator/stack.py
+++ b/Tools/cases_generator/stack.py
@@ -168,7 +168,7 @@ class Local:

     @staticmethod
     def register(name: str) -> "Local":
-        item = StackItem(name, None, "", False, True)
+        item = StackItem(name, "", False, True)
         return Local(item, None, True)

     def kill(self) -> None:
@@ -216,13 +216,11 @@ def array_or_scalar(var: StackItem | Local) -> str:
     return "array" if var.is_array() else "scalar"

 class Stack:
-    def __init__(self, extract_bits: bool=True, cast_type: str = "uintptr_t") -> None:
+    def __init__(self) -> None:
         self.base_offset = PointerOffset.zero()
         self.physical_sp = PointerOffset.zero()
         self.logical_sp = PointerOffset.zero()
         self.variables: list[Local] = []
-        self.extract_bits = extract_bits
-        self.cast_type = cast_type

     def drop(self, var: StackItem, check_liveness: bool) -> None:
         self.logical_sp = self.logical_sp.pop(var)
@@ -268,10 +266,8 @@ class Stack:
         self.base_offset = self.logical_sp
         if var.name in UNUSED or not var.used:
             return Local.unused(var, self.base_offset)
-        cast = f"({var.type})" if (not indirect and var.type) else ""
-        bits = ".bits" if cast and self.extract_bits else ""
         c_offset = (self.base_offset - self.physical_sp).to_c()
-        assign = f"{var.name} = {cast}{indirect}stack_pointer[{c_offset}]{bits};\n"
+        assign = f"{var.name} = {indirect}stack_pointer[{c_offset}];\n"
         out.emit(assign)
         self._print(out)
         return Local.from_memory(var, self.base_offset)
@@ -292,12 +288,8 @@ class Stack:
         out: CWriter,
         var: StackItem,
         stack_offset: PointerOffset,
-        cast_type: str,
-        extract_bits: bool,
     ) -> None:
-        cast = f"({cast_type})" if var.type else ""
-        bits = ".bits" if cast and extract_bits else ""
-        out.emit(f"stack_pointer[{stack_offset.to_c()}]{bits} = {cast}{var.name};\n")
+        out.emit(f"stack_pointer[{stack_offset.to_c()}] = {var.name};\n")

     def _save_physical_sp(self, out: CWriter) -> None:
         if self.physical_sp != self.logical_sp:
@@ -320,7 +312,7 @@ class Stack:
                 self._print(out)
                 var.memory_offset = var_offset
                 stack_offset = var_offset - self.physical_sp
-                Stack._do_emit(out, var.item, stack_offset, self.cast_type, self.extract_bits)
+                Stack._do_emit(out, var.item, stack_offset)
                 self._print(out)
             var_offset = var_offset.push(var.item)

@@ -350,7 +342,7 @@ class Stack:
             out.emit(self.as_comment() + "\n")

     def copy(self) -> "Stack":
-        other = Stack(self.extract_bits, self.cast_type)
+        other = Stack()
         other.base_offset = self.base_offset
         other.physical_sp = self.physical_sp
         other.logical_sp = self.logical_sp
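With `cast_type` and `extract_bits` gone from `Stack`, the emitter always reads and writes whole `_PyStackRef` slots instead of casting through `.bits`. A rough before/after sketch of the emitted C for a one-item store (illustrative only; the variable name and offset are hypothetical):

    name, offset = "res", "-1"

    # Old default path for a typed item (extract_bits=True, cast_type="uintptr_t"):
    old = f"stack_pointer[{offset}].bits = (uintptr_t){name};"
    # New path: every item is a plain _PyStackRef and is stored directly.
    new = f"stack_pointer[{offset}] = {name};"

    print(old)  # stack_pointer[-1].bits = (uintptr_t)res;
    print(new)  # stack_pointer[-1] = res;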
diff --git a/Tools/i18n/makelocalealias.py b/Tools/i18n/makelocalealias.py
index b407a8a643b..02af1caff7d 100755
--- a/Tools/i18n/makelocalealias.py
+++ b/Tools/i18n/makelocalealias.py
@@ -140,6 +140,9 @@ if __name__ == '__main__':
     data = locale.locale_alias.copy()
     data.update(parse_glibc_supported(args.glibc_supported))
     data.update(parse(args.locale_alias))
+    # Hardcode 'c.utf8' -> 'C.UTF-8' because 'en_US.UTF-8' does not exist
+    # on all platforms.
+    data['c.utf8'] = 'C.UTF-8'
     while True:
         # Repeat optimization while the size is decreased.
         n = len(data)
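The makelocalealias.py change only affects the generated alias table; once Lib/locale.py is regenerated from it, the lowercase spelling should normalize to the canonical C.UTF-8 name on every platform. A hedged usage sketch (expected behaviour, assuming the regenerated table is in place):

    import locale

    # With the hardcoded 'c.utf8' alias, normalization no longer depends on an
    # en_US-based entry that may be missing on some platforms.
    print(locale.normalize('c.utf8'))   # expected: 'C.UTF-8'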