-rw-r--r--  .github/CODEOWNERS | 1
-rw-r--r--  .github/workflows/build.yml | 86
-rwxr-xr-x  .github/workflows/posix-deps-apt.sh | 1
-rw-r--r--  Doc/library/codecs.rst | 6
-rw-r--r--  Doc/library/email.parser.rst | 10
-rw-r--r--  Doc/library/functools.rst | 33
-rw-r--r--  Doc/tools/.nitignore | 2
-rw-r--r--  Include/internal/mimalloc/mimalloc/types.h | 2
-rw-r--r--  Include/internal/pycore_interp_structs.h | 3
-rw-r--r--  Lib/collections/__init__.py | 11
-rw-r--r--  Lib/concurrent/interpreters/_crossinterp.py | 19
-rw-r--r--  Lib/enum.py | 30
-rw-r--r--  Lib/test/test_argparse.py | 2
-rw-r--r--  Lib/test/test_cext/__init__.py | 47
-rw-r--r--  Lib/test/test_cext/extension.c | 20
-rw-r--r--  Lib/test/test_cext/setup.py | 33
-rw-r--r--  Lib/test/test_cppext/__init__.py | 24
-rw-r--r--  Lib/test/test_cppext/extension.cpp | 9
-rw-r--r--  Lib/test/test_cppext/setup.py | 4
-rw-r--r--  Lib/test/test_dictcomps.py | 2
-rw-r--r--  Lib/test/test_enum.py | 37
-rw-r--r--  Lib/test/test_setcomps.py | 2
-rw-r--r--  Lib/test/test_tempfile.py | 24
-rw-r--r--  Lib/test/test_zipfile/_path/test_path.py | 2
-rw-r--r--  Misc/NEWS.d/3.10.0a3.rst | 2
-rw-r--r--  Misc/NEWS.d/3.12.0a5.rst | 2
-rw-r--r--  Misc/NEWS.d/3.13.0a1.rst | 2
-rw-r--r--  Misc/NEWS.d/3.14.0b1.rst | 2
-rw-r--r--  Misc/NEWS.d/next/Core_and_Builtins/2025-07-11-13-45-48.gh-issue-136541.uZ_-Ju.rst | 3
-rw-r--r--  Misc/NEWS.d/next/Library/2023-07-05-14-34-10.gh-issue-105497.HU5u89.rst | 1
-rw-r--r--  Misc/NEWS.d/next/Library/2025-04-08-07-25-10.gh-issue-107583.JGfbhq.rst | 4
-rw-r--r--  Misc/NEWS.d/next/Library/2025-07-08-20-58-01.gh-issue-136434.uuJsjS.rst | 2
-rw-r--r--  Misc/NEWS.d/next/Tests/2025-06-11-16-52-49.gh-issue-135401.ccMXmL.rst | 1
-rw-r--r--  Modules/_asynciomodule.c | 2
-rw-r--r--  Modules/_testbuffer.c | 3
-rw-r--r--  Modules/_testcapi/monitoring.c | 2
-rw-r--r--  Modules/_testcapi/time.c | 3
-rw-r--r--  Modules/_testcapimodule.c | 3
-rw-r--r--  Modules/_testinternalcapi.c | 3
-rw-r--r--  Objects/typeobject.c | 2
-rw-r--r--  Python/asm_trampoline.S | 7
-rw-r--r--  Python/perf_jit_trampoline.c | 163
-rw-r--r--  Python/perf_trampoline.c | 15
-rw-r--r--  Python/preconfig.c | 2
-rwxr-xr-x  Tools/ssl/multissltests.py | 88
-rwxr-xr-x  configure | 2
-rw-r--r--  configure.ac | 2
47 files changed, 538 insertions, 188 deletions
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 98efdb65146..02a7b5d45b4 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -27,6 +27,7 @@ Modules/Setup* @erlend-aasland
**/*genobject* @markshannon
**/*hamt* @1st1
**/*jit* @brandtbucher @savannahostrowski @diegorusso
+Python/perf_jit_trampoline.c # Exclude the owners of "**/*jit*", above.
Objects/set* @rhettinger
Objects/dict* @methane @markshannon
Objects/typevarobject.c @JelleZijlstra
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index c6171571857..05f20e12f46 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -260,7 +260,7 @@ jobs:
free-threading: ${{ matrix.free-threading }}
os: ${{ matrix.os }}
- build-ubuntu-ssltests:
+ build-ubuntu-ssltests-openssl:
name: 'Ubuntu SSL tests with OpenSSL'
runs-on: ${{ matrix.os }}
timeout-minutes: 60
@@ -322,6 +322,81 @@ jobs:
- name: SSL tests
run: ./python Lib/test/ssltests.py
+ build-ubuntu-ssltests-awslc:
+ name: 'Ubuntu SSL tests with AWS-LC'
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+ needs: build-context
+ if: needs.build-context.outputs.run-tests == 'true'
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-24.04]
+ awslc_ver: [1.55.0]
+ env:
+ AWSLC_VER: ${{ matrix.awslc_ver}}
+ MULTISSL_DIR: ${{ github.workspace }}/multissl
+ OPENSSL_DIR: ${{ github.workspace }}/multissl/aws-lc/${{ matrix.awslc_ver }}
+ LD_LIBRARY_PATH: ${{ github.workspace }}/multissl/aws-lc/${{ matrix.awslc_ver }}/lib
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ persist-credentials: false
+ - name: Runner image version
+ run: echo "IMAGE_OS_VERSION=${ImageOS}-${ImageVersion}" >> "$GITHUB_ENV"
+ - name: Restore config.cache
+ uses: actions/cache@v4
+ with:
+ path: config.cache
+ key: ${{ github.job }}-${{ env.IMAGE_OS_VERSION }}-${{ needs.build-context.outputs.config-hash }}
+ - name: Register gcc problem matcher
+ run: echo "::add-matcher::.github/problem-matchers/gcc.json"
+ - name: Install dependencies
+ run: sudo ./.github/workflows/posix-deps-apt.sh
+ - name: Configure SSL lib env vars
+ run: |
+ echo "MULTISSL_DIR=${GITHUB_WORKSPACE}/multissl" >> "$GITHUB_ENV"
+ echo "OPENSSL_DIR=${GITHUB_WORKSPACE}/multissl/aws-lc/${AWSLC_VER}" >> "$GITHUB_ENV"
+ echo "LD_LIBRARY_PATH=${GITHUB_WORKSPACE}/multissl/aws-lc/${AWSLC_VER}/lib" >> "$GITHUB_ENV"
+ - name: 'Restore AWS-LC build'
+ id: cache-aws-lc
+ uses: actions/cache@v4
+ with:
+ path: ./multissl/aws-lc/${{ matrix.awslc_ver }}
+ key: ${{ matrix.os }}-multissl-aws-lc-${{ matrix.awslc_ver }}
+ - name: Install AWS-LC
+ if: steps.cache-aws-lc.outputs.cache-hit != 'true'
+ run: |
+ python3 Tools/ssl/multissltests.py \
+ --steps=library \
+ --base-directory "$MULTISSL_DIR" \
+ --awslc ${{ matrix.awslc_ver }} \
+ --system Linux
+ - name: Add ccache to PATH
+ run: |
+ echo "PATH=/usr/lib/ccache:$PATH" >> "$GITHUB_ENV"
+ - name: Configure ccache action
+ uses: hendrikmuhs/ccache-action@v1.2
+ with:
+ save: false
+ - name: Configure CPython
+ run: |
+ ./configure CFLAGS="-fdiagnostics-format=json" \
+ --config-cache \
+ --enable-slower-safety \
+ --with-pydebug \
+ --with-openssl="$OPENSSL_DIR" \
+ --with-builtin-hashlib-hashes=blake2 \
+ --with-ssl-default-suites=openssl
+ - name: Build CPython
+ run: make -j
+ - name: Display build info
+ run: make pythoninfo
+ - name: Verify python is linked to AWS-LC
+ run: ./python -c 'import ssl; print(ssl.OPENSSL_VERSION)' | grep AWS-LC
+ - name: SSL tests
+ run: ./python Lib/test/ssltests.py
+
build-wasi:
name: 'WASI'
needs: build-context
@@ -628,7 +703,8 @@ jobs:
- build-windows-msi
- build-macos
- build-ubuntu
- - build-ubuntu-ssltests
+ - build-ubuntu-ssltests-awslc
+ - build-ubuntu-ssltests-openssl
- build-wasi
- test-hypothesis
- build-asan
@@ -643,7 +719,8 @@ jobs:
with:
allowed-failures: >-
build-windows-msi,
- build-ubuntu-ssltests,
+ build-ubuntu-ssltests-awslc,
+ build-ubuntu-ssltests-openssl,
test-hypothesis,
cifuzz,
allowed-skips: >-
@@ -661,7 +738,8 @@ jobs:
check-generated-files,
build-macos,
build-ubuntu,
- build-ubuntu-ssltests,
+ build-ubuntu-ssltests-awslc,
+ build-ubuntu-ssltests-openssl,
build-wasi,
test-hypothesis,
build-asan,
diff --git a/.github/workflows/posix-deps-apt.sh b/.github/workflows/posix-deps-apt.sh
index 44e6a9ce2d0..0b64367e6c4 100755
--- a/.github/workflows/posix-deps-apt.sh
+++ b/.github/workflows/posix-deps-apt.sh
@@ -5,6 +5,7 @@ apt-get -yq install \
build-essential \
pkg-config \
ccache \
+ cmake \
gdb \
lcov \
libb2-dev \
diff --git a/Doc/library/codecs.rst b/Doc/library/codecs.rst
index 37bd913b765..c5dae7c8e8f 100644
--- a/Doc/library/codecs.rst
+++ b/Doc/library/codecs.rst
@@ -1395,7 +1395,11 @@ encodings.
| | | It is used in the Python |
| | | pickle protocol. |
+--------------------+---------+---------------------------+
-| undefined | | Raise an exception for |
+| undefined | | This Codec should only |
+| | | be used for testing |
+| | | purposes. |
+| | | |
+| | | Raise an exception for |
| | | all conversions, even |
| | | empty strings. The error |
| | | handler is ignored. |
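As a quick illustration of the behaviour documented above, the undefined codec rejects every conversion regardless of the error handler. A minimal sketch using only the standard library (the exact exception wording is not guaranteed):

    import codecs

    # Any conversion through the "undefined" codec raises UnicodeError,
    # even for empty input and even with errors="ignore".
    try:
        codecs.encode("", "undefined", errors="ignore")
    except UnicodeError as exc:
        print("encode failed as documented:", exc)

    try:
        codecs.decode(b"abc", "undefined")
    except UnicodeError as exc:
        print("decode failed as documented:", exc)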
diff --git a/Doc/library/email.parser.rst b/Doc/library/email.parser.rst
index 439b5c8f34b..90796370ebb 100644
--- a/Doc/library/email.parser.rst
+++ b/Doc/library/email.parser.rst
@@ -48,8 +48,8 @@ methods.
FeedParser API
^^^^^^^^^^^^^^
-The :class:`BytesFeedParser`, imported from the :mod:`email.feedparser` module,
-provides an API that is conducive to incremental parsing of email messages,
+The :class:`BytesFeedParser`, imported from the :mod:`email.parser.FeedParser`
+module, provides an API that is conducive to incremental parsing of email messages,
such as would be necessary when reading the text of an email message from a
source that can block (such as a socket). The :class:`BytesFeedParser` can of
course be used to parse an email message fully contained in a :term:`bytes-like
@@ -116,7 +116,7 @@ Here is the API for the :class:`BytesFeedParser`:
Works like :class:`BytesFeedParser` except that the input to the
:meth:`~BytesFeedParser.feed` method must be a string. This is of limited
utility, since the only way for such a message to be valid is for it to
- contain only ASCII text or, if :attr:`~email.policy.Policy.utf8` is
+ contain only ASCII text or, if :attr:`~email.policy.EmailPolicy.utf8` is
``True``, no binary attachments.
.. versionchanged:: 3.3 Added the *policy* keyword.
@@ -155,11 +155,11 @@ message body, instead setting the payload to the raw body.
Read all the data from the binary file-like object *fp*, parse the
resulting bytes, and return the message object. *fp* must support
- both the :meth:`~io.IOBase.readline` and the :meth:`~io.IOBase.read`
+ both the :meth:`~io.IOBase.readline` and the :meth:`~io.TextIOBase.read`
methods.
The bytes contained in *fp* must be formatted as a block of :rfc:`5322`
- (or, if :attr:`~email.policy.Policy.utf8` is ``True``, :rfc:`6532`)
+ (or, if :attr:`~email.policy.EmailPolicy.utf8` is ``True``, :rfc:`6532`)
style headers and header continuation lines, optionally preceded by an
envelope header. The header block is terminated either by the end of the
data or by a blank line. Following the header block is the body of the
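For context on the incremental-parsing API described above, a minimal sketch that feeds a message to BytesFeedParser in chunks, as one would when reading from a source that can block (the sample message is invented):

    from email.parser import BytesFeedParser

    parser = BytesFeedParser()
    # Data can arrive in arbitrary chunks, e.g. from a socket.
    parser.feed(b"Subject: hello\r\n")
    parser.feed(b"\r\n")
    parser.feed(b"body text\r\n")
    msg = parser.close()           # returns the root message object
    print(msg["Subject"])          # -> hello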
diff --git a/Doc/library/functools.rst b/Doc/library/functools.rst
index 3e75621be6d..beec9b942af 100644
--- a/Doc/library/functools.rst
+++ b/Doc/library/functools.rst
@@ -199,12 +199,18 @@ The :mod:`functools` module defines the following functions:
and *typed*. This is for information purposes only. Mutating the values
has no effect.
+ .. method:: lru_cache.cache_info()
+ :no-typesetting:
+
To help measure the effectiveness of the cache and tune the *maxsize*
- parameter, the wrapped function is instrumented with a :func:`cache_info`
+ parameter, the wrapped function is instrumented with a :func:`!cache_info`
function that returns a :term:`named tuple` showing *hits*, *misses*,
*maxsize* and *currsize*.
- The decorator also provides a :func:`cache_clear` function for clearing or
+ .. method:: lru_cache.cache_clear()
+ :no-typesetting:
+
+ The decorator also provides a :func:`!cache_clear` function for clearing or
invalidating the cache.
The original underlying function is accessible through the
@@ -284,9 +290,9 @@ The :mod:`functools` module defines the following functions:
class decorator supplies the rest. This simplifies the effort involved
in specifying all of the possible rich comparison operations:
- The class must define one of :meth:`__lt__`, :meth:`__le__`,
- :meth:`__gt__`, or :meth:`__ge__`.
- In addition, the class should supply an :meth:`__eq__` method.
+ The class must define one of :meth:`~object.__lt__`, :meth:`~object.__le__`,
+ :meth:`~object.__gt__`, or :meth:`~object.__ge__`.
+ In addition, the class should supply an :meth:`~object.__eq__` method.
For example::
@@ -418,7 +424,7 @@ The :mod:`functools` module defines the following functions:
like normal functions, are handled as descriptors).
When *func* is a descriptor (such as a normal Python function,
- :func:`classmethod`, :func:`staticmethod`, :func:`abstractmethod` or
+ :func:`classmethod`, :func:`staticmethod`, :func:`~abc.abstractmethod` or
another instance of :class:`partialmethod`), calls to ``__get__`` are
delegated to the underlying descriptor, and an appropriate
:ref:`partial object<partial-objects>` returned as the result.
@@ -499,7 +505,10 @@ The :mod:`functools` module defines the following functions:
... print("Let me just say,", end=" ")
... print(arg)
- To add overloaded implementations to the function, use the :func:`register`
+ .. method:: singledispatch.register()
+ :no-typesetting:
+
+ To add overloaded implementations to the function, use the :func:`!register`
attribute of the generic function, which can be used as a decorator. For
functions annotated with types, the decorator will infer the type of the
first argument automatically::
@@ -565,14 +574,14 @@ The :mod:`functools` module defines the following functions:
runtime impact.
To enable registering :term:`lambdas<lambda>` and pre-existing functions,
- the :func:`register` attribute can also be used in a functional form::
+ the :func:`~singledispatch.register` attribute can also be used in a functional form::
>>> def nothing(arg, verbose=False):
... print("Nothing.")
...
>>> fun.register(type(None), nothing)
- The :func:`register` attribute returns the undecorated function. This
+ The :func:`~singledispatch.register` attribute returns the undecorated function. This
enables decorator stacking, :mod:`pickling<pickle>`, and the creation
of unit tests for each variant independently::
@@ -650,10 +659,10 @@ The :mod:`functools` module defines the following functions:
.. versionadded:: 3.4
.. versionchanged:: 3.7
- The :func:`register` attribute now supports using type annotations.
+ The :func:`~singledispatch.register` attribute now supports using type annotations.
.. versionchanged:: 3.11
- The :func:`register` attribute now supports
+ The :func:`~singledispatch.register` attribute now supports
:class:`typing.Union` as a type annotation.
@@ -783,7 +792,7 @@ The :mod:`functools` module defines the following functions:
'Docstring'
Without the use of this decorator factory, the name of the example function
- would have been ``'wrapper'``, and the docstring of the original :func:`example`
+ would have been ``'wrapper'``, and the docstring of the original :func:`!example`
would have been lost.
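The cache_info()/cache_clear() accessors documented above are easiest to see in a small example; a minimal sketch using only functools from the standard library:

    from functools import lru_cache

    @lru_cache(maxsize=2)
    def square(n):
        return n * n

    square(2); square(2); square(3)
    print(square.cache_info())   # CacheInfo(hits=1, misses=2, maxsize=2, currsize=2)
    square.cache_clear()         # empty the cache and reset the statistics
    print(square.cache_info())   # CacheInfo(hits=0, misses=0, maxsize=2, currsize=0)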
diff --git a/Doc/tools/.nitignore b/Doc/tools/.nitignore
index 4f5396857f3..1fbb45ecd73 100644
--- a/Doc/tools/.nitignore
+++ b/Doc/tools/.nitignore
@@ -15,8 +15,6 @@ Doc/extending/extending.rst
Doc/library/ast.rst
Doc/library/asyncio-extending.rst
Doc/library/email.charset.rst
-Doc/library/email.parser.rst
-Doc/library/functools.rst
Doc/library/http.cookiejar.rst
Doc/library/http.server.rst
Doc/library/importlib.rst
diff --git a/Include/internal/mimalloc/mimalloc/types.h b/Include/internal/mimalloc/mimalloc/types.h
index a17f637fe68..19e93224174 100644
--- a/Include/internal/mimalloc/mimalloc/types.h
+++ b/Include/internal/mimalloc/mimalloc/types.h
@@ -481,7 +481,7 @@ typedef struct mi_segment_s {
struct mi_segment_s* next; // the list of freed segments in the cache (must be first field, see `segment.c:mi_segment_init`)
size_t abandoned; // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
- size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim it it is too long)
+ size_t abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)
size_t used; // count of pages in use
uintptr_t cookie; // verify addresses in debug mode: `mi_ptr_cookie(segment) == segment->cookie`
diff --git a/Include/internal/pycore_interp_structs.h b/Include/internal/pycore_interp_structs.h
index f1f427d99de..542a75617b4 100644
--- a/Include/internal/pycore_interp_structs.h
+++ b/Include/internal/pycore_interp_structs.h
@@ -73,6 +73,7 @@ struct trampoline_api_st {
int (*free_state)(void* state);
void *state;
Py_ssize_t code_padding;
+ Py_ssize_t code_alignment;
};
#endif
@@ -129,8 +130,6 @@ struct _atexit_runtime_state {
//###################
// interpreter atexit
-typedef void (*atexit_datacallbackfunc)(void *);
-
typedef struct atexit_callback {
atexit_datacallbackfunc func;
void *data;
diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py
index d2ddc1cd9ec..b8653f40a94 100644
--- a/Lib/collections/__init__.py
+++ b/Lib/collections/__init__.py
@@ -776,23 +776,26 @@ class Counter(dict):
# When the multiplicities are all zero or one, multiset operations
# are guaranteed to be equivalent to the corresponding operations
# for regular sets.
+ #
# Given counter multisets such as:
# cp = Counter(a=1, b=0, c=1)
# cq = Counter(c=1, d=0, e=1)
+ #
# The corresponding regular sets would be:
# sp = {'a', 'c'}
# sq = {'c', 'e'}
+ #
# All of the following relations would hold:
- # set(cp + cq) == sp | sq
- # set(cp - cq) == sp - sq
- # set(cp | cq) == sp | sq
- # set(cp & cq) == sp & sq
# (cp == cq) == (sp == sq)
# (cp != cq) == (sp != sq)
# (cp <= cq) == (sp <= sq)
# (cp < cq) == (sp < sq)
# (cp >= cq) == (sp >= sq)
# (cp > cq) == (sp > sq)
+ # set(cp + cq) == sp | sq
+ # set(cp - cq) == sp - sq
+ # set(cp | cq) == sp | sq
+ # set(cp & cq) == sp & sq
def __eq__(self, other):
'True if all counts agree. Missing counts are treated as zero.'
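A few of the multiset/set correspondences listed in the comment above, checked with the example counters it uses (a minimal sketch, standard library only):

    from collections import Counter

    cp = Counter(a=1, b=0, c=1)
    cq = Counter(c=1, d=0, e=1)
    sp, sq = {'a', 'c'}, {'c', 'e'}

    # Comparison operators agree with the corresponding set comparisons...
    assert (cp == cq) == (sp == sq)
    assert (cp <= cq) == (sp <= sq)
    # ...and multiset operations reduce to set operations when all
    # multiplicities are zero or one.
    assert set(cp + cq) == sp | sq
    assert set(cp & cq) == sp & sq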
diff --git a/Lib/concurrent/interpreters/_crossinterp.py b/Lib/concurrent/interpreters/_crossinterp.py
index f47eb693ac8..a5f46b20fbb 100644
--- a/Lib/concurrent/interpreters/_crossinterp.py
+++ b/Lib/concurrent/interpreters/_crossinterp.py
@@ -40,16 +40,21 @@ class UnboundItem:
@classonly
def singleton(cls, kind, module, name='UNBOUND'):
- doc = cls.__doc__.replace('cross-interpreter container', kind)
- doc = doc.replace('cross-interpreter', kind)
+ doc = cls.__doc__
+ if doc:
+ doc = doc.replace(
+ 'cross-interpreter container', kind,
+ ).replace(
+ 'cross-interpreter', kind,
+ )
subclass = type(
f'Unbound{kind.capitalize()}Item',
(cls,),
- dict(
- _MODULE=module,
- _NAME=name,
- __doc__=doc,
- ),
+ {
+ "_MODULE": module,
+ "_NAME": name,
+ "__doc__": doc,
+ },
)
return object.__new__(subclass)
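The None check added above matters because running Python with -OO strips docstrings, so cls.__doc__ is None and the unconditional replace() would fail. A minimal sketch of the failure mode being avoided, using a hypothetical class rather than the real UnboundItem:

    class Example:
        """cross-interpreter container placeholder"""

    doc = Example.__doc__          # None when run with `python -OO`
    if doc:
        doc = doc.replace('cross-interpreter container', 'queue')
    # Without the `if doc:` guard, doc.replace(...) would raise
    # AttributeError: 'NoneType' object has no attribute 'replace'
    print(doc)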
diff --git a/Lib/enum.py b/Lib/enum.py
index 01fecca3e5a..538b9cc8e96 100644
--- a/Lib/enum.py
+++ b/Lib/enum.py
@@ -535,7 +535,7 @@ class EnumType(type):
# now set the __repr__ for the value
classdict['_value_repr_'] = metacls._find_data_repr_(cls, bases)
#
- # Flag structures (will be removed if final class is not a Flag
+ # Flag structures (will be removed if final class is not a Flag)
classdict['_boundary_'] = (
boundary
or getattr(first_enum, '_boundary_', None)
@@ -544,6 +544,29 @@ class EnumType(type):
classdict['_singles_mask_'] = 0
classdict['_all_bits_'] = 0
classdict['_inverted_'] = None
+ # check for negative flag values and invert if found (using _proto_members)
+ if Flag is not None and bases and issubclass(bases[-1], Flag):
+ bits = 0
+ inverted = []
+ for n in member_names:
+ p = classdict[n]
+ if isinstance(p.value, int):
+ if p.value < 0:
+ inverted.append(p)
+ else:
+ bits |= p.value
+ elif p.value is None:
+ pass
+ elif isinstance(p.value, tuple) and p.value and isinstance(p.value[0], int):
+ if p.value[0] < 0:
+ inverted.append(p)
+ else:
+ bits |= p.value[0]
+ for p in inverted:
+ if isinstance(p.value, int):
+ p.value = bits & p.value
+ else:
+ p.value = (bits & p.value[0], ) + p.value[1:]
try:
classdict['_%s__in_progress' % cls] = True
enum_class = super().__new__(metacls, cls, bases, classdict, **kwds)
@@ -1487,7 +1510,10 @@ class Flag(Enum, boundary=STRICT):
)
if value < 0:
neg_value = value
- value = all_bits + 1 + value
+ if cls._boundary_ in (EJECT, KEEP):
+ value = all_bits + 1 + value
+ else:
+ value = singles_mask & value
# get members and unknown
unknown = value & ~flag_mask
aliases = value & ~singles_mask
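The branch added above chooses how a negative input is folded back into the flag's bit range: KEEP and EJECT boundaries translate it through all_bits, while the other boundaries mask it against the named single-bit members. A worked sketch of the arithmetic with plain integers (hypothetical flag set with members A=1 and B=2, and eight tracked bits for the KEEP case):

    A, B = 1, 2
    singles_mask = A | B        # bits covered by named single-bit members
    all_bits = 0b11111111       # full bit range tracked by the flag (KEEP/EJECT)

    value = ~A                  # -2: "everything except A"
    print(all_bits + 1 + value) # 254 -> KEEP/EJECT: flip within the full range
    print(singles_mask & value) # 2   -> other boundaries: only the named bit B survives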
diff --git a/Lib/test/test_argparse.py b/Lib/test/test_argparse.py
index 08ff41368d9..ddd48b1bc0c 100644
--- a/Lib/test/test_argparse.py
+++ b/Lib/test/test_argparse.py
@@ -1829,7 +1829,7 @@ BIN_STDERR_SENTINEL = object()
class StdStreamComparer:
def __init__(self, attr):
# We try to use the actual stdXXX.buffer attribute as our
- # marker, but but under some test environments,
+ # marker, but under some test environments,
# sys.stdout/err are replaced by io.StringIO which won't have .buffer,
# so we use a sentinel simply to show that the tests do the right thing
# for any buffer supporting object
diff --git a/Lib/test/test_cext/__init__.py b/Lib/test/test_cext/__init__.py
index 46fde541494..93e7b2043d3 100644
--- a/Lib/test/test_cext/__init__.py
+++ b/Lib/test/test_cext/__init__.py
@@ -28,29 +28,13 @@ SETUP = os.path.join(os.path.dirname(__file__), 'setup.py')
@support.requires_venv_with_pip()
@support.requires_subprocess()
@support.requires_resource('cpu')
-class TestExt(unittest.TestCase):
+class BaseTests:
+ TEST_INTERNAL_C_API = False
+
# Default build with no options
def test_build(self):
self.check_build('_test_cext')
- def test_build_c11(self):
- self.check_build('_test_c11_cext', std='c11')
-
- @unittest.skipIf(support.MS_WINDOWS, "MSVC doesn't support /std:c99")
- def test_build_c99(self):
- # In public docs, we say C API is compatible with C11. However,
- # in practice we do maintain C99 compatibility in public headers.
- # Please ask the C API WG before adding a new C11-only feature.
- self.check_build('_test_c99_cext', std='c99')
-
- @support.requires_gil_enabled('incompatible with Free Threading')
- def test_build_limited(self):
- self.check_build('_test_limited_cext', limited=True)
-
- @support.requires_gil_enabled('broken for now with Free Threading')
- def test_build_limited_c11(self):
- self.check_build('_test_limited_c11_cext', limited=True, std='c11')
-
def check_build(self, extension_name, std=None, limited=False):
venv_dir = 'env'
with support.setup_venv_with_pip_setuptools(venv_dir) as python_exe:
@@ -70,6 +54,7 @@ class TestExt(unittest.TestCase):
if limited:
env['CPYTHON_TEST_LIMITED'] = '1'
env['CPYTHON_TEST_EXT_NAME'] = extension_name
+ env['TEST_INTERNAL_C_API'] = str(int(self.TEST_INTERNAL_C_API))
if support.verbose:
print('Run:', ' '.join(map(shlex.quote, cmd)))
subprocess.run(cmd, check=True, env=env)
@@ -110,5 +95,29 @@ class TestExt(unittest.TestCase):
run_cmd('Import', cmd)
+class TestPublicCAPI(BaseTests, unittest.TestCase):
+ @support.requires_gil_enabled('incompatible with Free Threading')
+ def test_build_limited(self):
+ self.check_build('_test_limited_cext', limited=True)
+
+ @support.requires_gil_enabled('broken for now with Free Threading')
+ def test_build_limited_c11(self):
+ self.check_build('_test_limited_c11_cext', limited=True, std='c11')
+
+ def test_build_c11(self):
+ self.check_build('_test_c11_cext', std='c11')
+
+ @unittest.skipIf(support.MS_WINDOWS, "MSVC doesn't support /std:c99")
+ def test_build_c99(self):
+ # In public docs, we say C API is compatible with C11. However,
+ # in practice we do maintain C99 compatibility in public headers.
+ # Please ask the C API WG before adding a new C11-only feature.
+ self.check_build('_test_c99_cext', std='c99')
+
+
+class TestInteralCAPI(BaseTests, unittest.TestCase):
+ TEST_INTERNAL_C_API = True
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_cext/extension.c b/Lib/test/test_cext/extension.c
index 64629c5a6da..4be2f24c60d 100644
--- a/Lib/test/test_cext/extension.c
+++ b/Lib/test/test_cext/extension.c
@@ -1,11 +1,31 @@
// gh-116869: Basic C test extension to check that the Python C API
// does not emit C compiler warnings.
+//
+// Test also the internal C API if the TEST_INTERNAL_C_API macro is defined.
// Always enable assertions
#undef NDEBUG
+#ifdef TEST_INTERNAL_C_API
+# define Py_BUILD_CORE_MODULE 1
+#endif
+
#include "Python.h"
+#ifdef TEST_INTERNAL_C_API
+ // gh-135906: Check for compiler warnings in the internal C API.
+ // - Cython uses pycore_frame.h.
+ // - greenlet uses pycore_frame.h, pycore_interpframe_structs.h and
+ // pycore_interpframe.h.
+# include "internal/pycore_frame.h"
+# include "internal/pycore_gc.h"
+# include "internal/pycore_interp.h"
+# include "internal/pycore_interpframe.h"
+# include "internal/pycore_interpframe_structs.h"
+# include "internal/pycore_object.h"
+# include "internal/pycore_pystate.h"
+#endif
+
#ifndef MODULE_NAME
# error "MODULE_NAME macro must be defined"
#endif
diff --git a/Lib/test/test_cext/setup.py b/Lib/test/test_cext/setup.py
index 1275282983f..587585e8086 100644
--- a/Lib/test/test_cext/setup.py
+++ b/Lib/test/test_cext/setup.py
@@ -14,10 +14,15 @@ SOURCE = 'extension.c'
if not support.MS_WINDOWS:
# C compiler flags for GCC and clang
- CFLAGS = [
+ BASE_CFLAGS = [
# The purpose of test_cext extension is to check that building a C
# extension using the Python C API does not emit C compiler warnings.
'-Werror',
+ ]
+
+ # C compiler flags for GCC and clang
+ PUBLIC_CFLAGS = [
+ *BASE_CFLAGS,
# gh-120593: Check the 'const' qualifier
'-Wcast-qual',
@@ -26,27 +31,40 @@ if not support.MS_WINDOWS:
'-pedantic-errors',
]
if not support.Py_GIL_DISABLED:
- CFLAGS.append(
+ PUBLIC_CFLAGS.append(
# gh-116869: The Python C API must be compatible with building
# with the -Werror=declaration-after-statement compiler flag.
'-Werror=declaration-after-statement',
)
+ INTERNAL_CFLAGS = [*BASE_CFLAGS]
else:
# MSVC compiler flags
- CFLAGS = [
- # Display warnings level 1 to 4
- '/W4',
+ BASE_CFLAGS = [
# Treat all compiler warnings as compiler errors
'/WX',
]
+ PUBLIC_CFLAGS = [
+ *BASE_CFLAGS,
+ # Display warnings level 1 to 4
+ '/W4',
+ ]
+ INTERNAL_CFLAGS = [
+ *BASE_CFLAGS,
+ # Display warnings level 1 to 3
+ '/W3',
+ ]
def main():
std = os.environ.get("CPYTHON_TEST_STD", "")
module_name = os.environ["CPYTHON_TEST_EXT_NAME"]
limited = bool(os.environ.get("CPYTHON_TEST_LIMITED", ""))
+ internal = bool(int(os.environ.get("TEST_INTERNAL_C_API", "0")))
- cflags = list(CFLAGS)
+ if not internal:
+ cflags = list(PUBLIC_CFLAGS)
+ else:
+ cflags = list(INTERNAL_CFLAGS)
cflags.append(f'-DMODULE_NAME={module_name}')
# Add -std=STD or /std:STD (MSVC) compiler flag
@@ -75,6 +93,9 @@ def main():
version = sys.hexversion
cflags.append(f'-DPy_LIMITED_API={version:#x}')
+ if internal:
+ cflags.append('-DTEST_INTERNAL_C_API=1')
+
# On Windows, add PCbuild\amd64\ to include and library directories
include_dirs = []
library_dirs = []
diff --git a/Lib/test/test_cppext/__init__.py b/Lib/test/test_cppext/__init__.py
index 2b7adac4bcc..2f54b3ccb35 100644
--- a/Lib/test/test_cppext/__init__.py
+++ b/Lib/test/test_cppext/__init__.py
@@ -24,7 +24,7 @@ SETUP = os.path.join(os.path.dirname(__file__), 'setup.py')
@support.requires_venv_with_pip()
@support.requires_subprocess()
@support.requires_resource('cpu')
-class TestCPPExt(unittest.TestCase):
+class BaseTests:
def test_build(self):
self.check_build('_testcppext')
@@ -34,10 +34,6 @@ class TestCPPExt(unittest.TestCase):
# Please ask the C API WG before adding a new C++11-only feature.
self.check_build('_testcpp03ext', std='c++03')
- @support.requires_gil_enabled('incompatible with Free Threading')
- def test_build_limited_cpp03(self):
- self.check_build('_test_limited_cpp03ext', std='c++03', limited=True)
-
@unittest.skipIf(support.MS_WINDOWS, "MSVC doesn't support /std:c++11")
def test_build_cpp11(self):
self.check_build('_testcpp11ext', std='c++11')
@@ -48,10 +44,6 @@ class TestCPPExt(unittest.TestCase):
def test_build_cpp14(self):
self.check_build('_testcpp14ext', std='c++14')
- @support.requires_gil_enabled('incompatible with Free Threading')
- def test_build_limited(self):
- self.check_build('_testcppext_limited', limited=True)
-
def check_build(self, extension_name, std=None, limited=False):
venv_dir = 'env'
with support.setup_venv_with_pip_setuptools(venv_dir) as python_exe:
@@ -111,5 +103,19 @@ class TestCPPExt(unittest.TestCase):
run_cmd('Import', cmd)
+class TestPublicCAPI(BaseTests, unittest.TestCase):
+ @support.requires_gil_enabled('incompatible with Free Threading')
+ def test_build_limited_cpp03(self):
+ self.check_build('_test_limited_cpp03ext', std='c++03', limited=True)
+
+ @support.requires_gil_enabled('incompatible with Free Threading')
+ def test_build_limited(self):
+ self.check_build('_testcppext_limited', limited=True)
+
+
+class TestInteralCAPI(BaseTests, unittest.TestCase):
+ TEST_INTERNAL_C_API = True
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/Lib/test/test_cppext/extension.cpp b/Lib/test/test_cppext/extension.cpp
index 5b3571b295b..1affa176088 100644
--- a/Lib/test/test_cppext/extension.cpp
+++ b/Lib/test/test_cppext/extension.cpp
@@ -6,8 +6,17 @@
// Always enable assertions
#undef NDEBUG
+#ifdef TEST_INTERNAL_C_API
+# define Py_BUILD_CORE 1
+#endif
+
#include "Python.h"
+#ifdef TEST_INTERNAL_C_API
+ // gh-135906: Check for compiler warnings in the internal C API
+# include "internal/pycore_frame.h"
+#endif
+
#ifndef MODULE_NAME
# error "MODULE_NAME macro must be defined"
#endif
diff --git a/Lib/test/test_cppext/setup.py b/Lib/test/test_cppext/setup.py
index ea1ed64bf7a..98442b106b6 100644
--- a/Lib/test/test_cppext/setup.py
+++ b/Lib/test/test_cppext/setup.py
@@ -47,6 +47,7 @@ def main():
std = os.environ.get("CPYTHON_TEST_CPP_STD", "")
module_name = os.environ["CPYTHON_TEST_EXT_NAME"]
limited = bool(os.environ.get("CPYTHON_TEST_LIMITED", ""))
+ internal = bool(int(os.environ.get("TEST_INTERNAL_C_API", "0")))
cppflags = list(CPPFLAGS)
cppflags.append(f'-DMODULE_NAME={module_name}')
@@ -82,6 +83,9 @@ def main():
version = sys.hexversion
cppflags.append(f'-DPy_LIMITED_API={version:#x}')
+ if internal:
+ cppflags.append('-DTEST_INTERNAL_C_API=1')
+
# On Windows, add PCbuild\amd64\ to include and library directories
include_dirs = []
library_dirs = []
diff --git a/Lib/test/test_dictcomps.py b/Lib/test/test_dictcomps.py
index 26b56dac503..a7a46216787 100644
--- a/Lib/test/test_dictcomps.py
+++ b/Lib/test/test_dictcomps.py
@@ -132,7 +132,7 @@ class DictComprehensionTest(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
{x:x for x in BrokenIter(init_raises=True)}
diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py
index bbc7630fa83..2dd585f246d 100644
--- a/Lib/test/test_enum.py
+++ b/Lib/test/test_enum.py
@@ -1002,12 +1002,18 @@ class _FlagTests:
self.assertIs(~(A|B), OpenAB(252))
self.assertIs(~AB_MASK, OpenAB(0))
self.assertIs(~OpenAB(0), AB_MASK)
+ self.assertIs(OpenAB(~4), OpenAB(251))
else:
self.assertIs(~A, B)
self.assertIs(~B, A)
+ self.assertIs(OpenAB(~1), B)
+ self.assertIs(OpenAB(~2), A)
self.assertIs(~(A|B), OpenAB(0))
self.assertIs(~AB_MASK, OpenAB(0))
self.assertIs(~OpenAB(0), (A|B))
+ self.assertIs(OpenAB(~3), OpenAB(0))
+ self.assertIs(OpenAB(~4), OpenAB(3))
+ self.assertIs(OpenAB(~33), B)
#
class OpenXYZ(self.enum_type):
X = 4
@@ -1031,6 +1037,9 @@ class _FlagTests:
self.assertIs(~X, Y|Z)
self.assertIs(~Y, X|Z)
self.assertIs(~Z, X|Y)
+ self.assertIs(OpenXYZ(~4), Y|Z)
+ self.assertIs(OpenXYZ(~2), X|Z)
+ self.assertIs(OpenXYZ(~1), X|Y)
self.assertIs(~(X|Y), Z)
self.assertIs(~(X|Z), Y)
self.assertIs(~(Y|Z), X)
@@ -1038,6 +1047,28 @@ class _FlagTests:
self.assertIs(~XYZ_MASK, OpenXYZ(0))
self.assertTrue(~OpenXYZ(0), (X|Y|Z))
+ def test_assigned_negative_value(self):
+ class X(self.enum_type):
+ A = auto()
+ B = auto()
+ C = A | B
+ D = ~A
+ self.assertEqual(list(X), [X.A, X.B])
+ self.assertIs(~X.A, X.B)
+ self.assertIs(X.D, X.B)
+ self.assertEqual(X.D.value, 2)
+ #
+ class Y(self.enum_type):
+ A = auto()
+ B = auto()
+ C = A | B
+ D = ~A
+ E = auto()
+ self.assertEqual(list(Y), [Y.A, Y.B, Y.E])
+ self.assertIs(~Y.A, Y.B|Y.E)
+ self.assertIs(Y.D, Y.B|Y.E)
+ self.assertEqual(Y.D.value, 6)
+
class TestPlainEnumClass(_EnumTests, _PlainOutputTests, unittest.TestCase):
enum_type = Enum
@@ -3680,6 +3711,8 @@ class OldTestFlag(unittest.TestCase):
C = 4 | B
#
self.assertTrue(SkipFlag.C in (SkipFlag.A|SkipFlag.C))
+ self.assertTrue(SkipFlag.B in SkipFlag.C)
+ self.assertIs(SkipFlag(~1), SkipFlag.B)
self.assertRaisesRegex(ValueError, 'SkipFlag.. invalid value 42', SkipFlag, 42)
#
class SkipIntFlag(enum.IntFlag):
@@ -3688,6 +3721,8 @@ class OldTestFlag(unittest.TestCase):
C = 4 | B
#
self.assertTrue(SkipIntFlag.C in (SkipIntFlag.A|SkipIntFlag.C))
+ self.assertTrue(SkipIntFlag.B in SkipIntFlag.C)
+ self.assertIs(SkipIntFlag(~1), SkipIntFlag.B|SkipIntFlag.C)
self.assertEqual(SkipIntFlag(42).value, 42)
#
class MethodHint(Flag):
@@ -4727,6 +4762,8 @@ class TestVerify(unittest.TestCase):
BLUE = 4
WHITE = -1
# no error means success
+ self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
+ self.assertEqual(Color.WHITE.value, 7)
class TestInternals(unittest.TestCase):
diff --git a/Lib/test/test_setcomps.py b/Lib/test/test_setcomps.py
index 0bb02ef11f6..6fc5bb74036 100644
--- a/Lib/test/test_setcomps.py
+++ b/Lib/test/test_setcomps.py
@@ -154,7 +154,7 @@ We also repeat each of the above scoping tests inside a function
class SetComprehensionTest(unittest.TestCase):
def test_exception_locations(self):
# The location of an exception raised from __init__ or
- # __next__ should should be the iterator expression
+ # __next__ should be the iterator expression
def init_raises():
try:
diff --git a/Lib/test/test_tempfile.py b/Lib/test/test_tempfile.py
index aeca62cf256..52b13b98cbc 100644
--- a/Lib/test/test_tempfile.py
+++ b/Lib/test/test_tempfile.py
@@ -1594,30 +1594,6 @@ if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
mock_close.assert_called()
self.assertEqual(os.listdir(dir), [])
- @os_helper.skip_unless_hardlink
- @unittest.skipUnless(tempfile._O_TMPFILE_WORKS, 'need os.O_TMPFILE')
- @unittest.skipUnless(os.path.exists('/proc/self/fd'),
- 'need /proc/self/fd')
- def test_link_tmpfile(self):
- dir = tempfile.mkdtemp()
- self.addCleanup(os_helper.rmtree, dir)
- filename = os.path.join(dir, "link")
-
- with tempfile.TemporaryFile('w', dir=dir) as tmp:
- # the flag can become False on Linux <= 3.11
- if not tempfile._O_TMPFILE_WORKS:
- self.skipTest("O_TMPFILE doesn't work")
-
- tmp.write("hello")
- tmp.flush()
- fd = tmp.fileno()
-
- os.link(f'/proc/self/fd/{fd}',
- filename,
- follow_symlinks=True)
- with open(filename) as fp:
- self.assertEqual(fp.read(), "hello")
-
# Helper for test_del_on_shutdown
class NulledModules:
diff --git a/Lib/test/test_zipfile/_path/test_path.py b/Lib/test/test_zipfile/_path/test_path.py
index 696134023a5..958a586b0dc 100644
--- a/Lib/test/test_zipfile/_path/test_path.py
+++ b/Lib/test/test_zipfile/_path/test_path.py
@@ -316,7 +316,7 @@ class TestPath(unittest.TestCase):
HUGE_ZIPFILE_NUM_ENTRIES = 2**13
def huge_zipfile(self):
- """Create a read-only zipfile with a huge number of entries entries."""
+ """Create a read-only zipfile with a huge number of entries."""
strm = io.BytesIO()
zf = zipfile.ZipFile(strm, "w")
for entry in map(str, range(self.HUGE_ZIPFILE_NUM_ENTRIES)):
diff --git a/Misc/NEWS.d/3.10.0a3.rst b/Misc/NEWS.d/3.10.0a3.rst
index 3f3fb7ec599..6cf3db3eb43 100644
--- a/Misc/NEWS.d/3.10.0a3.rst
+++ b/Misc/NEWS.d/3.10.0a3.rst
@@ -394,7 +394,7 @@ Removed the ``formatter`` module, which was deprecated in Python 3.4. It is
somewhat obsolete, little used, and not tested. It was originally scheduled
to be removed in Python 3.6, but such removals were delayed until after
Python 2.7 EOL. Existing users should copy whatever classes they use into
-their code. Patch by Donghee Na and and Terry J. Reedy.
+their code. Patch by Donghee Na and Terry J. Reedy.
..
diff --git a/Misc/NEWS.d/3.12.0a5.rst b/Misc/NEWS.d/3.12.0a5.rst
index 5dc443bb55b..b73bbfbfdc4 100644
--- a/Misc/NEWS.d/3.12.0a5.rst
+++ b/Misc/NEWS.d/3.12.0a5.rst
@@ -253,7 +253,7 @@ Adapt the ``_elementtree`` extension module to multi-phase init
.. section: Library
Avoid potential unexpected ``freeaddrinfo`` call (double free) in
-:mod:`socket` when when a libc ``getaddrinfo()`` implementation leaves
+:mod:`socket` when a libc ``getaddrinfo()`` implementation leaves
garbage in an output pointer when returning an error. Original patch by
Sergey G. Brester.
diff --git a/Misc/NEWS.d/3.13.0a1.rst b/Misc/NEWS.d/3.13.0a1.rst
index 0a93cbcea0f..0741eab4eca 100644
--- a/Misc/NEWS.d/3.13.0a1.rst
+++ b/Misc/NEWS.d/3.13.0a1.rst
@@ -153,7 +153,7 @@ about a 10% improvement.
.. section: Core and Builtins
Guard ``assert(tstate->thread_id > 0)`` with ``#ifndef HAVE_PTHREAD_STUBS``.
-This allows for for pydebug builds to work under WASI which (currently)
+This allows for pydebug builds to work under WASI which (currently)
lacks thread support.
..
diff --git a/Misc/NEWS.d/3.14.0b1.rst b/Misc/NEWS.d/3.14.0b1.rst
index 5847dea7d5e..041fbaf2051 100644
--- a/Misc/NEWS.d/3.14.0b1.rst
+++ b/Misc/NEWS.d/3.14.0b1.rst
@@ -1051,7 +1051,7 @@ warning filtering state if the :data:`sys.flags.context_aware_warnings` flag
is set to true. This makes using the context manager thread-safe in
multi-threaded programs. The flag is true by default in free-threaded
builds and is otherwise false. The value of the flag can be overridden by
-the the :option:`-X context_aware_warnings <-X>` command-line option or by
+the :option:`-X context_aware_warnings <-X>` command-line option or by
the :envvar:`PYTHON_CONTEXT_AWARE_WARNINGS` environment variable.
..
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2025-07-11-13-45-48.gh-issue-136541.uZ_-Ju.rst b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-11-13-45-48.gh-issue-136541.uZ_-Ju.rst
new file mode 100644
index 00000000000..af9b94ad061
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2025-07-11-13-45-48.gh-issue-136541.uZ_-Ju.rst
@@ -0,0 +1,3 @@
+Fix some issues with the perf trampolines on x86-64 and aarch64. The
+trampolines were not being generated correctly for some cases, which could
+lead to the perf integration not working correctly. Patch by Pablo Galindo.
diff --git a/Misc/NEWS.d/next/Library/2023-07-05-14-34-10.gh-issue-105497.HU5u89.rst b/Misc/NEWS.d/next/Library/2023-07-05-14-34-10.gh-issue-105497.HU5u89.rst
new file mode 100644
index 00000000000..f4f2db08f73
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2023-07-05-14-34-10.gh-issue-105497.HU5u89.rst
@@ -0,0 +1 @@
+Fix flag mask inversion when unnamed flags exist.
diff --git a/Misc/NEWS.d/next/Library/2025-04-08-07-25-10.gh-issue-107583.JGfbhq.rst b/Misc/NEWS.d/next/Library/2025-04-08-07-25-10.gh-issue-107583.JGfbhq.rst
new file mode 100644
index 00000000000..42356126273
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-04-08-07-25-10.gh-issue-107583.JGfbhq.rst
@@ -0,0 +1,4 @@
+Fix :class:`!Flag` inversion when flag set has missing values
+(:class:`!IntFlag` still flips all bits); fix negative assigned values
+during flag creation (both :class:`!Flag` and :class:`!IntFlag` ignore
+missing values).
diff --git a/Misc/NEWS.d/next/Library/2025-07-08-20-58-01.gh-issue-136434.uuJsjS.rst b/Misc/NEWS.d/next/Library/2025-07-08-20-58-01.gh-issue-136434.uuJsjS.rst
new file mode 100644
index 00000000000..951f57100b6
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2025-07-08-20-58-01.gh-issue-136434.uuJsjS.rst
@@ -0,0 +1,2 @@
+Fix docs generation of ``UnboundItem`` in :mod:`concurrent.interpreters`
+when running with :option:`-OO`.
diff --git a/Misc/NEWS.d/next/Tests/2025-06-11-16-52-49.gh-issue-135401.ccMXmL.rst b/Misc/NEWS.d/next/Tests/2025-06-11-16-52-49.gh-issue-135401.ccMXmL.rst
new file mode 100644
index 00000000000..6885fba30db
--- /dev/null
+++ b/Misc/NEWS.d/next/Tests/2025-06-11-16-52-49.gh-issue-135401.ccMXmL.rst
@@ -0,0 +1 @@
+Add a new GitHub CI job to test the :mod:`ssl` module with `AWS-LC <https://github.com/aws/aws-lc>`_ as the backing cryptography and TLS library.
diff --git a/Modules/_asynciomodule.c b/Modules/_asynciomodule.c
index 5f9181395c4..99408e60721 100644
--- a/Modules/_asynciomodule.c
+++ b/Modules/_asynciomodule.c
@@ -821,7 +821,7 @@ future_add_done_callback(asyncio_state *state, FutureObj *fut, PyObject *arg,
Invariants:
* callbacks != NULL:
- There are some callbacks in in the list. Just
+ There are some callbacks in the list. Just
add the new callback to it.
* callbacks == NULL and callback0 == NULL:
diff --git a/Modules/_testbuffer.c b/Modules/_testbuffer.c
index 7fc4d61db29..d2e61e9d6ac 100644
--- a/Modules/_testbuffer.c
+++ b/Modules/_testbuffer.c
@@ -1855,8 +1855,7 @@ ndarray_subscript(PyObject *op, PyObject *key)
type_error:
PyErr_Format(PyExc_TypeError,
- "cannot index memory using \"%.200s\"",
- Py_TYPE(key)->tp_name);
+ "cannot index memory using \"%T\"", key);
err_occurred:
Py_DECREF(nd);
return NULL;
diff --git a/Modules/_testcapi/monitoring.c b/Modules/_testcapi/monitoring.c
index 08a2055c51b..e041943492d 100644
--- a/Modules/_testcapi/monitoring.c
+++ b/Modules/_testcapi/monitoring.c
@@ -109,7 +109,7 @@ static PyTypeObject PyCodeLike_Type = {
};
#define RAISE_UNLESS_CODELIKE(v) if (!Py_IS_TYPE((v), &PyCodeLike_Type)) { \
- PyErr_Format(PyExc_TypeError, "expected a code-like, got %s", Py_TYPE(v)->tp_name); \
+ PyErr_Format(PyExc_TypeError, "expected a code-like, got %T", v); \
return NULL; \
}
diff --git a/Modules/_testcapi/time.c b/Modules/_testcapi/time.c
index 464cf5c3125..4ca6ff587b9 100644
--- a/Modules/_testcapi/time.c
+++ b/Modules/_testcapi/time.c
@@ -5,8 +5,7 @@ static int
pytime_from_nanoseconds(PyTime_t *tp, PyObject *obj)
{
if (!PyLong_Check(obj)) {
- PyErr_Format(PyExc_TypeError, "expect int, got %s",
- Py_TYPE(obj)->tp_name);
+ PyErr_Format(PyExc_TypeError, "expect int, got %T", obj);
return -1;
}
diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c
index 71fffedee14..334f2a53041 100644
--- a/Modules/_testcapimodule.c
+++ b/Modules/_testcapimodule.c
@@ -515,8 +515,7 @@ test_thread_state(PyObject *self, PyObject *args)
return NULL;
if (!PyCallable_Check(fn)) {
- PyErr_Format(PyExc_TypeError, "'%s' object is not callable",
- Py_TYPE(fn)->tp_name);
+ PyErr_Format(PyExc_TypeError, "'%T' object is not callable", fn);
return NULL;
}
diff --git a/Modules/_testinternalcapi.c b/Modules/_testinternalcapi.c
index 8027f0015c7..533e7dd3a7e 100644
--- a/Modules/_testinternalcapi.c
+++ b/Modules/_testinternalcapi.c
@@ -2207,8 +2207,7 @@ get_code(PyObject *obj)
return (PyCodeObject *)PyFunction_GetCode(obj);
}
return (PyCodeObject *)PyErr_Format(
- PyExc_TypeError, "expected function or code object, got %s",
- Py_TYPE(obj)->tp_name);
+ PyExc_TypeError, "expected function or code object, got %T", obj);
}
static PyObject *
diff --git a/Objects/typeobject.c b/Objects/typeobject.c
index e84278d13c3..379c4d0467c 100644
--- a/Objects/typeobject.c
+++ b/Objects/typeobject.c
@@ -225,7 +225,7 @@ type_from_ref(PyObject *ref)
}
-/* helpers for for static builtin types */
+/* helpers for static builtin types */
#ifndef NDEBUG
static inline int
diff --git a/Python/asm_trampoline.S b/Python/asm_trampoline.S
index 616752459ba..a14e68c0e81 100644
--- a/Python/asm_trampoline.S
+++ b/Python/asm_trampoline.S
@@ -12,9 +12,10 @@ _Py_trampoline_func_start:
#if defined(__CET__) && (__CET__ & 1)
endbr64
#endif
- sub $8, %rsp
- call *%rcx
- add $8, %rsp
+ push %rbp
+ mov %rsp, %rbp
+ call *%rcx
+ pop %rbp
ret
#endif // __x86_64__
#if defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
diff --git a/Python/perf_jit_trampoline.c b/Python/perf_jit_trampoline.c
index 2ca18c23593..469882d9b2f 100644
--- a/Python/perf_jit_trampoline.c
+++ b/Python/perf_jit_trampoline.c
@@ -97,10 +97,9 @@
* /tmp/jitted-PID-0.so: [headers][.text][unwind_info][padding]
* /tmp/jitted-PID-1.so: [headers][.text][unwind_info][padding]
*
- * The padding size (0x100) is chosen to accommodate typical unwind info sizes
- * while maintaining 16-byte alignment requirements.
+ * The padding size is now calculated automatically during initialization
+ * based on the actual unwind information requirements.
*/
-#define PERF_JIT_CODE_PADDING 0x100
/* Convenient access to the global trampoline API state */
#define trampoline_api _PyRuntime.ceval.perf.trampoline_api
@@ -401,10 +400,12 @@ enum {
DWRF_CFA_nop = 0x0, // No operation
DWRF_CFA_offset_extended = 0x5, // Extended offset instruction
DWRF_CFA_def_cfa = 0xc, // Define CFA rule
+ DWRF_CFA_def_cfa_register = 0xd, // Define CFA register
DWRF_CFA_def_cfa_offset = 0xe, // Define CFA offset
DWRF_CFA_offset_extended_sf = 0x11, // Extended signed offset
DWRF_CFA_advance_loc = 0x40, // Advance location counter
- DWRF_CFA_offset = 0x80 // Simple offset instruction
+ DWRF_CFA_offset = 0x80, // Simple offset instruction
+ DWRF_CFA_restore = 0xc0 // Restore register
};
/* DWARF Exception Handling pointer encodings */
@@ -519,6 +520,7 @@ typedef struct ELFObjectContext {
uint8_t* p; // Current write position in buffer
uint8_t* startp; // Start of buffer (for offset calculations)
uint8_t* eh_frame_p; // Start of EH frame data (for relative offsets)
+ uint8_t* fde_p; // Start of FDE data (for PC-relative calculations)
uint32_t code_size; // Size of the code being described
} ELFObjectContext;
@@ -643,6 +645,8 @@ static void elfctx_append_uleb128(ELFObjectContext* ctx, uint32_t v) {
// DWARF EH FRAME GENERATION
// =============================================================================
+static void elf_init_ehframe(ELFObjectContext* ctx);
+
/*
* Initialize DWARF .eh_frame section for a code region
*
@@ -657,6 +661,23 @@ static void elfctx_append_uleb128(ELFObjectContext* ctx, uint32_t v) {
* Args:
* ctx: ELF object context containing code size and buffer pointers
*/
+static size_t calculate_eh_frame_size(void) {
+ /* Calculate the EH frame size for the trampoline function */
+ extern void *_Py_trampoline_func_start;
+ extern void *_Py_trampoline_func_end;
+
+ size_t code_size = (char*)&_Py_trampoline_func_end - (char*)&_Py_trampoline_func_start;
+
+ ELFObjectContext ctx;
+ char buffer[1024]; // Buffer for DWARF data (1KB should be sufficient)
+ ctx.code_size = code_size;
+ ctx.startp = ctx.p = (uint8_t*)buffer;
+ ctx.fde_p = NULL;
+
+ elf_init_ehframe(&ctx);
+ return ctx.p - ctx.startp;
+}
+
static void elf_init_ehframe(ELFObjectContext* ctx) {
uint8_t* p = ctx->p;
uint8_t* framep = p; // Remember start of frame data
@@ -784,7 +805,7 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
*
* DWRF_SECTION(FDE,
* DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (relative from here)
- * DWRF_U32(-0x30); // Initial PC-relative location of the code
+ * DWRF_U32(pc_relative_offset); // PC-relative location of the code (calculated dynamically)
* DWRF_U32(ctx->code_size); // Code range covered by this FDE
* DWRF_U8(0); // Augmentation data length (none)
*
@@ -830,19 +851,31 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
DWRF_U32(0); // CIE ID (0 indicates this is a CIE)
DWRF_U8(DWRF_CIE_VERSION); // CIE version (1)
DWRF_STR("zR"); // Augmentation string ("zR" = has LSDA)
- DWRF_UV(1); // Code alignment factor
+#ifdef __x86_64__
+ DWRF_UV(1); // Code alignment factor (x86_64: 1 byte)
+#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
+ DWRF_UV(4); // Code alignment factor (AArch64: 4 bytes per instruction)
+#endif
DWRF_SV(-(int64_t)sizeof(uintptr_t)); // Data alignment factor (negative)
DWRF_U8(DWRF_REG_RA); // Return address register number
DWRF_UV(1); // Augmentation data length
DWRF_U8(DWRF_EH_PE_pcrel | DWRF_EH_PE_sdata4); // FDE pointer encoding
/* Initial CFI instructions - describe default calling convention */
+#ifdef __x86_64__
+ /* x86_64 initial CFI state */
DWRF_U8(DWRF_CFA_def_cfa); // Define CFA (Call Frame Address)
DWRF_UV(DWRF_REG_SP); // CFA = SP register
DWRF_UV(sizeof(uintptr_t)); // CFA = SP + pointer_size
DWRF_U8(DWRF_CFA_offset|DWRF_REG_RA); // Return address is saved
DWRF_UV(1); // At offset 1 from CFA
-
+#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
+ /* AArch64 initial CFI state */
+ DWRF_U8(DWRF_CFA_def_cfa); // Define CFA (Call Frame Address)
+ DWRF_UV(DWRF_REG_SP); // CFA = SP register
+ DWRF_UV(0); // CFA = SP + 0 (AArch64 starts with offset 0)
+ // No initial register saves in AArch64 CIE
+#endif
DWRF_ALIGNNOP(sizeof(uintptr_t)); // Align to pointer boundary
)
@@ -853,11 +886,15 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
*
* The FDE describes unwinding information specific to this function.
* It references the CIE and provides function-specific CFI instructions.
+ *
+ * The PC-relative offset is calculated after the entire EH frame is built
+ * to ensure accurate positioning relative to the synthesized DSO layout.
*/
DWRF_SECTION(FDE,
DWRF_U32((uint32_t)(p - framep)); // Offset to CIE (backwards reference)
- DWRF_U32(-0x30); // Machine code offset relative to .text
- DWRF_U32(ctx->code_size); // Address range covered by this FDE (code lenght)
+ ctx->fde_p = p; // Remember where PC offset field is located for later calculation
+ DWRF_U32(0); // Placeholder for PC-relative offset (calculated at end of elf_init_ehframe)
+ DWRF_U32(ctx->code_size); // Address range covered by this FDE (code length)
DWRF_U8(0); // Augmentation data length (none)
/*
@@ -868,32 +905,36 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
* conventions and register usage patterns.
*/
#ifdef __x86_64__
- /* x86_64 calling convention unwinding rules */
+ /* x86_64 calling convention unwinding rules with frame pointer */
# if defined(__CET__) && (__CET__ & 1)
- DWRF_U8(DWRF_CFA_advance_loc | 8); // Advance location by 8 bytes when CET protection is enabled
-# else
- DWRF_U8(DWRF_CFA_advance_loc | 4); // Advance location by 4 bytes
+ DWRF_U8(DWRF_CFA_advance_loc | 4); // Advance past endbr64 (4 bytes)
# endif
- DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
+ DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance past push %rbp (1 byte)
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // def_cfa_offset 16
DWRF_UV(16); // New offset: SP + 16
- DWRF_U8(DWRF_CFA_advance_loc | 6); // Advance location by 6 bytes
- DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_BP); // offset r6 at cfa-16
+ DWRF_UV(2); // Offset factor: 2 * 8 = 16 bytes
+ DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance past mov %rsp,%rbp (3 bytes)
+ DWRF_U8(DWRF_CFA_def_cfa_register); // def_cfa_register r6
+ DWRF_UV(DWRF_REG_BP); // Use base pointer register
+ DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance past call *%rcx (2 bytes) + pop %rbp (1 byte) = 3
+ DWRF_U8(DWRF_CFA_def_cfa); // def_cfa r7 ofs 8
+ DWRF_UV(DWRF_REG_SP); // Use stack pointer register
DWRF_UV(8); // New offset: SP + 8
#elif defined(__aarch64__) && defined(__AARCH64EL__) && !defined(__ILP32__)
/* AArch64 calling convention unwinding rules */
- DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance location by 1 instruction (stp x29, x30)
- DWRF_U8(DWRF_CFA_def_cfa_offset); // Redefine CFA offset
- DWRF_UV(16); // CFA = SP + 16 (stack pointer after push)
- DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Frame pointer (x29) saved
- DWRF_UV(2); // At offset 2 from CFA (2 * 8 = 16 bytes)
- DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Link register (x30) saved
- DWRF_UV(1); // At offset 1 from CFA (1 * 8 = 8 bytes)
- DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance by 3 instructions (mov x16, x3; mov x29, sp; ldp...)
- DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // Restore frame pointer (x29)
- DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // Restore link register (x30)
- DWRF_U8(DWRF_CFA_def_cfa_offset); // Final CFA adjustment
- DWRF_UV(0); // CFA = SP + 0 (stack restored)
-
+ DWRF_U8(DWRF_CFA_advance_loc | 1); // Advance by 1 instruction (4 bytes)
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP + 16
+ DWRF_UV(16); // Stack pointer moved by 16 bytes
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_FP); // x29 (frame pointer) saved
+ DWRF_UV(2); // At CFA-16 (2 * 8 = 16 bytes from CFA)
+ DWRF_U8(DWRF_CFA_offset | DWRF_REG_RA); // x30 (link register) saved
+ DWRF_UV(1); // At CFA-8 (1 * 8 = 8 bytes from CFA)
+ DWRF_U8(DWRF_CFA_advance_loc | 3); // Advance by 3 instructions (12 bytes)
+ DWRF_U8(DWRF_CFA_restore | DWRF_REG_RA); // Restore x30 - NO DWRF_UV() after this!
+ DWRF_U8(DWRF_CFA_restore | DWRF_REG_FP); // Restore x29 - NO DWRF_UV() after this!
+ DWRF_U8(DWRF_CFA_def_cfa_offset); // CFA = SP + 0 (stack restored)
+ DWRF_UV(0); // Back to original stack position
#else
# error "Unsupported target architecture"
#endif
@@ -902,6 +943,58 @@ static void elf_init_ehframe(ELFObjectContext* ctx) {
)
ctx->p = p; // Update context pointer to end of generated data
+
+ /* Calculate and update the PC-relative offset in the FDE
+ *
+ * When perf processes the jitdump, it creates a synthesized DSO with this layout:
+ *
+ * Synthesized DSO Memory Layout:
+ * ┌─────────────────────────────────────────────────────────────┐ < code_start
+ * │ Code Section │
+ * │ (round_up(code_size, 8) bytes) │
+ * ├─────────────────────────────────────────────────────────────┤ < start of EH frame data
+ * │ EH Frame Data │
+ * │ ┌─────────────────────────────────────────────────────┐ │
+ * │ │ CIE data │ │
+ * │ └─────────────────────────────────────────────────────┘ │
+ * │ ┌─────────────────────────────────────────────────────┐ │
+ * │ │ FDE Header: │ │
+ * │ │ - CIE offset (4 bytes) │ │
+ * │ │ - PC offset (4 bytes) <─ fde_offset_in_frame ─────┼────┼─> points to code_start
+ * │ │ - address range (4 bytes) │ │ (this specific field)
+ * │ │ CFI Instructions... │ │
+ * │ └─────────────────────────────────────────────────────┘ │
+ * ├─────────────────────────────────────────────────────────────┤ < reference_point
+ * │ EhFrameHeader │
+ * │ (navigation metadata) │
+ * └─────────────────────────────────────────────────────────────┘
+ *
+ * The PC offset field in the FDE must contain the distance from itself to code_start:
+ *
+ * distance = code_start - fde_pc_field
+ *
+ * Where:
+ * fde_pc_field_location = reference_point - eh_frame_size + fde_offset_in_frame
+ * code_start_location = reference_point - eh_frame_size - round_up(code_size, 8)
+ *
+ * Therefore:
+ * distance = code_start_location - fde_pc_field_location
+ * = (ref - eh_frame_size - rounded_code_size) - (ref - eh_frame_size + fde_offset_in_frame)
+ * = -rounded_code_size - fde_offset_in_frame
+ * = -(round_up(code_size, 8) + fde_offset_in_frame)
+ *
+ * Note: fde_offset_in_frame is the offset from EH frame start to the PC offset field,
+ *
+ */
+ if (ctx->fde_p != NULL) {
+ int32_t fde_offset_in_frame = (ctx->fde_p - ctx->startp);
+ int32_t rounded_code_size = round_up(ctx->code_size, 8);
+ int32_t pc_relative_offset = -(rounded_code_size + fde_offset_in_frame);
+
+
+ // Update the PC-relative offset in the FDE
+ *(int32_t*)ctx->fde_p = pc_relative_offset;
+ }
}
// =============================================================================
@@ -1002,8 +1095,11 @@ static void* perf_map_jit_init(void) {
/* Initialize code ID counter */
perf_jit_map_state.code_id = 0;
- /* Configure trampoline API with padding information */
- trampoline_api.code_padding = PERF_JIT_CODE_PADDING;
+ /* Calculate padding size based on actual unwind info requirements */
+ size_t eh_frame_size = calculate_eh_frame_size();
+ size_t unwind_data_size = sizeof(EhFrameHeader) + eh_frame_size;
+ trampoline_api.code_padding = round_up(unwind_data_size, 16);
+ trampoline_api.code_alignment = 32;
return &perf_jit_map_state;
}
@@ -1092,6 +1188,7 @@ static void perf_map_jit_write_entry(void *state, const void *code_addr,
char buffer[1024]; // Buffer for DWARF data (1KB should be sufficient)
ctx.code_size = code_size;
ctx.startp = ctx.p = (uint8_t*)buffer;
+ ctx.fde_p = NULL; // Initialize to NULL, will be set when FDE is written
/* Generate EH frame (Exception Handling frame) data */
elf_init_ehframe(&ctx);
@@ -1110,7 +1207,7 @@ static void perf_map_jit_write_entry(void *state, const void *code_addr,
ev2.unwind_data_size = sizeof(EhFrameHeader) + eh_frame_size;
/* Verify we don't exceed our padding budget */
- assert(ev2.unwind_data_size <= PERF_JIT_CODE_PADDING);
+ assert(ev2.unwind_data_size <= (uint64_t)trampoline_api.code_padding);
ev2.eh_frame_hdr_size = sizeof(EhFrameHeader);
ev2.mapped_size = round_up(ev2.unwind_data_size, 16); // 16-byte alignment
@@ -1262,4 +1359,4 @@ _PyPerf_Callbacks _Py_perfmap_jit_callbacks = {
&perf_map_jit_fini, // Cleanup function
};
-#endif /* PY_HAVE_PERF_TRAMPOLINE */ \ No newline at end of file
+#endif /* PY_HAVE_PERF_TRAMPOLINE */
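The FDE patch-up above reduces to one expression, pc_relative_offset = -(round_up(code_size, 8) + fde_offset_in_frame). A worked numeric sketch of that arithmetic (the sizes are invented; round_up is the usual power-of-two rounding idiom):

    def round_up(value, align):
        # round value up to the next multiple of align (align is a power of two)
        return (value + align - 1) & ~(align - 1)

    code_size = 30                 # hypothetical trampoline size in bytes
    fde_offset_in_frame = 28       # hypothetical offset of the FDE PC field
                                   # from the start of the EH frame data

    pc_relative_offset = -(round_up(code_size, 8) + fde_offset_in_frame)
    print(pc_relative_offset)      # -60: the PC field points back past the
                                   # rounded code section to code_start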
diff --git a/Python/perf_trampoline.c b/Python/perf_trampoline.c
index 996e54b82b6..a2da3c7d56d 100644
--- a/Python/perf_trampoline.c
+++ b/Python/perf_trampoline.c
@@ -230,6 +230,7 @@ perf_map_init_state(void)
{
PyUnstable_PerfMapState_Init();
trampoline_api.code_padding = 0;
+ trampoline_api.code_alignment = 32;
perf_trampoline_type = PERF_TRAMPOLINE_TYPE_MAP;
return NULL;
}
@@ -291,7 +292,9 @@ new_code_arena(void)
void *start = &_Py_trampoline_func_start;
void *end = &_Py_trampoline_func_end;
size_t code_size = end - start;
- size_t chunk_size = round_up(code_size + trampoline_api.code_padding, 16);
+ size_t unaligned_size = code_size + trampoline_api.code_padding;
+ size_t chunk_size = round_up(unaligned_size, trampoline_api.code_alignment);
+ assert(chunk_size % trampoline_api.code_alignment == 0);
// TODO: Check the effect of alignment of the code chunks. Initial investigation
// showed that this has no effect on performance in x86-64 or aarch64 and the current
// version has the advantage that the unwinder in GDB can unwind across JIT-ed code.
@@ -356,7 +359,9 @@ static inline py_trampoline
code_arena_new_code(code_arena_t *code_arena)
{
py_trampoline trampoline = (py_trampoline)code_arena->current_addr;
- size_t total_code_size = round_up(code_arena->code_size + trampoline_api.code_padding, 16);
+ size_t total_code_size = round_up(code_arena->code_size + trampoline_api.code_padding,
+ trampoline_api.code_alignment);
+ assert(total_code_size % trampoline_api.code_alignment == 0);
code_arena->size_left -= total_code_size;
code_arena->current_addr += total_code_size;
return trampoline;
@@ -489,9 +494,6 @@ _PyPerfTrampoline_Init(int activate)
}
else {
_PyInterpreterState_SetEvalFrameFunc(tstate->interp, py_trampoline_evaluator);
- if (new_code_arena() < 0) {
- return -1;
- }
extra_code_index = _PyEval_RequestCodeExtraIndex(NULL);
if (extra_code_index == -1) {
return -1;
@@ -499,6 +501,9 @@ _PyPerfTrampoline_Init(int activate)
if (trampoline_api.state == NULL && trampoline_api.init_state != NULL) {
trampoline_api.state = trampoline_api.init_state();
}
+ if (new_code_arena() < 0) {
+ return -1;
+ }
perf_status = PERF_STATUS_OK;
}
#endif
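A quick check of the sizing change above: each trampoline slot is now rounded up to code_alignment instead of a fixed 16 bytes, so the arena stays alignment-correct. A sketch with invented sizes:

    def round_up(value, align):
        return (value + align - 1) & ~(align - 1)

    code_size, code_padding, code_alignment = 30, 112, 32   # hypothetical values
    chunk_size = round_up(code_size + code_padding, code_alignment)
    print(chunk_size)                        # 160, a multiple of code_alignment
    assert chunk_size % code_alignment == 0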
diff --git a/Python/preconfig.c b/Python/preconfig.c
index 5b26c75de8b..67b2d2f2dc1 100644
--- a/Python/preconfig.c
+++ b/Python/preconfig.c
@@ -700,7 +700,7 @@ preconfig_init_coerce_c_locale(PyPreConfig *config)
/* Test if coerce_c_locale equals to -1 or equals to 1:
PYTHONCOERCECLOCALE=1 doesn't imply that the C locale is always coerced.
- It is only coerced if if the LC_CTYPE locale is "C". */
+ It is only coerced if the LC_CTYPE locale is "C". */
if (config->coerce_c_locale < 0 || config->coerce_c_locale == 1) {
/* The C locale enables the C locale coercion (PEP 538) */
if (_Py_LegacyLocaleDetected(0)) {
diff --git a/Tools/ssl/multissltests.py b/Tools/ssl/multissltests.py
index b1a5df91901..f4c8fde8346 100755
--- a/Tools/ssl/multissltests.py
+++ b/Tools/ssl/multissltests.py
@@ -1,12 +1,12 @@
#!./python
-"""Run Python tests against multiple installations of OpenSSL and LibreSSL
+"""Run Python tests against multiple installations of cryptography libraries
The script
- (1) downloads OpenSSL / LibreSSL tar bundle
+ (1) downloads the tar bundle
(2) extracts it to ./src
- (3) compiles OpenSSL / LibreSSL
- (4) installs OpenSSL / LibreSSL into ../multissl/$LIB/$VERSION/
+ (3) compiles the relevant library
+ (4) installs that library into ../multissl/$LIB/$VERSION/
(5) forces a recompilation of Python modules using the
header and library files from ../multissl/$LIB/$VERSION/
(6) runs Python's test suite
@@ -61,6 +61,10 @@ LIBRESSL_OLD_VERSIONS = [
LIBRESSL_RECENT_VERSIONS = [
]
+AWSLC_RECENT_VERSIONS = [
+ "1.55.0",
+]
+
# store files in ../multissl
HERE = os.path.dirname(os.path.abspath(__file__))
PYTHONROOT = os.path.abspath(os.path.join(HERE, '..', '..'))
@@ -70,9 +74,9 @@ MULTISSL_DIR = os.path.abspath(os.path.join(PYTHONROOT, '..', 'multissl'))
parser = argparse.ArgumentParser(
prog='multissl',
description=(
- "Run CPython tests with multiple OpenSSL and LibreSSL "
+ "Run CPython tests with multiple cryptography libraries"
"versions."
- )
+ ),
)
parser.add_argument(
'--debug',
@@ -103,6 +107,14 @@ parser.add_argument(
).format(LIBRESSL_RECENT_VERSIONS, LIBRESSL_OLD_VERSIONS)
)
parser.add_argument(
+ '--awslc',
+ nargs='+',
+ default=(),
+ help=(
+ "AWS-LC versions, defaults to '{}' if no crypto library versions are given."
+ ).format(AWSLC_RECENT_VERSIONS)
+)
+parser.add_argument(
'--tests',
nargs='*',
default=(),
@@ -111,7 +123,7 @@ parser.add_argument(
parser.add_argument(
'--base-directory',
default=MULTISSL_DIR,
- help="Base directory for OpenSSL / LibreSSL sources and builds."
+ help="Base directory for crypto library sources and builds."
)
parser.add_argument(
'--no-network',
@@ -124,8 +136,8 @@ parser.add_argument(
choices=['library', 'modules', 'tests'],
default='tests',
help=(
- "Which steps to perform. 'library' downloads and compiles OpenSSL "
- "or LibreSSL. 'module' also compiles Python modules. 'tests' builds "
+ "Which steps to perform. 'library' downloads and compiles a crypto"
+ "library. 'module' also compiles Python modules. 'tests' builds "
"all and runs the test suite."
)
)
@@ -453,6 +465,34 @@ class BuildLibreSSL(AbstractBuilder):
build_template = "libressl-{}"
+class BuildAWSLC(AbstractBuilder):
+ library = "AWS-LC"
+ url_templates = (
+ "https://github.com/aws/aws-lc/archive/refs/tags/v{v}.tar.gz",
+ )
+ src_template = "aws-lc-{}.tar.gz"
+ build_template = "aws-lc-{}"
+
+ def _build_src(self, config_args=()):
+ cwd = self.build_dir
+ log.info("Running build in {}".format(cwd))
+ env = os.environ.copy()
+ env["LD_RUN_PATH"] = self.lib_dir # set rpath
+ if self.system:
+ env['SYSTEM'] = self.system
+ cmd = [
+ "cmake",
+ "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
+ "-DCMAKE_PREFIX_PATH={}".format(self.install_dir),
+ "-DCMAKE_INSTALL_PREFIX={}".format(self.install_dir),
+ "-DBUILD_SHARED_LIBS=ON",
+ "-DBUILD_TESTING=OFF",
+ "-DFIPS=OFF",
+ ]
+ self._subprocess_call(cmd, cwd=cwd, env=env)
+ self._subprocess_call(["make", "-j{}".format(self.jobs)], cwd=cwd, env=env)
+
+
def configure_make():
if not os.path.isfile('Makefile'):
log.info('Running ./configure')
@@ -467,9 +507,10 @@ def configure_make():
def main():
args = parser.parse_args()
- if not args.openssl and not args.libressl:
+ if not args.openssl and not args.libressl and not args.awslc:
args.openssl = list(OPENSSL_RECENT_VERSIONS)
args.libressl = list(LIBRESSL_RECENT_VERSIONS)
+ args.awslc = list(AWSLC_RECENT_VERSIONS)
if not args.disable_ancient:
args.openssl.extend(OPENSSL_OLD_VERSIONS)
args.libressl.extend(LIBRESSL_OLD_VERSIONS)
@@ -496,22 +537,15 @@ def main():
# download and register builder
builds = []
-
- for version in args.openssl:
- build = BuildOpenSSL(
- version,
- args
- )
- build.install()
- builds.append(build)
-
- for version in args.libressl:
- build = BuildLibreSSL(
- version,
- args
- )
- build.install()
- builds.append(build)
+ for build_class, versions in [
+ (BuildOpenSSL, args.openssl),
+ (BuildLibreSSL, args.libressl),
+ (BuildAWSLC, args.awslc),
+ ]:
+ for version in versions:
+ build = build_class(version, args)
+ build.install()
+ builds.append(build)
if args.steps in {'modules', 'tests'}:
for build in builds:
@@ -539,7 +573,7 @@ def main():
else:
print('Executed all SSL tests.')
- print('OpenSSL / LibreSSL versions:')
+ print('OpenSSL / LibreSSL / AWS-LC versions:')
for build in builds:
print(" * {0.library} {0.version}".format(build))
diff --git a/configure b/configure
index 94a0b810333..4292f33ce21 100755
--- a/configure
+++ b/configure
@@ -30848,8 +30848,8 @@ main (void)
OBJ_nid2sn(NID_md5);
OBJ_nid2sn(NID_sha1);
+ OBJ_nid2sn(NID_sha512);
OBJ_nid2sn(NID_sha3_512);
- OBJ_nid2sn(NID_blake2b512);
EVP_PBE_scrypt(NULL, 0, NULL, 0, 2, 8, 1, 0, NULL, 0);
;
diff --git a/configure.ac b/configure.ac
index ade71bc011e..cc7a6e9397d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -7529,8 +7529,8 @@ WITH_SAVE_ENV([
], [
OBJ_nid2sn(NID_md5);
OBJ_nid2sn(NID_sha1);
+ OBJ_nid2sn(NID_sha512);
OBJ_nid2sn(NID_sha3_512);
- OBJ_nid2sn(NID_blake2b512);
EVP_PBE_scrypt(NULL, 0, NULL, 0, 2, 8, 1, 0, NULL, 0);
])], [ac_cv_working_openssl_hashlib=yes], [ac_cv_working_openssl_hashlib=no])
])